instrumentation: add next-share/ next-share-m32
author Razvan Deaconescu <razvan.deaconescu@cs.pub.ro>
Sun, 19 Sep 2010 08:28:53 +0000 (11:28 +0300)
committer Razvan Deaconescu <razvan.deaconescu@cs.pub.ro>
Sun, 19 Sep 2010 08:28:53 +0000 (11:28 +0300)
Next-Share M32 release (also currently - 19.09.2010 - in trunk/)

721 files changed:
instrumentation/next-share/BaseLib/Category/Category.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Category/FamilyFilter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Category/TestCategory.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Category/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Category/category.conf [new file with mode: 0644]
instrumentation/next-share/BaseLib/Category/init_category.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/API.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/DownloadImpl.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/DownloadRuntimeConfig.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/LaunchManyCore.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/SessionRuntimeConfig.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/SingleDownload.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/ThreadPool.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/UserCallbackHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/maketorrent.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/makeurl.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/APIImplementation/miscutils.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Base.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Choker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Connecter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Downloader.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/DownloaderFeedback.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Encrypter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/FileSelector.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Filter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/GetRightHTTPDownloader.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/HTTPDownloader.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/HoffmanHTTPDownloader.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/MessageID.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/NatCheck.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/PiecePicker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Rerequester.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Statistics.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Storage.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/StorageWrapper.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/T2T.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Uploader.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/btformats.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/convert.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/fakeopen.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/BT1/track.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/CurrentRateMeasure.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/HTTPHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/PSYCO.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/RateLimiter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/RateMeasure.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/RawServer.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/ServerPortHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/SocketHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/bencode.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/bitfield.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/clock.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/download_bt1.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/inifile.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/iprangeparse.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/natpunch.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/parseargs.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/parsedir.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/piecebuffer.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/selectpoll.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/subnetparse.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/torrentlistparse.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BitTornado/zurllib.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/TorrentCollecting.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/bartercast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/buddycast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/channelcast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/moderationcast_util.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/similarity.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/BuddyCast/votecast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/CacheDBHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/EditDist.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/MetadataDBHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/Notifier.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/SqliteCacheDBHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/SqliteFriendshipStatsCacheDB.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/SqliteSeedingStatsCacheDB.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/cachedb.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/friends.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/maxflow.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/CacheDB/sqlitecachedb.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ClosedSwarm/ClosedSwarm.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ClosedSwarm/PaymentIntegration.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ClosedSwarm/Tools.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ClosedSwarm/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/MagnetLink.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/MiniBitTorrent.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/MDHT_Spec.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/Makefile [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/README [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/bencode.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/controller.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/floodbarrier.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/identifier.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/interactive_dht.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/kadtracker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/ktracker_example.py.no [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/ktracker_query.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/logging_conf.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/lookup_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/message.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/minitwisted.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/node.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/querier.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/responder.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/routing_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/routing_table.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/rpc_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/run_dht_node_forever.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/server_dht.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_bencode.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_const.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_controller.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_floodbarrier.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_identifier.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_kadtracker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_lookup_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_message.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_minitwisted.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_node.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_querier.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_responder.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_routing_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_routing_table.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_rpc_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_token_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_tracker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_utils.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/token_manager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/tracker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/utils.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/version.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/mainlineDHT.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/mainlineDHTChecker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/repex.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DecentralizedTracking/ut_pex.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Download.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DownloadConfig.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/DownloadState.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/LiveSourceAuthConfig.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Merkle/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Merkle/merkle.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Multicast/Multicast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Multicast/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/ConnectionCheck.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/DialbackMsgHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/NatCheck.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/NatCheckMsgHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/NatTraversal.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/ReturnConnHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/TimeoutCheck.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/TimeoutFinder.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/UDPPuncture.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/UPnPThread.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/guessip.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/NATFirewall/upnp.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Overlay/MetadataHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Overlay/OverlayApps.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Overlay/OverlayThreadingBridge.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Overlay/SecureOverlay.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Overlay/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Overlay/permid.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ProxyService/Coordinator.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ProxyService/CoordinatorMessageHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ProxyService/Helper.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ProxyService/HelperMessageHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ProxyService/ProxyServiceUtil.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ProxyService/RatePredictor.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/ProxyService/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/RequestPolicy.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Search/KeywordSearch.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Search/Reranking.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Search/SearchManager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Search/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Session.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/SessionConfig.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/SocialNetwork/FriendshipMsgHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/SocialNetwork/OverlapMsgHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/SocialNetwork/RemoteQueryMsgHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/SocialNetwork/RemoteTorrentHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/SocialNetwork/SocialNetworkMsgHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/SocialNetwork/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/ChannelCrawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/Crawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/DatabaseCrawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/FriendshipCrawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/Logger.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/PunctureCrawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/RepexCrawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/SeedingStatsCrawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/Status/LivingLabReporter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/Status/Status.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/Status/XmlPrinter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/Status/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/VideoPlaybackCrawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/crawler.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/tribler_friendship_stats_sdb.sql [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/tribler_seedingstats_sdb.sql [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Statistics/tribler_videoplayback_stats.sql [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/Languages.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/MetadataDTO.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/MetadataExceptions.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/SubtitleInfo.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/res/subs_languages.csv [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/PeerHaveManager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/RichMetadataInterceptor.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/DiskManager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/SimpleTokenBucket.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/SubsMessageHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/SubtitlesHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/SubtitlesSupport.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Subtitles/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/TorrentDef.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Utilities/Crypto.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Utilities/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Utilities/timeouturlopen.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Utilities/unicode.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Utilities/utilities.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Utilities/win32regchecker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/LiveSourceAuth.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/MovieTransport.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/PiecePickerSVC.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/PiecePickerStreaming.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/SVCTransporter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/SVCVideoStatus.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/VideoOnDemand.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/VideoSource.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/VideoStatus.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/Video/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/defaults.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/exceptions.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/osutils.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/simpledefs.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Core/superpeer.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Debug/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Debug/console.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Debug/memory.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/SwarmPlayerIcon.ico [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/SwarmPlayerLogo.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/SwarmPluginIcon.ico [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/SwarmPluginLogo.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/SwarmServerIcon.ico [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/background.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/fullScreen.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/fullScreen_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/logoTribler.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/logoTribler_small.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/mute.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/mute_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/pause.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/pause_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/play.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/play_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/save.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/saveDisabled.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/saveDisabled_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/save_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/sliderDot.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/sliderVolume.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/splash.jpg [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/torrenticon.ico [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/tribler.ico [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/volume.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Images/volume_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/LICENSE.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Lang/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Lang/english.lang [new file with mode: 0644]
instrumentation/next-share/BaseLib/Lang/lang.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/BaseApp.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/Info.plist [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/Makefile [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/SLAResources.rsrc [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/TriblerDoc.icns [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/VolumeIcon.icns [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/background.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.psd [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_document.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_volumeicon.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.psd [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.psd [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/mkinstalldirs [new file with mode: 0755]
instrumentation/next-share/BaseLib/Player/Build/Mac/process_libs [new file with mode: 0755]
instrumentation/next-share/BaseLib/Player/Build/Mac/setuptriblermac.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/smart_lipo_merge [new file with mode: 0755]
instrumentation/next-share/BaseLib/Player/Build/Mac/smart_lipo_thin [new file with mode: 0755]
instrumentation/next-share/BaseLib/Player/Build/Mac/tribler.icns [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Mac/vlc-macosx-compile.patch [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/changelog [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/compat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/control [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/files [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/prerm [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/pycompat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/rules [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.1 [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.desktop [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.manpages [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.menu [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.postinst.debhelper [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.postrm.debhelper [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.xpm [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer_big.xpm [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Win32/heading.bmp [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Win32/setuptriblerplay.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Win32/swarmplayer.exe.manifest [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Build/Win32/triblerplay.nsi [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/EmbeddedPlayer4Frame.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/PlayerVideoFrame.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/Reporter.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/SvcTest.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/UtilityStub.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/swarmplayer-njaal.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/swarmplayer.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Player/systray.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/AtomFeedParser.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/BackgroundProcess.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/SwarmPlugin.inf [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/SwarmPlugin_IE.inf [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/heading.bmp [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/setupBGexe.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin.exe.manifest [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin.nsi [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin_FX_only.nsi [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin_IE_only.nsi [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/Search.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/SwarmEngine.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/defs.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/examplepage-firefox.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/examplepage.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/pluginemulator-http.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/pluginemulator.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/searchemulator.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/searchpage-firefox.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/searchpage-ie.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Plugin/searchpage.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Policies/RateManager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Policies/SeedingManager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Policies/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Subscriptions/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Subscriptions/rss_client.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/contentdir/file.avi [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/contentdir/file.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/ec.pem [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/ecpub.pem [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/file.wmv [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/file2.wmv [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_api.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_api.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/API/test_remote_torrent.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_seeding.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_seeding_live.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_seeding_vod.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_tdef.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_tracking.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/test_vod.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/API/thumb.jpg [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/CacheDB/SimpleMetadataDB.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/CacheDB/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/CacheDB/test_MetadataDBHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_Langugages.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_MetadataDTO.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_Subtitle.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/test_DiskManager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/test_SubtitleMsgHandlerIsolation.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/simple_mocks.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/test_RichMetadataInterceptor.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/Subtitles/test_SubtitlesHandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/Core/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/TESTSUITE.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/bak_tribler_sdb.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/btconn.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_multiple.torrent [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_single.torrent [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_tribler.tar.gz [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/extend_db_dir/superpeer120070902sp7001.log [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/extend_hs_dir/dummydata.merkle.torrent [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/extend_hs_dir/proxyservice.test.torrent [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/log_parser.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/olconn.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake.srt [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake0.srt [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake1.srt [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake2.srt [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/schema_sdb_v5.sql [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/subs_languages.csv [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/wrong_subs_languages.1.csv [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/subtitles_test_res/wrong_subs_languages.2.csv [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test.sh [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_TimedTaskQueue.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_as_server.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_bartercast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast2_datahandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast4_stresstest.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast4_stresstest_queries.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast_msg.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast_msg.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast_msg.sh [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_buddycast_msg8plus.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_cachingstream.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_channelcast.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_channelcast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_channelcast.sh [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_channelcast_plus_subtitles.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_closedswarm.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_connect_overlay.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_crawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.bat [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.bat [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_dialback_reply_active2.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_dialback_request.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_dlhelp.bat [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_dlhelp.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_dlhelp.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_extend_hs.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_friend.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_friendship.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_friendship.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_friendship.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_friendship_crawler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_g2g.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_gui_server.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_magnetlink.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_merkle.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_merkle_msg.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_merkle_msg.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_merkle_msg.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_miscutils.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_multicast.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_na_extend_hs.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_na_extend_hs.sh [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_natcheck.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_osutils.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_overlay_bridge.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_overlay_bridge.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_permid.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_permid_response1.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_proxyservice.bat [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_proxyservice.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_proxyservice.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_remote_query.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.bat [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_searchgridmanager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_secure_overlay.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_secure_overlay.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_secure_overlay.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_seeding_stats.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_social_overlap.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_sqlitecachedb.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_status.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_subtitles.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_subtitles.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Test/test_subtitles_isolation.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_subtitles_msgs.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_superpeers.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_threadpool.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_torrentcollecting.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_tracker_checking.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_upnp.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_url.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_url_metadata.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_ut_pex.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_video_server.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_vod.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_vod.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/test_vod.sh [new file with mode: 0644]
instrumentation/next-share/BaseLib/Test/usericon-ok.jpg [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/bitbucket-live-noauth.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/bitbucket-live.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/btshowmetainfo.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/cmdlinedl.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/createlivestream-njaal.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/createlivestream-noauth.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/createlivestream.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/createpoa.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/createtorrent.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/dirtracker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/dirtrackerseeder.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/duration2torrent.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/httpseeder.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/pingbackserver.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/pipe-arnocam-home.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Tools/pipe-arnocam-jip.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-aac-gop-sync.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-mp3.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-nosound-mencoder.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Tools/pipe-babscam-mpeg4-mp3-sync.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Tools/proxy-cmdline.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/seed-njaal.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/seeking.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/stunserver.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/superpeer.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Tools/trial_poa_server.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/TrackerChecking/TorrentChecking.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/TrackerChecking/TrackerChecking.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/TrackerChecking/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Mac/Info.plist [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Mac/setupBGapp.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/changelog [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/compat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/control [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/rules [new file with mode: 0755]
instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/source/format [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/README.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/SwarmPlayer_IE.inf [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/heading.bmp [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/patentfreevlc.bat [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/swarmplayer.exe.manifest [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/swarmplayer_IE_only.nsi [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/Build/Win32/setupBGexe.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/README.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/SwarmEngine.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/bgprocess/swarmengined [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/chrome.manifest [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/chrome/content/tribe_status_bar.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/chrome/content/tribe_status_bar.xul [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/components/SwarmTransport.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/components/TribeChannel.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/components/TribeProtocolHandler.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/icon.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/ie8.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/install.rdf [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/live-jip.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/lucid-configure-xulrunner191.sh [new file with mode: 0755]
instrumentation/next-share/BaseLib/Transport/port-to-other-browsers.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey2.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki_grey.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/swift.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/test-nofallback.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/tribeIChannel.idl [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/tribeISwarmTransport.idl [new file with mode: 0644]
instrumentation/next-share/BaseLib/Transport/wikipedia.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/asynchHTTPclient.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/asynchHTTPserver.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/objectconsole.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/taskrunner.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/threadhotel.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/upnplogger.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/upnpmarshal.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/common/upnpsoap.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/services/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/services/bookmarkservice.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/services/switchpower.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/services/urlservice.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/ssdp/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpclient.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpdaemon.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpmessage.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpserver.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpclient/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpclient/httpserver.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpclient.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpclientconsole.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpservicestub.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpclient/xmldescriptionparser.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpconsole.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/httpserver.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/servicemanager.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpdevice.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpeventdispatcher.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpserver.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpserverconsole.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpservice.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Utilities/Instance2Instance.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Utilities/LinuxSingleInstanceChecker.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Utilities/TimedTaskQueue.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Utilities/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Utilities/configreader.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Buttons.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/CachingStream.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/4framebackground.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/4framesliderDot.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/4framesliderDot_dis.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/4framesliderVolume.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/background.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/bl.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/br.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/fullScreen-hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/fullScreen.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/fullScreen_dis.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/fullScreen_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/pause.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/pause_dis.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/pause_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/play.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/play_dis.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/play_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/sliderDot.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/sliderDot_dis.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/sliderDot_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/sliderVolume.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/tl.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/tr.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol0.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol0Enabled.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol0Enabled_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol0_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol1.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol1Enabled.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol1Enabled_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol1_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol2.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol2Enabled.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol2Enabled_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol2_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol3.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol3Enabled.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol3Enabled_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol3_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol4.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol4Enabled.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol4Enabled_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol4_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol5.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol5Enabled_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol5_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol6.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol6Enabled_clicked.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Images/vol6_hover.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Ogg.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/Progress.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/VLCWrapper.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/VideoFrame.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/VideoPlayer.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/VideoServer.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/defs.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/Video/utils.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/WebUI.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/arrows.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/favicon.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/headBG.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/p2p-next.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/pause.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/pause_red.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/progress_blue.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/progress_green.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/progress_red.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/remove.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/remove_big.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/remove_red.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/resume.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/resume_green.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/splugin.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/users_arrow.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/users_arrow_green.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red_light.png [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/index/body.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/index/head.html [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/index/stylesheet.css [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/javascript/download.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/javascript/json.min.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/javascript/tribe.interface.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/WebUI/javascript/tribe.js [new file with mode: 0644]
instrumentation/next-share/BaseLib/__init__.py [new file with mode: 0644]
instrumentation/next-share/BaseLib/binary-LICENSE-postfix.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/ns-LICENSE.txt [new file with mode: 0644]
instrumentation/next-share/BaseLib/schema_sdb_v5.sql [new file with mode: 0644]
instrumentation/next-share/LICENSE.txt [new file with mode: 0644]
instrumentation/next-share/README.txt [new file with mode: 0644]
instrumentation/next-share/clean.bat [new file with mode: 0644]
instrumentation/next-share/lucid-xpicreate.sh [new file with mode: 0644]
instrumentation/next-share/playmakedist.bat [new file with mode: 0644]
instrumentation/next-share/pluginmakedist.bat [new file with mode: 0644]
instrumentation/next-share/pluginmakedist_FX_only.bat [new file with mode: 0644]
instrumentation/next-share/pluginmakedist_IE_only.bat [new file with mode: 0644]
instrumentation/next-share/reset-keepid.bat [new file with mode: 0644]
instrumentation/next-share/reset.bat [new file with mode: 0644]
instrumentation/next-share/vlc-1.0.5-swarmplugin-switch-kcc-src-aug2010-renum110-r16968.patch [new file with mode: 0644]
instrumentation/next-share/xie8transmakedist.bat [new file with mode: 0644]
instrumentation/next-share/xpitransmakedeb.sh [new file with mode: 0644]
instrumentation/next-share/xpitransmakedist.bat [new file with mode: 0644]
instrumentation/next-share/xpitransmakedist.sh [new file with mode: 0644]
instrumentation/next-share/xpitransmakedistmac.sh [new file with mode: 0644]

diff --git a/instrumentation/next-share/BaseLib/Category/Category.py b/instrumentation/next-share/BaseLib/Category/Category.py
new file mode 100644 (file)
index 0000000..aabdcb2
--- /dev/null
@@ -0,0 +1,383 @@
+# written by Yuan Yuan, Jelle Roozenburg
+# see LICENSE.txt for license information
+
+import os, re
+from BaseLib.Category.init_category import getCategoryInfo
+from FamilyFilter import XXXFilter
+from traceback import print_exc
+    
+import sys
+
+from BaseLib.__init__ import LIBRARYNAME
+
+DEBUG=False
+category_file = "category.conf"
+    
+
+class Category:
+    
+    # Code to make this a singleton
+    __single = None
+    __size_change = 1024 * 1024 
+    
+    def __init__(self, install_dir='.'):
+        
+        if Category.__single:
+            raise RuntimeError, "Category is singleton"
+        filename = os.path.join(install_dir,LIBRARYNAME, 'Category', category_file)
+        Category.__single = self
+        self.utility = None
+        #self.torrent_db = TorrentDBHandler.getInstance() # Arno, 2009-01-30: apparently unused
+        try:
+            self.category_info = getCategoryInfo(filename)
+            self.category_info.sort(rankcmp)
+        except:
+            self.category_info = []
+            if DEBUG:
+                print_exc()
+
+        self.xxx_filter = XXXFilter(install_dir)
+        
+        
+        if DEBUG:
+            print >>sys.stderr,"category: Categories defined by user",self.getCategoryNames()
+        
+        
+    # return Category instance    
+    def getInstance(*args, **kw):
+        if Category.__single is None:
+            Category(*args, **kw)       
+        return Category.__single
+    getInstance = staticmethod(getInstance)
+       
+    def register(self,metadata_handler):
+        self.metadata_handler = metadata_handler
+        
+    def init_from_main(self, utility):
+        self.utility = utility
+        self.set_family_filter(None) # init family filter to saved state
+
+
+#     # check to see whether need to resort torrent file
+#     # return bool
+#     def checkResort(self, data_manager):
+#         data = data_manager.data
+# #===============================================================================
+# #        if not data:
+# #            data = data_manager.torrent_db.getRecommendedTorrents(all = True)
+# #===============================================================================
+#         if not data:
+#             return False
+
+# #        data = data_manager.torrent_db.getRecommendedTorrents(all = True)
+# #        self.reSortAll(data)
+# #        return True
+#         torrent = data[0]
+#         if torrent["category"] == ["?"]:
+#             #data = data_manager.torrent_db.getRecommendedTorrents(all = True)
+#             self.reSortAll(data)
+# #            del data
+#             return True
+        
+#         begin = time()
+#         for item in data:
+#             if len(item['category']) > 1:
+#                 #data = data_manager.torrent_db.getRecommendedTorrents(all = True)
+#                 self.reSortAll(data)
+# #                del data
+#                 return True
+#         if DEBUG:
+#             print >>sys.stderr,'torrcoll: Checking of %d torrents costs: %f s' % (len(data), time() - begin)
+#         return False
+        
+#     # recalculate category of all torrents, remove torrents from db if not existed
+#     def reSortAll(self, data, parent = None):
+         
+#         max = len(data)
+#         if max == 0:
+#             return
+#         import wx
+#         dlgHolder = []
+#         event = Event()
+#         def makeDialog():
+#             dlg = wx.ProgressDialog("Upgrading Database",
+#                                     "Upgrading Old Database to New Database",
+#                                     maximum = max,
+#                                     parent = None,
+#                                     style = wx.PD_AUTO_HIDE 
+#                                     | wx.PD_ELAPSED_TIME
+#                                     | wx.PD_REMAINING_TIME
+#                                     )
+#             dlgHolder.append(dlg)
+#             event.set()
+            
+            
+#         wx.CallAfter(makeDialog)
+        
+#         # Wait for dialog to be ready
+#         event.wait()
+#         dlg = dlgHolder[0]
+        
+#         count = 0
+#         step = int(float(max) / 20) + 1
+        
+#         # sort each torrent file
+#         for i in xrange(len(data)):
+#             count += 1
+#             if count % step == 0:
+#                 wx.CallAfter(dlg.Update, [count])
+#             try:
+#                 # try alternative dir if bsddb doesnt match with current Tribler install
+#                 rec = data[i]
+#                 (torrent_dir,torrent_name) = self.metadata_handler.get_std_torrent_dir_name(rec)
+                    
+#                 # read the torrent file
+#                 filesrc = os.path.join(torrent_dir,torrent_name)
+                
+# #                print filesrc
+#                 f = open(filesrc, "rb")
+#                 torrentdata = f.read()          # torrent decoded string
+#                 f.close()
+#             except IOError:                     # torrent file not found
+#                 # delete the info from db
+#                 self.torrent_db.deleteTorrent(data[i]['infohash'])
+#                 continue   
+            
+#             # decode the data
+#             torrent_dict = bencode.bdecode(torrentdata)
+#             content_name = dunno2unicode(torrent_dict["info"].get('name', '?'))
+            
+#             category_belong = []
+#             category_belong = self.calculateCategory(torrent_dict, content_name)
+            
+#             if (category_belong == []):
+#                 category_belong = ['other']
+            
+#             data[i]['category'] = category_belong    # should have updated self.data
+#             self.torrent_db.updateTorrent(data[i]['infohash'], updateFlag=False, category=category_belong)
+#         self.torrent_db.sync()
+#         wx.CallAfter(dlg.Destroy)   
+    
+    def getCategoryKeys(self):
+        if self.category_info is None:
+            return []
+        keys = []
+        keys.append("All")
+        keys.append("other")
+        for category in self.category_info:
+            keys.append(category['name'])
+        keys.sort()
+        return keys
+    
+    def getCategoryNames(self):
+        if self.category_info is None:
+            return []
+        keys = []
+        for category in self.category_info:
+            rank = category['rank']
+            if rank == -1:
+                break
+            keys.append((category['name'],category['displayname']))
+        return keys
+    
+    def hasActiveCategory(self, torrent):
+        try:
+            name = torrent['category'][0]
+        except:
+            print >> sys.stderr, 'Torrent: %s has no valid category' % `torrent['content_name']`
+            return False
+        for category in [{'name':'other', 'rank':1}]+self.category_info:
+            rank = category['rank']
+            if rank == -1:
+                break
+            if name.lower() == category['name'].lower():
+                return True
+        #print >> sys.stderr, 'Category: %s was not in %s' % (name.lower(), [a['name'].lower()  for a in self.category_info if a['rank'] != -1])
+        return False
+    
+    def getCategoryRank(self,cat):
+        for category in self.category_info:
+            if category['name'] == cat:
+                return category['rank']
+        return None
+    
+    # calculate the category for a given torrent_dict of a torrent file
+    # return list
+    def calculateCategory(self, torrent_dict, display_name):  
+        # torrent_dict is the metainfo dict of a torrent file;
+        # display_name is the torrent's display name.
+        # return value: list of category names the torrent belongs to
+        torrent_category = None
+
+        files_list = []
+        try:                                
+            # the multi-files mode
+            for ifiles in torrent_dict['info']["files"]:
+                files_list.append((ifiles['path'][-1], ifiles['length'] / float(self.__size_change)))
+        except KeyError:                    
+            # single mode
+            files_list.append((torrent_dict['info']["name"],torrent_dict['info']['length'] / float(self.__size_change)))
+
+        # Check xxx; use 'announce', falling back to first 'announce-list' entry
+        try:
+            tracker = torrent_dict.get('announce')
+            if not tracker:
+                tracker = torrent_dict.get('announce-list',[['']])[0][0]
+            if self.xxx_filter.isXXXTorrent(files_list, display_name, tracker, torrent_dict.get('comment')):
+                return ['xxx']
+        except:
+            print >> sys.stderr, 'Category: Exception in explicit terms filter in torrent: %s' % torrent_dict
+            print_exc()
+        
+        # filename_list ready
+        strongest_cat = 0.0
+        for category in self.category_info:    # for each category
+            (decision, strength) = self.judge(category, files_list, display_name)
+            if decision and (strength > strongest_cat):
+                torrent_category = [category['name']]
+                strongest_cat = strength
+        
+        if torrent_category == None:
+            torrent_category = ['other']
+        
+        return torrent_category
+
+    # judge whether a torrent file belongs to a certain category
+    # return (bool, strength) tuple
+    def judge(self, category, files_list, display_name = ''):
+    
+        # judge file keywords
+        display_name = display_name.lower()                
+        factor = 1.0
+        fileKeywords = self._getWords(display_name)
+        
+        for ikeywords in category['keywords'].keys():
+            try:
+                fileKeywords.index(ikeywords)
+                factor *= 1 - category['keywords'][ikeywords]
+            except:
+                pass
+        if (1 - factor) > 0.5:
+            if 'strength' in category:
+                return (True, category['strength'])
+            else:
+                return (True, (1- factor))
+        
+        # judge each file
+        matchSize = 0
+        totalSize = 1e-19
+        for name, length in files_list:
+            totalSize += length
+            # judge file size
+            if ( length < category['minfilesize'] ) or \
+                (category['maxfilesize'] > 0 and length > category['maxfilesize'] ):
+                continue
+        
+            # judge file suffix
+            OK = False
+            for isuffix in category['suffix']:
+                if name.lower().endswith( isuffix ):
+                    OK = True
+                    break
+            if OK:
+                matchSize += length
+                continue        
+                
+            # judge file keywords
+            factor = 1.0
+            fileKeywords = self._getWords(name.lower())
+            
+            for ikeywords in category['keywords'].keys():
+#                pass
+                try:
+                    fileKeywords.index(ikeywords)
+                    #print ikeywords
+                    factor *= 1 - category['keywords'][ikeywords]
+                except:
+                    pass
+            if factor < 0.5:
+                # print filename_list[index] + '#######################'
+                matchSize += length
+   
+        # match file   
+        if (matchSize / totalSize) >= category['matchpercentage']:
+            if 'strength' in category:
+                return (True, category['strength'])
+            else:
+                return (True, (matchSize/ totalSize))
+            
+        return (False, 0)
+    
+    
+    WORDS_REGEXP = re.compile('[a-zA-Z0-9]+')
+    def _getWords(self, string):
+        return self.WORDS_REGEXP.findall(string)
+    
+    
+    def family_filter_enabled(self):
+        """
+        Return whether xxx filtering is enabled in this client
+        """
+        if self.utility is None:
+            return False
+        state = self.utility.config.Read('family_filter')
+        if state in ('1', '0'):
+            return state == '1'
+        else:
+            self.utility.config.Write('family_filter', '1')
+            self.utility.config.Flush()
+            return True
+    
+    def set_family_filter(self, b=None):
+        assert b in (True, False, None)
+        old = self.family_filter_enabled()
+        if b != old or b is None: # update category data if initial call, or if state changes
+            if b is None:
+                b=old
+            if self.utility is None:
+                return
+            #print >> sys.stderr , b
+            if b:
+                self.utility.config.Write('family_filter', '1')
+            else:
+                self.utility.config.Write('family_filter', '0')
+            self.utility.config.Flush()
+            # change category data
+            for category in self.category_info:
+                if category['name'] == 'xxx':
+                    if b:
+                        category['old-rank'] = category['rank']
+                        category['rank'] = -1
+                    elif category['rank'] == -1:
+                        category['rank'] = category['old-rank']
+                    break
+
+
+    def get_family_filter_sql(self, _getCategoryID, table_name=''):
+        if self.family_filter_enabled():
+            forbiddencats = [cat['name'] for cat in self.category_info if cat['rank'] == -1]
+            if table_name:
+                table_name+='.'
+            if forbiddencats:
+                return " and %scategory_id not in (%s)" % (table_name, ','.join([str(_getCategoryID([cat])) for cat in forbiddencats]))
+        return ''
+                
+    
+        
+        
+def rankcmp(a,b):
+    if not ('rank' in a):
+        return 1
+    elif not ('rank' in b):
+        return -1
+    elif a['rank'] == -1:
+        return 1
+    elif b['rank'] == -1:
+        return -1
+    elif a['rank'] == b['rank']:
+        return 0
+    elif a['rank'] < b['rank']:
+        return -1
+    else:
+        return 1
+    
diff --git a/instrumentation/next-share/BaseLib/Category/FamilyFilter.py b/instrumentation/next-share/BaseLib/Category/FamilyFilter.py
new file mode 100644 (file)
index 0000000..3bd79b1
--- /dev/null
@@ -0,0 +1,111 @@
+# Written by Jelle Roozenburg
+# see LICENSE.txt for license information
+
+import re, sys, os
+from traceback import print_exc
+
+from BaseLib.__init__ import LIBRARYNAME
+
+WORDS_REGEXP = re.compile('[a-zA-Z0-9]+')
+DEBUG = False
+
+class XXXFilter:
+    def __init__(self, install_dir):
+        termfilename = os.path.join(install_dir, LIBRARYNAME, 'Category','filter_terms.filter')
+        self.xxx_terms, self.xxx_searchterms = self.initTerms(termfilename)
+                
+    def initTerms(self, filename):
+        terms = set()
+        searchterms = set()
+
+        try:
+            f = file(filename, 'r')
+            lines = f.read().lower().splitlines()
+    
+            for line in lines:
+                if line.startswith('*'):
+                    searchterms.add(line[1:])
+                else:
+                    terms.add(line)
+            f.close()
+        except:
+            if DEBUG:
+                print_exc()
+            
+        if DEBUG:
+            print 'Read %d XXX terms from file %s' % (len(terms)+len(searchterms), filename)
+        return terms, searchterms
+    
+    def _getWords(self, string):
+        return [a.lower() for a in WORDS_REGEXP.findall(string)]
+    
+        
+    def isXXXTorrent(self, files_list, torrent_name, tracker, comment=None):
+        if tracker:
+            tracker = tracker.lower().replace('http://', '').replace('announce','')
+        else:
+            tracker = ''
+        terms = [a[0].lower() for a in files_list]
+        is_xxx = (len(filter(self.isXXX, terms)) > 0 or 
+                  self.isXXX(torrent_name, False) or
+                  self.isXXX(tracker, False) or
+                  (comment and self.isXXX(comment, False))
+                  )
+        if DEBUG:
+            if is_xxx:
+                print 'Torrent is XXX: %s %s' % (torrent_name, tracker)
+            else:
+                print 'Torrent is NOT XXX: %s %s' % (torrent_name, tracker)
+        return is_xxx
+                
+    
+    def isXXX(self, s, isFilename=True):
+        s = s.lower()
+        if self.isXXXTerm(s): # We have also put some full titles in the filter file
+            return True
+        if not self.isAudio(s) and self.foundXXXTerm(s):
+            return True
+        words = self._getWords(s)
+        words2 = [' '.join(words[i:i+2]) for i in xrange(0, len(words)-1)]
+        num_xxx = len([w for w in words+words2 if self.isXXXTerm(w, s)])
+        if isFilename and self.isAudio(s):
+            return num_xxx > 2 # almost never classify mp3 as porn
+        else:
+            return num_xxx > 0
+    
+    def foundXXXTerm(self, s):
+        for term in self.xxx_searchterms:
+            if term in s:
+                if DEBUG:
+                    print 'XXXFilter: Found term "%s" in %s' % (term, s)
+                return True
+        return False
+    
+    def isXXXTerm(self, s, title=None):
+        # check if the term (also with a trailing -s/-es/-n stripped) is in xxx-terms
+        s = s.lower()
+        if s in self.xxx_terms:
+            if DEBUG:
+                print 'XXXFilter: "%s" is dirty%s' % (s, title and ' in %s' % title or '')
+            return True
+        if s.endswith('es'):
+            if s[:-2] in self.xxx_terms:
+                if DEBUG:
+                    print 'XXXFilter: "%s" is dirty%s' % (s[:-2], title and ' in %s' % title or '')
+                return True
+        elif s.endswith('s') or s.endswith('n'):
+            if s[:-1] in self.xxx_terms:
+                if DEBUG:
+                    print 'XXXFilter: "%s" is dirty%s' % (s[:-1], title and ' in %s' % title or '')
+                return True
+        
+        return False
+
+    audio_extensions = ['cda', 'flac', 'm3u', 'mp2', 'mp3', 'md5', 'vorbis', 'wav', 'wma', 'ogg']
+    def isAudio(self, s):
+        return s[s.rfind('.')+1:] in self.audio_extensions
+    
+    
+        
+
+    
diff --git a/instrumentation/next-share/BaseLib/Category/TestCategory.py b/instrumentation/next-share/BaseLib/Category/TestCategory.py
new file mode 100644 (file)
index 0000000..c923e96
--- /dev/null
@@ -0,0 +1,148 @@
+# Written by Yuan Yuan
+# see LICENSE.txt for license information
+
+import sys, os
+execpath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '..', '..')
+sys.path.append(execpath)
+#print sys.path
+from Utility.utility import getMetainfo
+from BaseLib.Category.Category import Category
+
+DEBUG = False
+
+def testFilter(catfilename, torrentpath):    
+    readCategorisationFile(catfilename)
+    #print 'Install_dir is %s' % execpath
+    c = Category.getInstance(execpath)
+    total = porn = fn = fp = 0
+    for tfilename,isporn in tdict.items():
+        torrent = getMetainfo(os.path.join(torrentpath,tfilename))
+        name = torrent['info']['name']
+        cat = c.calculateCategory(torrent, name)
+        fporn = (cat == ['xxx'])
+        total+= 1
+        porn += int(isporn)
+        if isporn == fporn:
+            if DEBUG:
+                print (isporn, fporn), 'good', name
+            
+        elif isporn and not fporn:
+            fn+=1
+            print 'FALSE NEGATIVE'
+            showTorrent(os.path.join(torrentpath,tfilename))
+        elif not isporn and fporn:
+            fp +=1
+            print 'FALSE POSITIVE'
+            showTorrent(os.path.join(torrentpath,tfilename))
+            
+    print """
+    Total torrents:   %(total)d
+    XXX torrents:     %(porn)d
+    Correct filtered: %(good)d
+    False negatives:  %(fn)d
+    False positives:  %(fp)d
+    """ % {'total':total, 'porn':porn, 'fn':fn,'fp':fp,'good':total-fn-fp}
+
+def readCategorisationFile(filename):
+    global tdict
+    tdict = {}
+    try:
+        f = file(filename, 'r')
+        lines = f.read().splitlines()
+        for line in lines:
+            if line:
+                parts = line.split('\t')
+                tdict[parts[0]] = bool(int(parts[1]))
+        f.close()
+    except IOError:
+        print 'No file %s found, starting with empty file' % filename
+        
+def getTorrentData(path, max_num=-1):
+    torrents= []
+    i = 0
+    for fname in os.listdir(path):
+        if fname.endswith('.torrent'):
+            torrents.append(os.path.join(path,fname))
+        if i%1000 == 0 and i:
+            print 'Loaded: %d torrents' % i
+        if i == int(max_num):
+            break
+        i+=1   
+    print 'Loaded %d torrents' % len(torrents)
+    return torrents
+
+def showTorrent(path):
+    torrent = getMetainfo(os.path.join(path))
+    name = torrent['info']['name']
+    print '------------------------------'
+    print '\tfiles  :'
+    files_list = []
+    __size_change = 1024
+    try:                                
+        # the multi-files mode
+        for ifiles in torrent['info']["files"]:
+            files_list.append((ifiles['path'][-1], ifiles['length'] / float(__size_change)))
+    except KeyError:                    
+        # single mode
+        files_list.append((torrent['info']["name"],torrent['info']['length'] / float(__size_change)))
+    for fname, fsize in files_list:
+        print'\t\t%s\t%d kb' % (fname, fsize)
+    print 'Torrent name: %s' % name
+    print '\ttracker:%s' % torrent['announce']
+    print '------------------------------'
+    
+def createTorrentDataSet(filename, torrentpath):
+    initSaveFile(filename)
+    f_out = file(filename, 'a')
+    torrents = getTorrentData(torrentpath)
+    for torrent in torrents:
+        if os.path.split(torrent)[-1] in tset: # already done
+            continue
+        showTorrent(torrent)
+        ans = None
+        while ans not in ['q', 'y','n']:
+            print 'Is this torrent porn? (y/n/q)'
+            ans = sys.stdin.readline()[:-1].lower()
+        if ans == 'q':
+            break
+        else:
+            saveTorrent(f_out, torrent, (ans=='y'))
+    f_out.close()
+    
+def saveTorrent(f_out, torrent, boolean):
+    if torrent in tset:
+        return
+    tfilename = os.path.split(torrent)[-1]
+    assert tfilename
+    f_out.write('%s\t%d\n' % (tfilename, int(boolean)))
+    f_out.flush()
+    tset.add(torrent)
+    
+def initSaveFile(filename):
+    global tset
+    tset = set()
+    try:
+        f = file(filename, 'r')
+        lines = f.read().splitlines()
+        for line in lines:
+            tset.add(line.split('\t')[0])
+        f.close()
+    except IOError:
+        print 'No file %s found, starting with empty file' % filename
+    
+    
+
+def main(args):
+    if len(args) != 4 or args[1] not in ['categorise', 'test']:
+        print 'Usage 1: %s categorise [torrent-dir] [torrent-data-file]' % args[0]
+        print 'Usage 2: %s test [torrent-dir] [torrent-data-file]' % args[0]
+        sys.exit(1)
+    if args[1] == 'categorise':
+        createTorrentDataSet(args[3], args[2])
+    elif args[1] == 'test':
+        testFilter(args[3], args[2])
+    print 'ready'
+    
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/instrumentation/next-share/BaseLib/Category/__init__.py b/instrumentation/next-share/BaseLib/Category/__init__.py
new file mode 100644 (file)
index 0000000..316dcff
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Yuan Yuan
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Category/category.conf b/instrumentation/next-share/BaseLib/Category/category.conf
new file mode 100644 (file)
index 0000000..4f9000f
--- /dev/null
@@ -0,0 +1,62 @@
+[xxx]
+rank = 10
+displayname = XXX
+matchpercentage = 0.001
+strength = 1.1
+# Keywords are in separate file: filter_terms.filter
+
+
+[Video]
+rank = 1
+displayname = Video Files
+suffix = asf, asp, avi, flc, fli, flic, mkv, mov, movie, mpeg, mpg, qicktime, ram, rm, rmvb, rpm, vob, wma, wmv
+minfilesize = 50
+maxfilesize = 10000000
+matchpercentage = 0.5
+
+*divx = 1
+*xvid = 1
+*rmvb = 1
+
+[VideoClips]
+rank = 2
+displayname = Video Clips
+suffix = asf, asp, avi, flc, fli, flic, mkv, mov, movie, mpeg, mpg, qicktime, ram, rm, rmvb, rpm, vob, wma, wmv, mp4, flv
+minfilesize = 0
+maxfilesize = 50
+matchpercentage = 0.5
+
+[Audio]
+rank = 3
+displayname = Audio
+suffix = cda, flac, m3u, mp2, mp3, vorbis, wav, wma, ogg, ape
+matchpercentage = 0.8
+
+[Document]
+rank = 5
+displayname = Documents
+suffix = doc, pdf, ppt, ps, tex, txt, vsd
+matchpercentage = 0.8
+
+[Compressed]
+rank = 4
+displayname = Compressed
+suffix = ace, bin, bwt, cab, ccd, cdi, cue, gzip, iso, jar, mdf, mds, nrg, rar, tar, vcd, z, zip
+matchpercentage = 0.8
+
+*.r0 = 1
+*.r1 = 1
+*.r2 = 1
+*.r3 = 1
+*.r4 = 1
+*.r5 = 1
+*.r6 = 1
+*.r7 = 1
+*.r8 = 1
+*.r9 = 1
+
+[Picture]
+rank = 6
+displayname = Pictures
+suffix = bmp, dib, dwg, gif, ico, jpeg, jpg, pic, png, swf, tif, tiff
+matchpercentage = 0.8
diff --git a/instrumentation/next-share/BaseLib/Category/init_category.py b/instrumentation/next-share/BaseLib/Category/init_category.py
new file mode 100644 (file)
index 0000000..1d8edfb
--- /dev/null
@@ -0,0 +1,57 @@
+# Written by Yuan Yuan
+# see LICENSE.txt for license information
+
+# give the initial category information
+
+import ConfigParser
+
+def splitList(string):
+    l = []
+    for word in string.split(","):
+        word = word.strip()
+        l.append(word)
+    return l
+
+init_fun = {}
+init_fun["minfilenumber"] = int
+init_fun["maxfilenumber"] = int
+init_fun["minfilesize"] = int
+init_fun["maxfilesize"] = int
+init_fun["suffix"] = splitList
+init_fun["matchpercentage"] = float
+init_fun["keywords"] = float
+init_fun["strength"] = float
+init_fun["displayname"] = str
+init_fun["rank"] = int
+
+def getDefault():
+    category = {}
+    category["name"] = ""
+    category["keywords"] ={}
+    category["suffix"] = []
+    category["minfilesize"] = 0
+    category["maxfilesize"] = -1
+    return category
+
+def getCategoryInfo(filename):
+    config = ConfigParser.ConfigParser()
+    config.readfp(open(filename))
+
+    cate_list = []
+    sections = config.sections()
+
+    for isection in sections:
+        category = getDefault()
+        category["name"] = isection
+        for (name, value) in config.items(isection):                
+            if name[0] != "*":
+                category[name] = init_fun[name](value)
+            else:
+                name = name[1:]
+                name = name.strip()
+                category["keywords"][name] = init_fun["keywords"](value) 
+        cate_list.append(category)   
+
+#    print cate_list
+    return cate_list
+
diff --git a/instrumentation/next-share/BaseLib/Core/API.py b/instrumentation/next-share/BaseLib/Core/API.py
new file mode 100644 (file)
index 0000000..63368aa
--- /dev/null
@@ -0,0 +1,160 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# To use the Tribler Core just do:
+# from BaseLib.Core.API import *
+#
+""" Tribler Core API v1.0.7, Aug 2010. Import this to use the API """
+
+# History:
+#
+# 1.0.7      Released with Next-Share M32
+#
+# 1.0.7rc2   Added: get_peer_id() to Download, returning the BT peer ID when
+#            the download is not stopped.
+#
+# 1.0.7rc1   Added: set_proxy_mode/get_proxy_mode to DownloadConfig and 
+#            DownloadRuntimeConfig, set_proxyservice_status/
+#            get_proxyservice_status to SessionConfig and SessionRuntimeConfig
+#
+# 1.0.6      Released with Next-Share M30
+#
+# 1.0.6rc4   Added: TorrentDef.set_initial_peers() for magnet: links support.
+#
+#            Added: Session.get_subtitles_support_facade() to get access to
+#            the subtitle-gossiping subsystem.
+#
+# 1.0.6rc3   Added: TorrentDef.set_live_ogg_headers() for live streams
+#            in Ogg container format.
+#
+# 1.0.5      Released with Next-Share M24.2
+#
+# 1.0.5rc4   Added: TorrentDef.set_metadata() for including Next-Share Core 
+#            metadata in .tstream files.
+#
+# 1.0.5rc3   Added: restartstatefilename to DownloadConfig.set_video_source()
+#
+# 1.0.5rc2   Added: TorrentDef.set_live_ogg_headers() for live streams
+#            in Ogg container format.
+#
+# 1.0.5rc1   Session.query_connected_peers() returns all names as Unicode 
+#            strings.
+#
+# 1.0.4      Released with Next-Share M24
+#
+# 1.0.4rc7   Added: DLMODE_SVC for Scalable Video Coding support following
+#            P2P-Next WP6's design. 
+#
+# 1.0.4rc6   Added: SIMPLE+METADATA query. 
+#
+# 1.0.4rc5   Added: DLSTATUS_REPEXING.
+#
+#            Added: initialdlstatus parameter to Session.start_download() to 
+#            restart a Download in a particular state, in particular, 
+#            DLSTATUS_REPEXING.
+#
+#            Added: initialdlstatus parameter to Download.restart() to 
+#            restart a Download in a particular state.
+#
+#            Added: get_swarmcache() to DownloadState.
+#
+# 1.0.4rc4   Added: get_total_transferred() to the DownloadState to
+#            retrieve the total amount of bytes that are up or
+#            downloaded for a single Download.
+#
+#            Removed: get_peerid() and get_videoinfo() from DownloadState,
+#            the former is not a state parameter and the latter exposed internal
+#            state.
+#
+# 1.0.4rc3   Added "CHANNEL" queries to query_connected_peers() to Session 
+#            class for making queries for the new channel concept.
+#
+#            Removed: ModerationCast configuration parameters from SessionConfig.
+#            Added: ChannelCast configuration parameters to SessionConfig.
+#            Added: VoteCast configuration parameters to SessionConfig.
+#
+# 1.0.4rc2   TorrentDef now supports P2P URLs.
+#
+# 1.0.4rc1   Added: torrent_size (size of the .torrent file) to the remote
+#            torrent search response, see Session.query_connected_peers().
+#
+# Timeline disruption: API v1.0.3 was released with Next-Share M16 on April 30.
+# 1.0.2rc6 was released with Tribler 5.1.0. Reintroduced as 1.0.4rc1
+#
+# 1.0.3      Released with Next-Share M16  
+#
+# 1.0.3rc1   Added: [s/g]et_multicast_local_peer_discovery() to Session API.
+#            Added: [s/g]et_moderation_cast_promote_own() to aggressively
+#            promote your own moderations (to be run by a moderator)
+#            Removed: [s/g]et_rss_*(). These were not Core/Session parameters.
+#            Removed: [s/g]et_moderationcast_upload/download_bandwidth_limit(), 
+#            no longer used.
+#
+# 1.0.2      Released with Tribler 5.0.0 Preview1
+#
+# 1.0.2rc5   Added: [s/g]et_moderationcast_*() to configure ModerationCast.
+#
+# 1.0.2rc4   Added: Session.get_externally_reachable() which tells whether the
+#            listen port is reachable from the Internet.
+#
+# 1.0.2rc3   Added: Session.has_shutdown() which tells whether it is already
+#            safe to quit the process the Session was running in.
+#
+# 1.0.2rc2   Removed: [s/g]et_puncturing_coordinators in SessionConfig.
+#            Bugfix: [s/g]et_puncturing_private_port in SessionConfig renamed to
+#            [s/g]et_puncturing_internal_port.
+#
+# 1.0.2rc1   Added: set_same_nat_try_internal(). If set Tribler will
+#            check if other Tribler peers it meets in a swarm are behind the 
+#            same NAT and if so, replace the connection with an connection over 
+#            the internal network. Also added set_unchoke_bias_for_internal()
+#
+# 1.0.1      Released with Tribler 4.5.0
+#
+# 1.0.1rc4   Added: friendship extension to Session API.
+#            Added: 'gracetime' parameter to Session shutdown.
+#
+# 1.0.1rc3   Bugfix: [s/g]et_internaltracker in SessionRuntimeConfig renamed to
+#            [s/g]et_internal_tracker.
+#
+#            Added/bugfix: [s/g]et_mainline_dht in SessionConfigInterface to
+#            control whether mainline DHT support is activated.
+#
+# 1.0.1rc2   Added: set_seeding_policy() to Download class to dynamically set
+#            different seeding policies.
+#
+#            Added: Methods to SessionConfigInterface for Network Address
+#            Translator detection, see also Session.get_nat_type()
+# 
+# 1.0.1rc1   Bugfix: The query passed to the callback function for 
+#            query_connected_peers() is now the original query, rather than
+#            the query with "SIMPLE " stripped off.
+#
+# 1.0.0      Released with SwarmPlayer 1.0
+#
+# 1.0.0rc5   Added option to define auxiliary seeding servers for live stream
+#            (=these servers are always unchoked at the source server).
+#
+# 1.0.0rc4   Changed DownloadConfig.set_vod_start_callback() to a generic 
+#            event-driven interface.
+
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.Base import *
+from BaseLib.Core.Session import *
+from BaseLib.Core.SessionConfig import *
+from BaseLib.Core.Download import *
+from BaseLib.Core.DownloadConfig import *
+from BaseLib.Core.DownloadState import *
+from BaseLib.Core.exceptions import *
+try:
+    from BaseLib.Core.RequestPolicy import *
+except ImportError:
+    pass
+from BaseLib.Core.TorrentDef import *
+try:
+    import M2Crypto
+    from BaseLib.Core.LiveSourceAuthConfig import *
+except ImportError:
+    pass
+
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/DownloadImpl.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/DownloadImpl.py
new file mode 100644 (file)
index 0000000..baa94a7
--- /dev/null
@@ -0,0 +1,649 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+
+import sys
+import os
+import copy
+from traceback import print_exc,print_stack
+from threading import RLock,Condition,Event,Thread,currentThread
+
+from BaseLib.Core.DownloadState import DownloadState
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.osutils import *
+from BaseLib.Core.APIImplementation.SingleDownload import SingleDownload
+import BaseLib.Core.APIImplementation.maketorrent as maketorrent
+
+# Module-wide switch: when True, methods in this module print verbose
+# tracing to stderr via "print >>sys.stderr,...".
+DEBUG = False
+
+class DownloadImpl:
+    """ Internal implementation of a single torrent Download. Holds the
+    runtime state (config, error, progress) and delegates actual transfer
+    work to a SingleDownload running on the network thread. Methods may be
+    called from any thread; all mutable state is guarded by self.dllock. """
+    
+    def __init__(self,session,tdef):
+        # Re-entrant lock guarding all mutable state of this Download.
+        self.dllock = RLock()
+        # just enough so error saving and get_state() works
+        self.error = None
+        self.sd = None # hack
+        # To be able to return the progress of a stopped torrent, how far it got.
+        self.progressbeforestop = 0.0
+        self.filepieceranges = []
+        self.pstate_for_restart = None # h4x0r to remember resumedata
+
+        # Copy tdef, so we get an infohash
+        self.session = session
+        self.tdef = tdef.copy()
+        self.tdef.readonly = True
+        
+    #
+    # Creating a Download
+    #
+    def setup(self,dcfg=None,pstate=None,initialdlstatus=None,lmcreatedcallback=None,lmvodeventcallback=None):
+        """
+        Create a Download object. Used internally by Session.
+        @param dcfg DownloadStartupConfig or None (in which case a new
+            DownloadStartupConfig() is created and the result becomes the
+            runtime config of this Download).
+        @param pstate Persistent state (dict) saved by a previous session,
+            or None; used to restore progress and resume data.
+        @param initialdlstatus If DLSTATUS_STOPPED, the engine is not
+            started; any other value (including None) starts it.
+        @param lmcreatedcallback Called on the network thread once the
+            engine wrapper has been created.
+        @param lmvodeventcallback Callback for video-on-demand events,
+            passed through to the engine wrapper.
+        """
+        # Called by any thread
+        try:
+            self.dllock.acquire() # not really needed, no other threads know of this object
+
+            torrentdef = self.get_def()
+            metainfo = torrentdef.get_metainfo()
+            # H4xor this so the 'name' field is safe
+            self.correctedinfoname = fix_filebasename(torrentdef.get_name_as_unicode())
+
+            if DEBUG:
+                print >>sys.stderr,"Download: setup: piece size",metainfo['info']['piece length']
+            
+            # See if internal tracker used
+            itrackerurl = self.session.get_internal_tracker_url()
+            #infohash = self.tdef.get_infohash()
+            metainfo = self.tdef.get_metainfo()
+            usingitracker = False
+            
+            if DEBUG:
+                print >>sys.stderr,"Download: setup: internal tracker?",metainfo['announce'],itrackerurl,"#"
+
+            # Compare both with and without trailing slash; tracker URLs may
+            # be written either way in the metainfo.
+            if itrackerurl.endswith('/'):
+                slashless = itrackerurl[:-1]
+            else:
+                slashless = itrackerurl
+            if metainfo['announce'] == itrackerurl or metainfo['announce'] == slashless:
+                usingitracker = True
+            elif 'announce-list' in metainfo:
+                for tier in metainfo['announce-list']:
+                    if itrackerurl in tier or slashless in tier:
+                         usingitracker = True
+                         break
+                     
+            if usingitracker:
+                if DEBUG:
+                    print >>sys.stderr,"Download: setup: Using internal tracker"
+                # Copy .torrent to state_dir/itracker so the tracker thread 
+                # finds it and accepts peer registrations for it.
+                #
+                self.session.add_to_internal_tracker(self.tdef) 
+            elif DEBUG:
+                print >>sys.stderr,"Download: setup: Not using internal tracker"
+            
+            # Copy dlconfig, from default if not specified
+            if dcfg is None:
+                cdcfg = DownloadStartupConfig()
+            else:
+                cdcfg = dcfg
+            self.dlconfig = copy.copy(cdcfg.dlconfig)
+            
+
+            # Copy sessconfig into dlconfig, such that BitTornado.BT1.Connecter, etc.
+            # knows whether overlay is on, etc.
+            #
+            for (k,v) in self.session.get_current_startup_config_copy().sessconfig.iteritems():
+                self.dlconfig.setdefault(k,v)
+            self.set_filepieceranges(metainfo)
+    
+            # Things that only exist at runtime
+            self.dlruntimeconfig= {}
+            self.dlruntimeconfig['max_desired_upload_rate'] = 0
+            self.dlruntimeconfig['max_desired_download_rate'] = 0
+    
+            if DEBUG:
+                print >>sys.stderr,"DownloadImpl: setup: initialdlstatus",`self.tdef.get_name_as_unicode()`,initialdlstatus
+
+            # Closed swarms config
+            self.dlconfig['cs_keys'] = self.tdef.get_cs_keys_as_ders()
+            self.dlconfig['permid'] = self.session.get_permid()
+            if self.dlconfig['cs_keys']:
+                print >> sys.stderr,"DownloadImpl: setup: This is a closed swarm"
+                #if dcfg.get_poa():
+                #    self.dlconfig['poa'] = dcfg.get_poa()
+                #else:
+                #    print >> sys.stderr,"POA not available - seeding?"
+
+            # Set progress
+            if pstate is not None and pstate.has_key('dlstate'):
+                self.progressbeforestop = pstate['dlstate'].get('progress', 0.0)
+            
+            # Note: initialdlstatus now only works for STOPPED
+            if initialdlstatus != DLSTATUS_STOPPED:
+                if pstate is None or pstate['dlstate']['status'] != DLSTATUS_STOPPED: 
+                    # Also restart on STOPPED_ON_ERROR, may have been transient
+                    self.create_engine_wrapper(lmcreatedcallback,pstate,lmvodeventcallback,initialdlstatus) # RePEX: propagate initialdlstatus
+                
+            self.pstate_for_restart = pstate
+                
+            self.dllock.release()
+        except Exception,e:
+            # Record the error so get_state() reports STOPPED_ON_ERROR; the
+            # lock is still held on this path since release() above was skipped.
+            print_exc()
+            self.set_error(e)
+            self.dllock.release()
+
+    def create_engine_wrapper(self,lmcreatedcallback,pstate,lmvodeventcallback,initialdlstatus=None):
+        """ Collect all parameters for the download engine (VOD/SVC/live
+        streaming configuration, MIME type, per-download kvconfig) and
+        schedule the actual engine creation on the network thread.
+        Called by any thread, assume dllock already acquired """
+        if DEBUG:
+            print >>sys.stderr,"Download: create_engine_wrapper()"
+        
+        # all thread safe
+        infohash = self.get_def().get_infohash()
+        metainfo = copy.deepcopy(self.get_def().get_metainfo())
+        
+        # H4xor this so the 'name' field is safe
+        metainfo['info']['name'] = self.correctedinfoname 
+        if 'name.utf-8' in metainfo['info']:
+            metainfo['info']['name.utf-8'] = self.correctedinfoname
+        
+        multihandler = self.session.lm.multihandler
+        listenport = self.session.get_listen_port()
+        vapath = self.session.get_video_analyser_path()
+
+        # Note: BT1Download is started with copy of d.dlconfig, not direct access
+        kvconfig = copy.copy(self.dlconfig)
+        
+        # RePEX: extend kvconfig with initialdlstatus
+        kvconfig['initialdlstatus'] = initialdlstatus
+        
+        # Define which file to DL in VOD mode
+        live = self.get_def().get_live()
+        vodfileindex = {
+            'index':-1,
+            'inpath':None,
+            'bitrate':0.0,
+            'live':live,
+            'usercallback':None,
+            'userevents': [],
+            'outpath':None}
+
+        # --- streaming settings
+        if self.dlconfig['mode'] == DLMODE_VOD or self.dlconfig['video_source']:
+            # video file present which is played or produced
+            multi = False
+            if 'files' in metainfo['info']:
+                multi = True
+            
+            # Determine bitrate
+            if multi and len(self.dlconfig['selected_files']) == 0:
+                # Multi-file torrent, but no file selected
+                raise VODNoFileSelectedInMultifileTorrentException() 
+            
+            if not multi:
+                # single-file torrent
+                file = self.get_def().get_name()
+                idx = -1
+                bitrate = self.get_def().get_bitrate()
+            else:
+                # multi-file torrent
+                file = self.dlconfig['selected_files'][0]
+                idx = self.get_def().get_index_of_file_in_files(file)
+                bitrate = self.get_def().get_bitrate(file)
+
+            # Determine MIME type
+            mimetype = self.get_mimetype(file)
+            # Arno: don't encode mimetype in lambda, allow for dynamic 
+            # determination by videoanalyser
+            vod_usercallback_wrapper = lambda event,params:self.session.uch.perform_vod_usercallback(self,self.dlconfig['vod_usercallback'],event,params)
+
+            vodfileindex['index'] = idx
+            vodfileindex['inpath'] = file
+            vodfileindex['bitrate'] = bitrate
+            vodfileindex['mimetype'] = mimetype
+            vodfileindex['usercallback'] = vod_usercallback_wrapper
+            vodfileindex['userevents'] = self.dlconfig['vod_userevents'][:]
+        elif live:
+            # live torrents must be streamed or produced, but not just downloaded
+            raise LiveTorrentRequiresUsercallbackException()
+        # Ric: added svc case TODO
+        elif self.dlconfig['mode'] == DLMODE_SVC:
+            # video file present which is played or produced
+            multi = False
+            if 'files' in metainfo['info']:
+                multi = True
+            
+            # Determine bitrate
+            if multi and len(self.dlconfig['selected_files']) == 0:
+                # Multi-file torrent, but no file selected
+                raise VODNoFileSelectedInMultifileTorrentException() 
+            
+            # multi-file torrent
+            # Ric: the selected files are already ordered
+            files = self.dlconfig['selected_files']
+            
+            # SVC uses a list of indices/paths where plain VOD uses scalars.
+            idx = []
+            for file in files:
+                idx.append( self.get_def().get_index_of_file_in_files(file) )
+                
+            bitrate = self.get_def().get_bitrate(files[0]) 
+            
+            # Determine MIME type
+            # NOTE(review): uses 'file' left over from the loop above, i.e.
+            # the MIME type of the *last* selected file — confirm intended.
+            mimetype = self.get_mimetype(file)
+            # Arno: don't encode mimetype in lambda, allow for dynamic 
+            # determination by videoanalyser
+            vod_usercallback_wrapper = lambda event,params:self.session.uch.perform_vod_usercallback(self,self.dlconfig['vod_usercallback'],event,params)
+
+            vodfileindex['index'] = idx
+            vodfileindex['inpath'] = files
+            vodfileindex['bitrate'] = bitrate
+            vodfileindex['mimetype'] = mimetype
+            vodfileindex['usercallback'] = vod_usercallback_wrapper
+            vodfileindex['userevents'] = self.dlconfig['vod_userevents'][:]
+            
+        else:
+            # plain download: no streaming, generic MIME type
+            vodfileindex['mimetype'] = 'application/octet-stream'
+            
+        if DEBUG:
+            print >>sys.stderr,"Download: create_engine_wrapper: vodfileindex",`vodfileindex` 
+
+        # Delegate creation of engine wrapper to network thread
+        network_create_engine_wrapper_lambda = lambda:self.network_create_engine_wrapper(infohash,metainfo,kvconfig,multihandler,listenport,vapath,vodfileindex,lmcreatedcallback,pstate,lmvodeventcallback)
+        self.session.lm.rawserver.add_task(network_create_engine_wrapper_lambda,0) 
+        
+
+    def network_create_engine_wrapper(self,infohash,metainfo,kvconfig,multihandler,listenport,vapath,vodfileindex,lmcallback,pstate,lmvodeventcallback):
+        """ Actually construct the SingleDownload engine wrapper and store it
+        in self.sd, then notify lmcallback (if any) with the new wrapper,
+        the current error (if any) and the persistent state.
+        Called by network thread """
+        self.dllock.acquire()
+        try:
+            self.sd = SingleDownload(infohash,metainfo,kvconfig,multihandler,self.session.lm.get_ext_ip,listenport,vapath,vodfileindex,self.set_error,pstate,lmvodeventcallback,self.session.lm.hashcheck_done)
+            sd = self.sd
+            exc = self.error
+            if lmcallback is not None:
+                lmcallback(self,sd,exc,pstate)
+        finally:
+            self.dllock.release()
+
+    #
+    # Public method
+    #
+    def get_def(self):
+        """ Return the TorrentDef this Download was created from (the
+        read-only copy made in __init__). """
+        # No lock because attrib immutable and return value protected
+        return self.tdef
+
+    #
+    # Retrieving DownloadState
+    #
+    def set_state_callback(self,usercallback,getpeerlist=False):
+        """ Schedule a DownloadState snapshot; usercallback will be invoked
+        with it later (see network_get_state / perform_getstate_usercallback).
+        @param usercallback Function called with the DownloadState.
+        @param getpeerlist If True, include the peer list in the state.
+        Called by any thread """
+        self.dllock.acquire()
+        try:
+            network_get_state_lambda = lambda:self.network_get_state(usercallback,getpeerlist)
+            # First time on general rawserver
+            self.session.lm.rawserver.add_task(network_get_state_lambda,0.0)
+        finally:
+            self.dllock.release()
+
+
+    def network_get_state(self,usercallback,getpeerlist,sessioncalling=False):
+        """ Build a DownloadState snapshot for this Download.
+        @param usercallback Invoked asynchronously with the state (unless
+            sessioncalling is True).
+        @param getpeerlist If True, include peer information.
+        @param sessioncalling If True, return the DownloadState directly
+            instead of scheduling the usercallback.
+        Called by network thread """
+        self.dllock.acquire()
+        try:
+            # RePEX: get last stored SwarmCache, if any:
+            swarmcache = None
+            if self.pstate_for_restart is not None and self.pstate_for_restart.has_key('dlstate'):
+                swarmcache = self.pstate_for_restart['dlstate'].get('swarmcache',None)
+            
+            if self.sd is None:
+                # Engine not running: report STOPPED with last known progress.
+                if DEBUG:
+                    print >>sys.stderr,"DownloadImpl: network_get_state: Download not running"
+                ds = DownloadState(self,DLSTATUS_STOPPED,self.error,self.progressbeforestop,swarmcache=swarmcache)
+            else:
+                # RePEX: try getting the swarmcache from SingleDownload or use our last known swarmcache:
+                swarmcache = self.sd.get_swarmcache() or swarmcache
+                
+                (status,stats,logmsgs,coopdl_helpers,coopdl_coordinator) = self.sd.get_stats(getpeerlist)
+                ds = DownloadState(self,status,self.error,0.0,stats=stats,filepieceranges=self.filepieceranges,logmsgs=logmsgs,coopdl_helpers=coopdl_helpers,coopdl_coordinator=coopdl_coordinator,swarmcache=swarmcache)
+                # Remember progress so a later stop still reports how far we got.
+                self.progressbeforestop = ds.get_progress()
+            
+            if sessioncalling:
+                return ds
+
+            # Invoke the usercallback function via a new thread.
+            # After the callback is invoked, the return values will be passed to
+            # the returncallback for post-callback processing.
+            self.session.uch.perform_getstate_usercallback(usercallback,ds,self.sesscb_get_state_returncallback)
+        finally:
+            self.dllock.release()
+
+
+    def sesscb_get_state_returncallback(self,usercallback,when,newgetpeerlist):
+        """ Post-process the usercallback's return values: if when > 0,
+        reschedule another state snapshot after 'when' seconds, on the
+        general rawserver if the engine is stopped or on the download's
+        own rawserver if it is running.
+        Called by SessionCallbackThread """
+        self.dllock.acquire()
+        try:
+            if when > 0.0:
+                # Schedule next invocation, either on general or DL specific
+                # TODO: ensure this continues when dl is stopped. Should be OK.
+                network_get_state_lambda = lambda:self.network_get_state(usercallback,newgetpeerlist)
+                if self.sd is None:
+                    self.session.lm.rawserver.add_task(network_get_state_lambda,when)
+                else:
+                    self.sd.dlrawserver.add_task(network_get_state_lambda,when)
+        finally:
+            self.dllock.release()
+
+    #
+    # Download stop/resume
+    #
+    def stop(self):
+        """ Stop the Download, keeping its state and downloaded content.
+        Called by any thread """
+        self.stop_remove(removestate=False,removecontent=False)
+
+    def stop_remove(self,removestate=False,removecontent=False):
+        """ Stop the Download, optionally removing its persistent state
+        and/or downloaded content. The actual work is delegated to the
+        network thread (see network_stop).
+        @param removestate Remove the download's persistent state.
+        @param removecontent Remove the downloaded content from disk.
+        Called by any thread """
+        if DEBUG:
+            print >>sys.stderr,"DownloadImpl: stop_remove:",`self.tdef.get_name_as_unicode()`,"state",removestate,"content",removecontent
+        self.dllock.acquire()
+        try:
+            network_stop_lambda = lambda:self.network_stop(removestate,removecontent)
+            self.session.lm.rawserver.add_task(network_stop_lambda,0.0)
+        finally:
+            self.dllock.release()
+
+    def network_stop(self,removestate,removecontent):
+        """ Shut down the engine (if running), capture its resume data into
+        the persistent state, and optionally offload state/content removal
+        to another thread.
+        @return (infohash, pstate) tuple for checkpointing.
+        Called by network thread """
+        if DEBUG:
+            print >>sys.stderr,"DownloadImpl: network_stop",`self.tdef.get_name_as_unicode()`
+        self.dllock.acquire()
+        try:
+            infohash = self.tdef.get_infohash() 
+            pstate = self.network_get_persistent_state()
+            if self.sd is not None:
+                pstate['engineresumedata'] = self.sd.shutdown()
+                self.sd = None
+                self.pstate_for_restart = pstate
+            else:
+                # This method is also called at Session shutdown, where one may
+                # choose to checkpoint its Download. If the Download was 
+                # stopped before, pstate_for_restart contains its resumedata,
+                # and that should be written into the checkpoint.
+                #
+                if self.pstate_for_restart is not None:
+                    if DEBUG:
+                        print >>sys.stderr,"DownloadImpl: network_stop: Reusing previously saved engineresume data for checkpoint"
+                    # Don't copy full pstate_for_restart, as the torrent
+                    # may have gone from e.g. HASHCHECK at startup to STOPPED
+                    # now, at shutdown. In other words, it was never active
+                    # in this session and the pstate_for_restart still says 
+                    # HASHCHECK.
+                    pstate['engineresumedata'] = self.pstate_for_restart['engineresumedata'] 
+            
+            # Offload the removal of the content and other disk cleanup to another thread
+            if removestate:
+                contentdest = self.get_content_dest() 
+                self.session.uch.perform_removestate_callback(infohash,contentdest,removecontent)
+            
+            return (infohash,pstate)
+        finally:
+            self.dllock.release()
+
+        
+    def restart(self, initialdlstatus=None):
+        """ Restart the Download. Technically this action does not need to be
+        delegated to the network thread, but doing so removes some concurrency
+        problems. By scheduling both stops and restarts via the network task 
+        queue we ensure that they are executed in the order they were called.
+        
+        Note that when a Download is downloading or seeding, calling restart 
+        is a no-op. If a Download is performing some other task, it is left 
+        up to the internal running SingleDownload to determine what a restart 
+        means. Often it means SingleDownload will abort its current task and 
+        switch to downloading/seeding.
+        
+        @param initialdlstatus Propagated to the engine (RePEX).
+        Called by any thread """
+        # RePEX: added initialdlstatus parameter
+        # RePEX: TODO: Should we mention the initialdlstatus behaviour in the docstring?
+        if DEBUG:
+            print >>sys.stderr,"DownloadImpl: restart:",`self.tdef.get_name_as_unicode()`
+        self.dllock.acquire()
+        try:
+            network_restart_lambda = lambda:self.network_restart(initialdlstatus)
+            self.session.lm.rawserver.add_task(network_restart_lambda,0.0)
+        finally:
+            self.dllock.release()
+
+    def network_restart(self,initialdlstatus=None):
+        """ Recreate the engine wrapper from the last saved resume data, or
+        delegate the restart decision to the running SingleDownload.
+        Called by network thread """
+        # Must schedule the hash check via lm. In some cases we have batch stops
+        # and restarts, e.g. we have stop all-but-one & restart-all for VOD)
+        
+        # RePEX: added initialdlstatus parameter
+        if DEBUG:
+            print >>sys.stderr,"DownloadImpl: network_restart",`self.tdef.get_name_as_unicode()`
+        self.dllock.acquire()
+        try:
+            if self.sd is None:
+                self.error = None # assume fatal error is reproducible
+                # h4xor: restart using earlier loaded resumedata
+                # RePEX: propagate initialdlstatus
+                self.create_engine_wrapper(self.session.lm.network_engine_wrapper_created_callback,pstate=self.pstate_for_restart,lmvodeventcallback=self.session.lm.network_vod_event_callback,initialdlstatus=initialdlstatus)
+            else:
+                if DEBUG:
+                    print >>sys.stderr,"DownloadImpl: network_restart: SingleDownload already running",`self`
+                # RePEX: leave decision what to do to SingleDownload
+                self.sd.restart(initialdlstatus)
+
+            # No exception if already started, for convenience
+        finally:
+            self.dllock.release()
+    
+    
+    #
+    # Config parameters that only exists at runtime 
+    #
+    def set_max_desired_speed(self,direct,speed):
+        if DEBUG:
+            print >>sys.stderr,"Download: set_max_desired_speed",direct,speed
+        #if speed < 10:
+        #    print_stack()
+        
+        self.dllock.acquire()
+        if direct == UPLOAD:
+            self.dlruntimeconfig['max_desired_upload_rate'] = speed
+        else:
+            self.dlruntimeconfig['max_desired_download_rate'] = speed
+        self.dllock.release()
+
+    def get_max_desired_speed(self,direct):
+        """ Return the user-desired speed for this Download as set via
+        set_max_desired_speed (0 by default).
+        @param direct UPLOAD or DOWNLOAD. """
+        self.dllock.acquire()
+        try:
+            if direct == UPLOAD:
+                return self.dlruntimeconfig['max_desired_upload_rate']
+            else:
+                return self.dlruntimeconfig['max_desired_download_rate']
+        finally:
+            self.dllock.release()
+
+    def get_dest_files(self, exts=None):
+        """ We could get this from BT1Download.files (see BT1Download.saveAs()),
+        but that object is the domain of the network thread.
+        You can give a list of extensions to return. If None: return all dest_files
+        @param exts Optional list of extensions (without leading dot) to filter on.
+        @return List of (filename-in-torrent, path-on-disk) tuples; the first
+            element is None for a single-file torrent.
+        """
+
+        def get_ext(filename):
+            # Extension without the leading dot, '' if none.
+            (prefix,ext) = os.path.splitext(filename)
+            if ext != '' and ext[0] == '.':
+                ext = ext[1:]
+            return ext
+        
+        self.dllock.acquire()
+        try:
+            f2dlist = []
+            metainfo = self.tdef.get_metainfo() 
+            if 'files' not in metainfo['info']:
+                # single-file torrent
+                diskfn = self.get_content_dest()
+                f2dtuple = (None, diskfn)
+                ext = get_ext(diskfn)
+                if exts is None or ext in exts:
+                    f2dlist.append(f2dtuple)
+            else:
+                # multi-file torrent
+                if len(self.dlconfig['selected_files']) > 0:
+                    fnlist = self.dlconfig['selected_files']
+                else:
+                    fnlist = self.tdef.get_files(exts=exts)
+                    
+                for filename in fnlist:
+                    # Map the torrent-internal filename to its on-disk path.
+                    filerec = maketorrent.get_torrentfilerec_from_metainfo(filename,metainfo)
+                    savepath = maketorrent.torrentfilerec2savefilename(filerec)
+                    diskfn = maketorrent.savefilenames2finaldest(self.get_content_dest(),savepath)
+                    ext = get_ext(diskfn)
+                    if exts is None or ext in exts:
+                        f2dtuple = (filename,diskfn)
+                        f2dlist.append(f2dtuple)
+                
+            return f2dlist
+        finally:
+            self.dllock.release()
+
+    
+
+
+
+    #
+    # Persistence
+    #
+    def network_checkpoint(self):
+        """ Snapshot the persistent state of this Download without stopping
+        it; includes the engine's resume data when the engine is running.
+        @return (infohash, pstate) tuple.
+        Called by network thread """
+        self.dllock.acquire()
+        try:
+            pstate = self.network_get_persistent_state() 
+            if self.sd is None:
+                resdata = None
+            else:
+                resdata = self.sd.checkpoint()
+            pstate['engineresumedata'] = resdata
+            return (self.tdef.get_infohash(),pstate)
+        finally:
+            self.dllock.release()
+        
+
+    def network_get_persistent_state(self):
+        """ Build the picklable persistent-state dict for this Download:
+        version, metainfo, a sanitized copy of dlconfig, and the current
+        dlstate (status/progress/swarmcache). 'engineresumedata' is set to
+        None here; callers fill it in (see network_stop/network_checkpoint).
+        Assume dllock already held """
+        pstate = {}
+        pstate['version'] = PERSISTENTSTATE_CURRENTVERSION
+        pstate['metainfo'] = self.tdef.get_metainfo() # assumed immutable
+        dlconfig = copy.copy(self.dlconfig)
+        # Reset unpicklable params
+        dlconfig['vod_usercallback'] = None
+        dlconfig['mode'] = DLMODE_NORMAL # no callback, no VOD
+        pstate['dlconfig'] = dlconfig
+
+        pstate['dlstate'] = {}
+        #ds = self.network_get_state(None,False,sessioncalling=True)
+        ds = self.network_get_state(None,True,sessioncalling=True) # RePEX: get peerlist in case of running Download
+        pstate['dlstate']['status'] = ds.get_status()
+        pstate['dlstate']['progress'] = ds.get_progress()
+        pstate['dlstate']['swarmcache'] = ds.get_swarmcache() # RePEX: store SwarmCache
+        
+        if DEBUG:
+            print >>sys.stderr,"Download: netw_get_pers_state: status",dlstatus_strings[ds.get_status()],"progress",ds.get_progress()
+        
+        pstate['engineresumedata'] = None
+        return pstate
+
+    #
+    # Coop download
+    #
+    def get_coopdl_role_object(self,role):
+        """ Return the cooperative-download role object for 'role' from the
+        running SingleDownload, or None if the engine is not running.
+        Called by network thread """
+        role_object = None
+        self.dllock.acquire()
+        try:
+            if self.sd is not None:
+                role_object = self.sd.get_coopdl_role_object(role)
+        finally:
+            self.dllock.release()
+        return role_object
+
+
+
+    #
+    # Internal methods
+    #
+    def set_error(self,e):
+        """ Record e as this Download's last error; it is reported back via
+        DownloadState (see network_get_state). Thread-safe. """
+        self.dllock.acquire()
+        self.error = e
+        self.dllock.release()
+
+
+    def set_filepieceranges(self,metainfo):
+        """ Determine which file maps to which piece ranges for progress info
+        and store the result in self.filepieceranges.
+        @param metainfo The torrent metainfo dict. """
+        
+        if DEBUG:
+            print >>sys.stderr,"Download: set_filepieceranges:",self.dlconfig['selected_files']
+        (length,self.filepieceranges) = maketorrent.get_length_filepieceranges_from_metainfo(metainfo,self.dlconfig['selected_files'])
+
+    def get_content_dest(self):
+        """ Returns the file (single-file torrent) or dir (multi-file torrent)
+        to which the downloaded content is saved. """
+        # 'saveas' comes from dlconfig; correctedinfoname was sanitized in setup().
+        return os.path.join(self.dlconfig['saveas'],self.correctedinfoname)
+    
+    # ARNOCOMMENT: better if we removed this from Core, user knows which
+    # file he selected to play, let him figure out MIME type
+    def get_mimetype(self,file):
+        (prefix,ext) = os.path.splitext(file)
+        ext = ext.lower()
+        mimetype = None
+        if sys.platform == 'win32':
+            # TODO: Use Python's mailcap facility on Linux to find player
+            try:
+                from BaseLib.Video.utils import win32_retrieve_video_play_command
+
+                [mimetype,playcmd] = win32_retrieve_video_play_command(ext,file)
+                if DEBUG:
+                    print >>sys.stderr,"DownloadImpl: Win32 reg said MIME type is",mimetype
+            except:
+                if DEBUG:
+                    print_exc()
+                pass
+        else:
+            try:
+                import mimetypes
+                # homedir = os.path.expandvars('${HOME}')
+                homedir = get_home_dir()
+                homemapfile = os.path.join(homedir,'.mimetypes')
+                mapfiles = [homemapfile] + mimetypes.knownfiles
+                mimetypes.init(mapfiles)
+                (mimetype,encoding) = mimetypes.guess_type(file)
+                
+                if DEBUG:
+                    print >>sys.stderr,"DownloadImpl: /etc/mimetypes+ said MIME type is",mimetype,file
+            except:
+                print_exc()
+
+        # if auto detect fails
+        if mimetype is None:
+            if ext == '.avi':
+                # Arno, 2010-01-08: Hmmm... video/avi is not official registered at IANA
+                mimetype = 'video/avi'
+            elif ext == '.mpegts' or ext == '.ts':
+                mimetype = 'video/mp2t'
+            elif ext == '.mkv':
+                mimetype = 'video/x-matroska'
+            elif ext in ('.ogg', '.ogv'):
+                mimetype = 'video/ogg'
+            elif ext in ('.oga'):
+                mimetype = 'audio/ogg'
+            elif ext == '.webm':
+                mimetype = 'video/webm'
+            else:
+                mimetype = 'video/mpeg'
+        return mimetype
+    
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/DownloadRuntimeConfig.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/DownloadRuntimeConfig.py
new file mode 100644 (file)
index 0000000..5472bd1
--- /dev/null
@@ -0,0 +1,586 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.DownloadConfig import DownloadConfigInterface
+from BaseLib.Core.exceptions import OperationNotPossibleAtRuntimeException
+
+DEBUG = False
+
+# 10/02/10 Boudewijn: pylint points out that member variables used in
+# DownloadRuntimeConfig do not exist.  This is because they are set in
+# BaseLib.Core.Download which is a subclass of DownloadRuntimeConfig.
+#
+# We disable this error
+# pylint: disable-msg=E1101
+
+class DownloadRuntimeConfig(DownloadConfigInterface):
+    """
+    Implements the BaseLib.Core.DownloadConfig.DownloadConfigInterface
+    
+    Use these to change the download config at runtime.
+    
+    DownloadConfigInterface: All methods called by any thread
+    """
+    def set_max_speed(self,direct,speed):
+        """ Set the maximum transfer speed for this download.
+        @param direct: direction selector passed through to the engine and
+        the stored config (presumably an UPLOAD/DOWNLOAD constant -- confirm
+        against DownloadConfigInterface).
+        @param speed: the new speed limit.
+        Called by any thread; the engine update is scheduled on the network
+        thread via the rawserver task queue.
+        """
+        if DEBUG:
+            print >>sys.stderr,"Download: set_max_speed",`self.get_def().get_metainfo()['info']['name']`,direct,speed
+        #print_stack()
+        
+        self.dllock.acquire()
+        try:
+            # Don't need to throw an exception when stopped, we then just save the new value and
+            # use it at (re)startup.
+            if self.sd is not None:
+                # The lambda re-checks self.sd: the download may be stopped
+                # between scheduling here and execution on the network thread.
+                set_max_speed_lambda = lambda:self.sd is not None and self.sd.set_max_speed(direct,speed,None)
+                self.session.lm.rawserver.add_task(set_max_speed_lambda,0)
+                
+            # At the moment we can't catch any errors in the engine that this 
+            # causes, so just assume it always works.
+            DownloadConfigInterface.set_max_speed(self,direct,speed)
+        finally:
+            self.dllock.release()
+
+    def get_max_speed(self,direct):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_speed(self,direct)
+        finally:
+            self.dllock.release()
+
+    def set_dest_dir(self,path):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def set_video_event_callback(self,usercallback,dlmode=DLMODE_VOD):
+        """ Note: this currently works only when the download is stopped. """
+        self.dllock.acquire()
+        try:
+            DownloadConfigInterface.set_video_event_callback(self,usercallback,dlmode=dlmode)
+        finally:
+            self.dllock.release()
+
+    def set_video_events(self,events):
+        """ Note: this currently works only when the download is stopped. """
+        self.dllock.acquire()
+        try:
+            DownloadConfigInterface.set_video_events(self,events)
+        finally:
+            self.dllock.release()
+
+    def set_mode(self,mode):
+        """ Note: this currently works only when the download is stopped. """
+        self.dllock.acquire()
+        try:
+            DownloadConfigInterface.set_mode(self,mode)
+        finally:
+            self.dllock.release()
+
+    def get_mode(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_mode(self)
+        finally:
+            self.dllock.release()
+
+    def get_video_event_callback(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_video_event_callback(self)
+        finally:
+            self.dllock.release()
+
+    def get_video_events(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_video_events(self)
+        finally:
+            self.dllock.release()
+
+    def set_selected_files(self,files):
+        """ Note: this currently works only when the download is stopped. """
+        self.dllock.acquire()
+        try:
+            DownloadConfigInterface.set_selected_files(self,files)
+            self.set_filepieceranges(self.tdef.get_metainfo())
+        finally:
+            self.dllock.release()
+
+
+    def get_selected_files(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_selected_files(self)
+        finally:
+            self.dllock.release()
+
+    def set_max_conns_to_initiate(self,nconns):
+        self.dllock.acquire()
+        try:
+            if self.sd is not None:
+                set_max_conns2init_lambda = lambda:self.sd is not None and self.sd.set_max_conns_to_initiate(nconns,None)
+                self.session.lm.rawserver.add_task(set_max_conns2init_lambda,0.0)
+            DownloadConfigInterface.set_max_conns_to_initiate(self,nconns)
+        finally:
+            self.dllock.release()
+
+    def get_max_conns_to_initiate(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_conns_to_initiate(self)
+        finally:
+            self.dllock.release()
+
+    def set_max_conns(self,nconns):
+        self.dllock.acquire()
+        try:
+            if self.sd is not None:
+                set_max_conns_lambda = lambda:self.sd is not None and self.sd.set_max_conns(nconns,None)
+                self.session.lm.rawserver.add_task(set_max_conns_lambda,0.0)
+            DownloadConfigInterface.set_max_conns(self,nconns)
+        finally:
+            self.dllock.release()
+    
+    def get_max_conns(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_conns(self)
+        finally:
+            self.dllock.release()
+
+    #
+    # Advanced download parameters
+    #
+    def set_max_uploads(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_max_uploads(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_uploads(self)
+        finally:
+            self.dllock.release()
+
+    def set_keepalive_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_keepalive_interval(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_keepalive_interval(self)
+        finally:
+            self.dllock.release()
+
+    def set_download_slice_size(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_download_slice_size(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_download_slice_size(self)
+        finally:
+            self.dllock.release()
+
+    def set_upload_unit_size(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_upload_unit_size(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_upload_unit_size(self)
+        finally:
+            self.dllock.release()
+
+    def set_request_backlog(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_request_backlog(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_request_backlog(self)
+        finally:
+            self.dllock.release()
+
+    def set_max_message_length(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_max_message_length(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_message_length(self)
+        finally:
+            self.dllock.release()
+
+    def set_max_slice_length(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_max_slice_length(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_slice_length(self)
+        finally:
+            self.dllock.release()
+
+    def set_max_rate_period(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_max_rate_period(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_rate_period(self)
+        finally:
+            self.dllock.release()
+
+    def set_upload_rate_fudge(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_upload_rate_fudge(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_upload_rate_fudge(self)
+        finally:
+            self.dllock.release()
+
+    def set_tcp_ack_fudge(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tcp_ack_fudge(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_tcp_ack_fudge(self)
+        finally:
+            self.dllock.release()
+
+    def set_rerequest_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_rerequest_interval(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_rerequest_interval(self)
+        finally:
+            self.dllock.release()
+
+    def set_min_peers(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_min_peers(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_min_peers(self)
+        finally:
+            self.dllock.release()
+
+    def set_http_timeout(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_http_timeout(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_http_timeout(self)
+        finally:
+            self.dllock.release()
+
+    def set_check_hashes(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_check_hashes(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_check_hashes(self)
+        finally:
+            self.dllock.release()
+
+    def set_alloc_type(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_alloc_type(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_alloc_type(self)
+        finally:
+            self.dllock.release()
+
+    def set_alloc_rate(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_alloc_rate(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_alloc_rate(self)
+        finally:
+            self.dllock.release()
+
+    def set_buffer_reads(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_buffer_reads(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_buffer_reads(self)
+        finally:
+            self.dllock.release()
+
+    def set_write_buffer_size(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_write_buffer_size(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_write_buffer_size(self)
+        finally:
+            self.dllock.release()
+
+    def set_breakup_seed_bitfield(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_breakup_seed_bitfield(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_breakup_seed_bitfield(self)
+        finally:
+            self.dllock.release()
+
+    def set_snub_time(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_snub_time(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_snub_time(self)
+        finally:
+            self.dllock.release()
+
+    def set_rarest_first_cutoff(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_rarest_first_cutoff(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_rarest_first_cutoff(self)
+        finally:
+            self.dllock.release()
+
+    def set_rarest_first_priority_cutoff(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_rarest_first_priority_cutoff(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_rarest_first_priority_cutoff(self)
+        finally:
+            self.dllock.release()
+
+    def set_min_uploads(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_min_uploads(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_min_uploads(self)
+        finally:
+            self.dllock.release()
+
+    def set_max_files_open(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_max_files_open(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_max_files_open(self)
+        finally:
+            self.dllock.release()
+
+    def set_round_robin_period(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_round_robin_period(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_round_robin_period(self)
+        finally:
+            self.dllock.release()
+
+    def set_super_seeder(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_super_seeder(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_super_seeder(self)
+        finally:
+            self.dllock.release()
+
+    def set_security(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_security(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_security(self)
+        finally:
+            self.dllock.release()
+
+    def set_auto_kick(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_auto_kick(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_auto_kick(self)
+        finally:
+            self.dllock.release()
+
+    def set_double_check_writes(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_double_check_writes(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_double_check_writes(self)
+        finally:
+            self.dllock.release()
+
+    def set_triple_check_writes(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_triple_check_writes(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_triple_check_writes(self)
+        finally:
+            self.dllock.release()
+
+    def set_lock_files(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_lock_files(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_lock_files(self)
+        finally:
+            self.dllock.release()
+
+    def set_lock_while_reading(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_lock_while_reading(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_lock_while_reading(self)
+        finally:
+            self.dllock.release()
+
+    def set_auto_flush(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_auto_flush(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_auto_flush(self)
+        finally:
+            self.dllock.release()
+
+    def set_exclude_ips(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_exclude_ips(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_exclude_ips(self)
+        finally:
+            self.dllock.release()
+
+    def set_ut_pex_max_addrs_from_peer(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_ut_pex_max_addrs_from_peer(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_ut_pex_max_addrs_from_peer(self)
+        finally:
+            self.dllock.release()
+
+    def set_poa(self, poa):
+        """ Store a proof-of-access (PoA) credential in the download config.
+        @param poa: the PoA value, forwarded unchanged to
+        DownloadConfigInterface.set_poa.
+        """
+        self.dllock.acquire()
+        try:
+            DownloadConfigInterface.set_poa(self, poa)
+        finally:
+            self.dllock.release()
+            
+
+    def get_poa(self, poa):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_poa(self)
+        finally:
+            self.dllock.release()
+    def set_same_nat_try_internal(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_same_nat_try_internal(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_same_nat_try_internal(self)
+        finally:
+            self.dllock.release()
+
+
+    def set_unchoke_bias_for_internal(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+    
+    def get_unchoke_bias_for_internal(self):
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_unchoke_bias_for_internal(self)
+        finally:
+            self.dllock.release()
+    
+    #
+    # ProxyService_
+    #
+    def set_proxy_mode(self,value):
+        """ Set the proxy mode for the current download.
+        @param value: the proxyservice mode: PROXY_MODE_OFF, PROXY_MODE_PRIVATE or PROXY_MODE_SPEED
+        """
+        self.dllock.acquire()
+        try:
+            DownloadConfigInterface.set_proxy_mode(self, value)
+        finally:
+            self.dllock.release()
+
+    def get_proxy_mode(self):
+        """ Returns the proxymode of the client.
+        @return: one of the possible three values: PROXY_MODE_OFF, PROXY_MODE_PRIVATE, PROXY_MODE_SPEED
+        """
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_proxy_mode(self)
+        finally:
+            self.dllock.release()
+    
+    def set_no_helpers(self,value):
+        """ Set the maximum number of helpers used for a download.
+        @param value: a positive integer number
+        """
+        self.dllock.acquire()
+        try:
+            # NOTE(review): unlike every other setter in this class this one
+            # returns the base-class result -- likely a copy/paste artifact
+            # from a getter; confirm callers do not rely on the return value.
+            return DownloadConfigInterface.set_no_helpers(self, value)
+        finally:
+            self.dllock.release()
+
+    def get_no_helpers(self):
+        """ Returns the maximum number of helpers used for a download. 
+        @return: a positive integer number
+        """
+        self.dllock.acquire()
+        try:
+            return DownloadConfigInterface.get_no_helpers(self)
+        finally:
+            self.dllock.release()
+    #
+    # _ProxyService
+    #
+       
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/LaunchManyCore.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/LaunchManyCore.py
new file mode 100644 (file)
index 0000000..928ff57
--- /dev/null
@@ -0,0 +1,907 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+
+import sys
+import os
+import pickle
+import socket
+import binascii
+import time as timemod
+from threading import Event,Thread,enumerate
+from traceback import print_exc, print_stack
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.BitTornado.RawServer import RawServer
+from BaseLib.Core.BitTornado.ServerPortHandler import MultiHandler
+from BaseLib.Core.BitTornado.BT1.track import Tracker
+from BaseLib.Core.BitTornado.HTTPHandler import HTTPHandler,DummyHTTPHandler
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.Download import Download
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.NATFirewall.guessip import get_my_wan_ip
+from BaseLib.Core.NATFirewall.UPnPThread import UPnPThread
+from BaseLib.Core.NATFirewall.UDPPuncture import UDPHandler
+from BaseLib.Core.DecentralizedTracking import mainlineDHT
+from BaseLib.Core.osutils import get_readable_torrent_name
+from BaseLib.Core.DecentralizedTracking.MagnetLink.MagnetLink import MagnetHandler
+
+SPECIAL_VALUE=481
+
+DEBUG = False
+PROFILE = False
+
+# Internal classes
+#
+
+class TriblerLaunchMany(Thread):
+    
+    def __init__(self):
+        """ Called only once (unless we have multiple Sessions) by MainThread """
+        Thread.__init__(self)
+        # Daemon thread: must not keep the process alive at interpreter exit.
+        self.setDaemon(True)
+        # Prefix the auto-generated name so the thread is identifiable
+        # (e.g. "NetworkThread-1") in stack dumps and logs.
+        self.setName("Network"+self.getName())
+        
+    def register(self,session,sesslock):
+        self.session = session
+        self.sesslock = sesslock
+        
+        self.downloads = {} 
+        config = session.sessconfig # Should be safe at startup
+
+        self.locally_guessed_ext_ip = self.guess_ext_ip_from_local_info()
+        self.upnp_ext_ip = None
+        self.dialback_ext_ip = None
+        self.yourip_ext_ip = None
+        self.udppuncture_handler = None
+
+        # Orig
+        self.sessdoneflag = Event()
+        
+        # Following two attributes set/get by network thread ONLY
+        self.hashcheck_queue = []
+        self.sdownloadtohashcheck = None
+        
+        # Following 2 attributes set/get by UPnPThread
+        self.upnp_thread = None
+        self.upnp_type = config['upnp_nat_access']
+        self.nat_detect = config['nat_detect']
+
+        self.rawserver = RawServer(self.sessdoneflag,
+                                   config['timeout_check_interval'],
+                                   config['timeout'],
+                                   ipv6_enable = config['ipv6_enabled'],
+                                   failfunc = self.rawserver_fatalerrorfunc,
+                                   errorfunc = self.rawserver_nonfatalerrorfunc)
+        self.rawserver.add_task(self.rawserver_keepalive,1)
+
+        self.listen_port = self.rawserver.find_and_bind(0, 
+                    config['minport'], config['maxport'], config['bind'], 
+                    reuse = True,
+                    ipv6_socket_style = config['ipv6_binds_v4'], 
+                    randomizer = config['random_port'])
+        
+        if DEBUG:
+            print >>sys.stderr,"tlm: Got listen port", self.listen_port
+        
+        self.multihandler = MultiHandler(self.rawserver, self.sessdoneflag)
+        self.shutdownstarttime = None
+         
+        # do_cache -> do_overlay -> (do_buddycast, do_download_help)
+        if config['megacache']:
+            import BaseLib.Core.CacheDB.cachedb as cachedb
+            from BaseLib.Core.CacheDB.SqliteCacheDBHandler import MyDBHandler, PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler, PreferenceDBHandler, SuperPeerDBHandler, FriendDBHandler, BarterCastDBHandler, VoteCastDBHandler, SearchDBHandler,TermDBHandler, CrawlerDBHandler, ChannelCastDBHandler, SimilarityDBHandler, PopularityDBHandler      
+            from BaseLib.Core.CacheDB.SqliteSeedingStatsCacheDB import SeedingStatsDBHandler, SeedingStatsSettingsDBHandler
+            from BaseLib.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler
+            from BaseLib.Category.Category import Category
+            
+           # 13-04-2010, Andrea: rich metadata (subtitle) db
+            from BaseLib.Core.CacheDB.MetadataDBHandler import MetadataDBHandler
+            
+            # init cache db
+            if config['nickname'] == '__default_name__':
+                config['nickname']  = socket.gethostname()
+                
+            if DEBUG:
+                print >>sys.stderr,'tlm: Reading Session state from',config['state_dir']
+                
+            cachedb.init(config, self.rawserver_fatalerrorfunc)
+            
+            self.my_db          = MyDBHandler.getInstance()
+            self.peer_db        = PeerDBHandler.getInstance()
+            # Register observer to update connection opened/closed to peer_db_handler
+            self.peer_db.registerConnectionUpdater(self.session)
+            self.torrent_db     = TorrentDBHandler.getInstance()
+            torrent_collecting_dir = os.path.abspath(config['torrent_collecting_dir'])
+            self.torrent_db.register(Category.getInstance(),torrent_collecting_dir)
+            self.mypref_db      = MyPreferenceDBHandler.getInstance()
+            self.pref_db        = PreferenceDBHandler.getInstance()
+            self.superpeer_db   = SuperPeerDBHandler.getInstance()
+            self.superpeer_db.loadSuperPeers(config)
+            self.friend_db      = FriendDBHandler.getInstance()
+            self.bartercast_db  = BarterCastDBHandler.getInstance()
+            self.bartercast_db.registerSession(self.session)
+            self.votecast_db = VoteCastDBHandler.getInstance()
+            self.votecast_db.registerSession(self.session)
+            self.channelcast_db = ChannelCastDBHandler.getInstance()
+            self.channelcast_db.registerSession(self.session)
+            self.search_db      = SearchDBHandler.getInstance()
+            self.term_db        = TermDBHandler.getInstance()
+            self.simi_db        = SimilarityDBHandler.getInstance()
+            self.pops_db = PopularityDBHandler.getInstance()
+            
+            # 13-04-2010, Andrea: rich metadata (subtitle) db
+            self.richmetadataDbHandler = MetadataDBHandler.getInstance()
+
+            # Crawling 
+            if config['crawler']:
+                # ARNOCOMMENT, 2009-10-02: Should be moved out of core, used in Main client only.
+                # initialize SeedingStats database
+                cachedb.init_seeding_stats(config, self.rawserver_fatalerrorfunc)
+    
+                # initialize VideoPlayback statistics database
+                cachedb.init_videoplayback_stats(config, self.rawserver_fatalerrorfunc)
+                
+                self.crawler_db     = CrawlerDBHandler.getInstance()
+                self.crawler_db.loadCrawlers(config)
+                self.seedingstats_db = SeedingStatsDBHandler.getInstance()
+                self.seedingstatssettings_db = SeedingStatsSettingsDBHandler.getInstance()
+                
+                if config['socnet']:
+                    # initialize Friendship statistics database
+                    cachedb.init_friendship_stats(config, self.rawserver_fatalerrorfunc)
+
+                    self.friendship_statistics_db = FriendshipStatisticsDBHandler().getInstance()
+                else:
+                    self.friendship_statistics_db = None
+            else:
+                self.crawler_db = None 
+                self.seedingstats_db = None
+                self.friendship_statistics_db = None
+
+        else:
+            config['overlay'] = 0    # turn overlay off
+            config['torrent_checking'] = 0
+            self.my_db          = None
+            self.peer_db        = None
+            self.torrent_db     = None
+            self.mypref_db      = None
+            self.pref_db        = None
+            self.superpeer_db   = None
+            self.crawler_db     = None
+            self.seedingstats_db = None
+            self.seedingstatssettings_db = None
+            self.friendship_statistics_db = None
+            self.friend_db      = None
+            self.bartercast_db  = None
+            self.votecast_db = None
+            self.channelcast_db = None
+            self.mm = None
+            # 13-04-2010, Andrea: rich metadata (subtitle) db
+            self.richmetadataDbHandler = None
+
+        if config['overlay']:
+            from BaseLib.Core.Overlay.SecureOverlay import SecureOverlay
+            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+            from BaseLib.Core.Overlay.OverlayApps import OverlayApps
+            from BaseLib.Core.RequestPolicy import FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy
+            
+            self.secure_overlay = SecureOverlay.getInstance()
+            self.secure_overlay.register(self, config['overlay_max_message_length'])
+            
+            # Set policy for which peer requests (dl_helper, rquery) to answer and which to ignore
+                        
+            self.overlay_apps = OverlayApps.getInstance()
+            # Default policy, override with Session.set_overlay_request_policy()
+            policy = FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy(self.session)
+            
+            # For the new DB layer we need to run all overlay apps in a
+            # separate thread instead of the NetworkThread as before.
+            
+            self.overlay_bridge = OverlayThreadingBridge.getInstance()
+            
+            self.overlay_bridge.register_bridge(self.secure_overlay,self.overlay_apps)
+            
+            self.overlay_apps.register(self.overlay_bridge,self.session,self,config,policy)
+            # It's important we don't start listening to the network until
+            # all higher protocol-handling layers are properly configured.
+            self.overlay_bridge.start_listening()
+
+            if config['multicast_local_peer_discovery']:
+               self.setup_multicast_discovery()
+        
+        else:
+            self.secure_overlay = None
+            self.overlay_apps = None
+            config['buddycast'] = 0
+            config['download_help'] = 0
+            config['socnet'] = 0
+            config['rquery'] = 0
+
+            try:
+                # Minimal to allow yourip external-IP address detection
+                from BaseLib.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
+                some_dialback_handler = DialbackMsgHandler.getInstance()
+                some_dialback_handler.register_yourip(self)
+            except:
+                if DEBUG:
+                    print_exc()
+                pass
+            
+
+        if config['megacache'] or config['overlay']:
+            # Arno: THINK! whoever added this should at least have made the
+            # config files configurable via SessionConfigInterface.
+            #
+            # TODO: see if we can move this out of the core. We could make the
+            # category a parameter to TorrentDB.addExternalTorrent(), but that
+            # will not work directly for MetadataHandler, which is part of the
+            # core. 
+             
+            # Some author: First Category instantiation requires install_dir, so do it now
+            from BaseLib.Category.Category import Category
+
+            Category.getInstance(config['install_dir'])
+
+        # Internal tracker
+        self.internaltracker = None
+        if config['internaltracker']:
+            self.internaltracker = Tracker(config, self.rawserver)
+            self.httphandler = HTTPHandler(self.internaltracker.get, config['tracker_min_time_between_log_flushes'])
+        else:
+            self.httphandler = DummyHTTPHandler()
+        self.multihandler.set_httphandler(self.httphandler)
+
+
+        if config['mainline_dht']:
+            #import logging
+            # Arno,The equivalent of DEBUG=False for kadtracker
+            #logging.disable(logging.CRITICAL)
+            # New: see DecentralizedTracking/kadtracker/logging_conf.py
+            
+            # Start up KTH mainline DHT
+            #TODO: Can I get the local IP number?
+            mainlineDHT.init(('127.0.0.1', self.listen_port), config['state_dir'])
+               
+        
+        # add task for tracker checking
+        if config['torrent_checking']:
+            
+            if config['mainline_dht']:
+                # Create torrent-liveliness checker based on DHT
+                from BaseLib.Core.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker
+                
+                c = mainlineDHTChecker.getInstance()
+                c.register(mainlineDHT.dht)
+            
+            self.torrent_checking_period = config['torrent_checking_period']
+            #self.torrent_checking_period = 5
+            self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period)
+
+        # Gertjan's UDP code
+        # OFF in P2P-Next
+        if False and config['overlay'] and config['crawler']:
+            # Gertjan's UDP code
+            self.udppuncture_handler = UDPHandler(self.rawserver, config['overlay'] and config['crawler'])
+
+        if config["magnetlink"]:
+            # initialise the first instance
+            MagnetHandler.get_instance(self.rawserver)
+
+
    def add(self,tdef,dscfg,pstate=None,initialdlstatus=None):
        """ Add a new Download to this session and start it.

        Called by any thread.

        @param tdef: finalized TorrentDef to download.
        @param dscfg: DownloadStartupConfig with per-download settings.
        @param pstate: previously saved persistent-state dict, or None to
            attempt loading a checkpoint from disk (skipped for live torrents).
        @param initialdlstatus: optional initial download status.
        @return: the new Download object.
        @raise ValueError: if tdef is not finalized.
        @raise DuplicateDownloadException: if the infohash is already known.
        """
        self.sesslock.acquire()
        try:
            if not tdef.is_finalized():
                raise ValueError("TorrentDef not finalized")
            
            infohash = tdef.get_infohash()
            
            # Check if running or saved on disk
            if infohash in self.downloads:
                raise DuplicateDownloadException()

            d = Download(self.session,tdef)            
            
            if pstate is None and not tdef.get_live(): # not already resuming
                pstate = self.load_download_pstate_noexc(infohash)
                if pstate is not None:
                    if DEBUG:
                        print >>sys.stderr,"tlm: add: pstate is",dlstatus_strings[pstate['dlstate']['status']],pstate['dlstate']['progress']
            
            # Store in list of Downloads, always. 
            self.downloads[infohash] = d
            d.setup(dscfg,pstate,initialdlstatus,self.network_engine_wrapper_created_callback,self.network_vod_event_callback)

            # Megacache enabled: also record the torrent in the local DB and
            # copy the .torrent file into the common collecting directory.
            if self.torrent_db != None and self.mypref_db != None:
                raw_filename = tdef.get_name_as_unicode()
                save_name = get_readable_torrent_name(infohash, raw_filename)
                #print >> sys.stderr, 'tlm: add', save_name, self.session.sessconfig
                torrent_dir = self.session.sessconfig['torrent_collecting_dir']
                save_path = os.path.join(torrent_dir, save_name)
                if not os.path.exists(save_path):    # save the torrent to the common torrent dir
                    tdef.save(save_path)
                    
                # hack, make sure these torrents are always good so they show up
                # in TorrentDBHandler.getTorrents()
                extra_info = {'status':'good'}

                # 03/02/10 Boudewijn: addExternalTorrent now requires
                # a torrentdef, consequently we provide the filename
                # through the extra_info dictionary
                extra_info['filename'] = save_name

                self.torrent_db.addExternalTorrent(tdef, source='',extra_info=extra_info)
                dest_path = d.get_dest_dir()    
                # TODO: if user renamed the dest_path for single-file-torrent
                data = {'destination_path':dest_path}
                self.mypref_db.addMyPreference(infohash, data)
                # BuddyCast is now notified of this new Download in our
                # preferences via the Notifier mechanism. See BC.sesscb_ntfy_myprefs()
            return d
        finally:
            self.sesslock.release()
+
+
    def network_engine_wrapper_created_callback(self,d,sd,exc,pstate):
        """ Invoked once the network-engine wrapper for Download d exists.

        Called by network thread.

        @param d: the Download being started.
        @param sd: the SingleDownload created for it, or None on failure.
        @param exc: exception raised during creation, or None on success.
        @param pstate: persistent state used for resuming, or None for a
            fresh start (in which case an initial checkpoint is written).
        """
        if exc is None:
            # Always need to call the hashcheck func, even if we're restarting
            # a download that was seeding, this is just how the BT engine works.
            # We've provided the BT engine with its resumedata, so this should
            # be fast.
            #
            try:
                if sd is not None:
                    self.queue_for_hashcheck(sd)
                    if pstate is None and not d.get_def().get_live():
                        # Checkpoint at startup
                        (infohash,pstate) = d.network_checkpoint()
                        self.save_download_pstate(infohash,pstate)
                else:
                    raise TriblerException("tlm: network_engine_wrapper_created_callback: sd is None!")
            except Exception,e:
                # There was a bug in queue_for_hashcheck that is now fixed.
                # Leave this in place to catch unexpected errors.
                print_exc()
                d.set_error(e)
+                
+        
+    def remove(self,d,removecontent=False):
+        """ Called by any thread """
+        self.sesslock.acquire()
+        try:
+            d.stop_remove(removestate=True,removecontent=removecontent)
+            infohash = d.get_def().get_infohash()
+            del self.downloads[infohash]
+        finally:
+            self.sesslock.release()
+
+    def get_downloads(self):
+        """ Called by any thread """
+        self.sesslock.acquire()
+        try:
+            return self.downloads.values() #copy, is mutable
+        finally:
+            self.sesslock.release()
+    
+    def download_exists(self,infohash):
+        self.sesslock.acquire()
+        try:
+            return infohash in self.downloads
+        finally:
+            self.sesslock.release()
+    
+    
    def rawserver_fatalerrorfunc(self,e):
        """ Log a fatal RawServer error with its traceback.

        Called by network thread.
        @param e: the exception RawServer reported.
        """
        if DEBUG:
            print >>sys.stderr,"tlm: RawServer fatal error func called",e
        print_exc()
+
    def rawserver_nonfatalerrorfunc(self,e):
        """ Log a non-fatal RawServer error; execution continues.

        Called by network thread.
        @param e: the exception RawServer reported.
        """
        if DEBUG:
            print >>sys.stderr,"tlm: RawServer non fatal error func called",e
        print_exc()
        # Could log this somewhere, or phase it out
+
    def _run(self):
        """ Main loop of the network thread: start UPnP and multicast
        discovery, then serve rawserver events until shutdown. On exit the
        internal tracker state is saved and rawserver is torn down.

        Called only once by network thread """

        try:
            try:
                self.start_upnp()
                self.start_multicast()
                # Blocks until the session's done-flag is set.
                self.multihandler.listen_forever()
            except:
                print_exc()    
        finally:
            if self.internaltracker is not None:
                self.internaltracker.save_state()
            
            self.stop_upnp()
            self.rawserver.shutdown()
+
+    def rawserver_keepalive(self):
+        """ Hack to prevent rawserver sleeping in select() for a long time, not
+        processing any tasks on its queue at startup time 
+        
+        Called by network thread """
+        self.rawserver.add_task(self.rawserver_keepalive,1)
+
+    #
+    # TODO: called by TorrentMaker when new torrent added to itracker dir
+    # Make it such that when Session.add_torrent() is called and the internal
+    # tracker is used that we write a metainfo to itracker dir and call this.
+    #
+    def tracker_rescan_dir(self):
+        if self.internaltracker is not None:
+            self.internaltracker.parse_allowed(source='Session')
+
+    #
+    # Torrent hash checking
+    #
+    def queue_for_hashcheck(self,sd):
+        """ Schedule a SingleDownload for integrity check of on-disk data
+        
+        Called by network thread """
+        if hash:
+            self.hashcheck_queue.append(sd)
+            # Check smallest torrents first
+            self.hashcheck_queue.sort(singledownload_size_cmp)
+            
+        if not self.sdownloadtohashcheck:
+            self.dequeue_and_start_hashcheck()
+
    def dequeue_and_start_hashcheck(self):
        """ Start integrity check for first SingleDownload in queue.

        Assumes the queue is non-empty. hashcheck_done() is passed as the
        completion callback, so the next queued check starts automatically.

        Called by network thread """
        self.sdownloadtohashcheck = self.hashcheck_queue.pop(0)
        self.sdownloadtohashcheck.perform_hashcheck(self.hashcheck_done)
+
    def hashcheck_done(self,success=True):
        """ Integrity check for first SingleDownload in queue done.

        @param success: whether the on-disk data passed the check; only then
            is the SingleDownload told its hashcheck completed.

        Called by network thread """
        if DEBUG:
            print >>sys.stderr,"tlm: hashcheck_done, success",success
        if success:
            self.sdownloadtohashcheck.hashcheck_done()
        # Start the next queued check, or mark that nothing is being checked.
        if self.hashcheck_queue:
            self.dequeue_and_start_hashcheck()
        else:
            self.sdownloadtohashcheck = None
+
+    #
+    # State retrieval
+    #
+    def set_download_states_callback(self,usercallback,getpeerlist,when=0.0):
+        """ Called by any thread """
+        network_set_download_states_callback_lambda = lambda:self.network_set_download_states_callback(usercallback,getpeerlist)
+        self.rawserver.add_task(network_set_download_states_callback_lambda,when)
+        
    def network_set_download_states_callback(self,usercallback,getpeerlist):
        """ Collect a DownloadState for every Download and deliver the list
        to usercallback on a session pool thread.

        Called by network thread """
        self.sesslock.acquire()
        try:
            # Even if the list of Downloads changes in the mean time this is
            # no problem. For removals, dllist will still hold a pointer to the
            # Download, and additions are no problem (just won't be included 
            # in list of states returned via callback.
            #
            dllist = self.downloads.values()
        finally:
            self.sesslock.release()

        dslist = []
        for d in dllist:
            ds = d.network_get_state(None,getpeerlist,sessioncalling=True)
            dslist.append(ds)
            
        # Invoke the usercallback function via a new thread.
        # After the callback is invoked, the return values will be passed to
        # the returncallback for post-callback processing.
        self.session.uch.perform_getstate_usercallback(usercallback,dslist,self.sesscb_set_download_states_returncallback)
+        
+    def sesscb_set_download_states_returncallback(self,usercallback,when,newgetpeerlist):
+        """ Called by SessionCallbackThread """
+        if when > 0.0:
+            # reschedule
+            self.set_download_states_callback(usercallback,newgetpeerlist,when=when)
+
+    #
+    # Persistence methods
+    #
+    def load_checkpoint(self,initialdlstatus=None):
+        """ Called by any thread """
+        self.sesslock.acquire()
+        try:
+            dir = self.session.get_downloads_pstate_dir()
+            filelist = os.listdir(dir)
+            for basename in filelist:
+                # Make this go on when a torrent fails to start
+                filename = os.path.join(dir,basename)
+                self.resume_download(filename,initialdlstatus)
+        finally:
+            self.sesslock.release()
+
+
    def load_download_pstate_noexc(self,infohash):
        """ Load the saved persistent state for infohash, returning None
        instead of raising when no (readable) checkpoint exists.

        Called by any thread, assume sesslock already held """
        try:
            dir = self.session.get_downloads_pstate_dir()
            # Checkpoints are stored as <hex infohash>.pickle files.
            basename = binascii.hexlify(infohash)+'.pickle'
            filename = os.path.join(dir,basename)
            return self.load_download_pstate(filename)
        except Exception,e:
            # TODO: remove saved checkpoint?
            #self.rawserver_nonfatalerrorfunc(e)
            return None
+        
    def resume_download(self,filename,initialdlstatus=None):
        """ Restart a single Download from a checkpoint file.

        Errors are reported via rawserver_nonfatalerrorfunc so one corrupt
        checkpoint does not abort the whole resume pass.

        @param filename: path to the pickled pstate file.
        @param initialdlstatus: optional initial status for the download.
        """
        try:
            # TODO: filter for file not found explicitly?
            pstate = self.load_download_pstate(filename)
            
            if DEBUG:
                print >>sys.stderr,"tlm: load_checkpoint: pstate is",dlstatus_strings[pstate['dlstate']['status']],pstate['dlstate']['progress']
                if pstate['engineresumedata'] is None:
                    print >>sys.stderr,"tlm: load_checkpoint: resumedata None"
                else:
                    print >>sys.stderr,"tlm: load_checkpoint: resumedata len",len(pstate['engineresumedata'])
            
            tdef = TorrentDef.load_from_dict(pstate['metainfo'])
            
            # Activate
            dscfg = DownloadStartupConfig(dlconfig=pstate['dlconfig'])
            self.add(tdef,dscfg,pstate,initialdlstatus)
        except Exception,e:
            # TODO: remove saved checkpoint?
            self.rawserver_nonfatalerrorfunc(e)
+
+    
    def checkpoint(self,stop=False,checkpoint=True,gracetime=2.0):
        """ Schedule a checkpoint (and optionally a stop) of all Downloads
        on the network thread.

        @param stop: also stop the downloads and shut the session down.
        @param checkpoint: whether to save each download's persistent state.
        @param gracetime: seconds allowed for early-shutdown tasks to finish.

        Called by any thread, assume sesslock already held """
        # Even if the list of Downloads changes in the mean time this is
        # no problem. For removals, dllist will still hold a pointer to the
        # Download, and additions are no problem (just won't be included 
        # in list of states returned via callback.
        #
        dllist = self.downloads.values()
        if DEBUG:
            print >>sys.stderr,"tlm: checkpointing",len(dllist)
        
        network_checkpoint_callback_lambda = lambda:self.network_checkpoint_callback(dllist,stop,checkpoint,gracetime)
        self.rawserver.add_task(network_checkpoint_callback_lambda,0.0)
        # TODO: checkpoint overlayapps / friendship msg handler
+
+        
    def network_checkpoint_callback(self,dllist,stop,checkpoint,gracetime):
        """ Save (and optionally stop) each Download's persistent state, then
        initiate network shutdown when requested — possibly delayed so early
        shutdown tasks get their grace time.

        Called by network thread """
        if checkpoint:
            for d in dllist:
                # Tell all downloads to stop, and save their persistent state
                # in a infohash -> pstate dict which is then passed to the user
                # for storage.
                #
                if DEBUG:
                    print >>sys.stderr,"tlm: network checkpointing:",`d.get_def().get_name()`
                if stop:
                    (infohash,pstate) = d.network_stop(False,False)
                else:
                    (infohash,pstate) = d.network_checkpoint()
                    
                try:
                    self.save_download_pstate(infohash,pstate)
                except Exception,e:
                    # One failed save must not block the remaining downloads.
                    self.rawserver_nonfatalerrorfunc(e)
    
        if stop:
            # Some grace time for early shutdown tasks
            if self.shutdownstarttime is not None:
                now = timemod.time()
                diff = now - self.shutdownstarttime
                if diff < gracetime:
                    print >>sys.stderr,"tlm: shutdown: delaying for early shutdown tasks",gracetime-diff
                    delay = gracetime-diff 
                    network_shutdown_callback_lambda = lambda:self.network_shutdown()
                    self.rawserver.add_task(network_shutdown_callback_lambda,delay)
                    return
                
            self.network_shutdown()
+            
+    def early_shutdown(self):
+        """ Called as soon as Session shutdown is initiated. Used to start
+        shutdown tasks that takes some time and that can run in parallel
+        to checkpointing, etc.
+        """
+        self.shutdownstarttime = timemod.time()
+        if self.overlay_apps is not None:
+            self.overlay_bridge.add_task(self.overlay_apps.early_shutdown,0)
+        if self.udppuncture_handler is not None:
+            self.udppuncture_handler.shutdown()
+        
    def network_shutdown(self):
        """ Final shutdown of the network thread: flush the megacache DB (if
        enabled), stop the mainline DHT, log lingering threads, then signal
        the session done-flag and stop the session pool threads. """
        try:
            # Detect if megacache is enabled
            if self.peer_db is not None:
                from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
                
                db = SQLiteCacheDB.getInstance()
                db.commit()
            
            mainlineDHT.deinit()
            
            # Diagnostic: list threads that are still alive at shutdown.
            ts = enumerate()
            print >>sys.stderr,"tlm: Number of threads still running",len(ts)
            for t in ts:
                print >>sys.stderr,"tlm: Thread still running",t.getName(),"daemon",t.isDaemon(), "instance:", t
        except:
            print_exc()
        
        # Stop network thread
        self.sessdoneflag.set()
        # Arno, 2010-08-09: Stop Session pool threads only after gracetime
        self.session.uch.shutdown()
+
+    def save_download_pstate(self,infohash,pstate):
+        """ Called by network thread """
+        basename = binascii.hexlify(infohash)+'.pickle'
+        filename = os.path.join(self.session.get_downloads_pstate_dir(),basename)
+        
+        if DEBUG:
+            print >>sys.stderr,"tlm: network checkpointing: to file",filename
+        f = open(filename,"wb")
+        pickle.dump(pstate,f)
+        f.close()
+
+
+    def load_download_pstate(self,filename):
+        """ Called by any thread """
+        f = open(filename,"rb")
+        pstate = pickle.load(f)
+        f.close()
+        return pstate
+
+    #
+    # External IP address methods
+    #
+    def guess_ext_ip_from_local_info(self):
+        """ Called at creation time """
+        ip = get_my_wan_ip()
+        if ip is None:
+            host = socket.gethostbyname_ex(socket.gethostname())
+            ipaddrlist = host[2]
+            for ip in ipaddrlist:
+                return ip
+            return '127.0.0.1'
+        else:
+            return ip
+
    def run(self):
        """ Thread entry point: run the network loop, optionally under
        cProfile when PROFILE is set (stats are dumped to stderr). """
        if PROFILE:
            fname = "profile-%s" % self.getName()
            import cProfile
            cProfile.runctx( "self._run()", globals(), locals(), filename=fname )
            import pstats
            print >>sys.stderr,"profile: data for %s" % self.getName()
            pstats.Stats(fname,stream=sys.stderr).sort_stats("cumulative").print_stats(20)
        else:
            self._run()
+
    def start_upnp(self):
        """ Arno: as the UPnP discovery and calls to the firewall can be slow,
        do it in a separate thread. When it fails, it should report popup
        a dialog to inform and help the user. Or report an error in textmode.
        
        Must save type here, to handle case where user changes the type
        In that case we still need to delete the port mapping using the old mechanism
        
        Called by network thread """ 
        
        if DEBUG:
            print >>sys.stderr,"tlm: start_upnp()"
        self.set_activity(NTFY_ACT_UPNP)
        # The thread reports results via the two callbacks passed here.
        self.upnp_thread = UPnPThread(self.upnp_type,self.locally_guessed_ext_ip,self.listen_port,self.upnp_failed_callback,self.upnp_got_ext_ip_callback)
        self.upnp_thread.start()
+
    def stop_upnp(self):
        """ Shut down the UPnP helper thread if UPnP was enabled.

        Called by network thread """
        if self.upnp_type > 0:
            # NOTE(review): assumes start_upnp() always ran earlier and set
            # self.upnp_thread whenever upnp_type > 0 — verify at call sites.
            self.upnp_thread.shutdown()
+
    def upnp_failed_callback(self,upnp_type,listenport,error_type,exc=None,listenproto='TCP'):
        """ Called by UPnP thread TODO: determine how to pass to API user 
            In principle this is a non fatal error. But it is one we wish to
            show to the user

        @param upnp_type: the UPnP mode that was attempted.
        @param listenport: port for which the mapping was requested.
        @param error_type: numeric code describing the failure.
        @param exc: optional exception with details.
        @param listenproto: 'TCP' or 'UDP'.
        """
        print >>sys.stderr,"UPnP mode "+str(upnp_type)+" request to firewall failed with error "+str(error_type)+" Try setting a different mode in Preferences. Listen port was "+str(listenport)+", protocol"+listenproto,exc
+
+    def upnp_got_ext_ip_callback(self,ip):
+        """ Called by UPnP thread """
+        self.sesslock.acquire()
+        self.upnp_ext_ip = ip
+        self.sesslock.release()
+
+    def dialback_got_ext_ip_callback(self,ip):
+        """ Called by network thread """
+        self.sesslock.acquire()
+        self.dialback_ext_ip = ip
+        self.sesslock.release()
+
    def yourip_got_ext_ip_callback(self,ip):
        """ Record the external IP address that peers report for us
        (majority vote, see get_ext_ip()).

        Called by network thread """
        self.sesslock.acquire()
        self.yourip_ext_ip = ip
        if DEBUG:
            print >> sys.stderr,"tlm: yourip_got_ext_ip_callback: others think my IP address is",ip
        self.sesslock.release()
+
+
+    def get_ext_ip(self,unknowniflocal=False):
+        """ Called by any thread """
+        self.sesslock.acquire()
+        try:
+            if self.dialback_ext_ip is not None: 
+                # more reliable
+                return self.dialback_ext_ip # string immutable
+            elif self.upnp_ext_ip is not None: 
+                # good reliability, if known
+                return self.upnp_ext_ip
+            elif self.yourip_ext_ip is not None: 
+                # majority vote, could be rigged 
+                return self.yourip_ext_ip 
+            else: 
+                # slighly wild guess
+                if unknowniflocal:
+                    return None
+                else:
+                    return self.locally_guessed_ext_ip
+        finally:
+            self.sesslock.release()
+        
+
+    def get_int_ip(self):
+        """ Called by any thread """
+        self.sesslock.acquire()
+        try:
+            return self.locally_guessed_ext_ip
+        finally:
+            self.sesslock.release()
+
+
+    #
+    # Events from core meant for API user
+    #
    def dialback_reachable_callback(self):
        """ Notify API users that dialback confirmed we are reachable.

        Called by overlay+network thread """
        self.session.uch.notify(NTFY_REACHABLE, NTFY_INSERT, None, '')
+        
+        
    def set_activity(self,type, str = '', arg2=None):
        """ Publish the session's current activity to API users.

        Note: parameters 'type' and 'str' shadow builtins; kept for
        interface compatibility.

        Called by overlay + network thread """
        #print >>sys.stderr,"tlm: set_activity",type,str,arg2
        self.session.uch.notify(NTFY_ACTIVITIES, NTFY_INSERT, type, str, arg2)
+
+        
    def network_vod_event_callback(self,videoinfo,event,params):
        """ Forward a video-on-demand event to the user callback registered
        in videoinfo['usercallback']; errors in the callback are logged and
        swallowed.

        Called by network thread """

        if DEBUG:
            print >>sys.stderr,"tlm: network_vod_event_callback: event %s, params %s" % (event,params)
        
        # Call Session threadpool to call user's callback
        try:        
            videoinfo['usercallback'](event,params)
        except:
            print_exc()
+
+
+    def update_torrent_checking_period(self):
+        # dynamically change the interval: update at least once per day
+        if self.overlay_apps and self.overlay_apps.metadata_handler:
+            ntorrents = self.overlay_apps.metadata_handler.num_torrents
+            if ntorrents > 0:
+                self.torrent_checking_period = min(max(86400/ntorrents, 15), 300)
+        #print >> sys.stderr, "torrent_checking_period", self.torrent_checking_period
+        #self.torrent_checking_period = 1    ### DEBUG, remove it before release!!    
+
    def run_torrent_check(self):
        """ Periodic task: start one TorrentChecking pass and reschedule
        itself using the (dynamically adapted) checking period.

        Called by network thread """

        self.update_torrent_checking_period()
        # Reschedule first, so a failure below cannot stop the cycle.
        self.rawserver.add_task(self.run_torrent_check, self.torrent_checking_period)
        #        print "torrent_checking start"
        try:
            from BaseLib.TrackerChecking.TorrentChecking import TorrentChecking
            
            t = TorrentChecking()
            t.start()
        except Exception, e:
            print_exc()
            self.rawserver_nonfatalerrorfunc(e)
+
+    def get_coopdl_role_object(self,infohash,role):
+        """ Called by network thread """
+        role_object = None
+        self.sesslock.acquire()
+        try:
+            if infohash in self.downloads:
+                d = self.downloads[infohash]
+                role_object = d.get_coopdl_role_object(role)
+        finally:
+            self.sesslock.release()
+        return role_object
+
+        
    def h4xor_reset_init_conn_counter(self):
        # Schedule the connection-counter reset on the network thread.
        self.rawserver.add_task(self.network_h4xor_reset,0)
+        
    def network_h4xor_reset(self):
        """ Reset the outgoing-TCP-connection rate limiter (workaround).

        Called by network thread """
        from BaseLib.Core.BitTornado.BT1.Encrypter import incompletecounter
        print >>sys.stderr,"tlm: h4x0r Resetting outgoing TCP connection rate limiter",incompletecounter.c,"==="
        incompletecounter.c = 0
+
+
    def setup_multicast_discovery(self):
        """ Create the local-peer-discovery multicast channel and its UDP
        socket (IPv4 enabled, IPv6 configured but disabled). """
        # Set up local node discovery here
        # TODO: Fetch these from system configuration
        mc_config = {'permid':self.session.get_permid(),
                     'multicast_ipv4_address':'224.0.1.43',
                     'multicast_ipv6_address':'ff02::4124:1261:ffef',
                     'multicast_port':'32109',
                     'multicast_enabled':True,
                     'multicast_ipv4_enabled':True,
                     'multicast_ipv6_enabled':False,
                     'multicast_announce':True}

        from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_CURRENT
        from BaseLib.Core.Multicast import Multicast

        self.mc_channel = Multicast(mc_config,self.overlay_bridge,self.listen_port,OLPROTO_VER_CURRENT,self.peer_db)
        self.mc_channel.addAnnounceHandler(self.mc_channel.handleOVERLAYSWARMAnnounce)

        # Non-blocking socket; rawserver polls it (see start_multicast).
        self.mc_sock = self.mc_channel.getSocket()
        self.mc_sock.setblocking(0)
+
    def start_multicast(self):
        """ Start listening on the multicast discovery socket and announce
        ourselves; no-op unless both the overlay and local peer discovery
        are enabled. """
        if not self.session.get_overlay() or not self.session.get_multicast_local_peer_discovery():
            return
        
        self.rawserver.start_listening_udp(self.mc_sock, self.mc_channel)

        print >>sys.stderr,"mcast: Sending node announcement"
        params = [self.session.get_listen_port(), self.secure_overlay.olproto_ver_current]
        self.mc_channel.sendAnnounce(params)
+        
+        
def singledownload_size_cmp(x,y):
    """ cmp-style comparator for two SingleDownload objects, ordering by the
        content size of the BT1Download (if any) contained in them.
        Objects (or BT1Downloads) that are None sort after real ones.
    """
    # Guard clauses for missing SingleDownloads: None sorts last.
    if x is None and y is None:
        return 0
    if x is None:
        return 1
    if y is None:
        return -1
    a = x.get_bt1download()
    b = y.get_bt1download()
    # Same ordering rule for missing BT1Downloads.
    if a is None and b is None:
        return 0
    if a is None:
        return 1
    if b is None:
        return -1
    size_a = a.get_datalength()
    size_b = b.get_datalength()
    if size_a < size_b:
        return -1
    if size_a > size_b:
        return 1
    return 0
+
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/SessionRuntimeConfig.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/SessionRuntimeConfig.py
new file mode 100644 (file)
index 0000000..0888c79
--- /dev/null
@@ -0,0 +1,959 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+import sys
+from traceback import print_exc
+
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.SessionConfig import SessionConfigInterface
+from BaseLib.Core.Subtitles.SubtitlesHandler import SubtitlesHandler
+
+# 10/02/10 Boudewijn: pylint points out that member variables used in
+# SessionRuntimeConfig do not exist.  This is because they are set in
+# BaseLib.Core.Session which is a subclass of SessionRuntimeConfig.
+#
+# We disable this error
+# pylint: disable-msg=E1101
+
+class SessionRuntimeConfig(SessionConfigInterface):
+    """
+    Implements the BaseLib.Core.API.SessionConfigInterface
+    
+    Use these to change the session config at runtime.
+    """
    # The directories and keypair below are fixed at Session creation time;
    # their setters therefore always refuse to run once the Session exists.
    # Every getter takes sesslock to protect the shared sessconfig dict.
    def set_state_dir(self,statedir):
        raise OperationNotPossibleAtRuntimeException()
    
    def get_state_dir(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_state_dir(self)
        finally:
            self.sesslock.release()

    def set_install_dir(self,statedir):
        raise OperationNotPossibleAtRuntimeException()
    
    def get_install_dir(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_install_dir(self)
        finally:
            self.sesslock.release()
    
    def set_permid_keypair_filename(self,keypair):
        raise OperationNotPossibleAtRuntimeException()
        
    def get_permid_keypair_filename(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_permid_keypair_filename(self)
        finally:
            self.sesslock.release()
+        
    # Network settings that are immutable at runtime: the setters raise,
    # the getters delegate to SessionConfigInterface under sesslock.
    def set_listen_port(self,port):
        raise OperationNotPossibleAtRuntimeException()

    def get_listen_port(self):
        # To protect self.sessconfig
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_listen_port(self)
        finally:
            self.sesslock.release()
        
    def get_video_analyser_path(self):
        # To protect self.sessconfig
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_video_analyser_path(self)
        finally:
            self.sesslock.release()

    def set_tracker_ip(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_tracker_ip(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_tracker_ip(self)
        finally:
            self.sesslock.release()

    def set_bind_to_addresses(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_bind_to_addresses(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_bind_to_addresses(self)
        finally:
            self.sesslock.release()

    def set_upnp_mode(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_upnp_mode(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_upnp_mode(self)
        finally:
            self.sesslock.release()

    def set_autoclose_timeout(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_autoclose_timeout(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_autoclose_timeout(self)
        finally:
            self.sesslock.release()

    def set_autoclose_check_interval(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_autoclose_check_interval(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_autoclose_check_interval(self)
        finally:
            self.sesslock.release()
+
    # Megacache / overlay / BuddyCast switches are fixed at creation time;
    # only start_recommender can genuinely be toggled at runtime.
    def set_megacache(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_megacache(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_megacache(self)
        finally:
            self.sesslock.release()

    def set_overlay(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_overlay(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_overlay(self)
        finally:
            self.sesslock.release()

    def set_buddycast(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_buddycast(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_buddycast(self)
        finally:
            self.sesslock.release()

    def set_start_recommender(self,value):
        # Persist the new value, then apply it on the overlay thread (the
        # BuddyCast factory must only be touched from that thread).
        self.sesslock.acquire()
        try:
            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
            
            SessionConfigInterface.set_start_recommender(self,value)
            olbridge = OverlayThreadingBridge.getInstance()
            task = lambda:self.olthread_set_start_recommender(value)
            olbridge.add_task(task,0)
        finally:
            self.sesslock.release()

    def olthread_set_start_recommender(self,value):
        # Runs on the overlay thread: resume or pause BuddyCast.
        from BaseLib.Core.BuddyCast.buddycast import BuddyCastFactory
        bcfac = BuddyCastFactory.getInstance()
        if value:
            bcfac.restartBuddyCast()
        else:
            bcfac.pauseBuddyCast()

    def get_start_recommender(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_start_recommender(self)
        finally:
            self.sesslock.release()
+
    #
    # ProxyService_
    #
    # download_help is fixed at creation; the ProxyService status itself can
    # be switched on/off at runtime.
    def set_download_help(self,value):
        raise OperationNotPossibleAtRuntimeException()

    def get_download_help(self):
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_download_help(self)
        finally:
            self.sesslock.release()

    def set_proxyservice_status(self,value):
        """ Set the status of the proxyservice (on or off).
        
        ProxyService off means the current node could not be used as a proxy. ProxyService on means other nodes will be able to use it as a proxy.
        
        @param value: one of the possible two values: PROXYSERVICE_OFF, PROXYSERVICE_ON
        """
        self.sesslock.acquire()
        try:
            SessionConfigInterface.set_proxyservice_status(self, value)
        finally:
            self.sesslock.release()

    def get_proxyservice_status(self):
        """ Returns the status of the proxyservice (on or off).
        @return: one of the possible two values: PROXYSERVICE_OFF, PROXYSERVICE_ON
        """
        self.sesslock.acquire()
        try:
            return SessionConfigInterface.get_proxyservice_status(self)
        finally:
            self.sesslock.release()
    #
    # _ProxyService
    #
+
+
+
+    def set_torrent_collecting(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_torrent_collecting(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_torrent_collecting(self)
+        finally:
+            self.sesslock.release()
+
+
+    def set_torrent_collecting_dir(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_torrent_collecting_dir(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_torrent_collecting_dir(self)
+        finally:
+            self.sesslock.release()
+            
+    def get_subtitles_collecting_dir(self):
+        # Directory used to store collected subtitles (read-only at runtime).
+        with self.sesslock:
+            return SessionConfigInterface.get_subtitles_collecting_dir(self)
+    
+    def set_subtitles_upload_rate(self, value):
+        with self.sesslock:
+            SubtitlesHandler.getInstance().setUploadRate(value)
+            SessionConfigInterface.set_subtitles_uploade_rate(self, value)
+    
+    def get_subtitles_upload_rate(self):
+        # Current upload-rate limit for subtitle exchange.
+        with self.sesslock:
+            return SessionConfigInterface.get_subtitles_upload_rate(self)
+    
+
+
+    def set_superpeer(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_superpeer(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_superpeer(self)
+        finally:
+            self.sesslock.release()
+
+    def set_overlay_log(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_overlay_log(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_overlay_log(self)
+        finally:
+            self.sesslock.release()
+
+    def set_buddycast_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_buddycast_interval(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_buddycast_interval(self)
+        finally:
+            self.sesslock.release()
+
+    def set_torrent_collecting_max_torrents(self,value):
+        # Runtime-changeable: persist the new cap, then let the overlay
+        # thread apply it to the MetadataHandler.
+        self.sesslock.acquire()
+        try:
+            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+            
+            SessionConfigInterface.set_torrent_collecting_max_torrents(self,value)
+            olbridge = OverlayThreadingBridge.getInstance()
+            task = lambda:self.olthread_set_torrent_collecting_max_torrents(value)
+            olbridge.add_task(task,0)
+        finally:
+            self.sesslock.release()
+
+    def olthread_set_torrent_collecting_max_torrents(self,value):
+        # Runs on the overlay thread: update the cap, then schedule an
+        # overflow check 2 seconds from now.
+        from BaseLib.Core.Overlay.MetadataHandler import MetadataHandler
+        mh = MetadataHandler.getInstance()
+        mh.set_overflow(value)
+        mh.delayed_check_overflow(2)
+
+
+    def get_torrent_collecting_max_torrents(self):
+        # Read the current cap under the session lock.
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_torrent_collecting_max_torrents(self)
+        finally:
+            self.sesslock.release()
+
+    def set_buddycast_max_peers(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_buddycast_max_peers(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_buddycast_max_peers(self)
+        finally:
+            self.sesslock.release()
+
+    def set_torrent_collecting_rate(self,value):
+        # Runtime-changeable: persist the rate, then apply it on the overlay
+        # thread via the MetadataHandler.
+        self.sesslock.acquire()
+        try:
+            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+            
+            SessionConfigInterface.set_torrent_collecting_rate(self,value)
+            olbridge = OverlayThreadingBridge.getInstance()
+            task = lambda:self.olthread_set_torrent_collecting_rate(value)
+            olbridge.add_task(task,0)
+        finally:
+            self.sesslock.release()
+
+    def olthread_set_torrent_collecting_rate(self,value):
+        # Runs on the overlay thread (scheduled above).
+        from BaseLib.Core.Overlay.MetadataHandler import MetadataHandler
+        mh = MetadataHandler.getInstance()
+        mh.set_rate(value)
+
+    def get_torrent_collecting_rate(self):
+        # Read the current collecting rate under the session lock.
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_torrent_collecting_rate(self)
+        finally:
+            self.sesslock.release()
+
+    def set_torrent_checking(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_torrent_checking(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_torrent_checking(self)
+        finally:
+            self.sesslock.release()
+
+    def set_torrent_checking_period(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_torrent_checking_period(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_torrent_checking_period(self)
+        finally:
+            self.sesslock.release()
+
+    def set_dialback(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_dialback(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_dialback(self)
+        finally:
+            self.sesslock.release()
+
+    def set_social_networking(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_social_networking(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_social_networking(self)
+        finally:
+            self.sesslock.release()
+
+    def set_remote_query(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_remote_query(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_remote_query(self)
+        finally:
+            self.sesslock.release()
+
+    def set_stop_collecting_threshold(self,value):
+        # Runtime-changeable: persist the free-space threshold, then let the
+        # overlay thread re-check available disk space.
+        self.sesslock.acquire()
+        try:
+            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+            
+            SessionConfigInterface.set_stop_collecting_threshold(self,value)
+            olbridge = OverlayThreadingBridge.getInstance()
+            task = lambda:self.olthread_set_stop_collecting_threshold(value)
+            olbridge.add_task(task,0)
+        finally:
+            self.sesslock.release()
+
+    def olthread_set_stop_collecting_threshold(self,value):
+        # Runs on the overlay thread: update the minimum free space and
+        # schedule a free-space check 2 seconds from now.
+        from BaseLib.Core.Overlay.MetadataHandler import MetadataHandler
+        mh = MetadataHandler.getInstance()
+        mh.set_min_free_space(value)
+        mh.delayed_check_free_space(2)
+
+    def get_stop_collecting_threshold(self):
+        # Read the current threshold under the session lock.
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_stop_collecting_threshold(self)
+        finally:
+            self.sesslock.release()
+
+    def set_internal_tracker(self,value):
+        # Fixed at session creation; runtime changes are rejected.
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_internal_tracker(self):
+        # Read the boot-time internal-tracker flag under the session lock.
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_internal_tracker(self)
+        finally:
+            self.sesslock.release()
+
+    def set_internal_tracker_url(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    # NOTE: the getter is deliberately commented out here; per the note below
+    # it is provided by Session.py instead.
+    #def get_internal_tracker_url(self):
+        """ Implemented in Session.py """
+
+    def set_mainline_dht(self,value):
+        # Fixed at session creation; runtime changes are rejected.
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_mainline_dht(self):
+        # Read the boot-time mainline-DHT flag under the session lock.
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_mainline_dht(self)
+        finally:
+            self.sesslock.release()
+
+    def set_nickname(self,value):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.set_nickname(self, value)
+        finally:
+            self.sesslock.release()
+            
+    def get_nickname(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_nickname(self)
+        finally:
+            self.sesslock.release()
+
+    def set_mugshot(self,value, mime='image/jpeg'):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.set_mugshot(self, value, mime)
+        finally:
+            self.sesslock.release()
+
+    def get_mugshot(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_mugshot(self)
+        finally:
+            self.sesslock.release()
+
+
+    def set_tracker_dfile(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_dfile(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_dfile(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_dfile_format(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_dfile_format(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_dfile_format(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_socket_timeout(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_socket_timeout(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_socket_timeout(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_save_dfile_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_save_dfile_interval(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_save_dfile_interval(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_timeout_downloaders_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_timeout_downloaders_interval(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_timeout_downloaders_interval(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_reannounce_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_reannounce_interval(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_reannounce_interval(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_response_size(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_response_size(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_response_size(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_timeout_check_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_timeout_check_interval(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_timeout_check_interval(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_nat_check(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_nat_check(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_nat_check(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_log_nat_checks(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_log_nat_checks(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_log_nat_checks(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_min_time_between_log_flushes(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_min_time_between_log_flushes(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_min_time_between_log_flushes(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_min_time_between_cache_refreshes(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_min_time_between_cache_refreshes(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_min_time_between_cache_refreshes(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_allowed_dir(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_allowed_dir(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_allowed_dir(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_allowed_list(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_allowed_list(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_allowed_list(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_allowed_controls(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_allowed_controls(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_allowed_controls(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_multitracker_enabled(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_multitracker_enabled(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_multitracker_enabled(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_multitracker_allowed(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_multitracker_allowed(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_multitracker_allowed(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_multitracker_reannounce_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_multitracker_reannounce_interval(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_multitracker_reannounce_interval(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_multitracker_maxpeers(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_multitracker_maxpeers(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_multitracker_maxpeers(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_aggregate_forward(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_aggregate_forward(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_aggregate_forward(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_aggregator(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_aggregator(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_aggregator(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_hupmonitor(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_hupmonitor(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_hupmonitor(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_multitracker_http_timeout(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_multitracker_http_timeout(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_multitracker_http_timeout(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_parse_dir_interval(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_parse_dir_interval(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_parse_dir_interval(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_show_infopage(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_show_infopage(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_show_infopage(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_infopage_redirect(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_infopage_redirect(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_infopage_redirect(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_show_names(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_show_names(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_show_names(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_favicon(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_favicon(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_favicon(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_allowed_ips(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_allowed_ips(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_allowed_ips(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_banned_ips(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_banned_ips(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_banned_ips(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_only_local_override_ip(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_only_local_override_ip(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_only_local_override_ip(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_logfile(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_logfile(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_logfile(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_allow_get(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_allow_get(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_allow_get(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_keep_dead(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_keep_dead(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_keep_dead(self)
+        finally:
+            self.sesslock.release()
+
+    def set_tracker_scrape_allowed(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_tracker_scrape_allowed(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_tracker_scrape_allowed(self)
+        finally:
+            self.sesslock.release()
+
+    def set_overlay_max_message_length(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_overlay_max_message_length(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_overlay_max_message_length(self)
+        finally:
+            self.sesslock.release()
+
+    def set_download_help_dir(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_download_help_dir(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_download_help_dir(self)
+        finally:
+            self.sesslock.release()
+
+    def set_bartercast(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_bartercast(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_bartercast(self)
+        finally:
+            self.sesslock.release()
+
+    def set_superpeer_file(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_superpeer_file(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_superpeer_file(self)
+        finally:
+            self.sesslock.release()
+
+    def set_buddycast_collecting_solution(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_buddycast_collecting_solution(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_buddycast_collecting_solution(self)
+        finally:
+            self.sesslock.release()
+
+    def set_peer_icon_path(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_peer_icon_path(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_peer_icon_path(self)
+        finally:
+            self.sesslock.release()
+
+    #
+    # NAT Puncturing servers information setting/retrieval
+    #
+    def set_nat_detect(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+    
+    def set_puncturing_private_port(self, puncturing_private_port):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def set_stun_servers(self, stun_servers):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def set_pingback_servers(self, pingback_servers):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def set_puncturing_coordinators(self, puncturing_coordinators):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_nat_detect(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_nat_detect(self)
+        finally:
+            self.sesslock.release()
+
+    def get_puncturing_internal_port(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_puncturing_internal_port(self)
+        finally:
+            self.sesslock.release()
+
+    def get_stun_servers(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_stun_servers(self)
+        finally:
+            self.sesslock.release()
+
+    def get_pingback_servers(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_pingback_servers(self)
+        finally:
+            self.sesslock.release()
+
+    #
+    # Crawler
+    #
+    def set_crawler(self, value):
+        raise OperationNotPossibleAtRuntimeException()
+
+    def get_crawler(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_crawler(self)
+        finally:
+            self.sesslock.release()
+    
+    # 
+    # Local Peer Discovery using IP Multicast
+    #
+    def set_multicast_local_peer_discovery(self,value):
+        raise OperationNotPossibleAtRuntimeException()
+        
+    def get_multicast_local_peer_discovery(self):
+        self.sesslock.acquire()
+        try:
+            return SessionConfigInterface.get_multicast_local_peer_discovery(self)
+        finally:
+            self.sesslock.release()
+
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/SingleDownload.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/SingleDownload.py
new file mode 100644 (file)
index 0000000..8daa781
--- /dev/null
@@ -0,0 +1,425 @@
+# Written by Arno Bakker, George Milescu 
+# see LICENSE.txt for license information
+
+import sys
+import os
+import time
+import copy
+import pickle
+import socket
+import binascii
+from base64 import b64encode
+from types import StringType,ListType,IntType
+from traceback import print_exc,print_stack
+from threading import Event
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.BitTornado.__init__ import createPeerID
+from BaseLib.Core.BitTornado.download_bt1 import BT1Download
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.Video.VideoStatus import VideoStatus
+from BaseLib.Core.Video.SVCVideoStatus import SVCVideoStatus
+from BaseLib.Core.DecentralizedTracking.repex import RePEXer
+from BaseLib.Core.Statistics.Status.Status import get_status_holder
+
+
+SPECIAL_VALUE = 481
+
+DEBUG = False
+
+class SingleDownload:
+    """ This class is accessed solely by the network thread """
+    
+    def __init__(self,infohash,metainfo,kvconfig,multihandler,get_extip_func,listenport,videoanalyserpath,vodfileindex,set_error_func,pstate,lmvodeventcallback,lmhashcheckcompletecallback):
+        # Build the BT1Download engine for a single torrent. Any exception
+        # raised during setup is routed to fatalerrorfunc() (which reports
+        # the error via set_error_func and shuts this download down) rather
+        # than propagated to the caller.
+        self.dow = None
+        self.set_error_func = set_error_func
+        self.videoinfo = None
+        self.videostatus = None
+        self.lmvodeventcallback = lmvodeventcallback
+        self.lmhashcheckcompletecallback = lmhashcheckcompletecallback
+        self.logmsgs = []
+        self._hashcheckfunc = None
+        self._getstatsfunc = None
+        self.infohash = infohash
+        self.b64_infohash = b64encode(infohash)
+        self.repexer = None
+        try:
+            self.dldoneflag = Event()
+            self.dlrawserver = multihandler.newRawServer(infohash,self.dldoneflag)
+            # NOTE(review): duplicate assignment; lmvodeventcallback was
+            # already stored above.
+            self.lmvodeventcallback = lmvodeventcallback
+    
+            # Resume the hash-check progress fraction from persisted state.
+            if pstate is not None:
+                self.hashcheckfrac = pstate['dlstate']['progress']
+            else:
+                self.hashcheckfrac = 0.0
+    
+            self.peerid = createPeerID()
+            
+            # LOGGING
+            event_reporter = get_status_holder("LivingLab")
+            event_reporter.create_and_add_event("peerid", [self.b64_infohash, b64encode(self.peerid)])
+            
+            #print >>sys.stderr,"SingleDownload: __init__: My peer ID is",`peerid`
+    
+            self.dow = BT1Download(self.hashcheckprogressfunc,
+                            self.finishedfunc,
+                            self.fatalerrorfunc, 
+                            self.nonfatalerrorfunc,
+                            self.logerrorfunc,
+                            self.dldoneflag,
+                            kvconfig,
+                            metainfo, 
+                            infohash,
+                            self.peerid,
+                            self.dlrawserver,
+                            get_extip_func,
+                            listenport,
+                            videoanalyserpath
+                            )
+        
+            # saveAs() calls back into self.save_as() below to choose the
+            # on-disk destination path.
+            file = self.dow.saveAs(self.save_as)
+            #if DEBUG:
+            #    print >>sys.stderr,"SingleDownload: dow.saveAs returned",file
+            
+            # Set local filename in vodfileindex
+            if vodfileindex is not None:
+                # Ric: for SVC the index is a list of indexes
+                index = vodfileindex['index']
+                if type(index) == ListType:
+                    svc = len(index) > 1
+                else:
+                    svc = False
+                
+                if svc:
+                    outpathindex = self.dow.get_dest(index[0])
+                else:
+                    if index == -1:
+                        index = 0
+                    outpathindex = self.dow.get_dest(index)
+
+                vodfileindex['outpath'] = outpathindex
+                self.videoinfo = vodfileindex
+                if 'live' in metainfo['info']:
+                    authparams = metainfo['info']['live']
+                else:
+                    authparams = None
+                # SVC (scalable video) torrents get their own status tracker.
+                if svc:
+                    self.videostatus = SVCVideoStatus(metainfo['info']['piece length'],self.dow.files,vodfileindex,authparams)
+                else:
+                    self.videostatus = VideoStatus(metainfo['info']['piece length'],self.dow.files,vodfileindex,authparams)
+                self.videoinfo['status'] = self.videostatus
+                self.dow.set_videoinfo(vodfileindex,self.videostatus)
+
+            #if DEBUG:
+            #    print >>sys.stderr,"SingleDownload: setting vodfileindex",vodfileindex
+            
+            # RePEX: Start in RePEX mode
+            if kvconfig['initialdlstatus'] == DLSTATUS_REPEXING:
+                if pstate is not None and pstate.has_key('dlstate'):
+                    swarmcache = pstate['dlstate'].get('swarmcache',{})
+                else:
+                    swarmcache = {}
+                self.repexer = RePEXer(self.infohash, swarmcache)
+            else:
+                self.repexer = None
+            
+            if pstate is None:
+                resumedata = None
+            else:
+                # Restarting download
+                resumedata=pstate['engineresumedata']
+            # initFiles() returns the function that performs the actual hash
+            # check; it is invoked later via perform_hashcheck().
+            self._hashcheckfunc = self.dow.initFiles(resumedata=resumedata)
+
+            
+        except Exception,e:
+            self.fatalerrorfunc(e)
+    
+    def get_bt1download(self):
+        """ Return the underlying BT1Download engine (None after shutdown). """
+        return self.dow
+    
+    def save_as(self,name,length,saveas,isdir):
+        """ Return the local filename to which to save the file 'name' in the torrent """
+        # Passed as callback to BT1Download.saveAs() in __init__.
+        if DEBUG:
+            print >>sys.stderr,"SingleDownload: save_as(",`name`,length,`saveas`,isdir,")"
+        try:
+            # NOTE(review): check-then-mkdir is racy (TOCTOU) if several
+            # downloads share a destination dir -- confirm single-threaded use.
+            if not os.access(saveas,os.F_OK):
+                os.mkdir(saveas)
+            path = os.path.join(saveas,name)
+            if isdir and not os.path.isdir(path):
+                os.mkdir(path)
+            return path
+        except Exception,e:
+            self.fatalerrorfunc(e)
+
+    def perform_hashcheck(self,complete_callback):
+        """ Called by any thread """
+        # Kicks off the hash check via the function returned by initFiles();
+        # complete_callback is invoked when hashchecking finishes.
+        if DEBUG:
+            print >>sys.stderr,"SingleDownload: perform_hashcheck()" # ,self.videoinfo
+        try:
+            """ Schedules actually hashcheck on network thread """
+            self._getstatsfunc = SPECIAL_VALUE # signal we're hashchecking
+            # Already set, should be same
+            self.lmhashcheckcompletecallback = complete_callback
+            self._hashcheckfunc(self.lmhashcheckcompletecallback)
+        except Exception,e:
+            self.fatalerrorfunc(e)
+            
+    def hashcheck_done(self):
+        """ Called by LaunchMany when hashcheck complete and the Download can be
+            resumed
+            
+            Called by network thread
+        """
+        if DEBUG:
+            print >>sys.stderr,"SingleDownload: hashcheck_done()"
+        try:
+            self.dow.startEngine(vodeventfunc = self.lmvodeventcallback)
+            self._getstatsfunc = self.dow.startStats() # not possible earlier
+            
+            # RePEX: don't start the Rerequester in RePEX mode
+            repexer = self.repexer
+            if repexer is None:
+                # ProxyService_
+                #
+                # ProxyDevel
+                # If proxymode is PROXY_MODE_PRIVATE, deactivate the tracker support
+                download_config = self.get_bt1download().getConfig()
+                proxy_mode = download_config.get('proxy_mode',0)
+                
+                # Only activate the tracker if the proxy_mode is PROXY_MODE_OFF or PROXY_MODE_SPEED
+                # (the PROXY_MODE_* constants come in via the simpledefs star
+                # import at the top of this module)
+                if proxy_mode == PROXY_MODE_OFF or proxy_mode == PROXY_MODE_SPEED:
+                    self.dow.startRerequester()
+                    if DEBUG:
+                        print "Tracker class has been activated." + str(proxy_mode) 
+                else:
+                    #self.dow.startRerequester()
+                    if DEBUG:
+                        print "Tracker class has not been activated." + str(proxy_mode)
+                #
+                #_ProxyService
+            else:
+                self.hook_repexer()
+                
+            self.dlrawserver.start_listening(self.dow.getPortHandler())
+        except Exception,e:
+            self.fatalerrorfunc(e)
+
+
+    # DownloadConfigInterface methods
+    def set_max_speed(self,direct,speed,callback):
+        """ Set max upload (direct == UPLOAD) or download rate on the engine;
+            invokes callback(direct,speed) afterwards when given. """
+        if self.dow is not None:
+            if DEBUG:
+                print >>sys.stderr,"SingleDownload: set_max_speed",`self.dow.response['info']['name']`,direct,speed
+            if direct == UPLOAD:
+                self.dow.setUploadRate(speed,networkcalling=True)
+            else:
+                self.dow.setDownloadRate(speed,networkcalling=True)
+        if callback is not None:
+            callback(direct,speed)
+
+    def set_max_conns_to_initiate(self,nconns,callback):
+        """ Set max number of connections this download initiates; invokes
+            callback(nconns) afterwards when given. """
+        if self.dow is not None:
+            if DEBUG:
+                print >>sys.stderr,"SingleDownload: set_max_conns_to_initiate",`self.dow.response['info']['name']`
+            self.dow.setInitiate(nconns,networkcalling=True)
+        if callback is not None:
+            callback(nconns)
+
+
+    def set_max_conns(self,nconns,callback):
+        """ Set max total number of connections for this download; invokes
+            callback(nconns) afterwards when given. """
+        if self.dow is not None:
+            if DEBUG:
+                print >>sys.stderr,"SingleDownload: set_max_conns",`self.dow.response['info']['name']`
+            self.dow.setMaxConns(nconns,networkcalling=True)
+        if callback is not None:
+            callback(nconns)
+    
+
+    #
+    # For DownloadState
+    #
+    def get_stats(self,getpeerlist):
+        """ Return a 5-tuple (status,stats,logmsgs,coopdl_helpers,
+            coopdl_coordinator). status is a DLSTATUS_* value while waiting
+            for / performing the hashcheck (stats then None or {'frac':..}),
+            DLSTATUS_REPEXING while repexing, else None with the engine's
+            own stats dict. """
+        logmsgs = self.logmsgs[:] # copy
+        coopdl_helpers = []
+        coopdl_coordinator = None
+        if self.dow is not None:
+            if not self.dow.helper is None:
+                coopdl_coordinator = self.dow.helper.get_coordinator_permid() 
+            if self.dow.coordinator is not None: 
+                # No coordinator when you're a helper
+                peerreclist = self.dow.coordinator.network_get_asked_helpers_copy()
+                for peerrec in peerreclist:
+                    coopdl_helpers.append(peerrec['permid'])
+        if self._getstatsfunc is None:
+            return (DLSTATUS_WAITING4HASHCHECK,None,logmsgs,coopdl_helpers,coopdl_coordinator)
+        elif self._getstatsfunc == SPECIAL_VALUE:
+            stats = {}
+            stats['frac'] = self.hashcheckfrac
+            return (DLSTATUS_HASHCHECKING,stats,logmsgs,coopdl_helpers,coopdl_coordinator)
+        else:
+            # RePEX: if we're repexing, set our status
+            if self.repexer is not None:
+                status = DLSTATUS_REPEXING
+            else:
+                status = None
+            return (status,self._getstatsfunc(getpeerlist=getpeerlist),logmsgs,coopdl_helpers,coopdl_coordinator)
+
+    def get_infohash(self):
+        """ Return the infohash of this download's torrent. """
+        return self.infohash
+
+    #
+    # Persistent State
+    #
+    def checkpoint(self):
+        """ Return the engine's resume data, or None if the engine is not
+            running. """
+        if self.dow is not None:
+            return self.dow.checkpoint()
+        else:
+            return None
+    
+    def shutdown(self):
+        """ Stop the download: abort any RePEX, stop the engine and raw
+            server, and return the engine's resume data (None if the engine
+            was not running). """
+        if DEBUG:
+            print >>sys.stderr,"SingleDownload: shutdown"
+        resumedata = None
+        if self.dow is not None:
+            # RePEX: unhook and abort RePEXer
+            if self.repexer:
+                repexer = self.unhook_repexer()
+                repexer.repex_aborted(self.infohash, DLSTATUS_STOPPED)
+
+            self.dldoneflag.set()
+            self.dlrawserver.shutdown()
+            resumedata = self.dow.shutdown()
+            self.dow = None
+            #if DEBUG:
+            #    print >>sys.stderr,"SingleDownload: stopped dow"
+                
+        if self._getstatsfunc is None or self._getstatsfunc == SPECIAL_VALUE:
+            # Hashchecking or waiting for while being shutdown, signal LaunchMany
+            # so it can schedule a new one.
+            self.lmhashcheckcompletecallback(success=False)
+                
+        return resumedata
+    
+    #
+    # RePEX, Raynor Vliegendhart:
+    # Restarting a running Download previously was a NoOp according to 
+    # DownloadImpl, but now the decision is left up to SingleDownload.
+    def restart(self, initialdlstatus=None):
+        """
+        Called by network thread. Called when Download was already running
+        and Download.restart() was called.
+        """
+        if self.repexer and initialdlstatus != DLSTATUS_REPEXING:
+            # kill the RePEX process
+            repexer = self.unhook_repexer()
+            repexer.repex_aborted(self.infohash, initialdlstatus)
+        else:
+            pass # NoOp, continue with download as before
+    
+    
+    #
+    # RePEX: get_swarmcache
+    #
+    def get_swarmcache(self):
+        """
+        Returns the last stored swarmcache when RePEXing otherwise None.
+        
+        @return A dict mapping dns to a dict with at least 'last_seen' 
+        and 'pex' keys.
+        """
+        if self.repexer is not None:
+            return self.repexer.get_swarmcache()[0]
+        return None
+    
+    #
+    # RePEX: Hooking and unhooking the RePEXer
+    #
+    def hook_repexer(self):
+        # Pause the engine and attach the RePEXer to the connecter/encoder;
+        # no-op when no RePEXer is configured.
+        repexer = self.repexer
+        if repexer is None:
+            return
+        self.dow.Pause()
+        
+        # create Rerequester in BT1D just to be sure, but don't start it
+        # (this makes sure that Encoder.rerequest != None)
+        self.dow.startRerequester(paused=True)
+        
+        connecter, encoder = self.dow.connecter, self.dow.encoder
+        connecter.repexer = repexer
+        encoder.repexer = repexer
+        rerequest = self.dow.createRerequester(repexer.rerequester_peers)
+        repexer.repex_ready(self.infohash, connecter, encoder, rerequest)
+    
+    def unhook_repexer(self):
+        """ Detach and return the RePEXer, resuming normal downloading;
+            returns None if no RePEXer was attached. """
+        repexer = self.repexer
+        if repexer is None:
+            return
+        self.repexer = None
+        if self.dow is not None:
+            connecter, encoder = self.dow.connecter, self.dow.encoder
+            connecter.repexer = None
+            encoder.repexer = None
+            self.dow.startRerequester() # not started, so start it.
+            self.dow.Unpause()
+        return repexer
+    
+    #
+    # Cooperative download
+    #
+    def ask_coopdl_helpers(self,peerreclist):
+        """ Ask the given peer records to become download helpers. """
+        if self.dow is not None:
+            self.dow.coordinator.send_ask_for_help(peerreclist)
+
+    def stop_coopdl_helpers(self,peerreclist):
+        """ Tell the given helper peers to stop helping (forced). """
+        if self.dow is not None:
+            self.dow.coordinator.send_stop_helping(peerreclist,force=True)
+
+    def get_coopdl_role_object(self,role):
+        # Used by Coordinator/HelperMessageHandler indirectly
+        # Returns the engine's coordinator or helper object, or None when
+        # the engine is not running.
+        if self.dow is not None:
+            if role == COOPDL_ROLE_COORDINATOR:
+                return self.dow.coordinator
+            else:
+                return self.dow.helper
+        else:
+            return None
+
+    #
+    # Internal methods
+    #
+    def hashcheckprogressfunc(self,activity = '', fractionDone = 0.0):
+        """ Allegedly only used by StorageWrapper during hashchecking """
+        #print >>sys.stderr,"SingleDownload::statusfunc called",activity,fractionDone
+        self.hashcheckfrac = fractionDone
+
+    def finishedfunc(self):
+        """ Download is complete """
+        if DEBUG:
+            print >>sys.stderr,"SingleDownload::finishedfunc called: Download is complete *******************************"
+        pass
+
+    def fatalerrorfunc(self,data):
+        # Reports a fatal engine error via set_error_func and shuts this
+        # download down. Accepts either an exception or a legacy string.
+        print >>sys.stderr,"SingleDownload::fatalerrorfunc called",data
+        if type(data) == StringType:
+            print >>sys.stderr,"LEGACY CORE FATAL ERROR",data
+            print_stack()
+            self.set_error_func(TriblerLegacyException(data))
+        else:
+            print_exc()
+            self.set_error_func(data)
+        self.shutdown()
+
+    def nonfatalerrorfunc(self,e):
+        print >>sys.stderr,"SingleDownload::nonfatalerrorfunc called",e
+        # Could log this somewhere, or phase it out (only used in Rerequester)
+
+    def logerrorfunc(self,msg):
+        # Append a timestamped log message, keeping only the 10 most recent.
+        t = time.time()
+        self.logmsgs.append((t,msg))
+        
+        # Keep max 10 log entries, API user should save them if he wants 
+        # complete history
+        if len(self.logmsgs) > 10:
+            self.logmsgs.pop(0)
+            
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/ThreadPool.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/ThreadPool.py
new file mode 100644 (file)
index 0000000..99b0a10
--- /dev/null
@@ -0,0 +1,180 @@
+# Written by Jelle Roozenburg, Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import time
+from traceback import print_exc
+import threading
+
+class ThreadPool:
+
+    """Flexible thread pool class.  Creates a pool of threads, then
+    accepts tasks that will be dispatched to the next available
+    thread."""
+    
+    def __init__(self, numThreads):
+
+        """Initialize the thread pool with numThreads workers."""
+        
+        self.__threads = []
+        self.__resizeLock = threading.Condition(threading.Lock())
+        self.__taskCond = threading.Condition(threading.Lock())
+        self.__tasks = []
+        self.__isJoiningStopQueuing = False
+        self.__isJoining = False
+        self.setThreadCount(numThreads)
+
+    def setThreadCount(self, newNumThreads):
+
+        """ External method to set the current pool size.  Acquires
+        the resizing lock, then calls the internal version to do real
+        work.  Returns False if the pool is shutting down. """
+        
+        # Can't change the thread count if we're shutting down the pool!
+        # NOTE(review): __isJoining is read here without holding a lock;
+        # presumably benign as a single-writer flag -- confirm.
+        if self.__isJoining:
+            return False
+        
+        self.__resizeLock.acquire()
+        try:
+            self.__setThreadCountNolock(newNumThreads)
+        finally:
+            self.__resizeLock.release()
+        return True
+
+    def __setThreadCountNolock(self, newNumThreads):
+        
+        """Set the current pool size, spawning or terminating threads
+        if necessary.  Internal use only; assumes the resizing lock is
+        held."""
+        
+        # If we need to grow the pool, do so
+        while newNumThreads > len(self.__threads):
+            newThread = ThreadPoolThread(self)
+            self.__threads.append(newThread)
+            newThread.start()
+        # If we need to shrink the pool, do so
+        # (threads are asked to exit via goAway(); they leave the run loop
+        # on their next pass)
+        while newNumThreads < len(self.__threads):
+            self.__threads[0].goAway()
+            del self.__threads[0]
+
+    def getThreadCount(self):
+
+        """Return the number of threads in the pool."""
+        
+        self.__resizeLock.acquire()
+        try:
+            return len(self.__threads)
+        finally:
+            self.__resizeLock.release()
+
+    def queueTask(self, task, args=(), taskCallback=None):
+
+        """Insert a task into the queue.  task must be callable;
+        args and taskCallback can be None.  Returns False if the task
+        was rejected (pool joining, or task not callable)."""
+        
+        if self.__isJoining == True or self.__isJoiningStopQueuing:
+            return False
+        if not callable(task):
+            return False
+        
+        self.__taskCond.acquire()
+        try:
+            self.__tasks.append((task, args, taskCallback))
+            # Arno, 2010-04-07: Use proper notify()+wait()
+            self.__taskCond.notifyAll()
+            return True
+        finally:
+            self.__taskCond.release()
+
+    def getNextTask(self):
+
+        """ Retrieve the next task from the task queue.  For use
+        only by ThreadPoolThread objects contained in the pool.
+        Blocks until a task is available; returns (None, None, None)
+        when the pool is joining. """
+        
+        self.__taskCond.acquire()
+        try:
+            while self.__tasks == [] and not self.__isJoining:
+                self.__taskCond.wait()
+            if self.__isJoining:
+                return (None, None, None)
+            else:
+                return self.__tasks.pop(0)
+        finally:
+            self.__taskCond.release()
+    
+    def joinAll(self, waitForTasks = True, waitForThreads = True):
+
+        """ Clear the task queue and terminate all pooled threads,
+        optionally allowing the tasks and threads to finish."""
+        
+        # Mark the pool as joining to prevent any more task queueing
+        # NOTE(review): __isJoiningStopQueuing is never reset below, so a
+        # joined pool rejects all future queueTask() calls even though the
+        # comment at the end says the pool is reset for reuse -- confirm
+        # whether reuse is actually intended.
+        self.__isJoiningStopQueuing = True
+
+        # Wait for tasks to finish
+        if waitForTasks:
+            while self.__tasks != []:
+                time.sleep(.1)
+
+        # Mark the pool as joining to make all threads stop executing tasks
+        self.__isJoining = True
+
+        # Tell all the threads to quit
+        self.__resizeLock.acquire()
+        try:
+            self.__setThreadCountNolock(0)
+            self.__isJoining = True
+
+            # Wait until all threads have exited
+            if waitForThreads:
+                for t in self.__threads:
+                    t.join()
+                    del t
+
+            # Reset the pool for potential reuse
+            self.__isJoining = False
+        finally:
+            self.__resizeLock.release()
+
+
+        
+class ThreadPoolThread(threading.Thread):
+
+    """ Pooled thread class. """
+    
+    def __init__(self, pool):
+
+        """ Initialize the thread and remember the pool. """
+        
+        threading.Thread.__init__(self)
+        self.setName('SessionPool'+self.getName())
+        # Daemon thread: does not keep the process alive on interpreter exit.
+        self.setDaemon(True)
+        self.__pool = pool
+        self.__isDying = False
+        
+    def run(self):
+
+        """ Until told to quit, retrieve the next task and execute
+        it, calling the callback if any.  """
+
+        # Arno, 2010-04-07: Dying only used when shrinking pool now. 
+        while self.__isDying == False:
+            # Arno, 2010-01-28: add try catch block. Sometimes tasks lists grow,
+            # could be because all Threads are dying.
+            try:
+                cmd, args, callback = self.__pool.getNextTask()
+                if cmd is None:
+                    break
+                elif callback is None:
+                    cmd(*args)
+                else:
+                    # NOTE(review): this passes the args tuple as a single
+                    # positional argument, unlike cmd(*args) in the branch
+                    # above -- looks like a missing '*'; confirm against
+                    # callers before changing.
+                    callback(cmd(args))
+            except:
+                print_exc()
+            
+    
+    def goAway(self):
+
+        """ Exit the run loop next time through."""
+        
+        self.__isDying = True
+        
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/UserCallbackHandler.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/UserCallbackHandler.py
new file mode 100644 (file)
index 0000000..ef5b8f4
--- /dev/null
@@ -0,0 +1,133 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+
+import sys
+import os
+import shutil
+import binascii
+from threading import currentThread
+from traceback import print_exc
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.APIImplementation.ThreadPool import ThreadPool
+from BaseLib.Core.CacheDB.Notifier import Notifier
+
+DEBUG = False
+
+class UserCallbackHandler:
+    
+    def __init__(self,session):
+        """ Create the callback handler for a Session; user callbacks are
+        executed on a small private thread pool (2 workers). """
+        self.session = session
+        self.sesslock = session.sesslock
+        self.sessconfig = session.sessconfig
+
+        # Notifier for callbacks to API user
+        self.threadpool = ThreadPool(2)
+        self.notifier = Notifier.getInstance(self.threadpool)
+
+    def shutdown(self):
+        """ Terminate the callback thread pool (waits for queued tasks). """
+        # stop threadpool
+        self.threadpool.joinAll()
+
+    def perform_vod_usercallback(self,d,usercallback,event,params):
+        """ Called by network thread """
+        # Wraps usercallback(d,event,params) and hands it to the pool so it
+        # runs off the network thread; exceptions are printed, not raised.
+        if DEBUG:
+            print >>sys.stderr,"Session: perform_vod_usercallback()",`d.get_def().get_name_as_unicode()`
+        def session_vod_usercallback_target():
+            try:
+                usercallback(d,event,params)
+            except:
+                print_exc()
+        self.perform_usercallback(session_vod_usercallback_target)
+
+    def perform_getstate_usercallback(self,usercallback,data,returncallback):
+        """ Called by network thread """
+        # usercallback(data) must return a (when,getpeerlist) pair, which is
+        # forwarded to returncallback to schedule the next status poll.
+        if DEBUG:
+            print >>sys.stderr,"Session: perform_getstate_usercallback()"
+        def session_getstate_usercallback_target():
+            try:
+                (when,getpeerlist) = usercallback(data)
+                returncallback(usercallback,when,getpeerlist)
+            except:
+                print_exc()
+        self.perform_usercallback(session_getstate_usercallback_target)
+
+
+    def perform_removestate_callback(self,infohash,contentdest,removecontent):
+        """ Called by network thread """
+        # Schedules sesscb_removestate() (below) on the pool.
+        if DEBUG:
+            print >>sys.stderr,"Session: perform_removestate_callback()"
+        def session_removestate_callback_target():
+            if DEBUG:
+                print >>sys.stderr,"Session: session_removestate_callback_target called",currentThread().getName()
+            try:
+                self.sesscb_removestate(infohash,contentdest,removecontent)
+            except:
+                print_exc()
+        self.perform_usercallback(session_removestate_callback_target)
+        
+    def perform_usercallback(self,target):
+        """ Queue a zero-argument callable on the callback thread pool. """
+        self.sesslock.acquire()
+        try:
+            # TODO: thread pool, etc.
+            self.threadpool.queueTask(target)
+            
+        finally:
+            self.sesslock.release()
+
+    def sesscb_removestate(self,infohash,contentdest,removecontent):
+        """  See DownloadImpl.setup().
+        Called by SessionCallbackThread """
+        # Removes the persisted checkpoint (and optionally downloaded
+        # content) of a Download, unless the Download reappeared in the
+        # meantime. Only the existence check and state-dir lookup are done
+        # under the session lock; the file removal below runs unlocked.
+        if DEBUG:
+            print >>sys.stderr,"Session: sesscb_removestate called",`infohash`,`contentdest`,removecontent
+        self.sesslock.acquire()
+        try:
+            if self.session.lm.download_exists(infohash):
+                print >>sys.stderr,"Session: sesscb_removestate: Download is back, restarted? Canceling removal!",`infohash`
+                return
+            
+            dlpstatedir = os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR)
+        finally:
+            self.sesslock.release()
+
+        # See if torrent uses internal tracker
+        try:
+            self.session.remove_from_internal_tracker_by_infohash(infohash)
+        except:
+            # Show must go on
+            print_exc()
+
+        # Remove checkpoint
+        hexinfohash = binascii.hexlify(infohash)
+        try:
+            basename = hexinfohash+'.pickle'
+            filename = os.path.join(dlpstatedir,basename)
+            if DEBUG:
+                print >>sys.stderr,"Session: sesscb_removestate: removing dlcheckpoint entry",filename
+            if os.access(filename,os.F_OK):
+                os.remove(filename)
+        except:
+            # Show must go on
+            print_exc()
+
+        # Remove downloaded content from disk
+        if removecontent:
+            if DEBUG:
+                print >>sys.stderr,"Session: sesscb_removestate: removing saved content",contentdest
+            if not os.path.isdir(contentdest):
+                # single-file torrent
+                os.remove(contentdest)
+            else:
+                # multi-file torrent
+                shutil.rmtree(contentdest,True) # ignore errors
+
+
+    def notify(self, subject, changeType, obj_id, *args):
+        """
+        Notify all interested observers about an event with threads from the pool
+        """
+        if DEBUG:
+            print >>sys.stderr,"ucb: notify called:",subject,changeType,`obj_id`, args
+        self.notifier.notify(subject,changeType,obj_id,*args)
+        
+        
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/__init__.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/maketorrent.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/maketorrent.py
new file mode 100644 (file)
index 0000000..0084480
--- /dev/null
@@ -0,0 +1,630 @@
+# Written by Arno Bakker, Bram Cohen
+# multitracker extensions by John Hoffman
+# modified for Merkle hashes and digital signatures by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import os
+import md5
+import zlib
+
+from BaseLib.Core.Utilities.Crypto import sha
+from copy import copy
+from time import time
+from traceback import print_exc
+from types import LongType
+
+from BaseLib.Core.BitTornado.bencode import bencode
+from BaseLib.Core.BitTornado.BT1.btformats import check_info
+from BaseLib.Core.Merkle.merkle import MerkleTree
+from BaseLib.Core.Utilities.unicode import str2unicode,bin2unicode
+from BaseLib.Core.APIImplementation.miscutils import parse_playtime_to_secs,offset2piece
+from BaseLib.Core.osutils import fix_filebasename
+from BaseLib.Core.defaults import tdefdictdefaults
+
+
+ignore = [] # Arno: was ['core', 'CVS']
+
+DEBUG = False
+
+def make_torrent_file(input, userabortflag = None, userprogresscallback = lambda x: None):
+    """ Create a torrent file from the supplied input. 
+    
+    Returns a (infohash,metainfo) pair, or (None,None) on userabort. """
+    
+    (info,piece_length) = makeinfo(input,userabortflag,userprogresscallback)
+    if userabortflag is not None and userabortflag.isSet():
+        return (None,None)
+    if info is None:
+        return (None,None)
+
+    #if DEBUG:
+    #    print >>sys.stderr,"mktorrent: makeinfo returned",`info`
+    
+    check_info(info)
+    metainfo = {'info': info, 'encoding': input['encoding'], 'creation date': long(time())}
+
+    # http://www.bittorrent.org/DHT_protocol.html says both announce and nodes
+    # are not allowed, but some torrents (Azureus?) apparently violate this.
+    if input['nodes'] is None and input['announce'] is None:
+        raise ValueError('No tracker set')
+    
+    for key in ['announce','announce-list','nodes','comment','created by','httpseeds', 'url-list']:
+        if input[key] is not None and len(input[key]) > 0:
+            metainfo[key] = input[key]
+            if key == 'comment':
+                metainfo['comment.utf-8'] = uniconvert(input['comment'],'utf-8')
+        
+    # Assuming 1 file, Azureus format no support multi-file torrent with diff
+    # bitrates 
+    bitrate = None
+    for file in input['files']:
+        if file['playtime'] is not None:
+            secs = parse_playtime_to_secs(file['playtime'])
+            bitrate = file['length']/secs
+            break
+        if input.get('bps') is not None:
+            bitrate = input['bps']
+            break
+
+    if bitrate is not None or input['thumb'] is not None:
+        mdict = {}
+        mdict['Publisher'] = 'Tribler'
+        if input['comment'] is None:
+            descr = ''
+        else:
+            descr = input['comment']
+        mdict['Description'] = descr
+
+        if bitrate is not None:
+            mdict['Progressive'] = 1
+            mdict['Speed Bps'] = int(bitrate) # bencode fails for float
+        else:
+            mdict['Progressive'] = 0
+
+        mdict['Title'] = metainfo['info']['name']
+        mdict['Creation Date'] = long(time())
+        # Azureus client source code doesn't tell what this is, so just put in random value from real torrent
+        mdict['Content Hash'] = 'PT3GQCPW4NPT6WRKKT25IQD4MU5HM4UY'
+        mdict['Revision Date'] = long(time())
+        if input['thumb'] is not None:
+            mdict['Thumbnail'] = input['thumb']
+        cdict = {}
+        cdict['Content'] = mdict
+        metainfo['azureus_properties'] = cdict
+
+    if input['torrentsigkeypairfilename'] is not None:
+        from BaseLib.Core.Overlay.permid import create_torrent_signature
+
+        create_torrent_signature(metainfo,input['torrentsigkeypairfilename'])
+
+    if 'url-compat' in input:
+        metainfo['info']['url-compat'] = input['url-compat']
+     
+    # Arno, 2010-03-02:   
+    # Theoretically should go into 'info' field, to get infohash protection
+    # because the video won't play without them. In the future we'll sign
+    # the whole .torrent IMHO so it won't matter. Keeping it out of 'info'
+    # at the moment makes the .tstream files more stable (in case you restart
+    # the live source, and the Ogg header generated contains some date or
+    # what not, we'd need a new .tstream to be distributed to all.
+    #
+    if 'ogg-headers' in input:
+        metainfo['ogg-headers'] = input['ogg-headers']
+
+
+    # Two places where infohash calculated, here and in TorrentDef.
+    # Elsewhere: must use TorrentDef.get_infohash() to allow P2PURLs.
+    
+    infohash = sha(bencode(info)).digest()
+    return (infohash,metainfo)
+
+
+def uniconvertl(l, e):
+    """ Convert a pathlist to a list of strings encoded in encoding "e" using
+    uniconvert. """
+    r = []
+    try:
+        for s in l:
+            r.append(uniconvert(s, e))
+    except UnicodeError:
+        raise UnicodeError('bad filename: '+os.path.join(l))
+    return r
+
+def uniconvert(s, enc):
+    """ Convert 's' to a string containing a Unicode sequence encoded using
+    encoding "enc". If 's' is not a Unicode object, we first try to convert
+    it to one, guessing the encoding if necessary. """
+    if not isinstance(s, unicode):
+        try:
+            s = bin2unicode(s,enc)
+        except UnicodeError:
+            raise UnicodeError('bad filename: '+s)
+    return s.encode(enc)
+
+
+def makeinfo(input,userabortflag,userprogresscallback):
+    """ Calculate hashes and create torrent file's 'info' part """
+    encoding = input['encoding']
+
+    pieces = []
+    sh = sha()
+    done = 0L
+    fs = []
+    totalsize = 0L
+    totalhashed = 0L
+    
+    # 1. Determine which files should go into the torrent (=expand any dirs
+    # specified by user in input['files']
+    subs = []
+    for file in input['files']:
+        inpath = file['inpath']
+        outpath = file['outpath']
+        
+        if DEBUG:
+            print >>sys.stderr,"makeinfo: inpath",inpath,"outpath",outpath
+        
+        if os.path.isdir(inpath):
+            dirsubs = subfiles(inpath)
+            subs.extend(dirsubs)
+        else:
+            if outpath is None:
+                subs.append(([os.path.basename(inpath)],inpath))
+            else:
+                subs.append((filename2pathlist(outpath,skipfirst=True),inpath))
+            
+    subs.sort()
+    
+    # 2. Calc total size
+    newsubs = []
+    for p, f in subs:
+        if 'live' in input:
+            size = input['files'][0]['length']
+        else:
+            size = os.path.getsize(f)
+        totalsize += size
+        newsubs.append((p,f,size))
+    subs = newsubs
+
+    # 3. Calc piece length from totalsize if not set
+    if input['piece length'] == 0:
+        if input['createmerkletorrent']:
+            # used to be 15=32K, but this works better with slow python
+            piece_len_exp = 18 
+        else:
+            if totalsize > 8L*1024*1024*1024:    # > 8 gig =
+                piece_len_exp = 21          #   2 meg pieces
+            elif totalsize > 2*1024*1024*1024:   # > 2 gig =
+                piece_len_exp = 20          #   1 meg pieces
+            elif totalsize > 512*1024*1024:      # > 512M =
+                piece_len_exp = 19          #   512K pieces
+            elif totalsize > 64*1024*1024:       # > 64M =
+                piece_len_exp = 18          #   256K pieces
+            elif totalsize > 16*1024*1024:       # > 16M =
+                piece_len_exp = 17          #   128K pieces
+            elif totalsize > 4*1024*1024:        # > 4M =
+                piece_len_exp = 16          #   64K pieces
+            else:                           # < 4M =
+                piece_len_exp = 15          #   32K pieces
+        piece_length = 2 ** piece_len_exp
+    else:
+        piece_length = input['piece length']
+
+    # 4. Read files and calc hashes, if not live
+    if 'live' not in input:
+        for p, f, size in subs:
+            pos = 0L
+    
+            h = open(f, 'rb')
+    
+            if input['makehash_md5']:
+                hash_md5 = md5.new()
+            if input['makehash_sha1']:
+                hash_sha1 = sha()
+            if input['makehash_crc32']:
+                hash_crc32 = zlib.crc32('')
+            
+            while pos < size:
+                a = min(size - pos, piece_length - done)
+    
+                # See if the user cancelled
+                if userabortflag is not None and userabortflag.isSet():
+                    return (None,None)
+                
+                readpiece = h.read(a)
+    
+                # See if the user cancelled
+                if userabortflag is not None and userabortflag.isSet():
+                    return (None,None)
+                
+                sh.update(readpiece)
+    
+                if input['makehash_md5']:                
+                    # Update MD5
+                    hash_md5.update(readpiece)
+    
+                if input['makehash_crc32']:                
+                    # Update CRC32
+                    hash_crc32 = zlib.crc32(readpiece, hash_crc32)
+                
+                if input['makehash_sha1']:                
+                    # Update SHA1
+                    hash_sha1.update(readpiece)
+                
+                done += a
+                pos += a
+                totalhashed += a
+                
+                if done == piece_length:
+                    pieces.append(sh.digest())
+                    done = 0
+                    sh = sha()
+                    
+                if userprogresscallback is not None:
+                    userprogresscallback(float(totalhashed) / float(totalsize))
+    
+            newdict = {'length': num2num(size),
+                       'path': uniconvertl(p,encoding),
+                       'path.utf-8': uniconvertl(p, 'utf-8') }
+            
+            # Find and add playtime
+            for file in input['files']:
+                if file['inpath'] == f:
+                    if file['playtime'] is not None:
+                        newdict['playtime'] = file['playtime']
+                    break
+            
+            if input['makehash_md5']:
+                newdict['md5sum'] = hash_md5.hexdigest()
+            if input['makehash_crc32']:
+                newdict['crc32'] = "%08X" % hash_crc32
+            if input['makehash_sha1']:
+                newdict['sha1'] = hash_sha1.digest()
+            
+            fs.append(newdict)
+                
+            h.close()
+                
+        if done > 0:
+            pieces.append(sh.digest())
+       
+    # 5. Create info dict         
+    if len(subs) == 1:
+        flkey = 'length'
+        flval = num2num(totalsize)
+        name = subs[0][0][0]
+    else:
+        flkey = 'files'
+        flval = fs
+
+        outpath = input['files'][0]['outpath']
+        l = filename2pathlist(outpath)
+        name = l[0]
+        
+    infodict =  { 'piece length':num2num(piece_length), flkey: flval, 
+            'name': uniconvert(name,encoding),
+            'name.utf-8': uniconvert(name,'utf-8')}
+    
+    if 'live' not in input:
+        
+        if input['createmerkletorrent']:
+            merkletree = MerkleTree(piece_length,totalsize,None,pieces)
+            root_hash = merkletree.get_root_hash()
+            infodict.update( {'root hash': root_hash } )
+        else:
+            infodict.update( {'pieces': ''.join(pieces) } )
+    else:
+        # With source auth, live is a dict
+        infodict['live'] = input['live']
+
+    if 'cs_keys' in input:
+        # This is a closed swarm - add torrent keys
+        infodict['cs_keys'] = input['cs_keys']
+
+    if 'ns-metadata' in input:
+        # This has P2P-Next metadata, store in info field to make it
+        # immutable.
+        infodict['ns-metadata'] = input['ns-metadata']
+
+    if len(subs) == 1:
+        # Find and add playtime
+        for file in input['files']:
+            if file['inpath'] == f:
+                if file['playtime'] is not None:
+                    infodict['playtime'] = file['playtime']
+
+    return (infodict,piece_length)
+
+
+def subfiles(d):
+    """ Return list of (pathlist,local filename) tuples for all the files in
+    directory 'd' """
+    r = []
+    stack = [([], d)]
+    while stack:
+        p, n = stack.pop()
+        if os.path.isdir(n):
+            for s in os.listdir(n):
+                if s not in ignore and s[:1] != '.':
+                    stack.append((copy(p) + [s], os.path.join(n, s)))
+        else:
+            r.append((p, n))
+    return r
+
+
+def filename2pathlist(path,skipfirst=False):
+    """ Convert a filename to a 'path' entry suitable for a multi-file torrent 
+    file """ 
+    #if DEBUG:
+    #    print >>sys.stderr,"mktorrent: filename2pathlist:",path,skipfirst
+    
+    h = path
+    l = []
+    while True:
+        #if DEBUG:
+        #    print >>sys.stderr,"mktorrent: filename2pathlist: splitting",h
+        
+        (h,t) = os.path.split(h)
+        if h == '' and t == '':
+            break
+        if h == '' and skipfirst:
+            continue
+        if t != '': # handle case where path ends in / (=path separator)
+            l.append(t)
+            
+    l.reverse()
+    #if DEBUG:
+    #    print >>sys.stderr,"mktorrent: filename2pathlist: returning",l
+
+    return l
+
+
+def pathlist2filename(pathlist):
+    """ Convert a multi-file torrent file 'path' entry to a filename. """
+    fullpath = ''
+    for elem in pathlist:
+        fullpath = os.path.join(fullpath,elem)
+    return fullpath
+
+def pathlist2savefilename(pathlist,encoding):
+    fullpath = u''
+    for elem in pathlist:
+        u = bin2unicode(elem,encoding)
+        b = fix_filebasename(u)
+        fullpath = os.path.join(fullpath,b)
+    return fullpath
+
+def torrentfilerec2savefilename(filerec,length=None):
+    if length is None:
+        length = len(filerec['path'])
+    if 'path.utf-8' in filerec:
+        key = 'path.utf-8' 
+        encoding = 'utf-8'
+    else:
+        key = 'path'
+        encoding = None
+        
+    return pathlist2savefilename(filerec[key][:length],encoding)
+
+def savefilenames2finaldest(fn1,fn2):
+    """ Returns the join of two savefilenames, possibly shortened
+    to adhere to OS specific limits.
+    """
+    j = os.path.join(fn1,fn2)
+    if sys.platform == 'win32':
+        # Windows has a maximum path length of 260
+        # http://msdn2.microsoft.com/en-us/library/aa365247.aspx
+        j = j[:259] # 260 don't work.
+    return j
+
+
+def num2num(num):
+    """ Converts long to int if small enough to fit """
+    if type(num) == LongType and num < sys.maxint:
+        return int(num)
+    else:
+        return num
+
+def get_torrentfilerec_from_metainfo(filename,metainfo):
+    info = metainfo['info']
+    if filename is None:
+        return info
+
+    if filename is not None and 'files' in info:
+        for i in range(len(info['files'])):
+            x = info['files'][i]
+                
+            intorrentpath = pathlist2filename(x['path'])
+            if intorrentpath == filename:
+                return x
+            
+        raise ValueError("File not found in torrent")
+    else:
+        raise ValueError("File not found in single-file torrent")
+
+def get_bitrate_from_metainfo(file,metainfo):
+    info = metainfo['info']
+    if file is None:
+        bitrate = None
+        try:
+            playtime = None
+            if info.has_key('playtime'):
+                #print >>sys.stderr,"TorrentDef: get_bitrate: Bitrate in info field"
+                playtime = parse_playtime_to_secs(info['playtime'])
+            elif 'playtime' in metainfo: # HACK: encode playtime in non-info part of existing torrent
+                #print >>sys.stderr,"TorrentDef: get_bitrate: Bitrate in metainfo"
+                playtime = parse_playtime_to_secs(metainfo['playtime'])
+            elif 'azureus_properties' in metainfo:
+                azprop = metainfo['azureus_properties']
+                if 'Content' in azprop:
+                    content = metainfo['azureus_properties']['Content']
+                    if 'Speed Bps' in content:
+                        bitrate = float(content['Speed Bps'])
+                        #print >>sys.stderr,"TorrentDef: get_bitrate: Bitrate in Azureus metainfo",bitrate
+            if playtime is not None:
+                bitrate = info['length']/playtime
+                if DEBUG:
+                    print >>sys.stderr,"TorrentDef: get_bitrate: Found bitrate",bitrate
+        except:
+            print_exc()
+
+        return bitrate
+
+    if file is not None and 'files' in info:
+        for i in range(len(info['files'])):
+            x = info['files'][i]
+                
+            intorrentpath = ''
+            for elem in x['path']:
+                intorrentpath = os.path.join(intorrentpath,elem)
+            bitrate = None
+            try:
+                playtime = None
+                if x.has_key('playtime'):
+                    playtime = parse_playtime_to_secs(x['playtime'])
+                elif 'playtime' in metainfo: # HACK: encode playtime in non-info part of existing torrent
+                    playtime = parse_playtime_to_secs(metainfo['playtime'])
+                elif 'azureus_properties' in metainfo:
+                    azprop = metainfo['azureus_properties']
+                    if 'Content' in azprop:
+                        content = metainfo['azureus_properties']['Content']
+                        if 'Speed Bps' in content:
+                            bitrate = float(content['Speed Bps'])
+                            #print >>sys.stderr,"TorrentDef: get_bitrate: Bitrate in Azureus metainfo",bitrate
+                    
+                if playtime is not None:
+                    bitrate = x['length']/playtime
+            except:
+                print_exc()
+                
+            if intorrentpath == file:
+                return bitrate
+            
+        raise ValueError("File not found in torrent")
+    else:
+        raise ValueError("File not found in single-file torrent: "+file)
+
+
+def get_length_filepieceranges_from_metainfo(metainfo,selectedfiles):
+    
+    if 'files' not in metainfo['info']:
+        # single-file torrent
+        return (metainfo['info']['length'],None)
+    else:
+        # multi-file torrent
+        files = metainfo['info']['files']
+        piecesize = metainfo['info']['piece length']
+        
+        total = 0L
+        filepieceranges = []
+        for i in xrange(len(files)):
+            path = files[i]['path']
+            length = files[i]['length']
+            filename = pathlist2filename(path)
+            
+            if length > 0 and (not selectedfiles or (selectedfiles and filename in selectedfiles)):
+                range = (offset2piece(total,piecesize), offset2piece(total + length,piecesize),filename)
+                filepieceranges.append(range)
+            total += length
+        return (total,filepieceranges)
+
+
+def copy_metainfo_to_input(metainfo,input):
+    
+    keys = tdefdictdefaults.keys()
+    # Arno: For magnet link support
+    keys.append("initial peers")
+    for key in keys:
+        if key in metainfo:
+            input[key] = metainfo[key]
+            
+    infokeys = ['name','piece length','live','url-compat']
+    for key in infokeys:
+        if key in metainfo['info']:
+            input[key] = metainfo['info'][key]
+        
+    # Note: don't know inpath, set to outpath
+    if 'length' in metainfo['info']:
+        outpath = metainfo['info']['name']
+        if 'playtime' in metainfo['info']:
+            playtime = metainfo['info']['playtime']
+        else:
+            playtime = None
+        length = metainfo['info']['length'] 
+        d = {'inpath':outpath,'outpath':outpath,'playtime':playtime,'length':length}
+        input['files'].append(d)
+    else: # multi-file torrent
+        files = metainfo['info']['files']
+        for file in files:
+            outpath = pathlist2filename(file['path'])
+            if 'playtime' in file:
+                playtime = file['playtime']
+            else:
+                playtime = None
+            length = file['length'] 
+            d = {'inpath':outpath,'outpath':outpath,'playtime':playtime,'length':length}
+            input['files'].append(d)
+    
+    if 'azureus_properties' in metainfo:
+        azprop = metainfo['azureus_properties']
+        if 'Content' in azprop:
+            content = metainfo['azureus_properties']['Content']
+            if 'Thumbnail' in content:
+                input['thumb'] = content['Thumbnail']
+      
+    if 'live' in metainfo['info']:
+        input['live'] = metainfo['info']['live'] 
+
+    if 'cs_keys' in metainfo['info']:
+        input['cs_keys'] = metainfo['info']['cs_keys']
+
+    if 'url-compat' in metainfo['info']:
+        input['url-compat'] = metainfo['info']['url-compat'] 
+
+    if 'ogg-headers' in metainfo:
+        input['ogg-headers'] = metainfo['ogg-headers']
+
+    if 'ns-metadata' in metainfo['info']:
+        input['ns-metadata'] = metainfo['info']['ns-metadata']
+
+    # Diego : we want web seeding
+    if 'url-list' in metainfo:
+        input['url-list'] = metainfo['url-list'] 
+
+    if 'httpseeds' in metainfo:
+        input['httpseeds'] = metainfo['httpseeds']
+
+
+def get_files(metainfo,exts):
+    # 01/02/10 Boudewijn: now returns (file, length) tuples instead of files
+    
+    videofiles = []
+    if 'files' in metainfo['info']:
+        # Multi-file torrent
+        files = metainfo['info']['files']
+        for file in files:
+            
+            p = file['path']
+            #print >>sys.stderr,"TorrentDef: get_files: file is",p
+            filename = ''
+            for elem in p:
+                #print >>sys.stderr,"TorrentDef: get_files: elem is",elem
+                filename = os.path.join(filename,elem)
+            
+            #print >>sys.stderr,"TorrentDef: get_files: composed filename is",filename    
+            (prefix,ext) = os.path.splitext(filename)
+            if ext != '' and ext[0] == '.':
+                ext = ext[1:]
+            #print >>sys.stderr,"TorrentDef: get_files: ext",ext
+            if exts is None or ext.lower() in exts:
+                videofiles.append((filename, file['length']))
+    else:
+        #print >>sys.stderr,"TorrentDef: get_files: Single-torrent file"
+        
+        filename = metainfo['info']['name'] # don't think we need fixed name here
+        (prefix,ext) = os.path.splitext(filename)
+        if ext != '' and ext[0] == '.':
+            ext = ext[1:]
+        if exts is None or ext.lower() in exts:
+            videofiles.append((filename, metainfo['info']['length']))
+    return videofiles
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/makeurl.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/makeurl.py
new file mode 100644 (file)
index 0000000..34a5ae8
--- /dev/null
@@ -0,0 +1,354 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# TODO: \r
+# * Test suite\r
+# * Tracker support: how do they determine which files to seed.\r
+#\r
+# * Reverse support for URL-compat: URLs that do use infohash.\r
+#   - Make sure internal tracker understands URL-compat torrentfiles\r
+#   - Make sure internal tracker understands P2P URLs\r
+# \r
+# ISSUE: what if trackers have query parts? Is that officially/practically allowed?\r
+\r
+\r
+import sys\r
+import urlparse\r
+import urllib\r
+import math\r
+if sys.platform != "win32":\r
+    import curses.ascii\r
+from types import IntType, LongType\r
+from struct import pack, unpack\r
+from base64 import b64encode, b64decode\r
+from M2Crypto import Rand # TODO REMOVE FOR LICHT\r
+from traceback import print_exc,print_stack\r
+\r
+from BaseLib.Core.simpledefs import *\r
+from BaseLib.Core.Utilities.Crypto import sha\r
+\r
+\r
+DEBUG = False\r
+\r
+\r
+def metainfo2p2purl(metainfo):\r
+    """ metainfo must be a Merkle torrent or a live torrent with an\r
+    'encoding' field set. \r
+    @return URL\r
+    """\r
+    info = metainfo['info']\r
+    \r
+    bitrate = None\r
+    if 'azureus_properties' in metainfo:\r
+        azprops = metainfo['azureus_properties']\r
+        if 'Content' in azprops:\r
+            content = metainfo['azureus_properties']['Content']\r
+            if 'Speed Bps' in content:\r
+                bitrate = content['Speed Bps']\r
+                   \r
+    if 'encoding' not in metainfo:\r
+        encoding = 'utf-8'\r
+    else:\r
+        encoding = metainfo['encoding']\r
+                                                \r
+    urldict = {}\r
+\r
+    urldict['s'] = p2purl_encode_piecelength(info['piece length'])\r
+    # Warning: mbcs encodings sometimes don't work well under python!\r
+    urldict['n'] = p2purl_encode_name2url(info['name'],encoding)\r
+    \r
+    if info.has_key('length'):\r
+        urldict['l'] = p2purl_encode_nnumber(info['length'])\r
+    else:\r
+        raise ValueError("Multi-file torrents currently not supported")\r
+        #list = []\r
+        #for filedict in info['files']:\r
+        #    newdict = {}\r
+        #    newdict['p'] = list_filename_escape(filedict['path'])\r
+        #    newdict['l'] = p2purl_encode_nnumber(filedict['length'])\r
+        #    list.append(newdict)\r
+        #urldict['f'] = '' # TODO bencode(list)\r
+    if info.has_key('root hash'):\r
+        urldict['r'] = b64urlencode(info['root hash'])\r
+    elif info.has_key('live'):\r
+        urldict['k'] = b64urlencode(info['live']['pubkey'])\r
+        urldict['a'] = info['live']['authmethod']\r
+    else:\r
+        raise ValueError("url-compat and Merkle torrent must be on to create URL")\r
+        \r
+    if bitrate is not None:\r
+        urldict['b'] = p2purl_encode_nnumber(bitrate)\r
+        \r
+    query = ''\r
+    for k in ['n','r','k','l','s','a','b']:\r
+        if k in urldict:\r
+            if query != "":\r
+                query += '&'\r
+            v = urldict[k]\r
+            if k == 'n': \r
+                s = v\r
+            else:\r
+                s = k+"="+v\r
+            query += s\r
+        \r
+    sidx = metainfo['announce'].find(":")\r
+    hierpart = metainfo['announce'][sidx+1:]\r
+    url = P2PURL_SCHEME+':'+hierpart+"?"+query\r
+    return url\r
+\r
+\r
+\r
+def p2purl2metainfo(url):\r
+    """ Returns (metainfo,swarmid) """\r
+    \r
+    if DEBUG:\r
+        print >>sys.stderr,"p2purl2metainfo: URL",url\r
+        \r
+    # Python's urlparse only supports a defined set of schemes, if not\r
+    # recognized, everything becomes path. Handy.\r
+    colidx = url.find(":")\r
+    scheme = url[0:colidx]\r
+    qidx = url.find("?")\r
+    if qidx == -1:\r
+        # Compact form, no authority part and path rootless\r
+        authority = None\r
+        path = None\r
+        query = url[colidx+1:]\r
+        fragment = None\r
+    else:\r
+        # Long form, with authority\r
+        authoritypath = url[colidx+3:qidx]\r
+        pidx = authoritypath.find("/")\r
+        authority = authoritypath[0:pidx]\r
+        path = authoritypath[pidx:]\r
+        fidx = url.find("#")\r
+        if fidx == -1:\r
+            # No fragment\r
+            query = url[qidx+1:]\r
+            fragment = None\r
+        else:\r
+            query = url[qidx+1:fidx]\r
+            fragment = url[fidx:]\r
+\r
+        # Check port no.\r
+        csbidx = authority.find("]")\r
+        if authority.startswith("[") and csbidx != -1:\r
+            # Literal IPv6 address\r
+            if csbidx == len(authority)-1:\r
+                port = None\r
+            else:\r
+                port = authority[csbidx+1:]\r
+        else:\r
+            cidx = authority.find(":")\r
+            if cidx != -1:\r
+                port = authority[cidx+1:]\r
+            else:\r
+                port = None\r
+        if port is not None and not port.isdigit():\r
+            raise ValueError("Port not int")\r
+    \r
+    \r
+    if scheme != P2PURL_SCHEME:\r
+        raise ValueError("Unknown scheme "+P2PURL_SCHEME)\r
+\r
+    metainfo = {}\r
+    if authority and path:\r
+        metainfo['announce'] = 'http://'+authority+path\r
+        # Check for malformedness\r
+        result = urlparse.urlparse(metainfo['announce'])\r
+        if result[0] != "http":\r
+            raise ValueError("Malformed tracker URL")\r
+        \r
+        \r
+    reqinfo = p2purl_parse_query(query)\r
+    metainfo.update(reqinfo)\r
+\r
+    swarmid = metainfo2swarmid(metainfo)\r
+\r
+    if DEBUG:\r
+        print >>sys.stderr,"p2purl2metainfo: parsed",`metainfo`\r
+\r
+    \r
+    return (metainfo,swarmid)\r
+\r
+def metainfo2swarmid(metainfo):\r
+    if 'live' in metainfo['info']:\r
+        swarmid = pubkey2swarmid(metainfo['info']['live'])\r
+    else:\r
+        swarmid = metainfo['info']['root hash']\r
+    return swarmid\r
+\r
+\r
+def p2purl_parse_query(query):\r
+    if DEBUG:\r
+        print >>sys.stderr,"p2purl_parse_query: query",query\r
+\r
+    gotname = False\r
+    gotkey = False\r
+    gotrh = False\r
+    gotlen = False\r
+    gotps = False\r
+    gotam = False\r
+    gotbps = False\r
+    \r
+    reqinfo = {}\r
+    reqinfo['info'] = {}\r
+    \r
+    # Hmmm... could have used urlparse.parse_qs\r
+    kvs = query.split('&')\r
+    for kv in kvs:\r
+        if '=' not in kv:\r
+            # Must be name\r
+            reqinfo['info']['name'] = p2purl_decode_name2utf8(kv)\r
+            reqinfo['encoding'] = 'UTF-8'\r
+            gotname = True\r
+            continue\r
+        \r
+        k,v = kv.split('=')\r
+        \r
+        if k =='k' or k == 'a' and not ('live' in reqinfo['info']):\r
+            reqinfo['info']['live'] = {}\r
+        \r
+        if k == 'n':\r
+            reqinfo['info']['name'] = p2purl_decode_name2utf8(v)\r
+            reqinfo['encoding'] = 'UTF-8'\r
+            gotname = True\r
+        elif k == 'r':\r
+            reqinfo['info']['root hash'] = p2purl_decode_base64url(v)\r
+            gotrh = True\r
+        elif k == 'k':\r
+            reqinfo['info']['live']['pubkey'] = p2purl_decode_base64url(v)\r
+            # reqinfo['info']['live']['authmethod'] = pubkey2authmethod(reqinfo['info']['live']['pubkey'])\r
+            gotkey = True\r
+        elif k == 'l':\r
+            reqinfo['info']['length'] = p2purl_decode_nnumber(v)\r
+            gotlen = True\r
+        elif k == 's':\r
+            reqinfo['info']['piece length'] = p2purl_decode_piecelength(v)\r
+            gotps = True\r
+        elif k == 'a':\r
+            reqinfo['info']['live']['authmethod'] = v\r
+            gotam = True\r
+        elif k == 'b':\r
+            bitrate = p2purl_decode_nnumber(v)\r
+            reqinfo['azureus_properties'] = {}\r
+            reqinfo['azureus_properties']['Content'] = {}\r
+            reqinfo['azureus_properties']['Content']['Speed Bps'] = bitrate\r
+            gotbps = True\r
+            \r
+    if not gotname:\r
+        raise ValueError("Missing name field")\r
+    if not gotrh and not gotkey:\r
+        raise ValueError("Missing root hash or live pub key field")\r
+    if gotrh and gotkey:\r
+        raise ValueError("Found both root hash and live pub key field")\r
+    if not gotlen:\r
+        raise ValueError("Missing length field")\r
+    if not gotps:\r
+        raise ValueError("Missing piece size field")\r
+    if gotkey and not gotam:\r
+        raise ValueError("Missing live authentication method field")\r
+    if gotrh and gotam:\r
+        raise ValueError("Inconsistent: root hash and live authentication method field")\r
+\r
+    if not gotbps:\r
+        raise ValueError("Missing bitrate field")\r
+\r
+    return reqinfo\r
+            \r
+\r
+def pubkey2swarmid(livedict):\r
+    """ Calculate SHA1 of pubkey (or cert). \r
+    Make X.509 Subject Key Identifier compatible? \r
+    """\r
+    if DEBUG:\r
+        print >>sys.stderr,"pubkey2swarmid:",livedict.keys()\r
+    \r
+    if livedict['authmethod'] == "None":\r
+        # No live-source auth\r
+        return Rand.rand_bytes(20)\r
+    else:\r
+        return sha(livedict['pubkey']).digest()\r
+\r
+\r
+def p2purl_decode_name2utf8(v):\r
+    """ URL decode name to UTF-8 encoding """\r
+    if sys.platform != "win32":\r
+        for c in v:\r
+            if not curses.ascii.isascii(c):\r
+                raise ValueError("Name contains unescaped 8-bit value "+`c`)\r
+    return urllib.unquote_plus(v)\r
+\r
+def p2purl_encode_name2url(name,encoding):\r
+    """ Encode name in specified encoding to URL escaped UTF-8 """\r
+    \r
+    if encoding.lower() == 'utf-8':\r
+        utf8name = name\r
+    else:\r
+        uname = unicode(name, encoding)\r
+        utf8name = uname.encode('utf-8')\r
+    return urllib.quote_plus(utf8name)\r
+\r
+\r
+\r
+def p2purl_decode_base64url(v):\r
+    return b64urldecode(v)\r
+\r
+#\r
+# Convert Python number to binary value of sufficient bytes, \r
+# in network-byte order and BASE64-URL encode that binary value, or vice versa.\r
+#\r
+def p2purl_decode_nnumber(s):\r
+    b = b64urldecode(s)\r
+    if len(b) == 2:\r
+       format = "H"\r
+    elif len(b) == 4:\r
+       format = "l"\r
+    else:\r
+       format = "Q"\r
+    format = "!"+format # network-byte order       \r
+    return unpack(format,b)[0]\r
+\r
+def p2purl_encode_nnumber(s):\r
+    if type(s) == IntType:\r
+        if s < 2 ** 16:\r
+           format = "H"\r
+        elif s < 2 ** 32:\r
+           format = "l"\r
+    else:\r
+        format = "Q"\r
+    format = "!"+format # network-byte order\r
+    return b64urlencode(pack(format,s))\r
+\r
+\r
+#\r
+# Convert Python power-of-two piecelength to text value, or vice versa.\r
+#\r
+def p2purl_decode_piecelength(s):\r
+    return int(math.pow(2.0,float(s)))\r
+\r
+def p2purl_encode_piecelength(s):\r
+    return str(int(math.log(float(s),2.0)))\r
+\r
+#\r
+# "Modified BASE64 for URL" as informally specified in\r
+# http://en.wikipedia.org/wiki/Base64#URL_applications\r
+#\r
+def b64urlencode(input):\r
+    output = b64encode(input)\r
+    output = output.rstrip('=')\r
+    output = output.replace('+','-')\r
+    output = output.replace('/','_')\r
+    return output\r
+    \r
+def b64urldecode(input):\r
+    inter = input[:]\r
+    # readd padding.\r
+    padlen = 4 - (len(inter) - ((len(inter) / 4) * 4))\r
+    padstr = '=' * padlen\r
+    inter += padstr\r
+    inter = inter.replace('-','+')\r
+    inter = inter.replace('_','/')\r
+    output = b64decode(inter)\r
+    return output\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Core/APIImplementation/miscutils.py b/instrumentation/next-share/BaseLib/Core/APIImplementation/miscutils.py
new file mode 100644 (file)
index 0000000..40d3f37
--- /dev/null
@@ -0,0 +1,43 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import re
+from threading import Timer
+
+DEBUG = False
+
def parse_playtime_to_secs(hhmmss):
    """Convert a "[hh:]mm:]ss[.frac]" playtime string to whole seconds.

    @param hhmmss  e.g. "1:02:03.5", "02:03" or "3"
    @return playtime in seconds as int (fraction truncated; bencode
            cannot carry floats)
    @raise ValueError if the string contains no parsable time fields
    """
    if DEBUG:
        sys.stderr.write("miscutils: Playtime is %s\n" % (hhmmss,))
    r = re.compile("([0-9\.]+):*")
    occ = r.findall(hhmmss)
    t = None
    if len(occ) == 3:
        # hours as well
        t = int(occ[0])*3600 + int(occ[1])*60 + float(occ[2])
    elif len(occ) == 2:
        # minutes and seconds
        t = int(occ[0])*60 + float(occ[1])
    elif len(occ) == 1:
        # seconds
        t = float(occ[0])
    if t is None:
        # Previously this fell through to int(None) and raised a
        # confusing TypeError; fail with an explicit error instead.
        raise ValueError("cannot parse playtime: %r" % (hhmmss,))
    # Arno, 2010-07-05: Bencode doesn't support floats
    return int(t)
+    
+
def offset2piece(offset,piecesize):
    """Number of whole pieces needed to cover 'offset' bytes, i.e.
    ceil(offset / piecesize).

    NOTE(review): for an offset strictly inside piece p this returns
    p+1, not p -- callers appear to use it for *end* offsets; confirm
    before reusing it for start offsets.

    @param offset     byte offset (non-negative int)
    @param piecesize  piece size in bytes (positive int)
    @return piece count as int
    """
    # divmod makes the floor division explicit; the old code used '/',
    # which silently becomes true (float) division under Python 3.
    p, remainder = divmod(offset, piecesize)
    if remainder > 0:
        p += 1
    return p
+
+
+    
def NamedTimer(*args,**kwargs):
    """Create a threading.Timer that runs as a daemon thread and whose
    thread name is prefixed with "NamedTimer" for easier debugging.

    @param args/kwargs  forwarded verbatim to threading.Timer
    @return the (not yet started) Timer instance
    """
    t = Timer(*args,**kwargs)
    # Use the .daemon/.name properties (available since Python 2.6):
    # setDaemon()/setName() are deprecated and removed in Python 3.13.
    t.daemon = True
    t.name = "NamedTimer" + t.name
    return t
diff --git a/instrumentation/next-share/BaseLib/Core/Base.py b/instrumentation/next-share/BaseLib/Core/Base.py
new file mode 100644 (file)
index 0000000..5a1ac93
--- /dev/null
@@ -0,0 +1,30 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+""" Base classes for the Core API """
+
+from BaseLib.Core.exceptions import *
+
+DEBUG = False
+
+#
+# Tribler API base classes
+#
class Serializable:
    """
    Marker interface signalling that instances of the class are
    expected to be pickleable.
    """
    def __init__(self):
        # No state of its own; subclasses provide the data.
        pass
+
class Copyable:
    """
    Interface advertising that an instance can be copied. Subclasses
    override copy(); this base implementation only raises.
    """
    def copy(self):
        """
        Return a copy of this instance.
        @param self     an unbound instance of the class
        @return a new object equivalent to "self"
        @raise NotYetImplementedException  always, in this base class
        """
        raise NotYetImplementedException()
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Choker.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Choker.py
new file mode 100644 (file)
index 0000000..515ea7b
--- /dev/null
@@ -0,0 +1,269 @@
+# Written by Bram Cohen, Pawel Garbacki, Boxun Zhang
+# see LICENSE.txt for license information
+
+from random import randrange, shuffle
+import sys
+
+from BaseLib.Core.BitTornado.clock import clock
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
class Choker:
    """Decides which peer connections are choked or unchoked.

    A round-robin rotation runs every `round_robin_period` seconds and a
    full rechoke pass every 5 seconds (driven by the `schedule`
    callback). Handles super-seeding, G2G (give-to-get) peers, 2fast
    coordinator/helper connections, network-aware unchoke biasing and
    selective seeding.
    """

    def __init__(self, config, schedule, picker, seeding_selector, done = lambda: False):
        """
        @param config    dict of engine settings (round_robin_period,
                         min_uploads, max_uploads, ...)
        @param schedule  callable(func, delay_secs) scheduling func
        @param picker    PiecePicker; provides next_have() and .helper
        @param seeding_selector  unused here; kept for interface compat
        @param done      callable returning True once download complete
        """
        self.config = config
        self.round_robin_period = config['round_robin_period']
        self.schedule = schedule
        self.picker = picker
        self.connections = []
        self.last_preferred = 0
        self.last_round_robin = clock()
        self.done = done
        self.super_seed = False
        self.paused = False
        schedule(self._round_robin, 5)

        # SelectiveSeeding: installed later via set_seeding_manager()
        self.seeding_manager = None


    def set_round_robin_period(self, x):
        """Change how often the unchoke rotation happens (seconds)."""
        self.round_robin_period = x

    def _round_robin(self):
        """Periodic task: super-seed HAVE dribbling plus rotation of the
        connection list so a different peer gets the optimistic unchoke."""
        # Re-arm first so an exception below cannot stop the loop.
        self.schedule(self._round_robin, 5)
        if self.super_seed:
            cons = range(len(self.connections))
            to_close = []
            count = self.config['min_uploads']-self.last_preferred
            if count > 0:   # optimization
                shuffle(cons)
            for c in cons:
                # SelectiveSeeding
                # NOTE(review): 'c' is an index here, not a Connection;
                # is_conn_eligible() receives the index -- verify this
                # is what the seeding manager expects.
                if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c):
                    i = self.picker.next_have(self.connections[c], count > 0)
                    if i is None:
                        continue
                    if i < 0:
                        to_close.append(self.connections[c])
                        continue
                    self.connections[c].send_have(i)
                    count -= 1
                else:
                    # Drop non-eligible connections
                    to_close.append(self.connections[c])
            for c in to_close:
                c.close()
        if self.last_round_robin + self.round_robin_period < clock():
            self.last_round_robin = clock()
            for i in xrange(1, len(self.connections)):
                c = self.connections[i]

                # SelectiveSeeding
                if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c):
                    u = c.get_upload()
                    if u.is_choked() and u.is_interested():
                        # Rotate the list so this choked-but-interested
                        # peer is considered first in the next rechoke.
                        self.connections = self.connections[i:] + self.connections[:i]
                        break
        self._rechoke()

    def _rechoke(self):
        """Recompute the choke/unchoke state of every connection."""
        # 2fast: a helper that completed the download only serves its
        # coordinator; choke everyone else.
        helper = self.picker.helper
        if helper is not None and helper.coordinator is None and helper.is_complete():
            for c in self.connections:
                if not c.connection.is_coordinator_con():
                    u = c.get_upload()
                    u.choke()
            return

        if self.paused:
            for c in self.connections:
                c.get_upload().choke()
            return

        # NETWORK AWARE: optionally bias unchoking towards peers on the
        # same internal network.
        if 'unchoke_bias_for_internal' in self.config:
            checkinternalbias = self.config['unchoke_bias_for_internal']
        else:
            checkinternalbias = 0

        if DEBUG:
            sys.stderr.write("choker: _rechoke: checkinternalbias %s\n" % (checkinternalbias,))

        # 0. Construct candidate list
        preferred = []
        maxuploads = self.config['max_uploads']
        if maxuploads > 1:

            # 1. Get some regular candidates, ranked by upload rate when
            # seeding, download rate otherwise.
            for c in self.connections:

                # g2g: unchoke some g2g peers later
                if c.use_g2g:
                    continue

                # SelectiveSeeding
                if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c):
                    u = c.get_upload()
                    if not u.is_interested():
                        continue
                    if self.done():
                        r = u.get_rate()
                    else:
                        d = c.get_download()
                        r = d.get_rate()
                        if r < 1000 or d.is_snubbed():
                            continue

                    # NETWORK AWARENESS
                    if checkinternalbias and c.na_get_address_distance() == 0:
                        r += checkinternalbias
                        if DEBUG:
                            sys.stderr.write("choker: _rechoke: BIASING %s %s\n" % (c.get_ip(), c.get_port()))

                    # Negated rate so an ascending sort ranks fastest first.
                    preferred.append((-r, c))

            self.last_preferred = len(preferred)
            preferred.sort()
            # Keep maxuploads-1 slots; the last slot is the optimistic
            # unchoke chosen in step 4.
            del preferred[maxuploads-1:]
            if DEBUG:
                sys.stderr.write("choker: _rechoke: NORMAL UNCHOKE %s\n" % (preferred,))
            preferred = [x[1] for x in preferred]

            # 2. Get some g2g candidates, ranked by g2g score.
            g2g_preferred = []
            for c in self.connections:
                if not c.use_g2g:
                    continue

                # SelectiveSeeding
                if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c):

                    u = c.get_upload()
                    if not u.is_interested():
                        continue

                    r = c.g2g_score()
                    if checkinternalbias and c.na_get_address_distance() == 0:
                        r[0] += checkinternalbias
                        r[1] += checkinternalbias
                        if DEBUG:
                            sys.stderr.write("choker: _rechoke: G2G BIASING %s %s\n" % (c.get_ip(), c.get_port()))

                    g2g_preferred.append((-r[0], -r[1], c))

            g2g_preferred.sort()
            del g2g_preferred[maxuploads-1:]
            if DEBUG:
                sys.stderr.write("choker: _rechoke: G2G UNCHOKE %s\n" % (g2g_preferred,))
            g2g_preferred = [x[2] for x in g2g_preferred]

            preferred += g2g_preferred


        #
        count = len(preferred)
        hit = False
        to_unchoke = []

        # 3. The live source must always unchoke its auxiliary seeders
        # LIVESOURCE
        if 'live_aux_seeders' in self.config:

            for hostport in self.config['live_aux_seeders']:
                for c in self.connections:
                    if c.get_ip() == hostport[0]:
                        u = c.get_upload()
                        to_unchoke.append(u)
                        #print >>sys.stderr,"Choker: _rechoke: LIVE: Permanently unchoking aux seed",hostport

        # 4. Select from candidate lists, aux seeders always selected.
        # The first non-preferred eligible peer becomes the optimistic
        # unchoke ('hit'); everyone else (except 2fast peers) is choked.
        for c in self.connections:
            u = c.get_upload()
            if c in preferred:
                to_unchoke.append(u)
            else:
                if count < maxuploads or not hit:
                    if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c):
                        to_unchoke.append(u)
                        if u.is_interested():
                            count += 1
                            if DEBUG and not hit:
                                sys.stderr.write("choker: OPTIMISTIC UNCHOKE %s\n" % (c,))
                            hit = True

                else:
                    if not c.connection.is_coordinator_con() and not c.connection.is_helper_con():
                        u.choke()
                    elif u.is_choked():
                        to_unchoke.append(u)

        # 5. Unchoke selected candidates
        for u in to_unchoke:
            u.unchoke()


    def add_connection(self, connection, p = None):
        """
        Just add a connection, do not start doing anything yet
        Must call "start_connection" later!
        """
        if DEBUG:
            # Fix: this used to be an unconditional print on stderr,
            # spamming the log for every new connection.
            sys.stderr.write("choker: add_connection: %s\n" % (connection,))
        if p is None:
            p = randrange(-2, len(self.connections) + 1)
        connection.get_upload().choke()
        self.connections.insert(max(p, 0), connection)
        self.picker.got_peer(connection)
        self._rechoke()

    def start_connection(self, connection):
        """Actually start serving a previously added connection."""
        connection.get_upload().unchoke()

    def connection_made(self, connection, p = None):
        """Add a connection at a random position and rechoke."""
        if p is None:
            p = randrange(-2, len(self.connections) + 1)
        self.connections.insert(max(p, 0), connection)
        self.picker.got_peer(connection)
        self._rechoke()

    def connection_lost(self, connection): 
        """ connection is a Connecter.Connection """
        # Raynor Vliegendhart, RePEX:
        # The RePEX code can close a connection right after the handshake 
        # but before the Choker has been informed via connection_made. 
        # However, Choker.connection_lost is still called when a connection
        # is closed, so we should check whether Choker knows the connection:
        if connection in self.connections:
            self.connections.remove(connection)
            self.picker.lost_peer(connection)
            if connection.get_upload().is_interested() and not connection.get_upload().is_choked():
                self._rechoke()

    def interested(self, connection):
        """Peer became interested; rechoke if it is currently unchoked."""
        if not connection.get_upload().is_choked():
            self._rechoke()

    def not_interested(self, connection):
        """Peer lost interest; rechoke if it is currently unchoked."""
        if not connection.get_upload().is_choked():
            self._rechoke()

    def set_super_seed(self):
        """Switch to super-seeding mode; drops all current connections."""
        while self.connections:             # close all connections
            self.connections[0].close()
        self.picker.set_superseed()
        self.super_seed = True

    def pause(self, flag):
        """Pause (choke everyone) or resume unchoking."""
        self.paused = flag
        self._rechoke()

    # SelectiveSeeding
    def set_seeding_manager(self, manager):
        # When seeding starts, a non-trivial seeding manager will be set
        self.seeding_manager = manager
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Connecter.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Connecter.py
new file mode 100644 (file)
index 0000000..e24929c
--- /dev/null
@@ -0,0 +1,1698 @@
+# Written by Bram Cohen, Pawel Garbacki, Arno Bakker and Njaal Borch, George Milescu
+# see LICENSE.txt for license information
+
+import time
+import sys
+from types import DictType,IntType,LongType,ListType,StringType
+from random import shuffle
+from traceback import print_exc,print_stack
+from math import ceil
+import socket
+import urlparse
+
+from threading import Event # Wait for CS to complete
+
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+from BaseLib.Core.BitTornado.clock import clock
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.__init__ import version_short,decodePeerID,TRIBLER_PEERID_LETTER
+from BaseLib.Core.BitTornado.BT1.convert import tobinary,toint
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.DecentralizedTracking.MagnetLink.__init__ import *
+
+from BaseLib.Core.DecentralizedTracking.ut_pex import *
+from BaseLib.Core.BitTornado.BT1.track import compact_ip,decompact_ip
+
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from BaseLib.Core.ClosedSwarm import ClosedSwarm
+from BaseLib.Core.Statistics.Status import Status
+
# LIVEHACK: when True, peers without a recognized live-version EXTEND
# key are kicked (see Connection.got_extend_handshake).
KICK_OLD_CLIENTS=False

DEBUG = False
DEBUG_NORMAL_MSGS = False
DEBUG_UT_PEX = False
DEBUG_MESSAGE_HANDLING = False
DEBUG_CS = False # Debug closed swarms

# Seconds between attempts to look up an unauthenticated peer's PermID.
UNAUTH_PERMID_PERIOD = 3600

# allow FACTOR times the metadata to be uploaded each PERIOD.
# Example:
# FACTOR = 2 and PERIOD = 60 will allow all the metadata to be
# uploaded 2 times every 60 seconds.
UT_METADATA_FLOOD_FACTOR = 1            
UT_METADATA_FLOOD_PERIOD = 5 * 60 * 60  

"""
Arno: 2007-02-16:
uTorrent and Bram's BitTorrent now support an extension to the protocol,
documented on http://www.bittorrent.org/beps/bep_0010.html (previously
http://www.rasterbar.com/products/libtorrent/extension_protocol.html)

The problem is that the bit they use in the options field of the BT handshake
is the same as we use to indicate a peer supports the overlay-swarm connection.
The new clients will send an EXTEND message with ID 20 after the handshake to
inform the otherside what new messages it supports.

As a result, Tribler <= 3.5.0 clients won't be confused, but can't talk to these 
new clients either or vice versa. The new client will think we understand the 
message, send it. But because we don't know that message ID, we will close 
the connection. Our attempts to establish a new overlay connection with the new
client will gracefully fail, as the new client will not know of infohash=00000...
and close the connection.

We solve this conflict by adding support for the EXTEND message. We are now be 
able to receive it, and send our own. Our message will contain one method name, 
i.e. Tr_OVERLAYSWARM=253. Processing is now as follows:

* If bit 43 is set and the peerID is from an old Tribler (<=3.5.0)
  peer, we initiate an overlay-swarm connection.
* If bit 43 is set and the peer's EXTEND hs message contains method Tr_OVERLAYSWARM,
  it's a new Tribler peer, and we initiate an overlay-swarm connection.
* If bit 43 is set, and the EXTEND hs message does not contain Tr_OVERLAYSWARM
  it's not a Tribler client and we do not initiate an overlay-swarm
  connection.

N.B. The EXTEND message is poorly designed, it lacks protocol versioning
support which is present in the Azureus Extended Messaging Protocol
and our overlay-swarm protocol.

"""
# Message names advertised in the EXTEND handshake 'm' dict.
EXTEND_MSG_HANDSHAKE_ID = chr(0)
EXTEND_MSG_OVERLAYSWARM = 'Tr_OVERLAYSWARM'
EXTEND_MSG_G2G_V1       = 'Tr_G2G'
EXTEND_MSG_G2G_V2       = 'Tr_G2G_v2'
EXTEND_MSG_HASHPIECE    = 'Tr_hashpiece'
EXTEND_MSG_CS           = 'NS_CS'

# Live-streaming protocol version; peers advertise it as an EXTEND key
# with prefix EXTEND_MSG_LIVE_PREFIX + version number.
CURRENT_LIVE_VERSION=1
EXTEND_MSG_LIVE_PREFIX  = 'Tr_LIVE_v'
LIVE_FAKE_MESSAGE_ID    = chr(254)



# Seconds between batched G2G_XFER updates to G2G peers.
G2G_CALLBACK_INTERVAL = 4
+
def show(s):
    """Return the list of character/byte ordinals of s (debug helper
    for printing binary protocol data).

    @param s  a string of single-character elements
    @return list of ints
    """
    # Comprehension replaces the manual xrange/append loop (and keeps
    # working under Python 3, where xrange is gone).
    return [ord(c) for c in s]
+
+    
+class Connection:
    def __init__(self, connection, connecter):
        """Wrap a network-level connection for the Connecter.

        @param connection  the Encrypter/Encoder-level connection
        @param connecter   the owning Connecter (provides config,
                           infohash, merkle/CS/G2G settings)
        """
        self.connection = connection    
        self.connecter = connecter
        self.got_anything = False       # True once any post-handshake msg seen
        self.next_upload = None
        self.outqueue = []              # raw messages queued behind a partial piece
        self.partial_message = None     # piece bytes currently being drip-fed
        self.download = None
        self.upload = None
        self.send_choke_queued = False
        self.just_unchoked = None
        self.unauth_permid = None
        self.looked_for_permid = UNAUTH_PERMID_PERIOD-3
        self.closed = False
        self.extend_hs_dict = {}        # what extended messages does this peer support
        self.initiated_overlay = False

        # G2G
        self.use_g2g = False # set to true if both sides use G2G, indicated by self.connector.use_g2g
        self.g2g_version = None 
        self.perc_sent = {}
        # batch G2G_XFER information and periodically send it out.
        self.last_perc_sent = {}

        config = self.connecter.config
        # Upload rate estimators: [0] = non-G2G, [1] = G2G traffic.
        self.forward_speeds = [Measure(config['max_rate_period'], config['upload_rate_fudge']),
                               Measure(config['max_rate_period'], config['upload_rate_fudge'])]
        
        # BarterCast counters
        self.total_downloaded = 0
        self.total_uploaded = 0
        
        self.ut_pex_first_flag = True # first time we sent a ut_pex to this peer?
        self.na_candidate_ext_ip = None
        
        # NOTE(review): duplicate of the assignment two lines up;
        # harmless, probably a merge artifact.
        self.na_candidate_ext_ip = None
        
        # RePEX counters and repexer instance field
        self.pex_received = 0 # number of PEX messages received

        # Closed swarm stuff
        # Closed swarms
        self.is_closed_swarm = False
        self.cs_complete = False # Arno, 2010-08-24; no need for thread safety
        self.remote_is_authenticated = False
        self.remote_supports_cs = False
        status = Status.get_status_holder("LivingLab")
        self.cs_status = status.create_event("CS_protocol")
        # This is a total for all
        self.cs_status_unauth_requests = status.get_or_create_status_element("unauthorized_requests", 0)
        self.cs_status_supported = status.get_or_create_status_element("nodes_supporting_cs", 0)
        self.cs_status_not_supported = status.get_or_create_status_element("nodes_not_supporting_cs", 0)

        if not self.connecter.is_closed_swarm:
            self.cs_complete = True # Don't block anything if we're not a CS

        if self.connecter.is_closed_swarm:
            if DEBUG_CS:
                print >>sys.stderr,"connecter: conn: CS: This is a closed swarm"
            self.is_closed_swarm = True
            # A POA (proof of access) may be configured; without one we
            # can still serve, but cannot authenticate ourselves.
            if 'poa' in self.connecter.config:
                try:
                    from base64 import decodestring
                    #poa = self.connecter.config.get_poa()
                    poa = ClosedSwarm.POA.deserialize(decodestring(self.connecter.config['poa']))
                    #poa = self.connecter.config['poa']
                except Exception,e:
                    print_exc()
                    poa = None
            else:
                print >>sys.stderr,"connecter: conn: CS: Missing POA"
                poa = None
        
            # Need to also get the rest of the info, like my keys
            # and my POA
            my_keypair = ClosedSwarm.read_cs_keypair(self.connecter.config['eckeypairfilename'])
            self.closed_swarm_protocol = ClosedSwarm.ClosedSwarm(my_keypair,
                                                                 self.connecter.infohash,
                                                                 self.connecter.config['cs_keys'],
                                                                 poa)
            if DEBUG_CS:                                                                 
                print >>sys.stderr,"connecter: conn: CS: Closed swarm ready to start handshake"
+
+
    def get_myip(self, real=False):
        # Our own IP as seen on this connection; real=True bypasses
        # any NAT/override translation in the lower layer.
        return self.connection.get_myip(real)
    
    def get_myport(self, real=False):
        # Our own port on this connection (see get_myip for 'real').
        return self.connection.get_myport(real)
        
    def get_ip(self, real=False):
        # Peer's IP address.
        return self.connection.get_ip(real)

    def get_port(self, real=False):
        # Peer's port.
        return self.connection.get_port(real)

    def get_id(self):
        # Peer's raw peer ID from the BT handshake.
        return self.connection.get_id()

    def get_readable_id(self):
        # Human-readable form of the peer ID (for logs).
        return self.connection.get_readable_id()
+
+    def can_send_to(self):
+        if self.is_closed_swarm and not self.remote_is_authenticated:
+            return False
+        return True
+
    def close(self):
        """Close the underlying connection and mark this wrapper closed."""
        if DEBUG:
            if self.get_ip() == self.connecter.tracker_ip:
                # Closing the live-streaming source is almost certainly wrong.
                print >>sys.stderr,"connecter: close: live: WAAH closing SOURCE"

        self.connection.close()
        self.closed = True

        
    def is_closed(self):
        # True once close() has been called on this wrapper.
        return self.closed

    def is_locally_initiated(self):
        # True if we opened this connection (vs. the peer connecting to us).
        return self.connection.is_locally_initiated()
+
    def send_interested(self):
        # Tell the peer we want pieces from it.
        self._send_message(INTERESTED)

    def send_not_interested(self):
        self._send_message(NOT_INTERESTED)

    def send_choke(self):
        # If a piece transfer is in flight, defer the CHOKE until the
        # partial message has been flushed (see send_partial).
        if self.partial_message:
            self.send_choke_queued = True
        else:
            self._send_message(CHOKE)
            self.upload.choke_sent()
            self.just_unchoked = 0
+
    def send_unchoke(self):
        """Send UNCHOKE to the peer.

        Suppressed while the closed-swarm handshake is incomplete, or if
        a CHOKE was queued but not yet sent (the two then cancel out).
        @return True if the peer is now unchoked, False if suppressed.
        """
        if not self.cs_complete:
            if DEBUG_CS:
                print >> sys.stderr, 'Connection: send_unchoke: Not sending UNCHOKE, closed swarm handshanke not done'
            return False

        if self.send_choke_queued:
            self.send_choke_queued = False
            if DEBUG_NORMAL_MSGS:
                print >>sys.stderr,'Connection: send_unchoke: CHOKE SUPPRESSED'
        else:
            self._send_message(UNCHOKE)
            # Record when we optimistically unchoked an idle peer, so the
            # rate limiter can measure its reaction time (see got_request).
            if (self.partial_message or self.just_unchoked is None
                or not self.upload.interested or self.download.active_requests):
                self.just_unchoked = 0
            else:
                self.just_unchoked = clock()
        return True
+
    def send_request(self, index, begin, length):
        # Request 'length' bytes at offset 'begin' of piece 'index'.
        self._send_message(REQUEST + tobinary(index) + 
            tobinary(begin) + tobinary(length))
        if DEBUG_NORMAL_MSGS:
            print >>sys.stderr,"sending REQUEST to",self.get_ip()
            print >>sys.stderr,'sent request: '+str(index)+': '+str(begin)+'-'+str(begin+length)

    def send_cancel(self, index, begin, length):
        # Cancel a previously sent REQUEST for the same chunk.
        self._send_message(CANCEL + tobinary(index) + 
            tobinary(begin) + tobinary(length))
        if DEBUG_NORMAL_MSGS:
            print >>sys.stderr,'sent cancel: '+str(index)+': '+str(begin)+'-'+str(begin+length)
+
    def send_bitfield(self, bitfield):
        """Send our piece bitfield; unauthenticated closed-swarm peers
        get an all-zero bitfield instead of our real one."""
        if not self.cs_complete:
            # NOTE(review): unconditional log line (not behind a DEBUG flag).
            print >> sys.stderr, "Connection: send_bitfield: Not sending bitfield - CS handshake not done"
            return 

        if self.can_send_to():
            self._send_message(BITFIELD + bitfield)
        else:
            self.cs_status_unauth_requests.inc()
            # NOTE(review): unconditional log line (not behind a DEBUG flag).
            print >>sys.stderr,"Connection: send_bitfield: Sending empty bitfield to unauth node"
            self._send_message(BITFIELD + Bitfield(self.connecter.numpieces).tostring())


    def send_have(self, index):
        # HAVE announcements are suppressed for unauthenticated CS peers.
        if self.can_send_to():
            self._send_message(HAVE + tobinary(index))
        #elif DEBUG_CS:
        #    print >>sys.stderr,"Supressing HAVE messages"

    def send_keepalive(self):
        # A zero-length message is the BT keepalive.
        self._send_message('')
+
    def _send_message(self, s):
        # Prefix with the 4-byte big-endian length; if a piece transfer
        # is in flight, queue behind it to keep message framing intact.
        s = tobinary(len(s))+s
        if self.partial_message:
            self.outqueue.append(s)
        else:
            self.connection.send_message_raw(s)
+
    def send_partial(self, bytes):
        """Send up to 'bytes' bytes of the current piece transfer.

        Builds a PIECE (or Merkle HASHPIECE) message on demand from the
        uploader, drip-feeds it in 'bytes'-sized slices, and flushes the
        queued control messages once the piece is fully sent.

        @param bytes  maximum number of bytes to write this call
        @return number of bytes actually handed to the transport
        """
        if self.connection.closed:
            return 0
        if not self.can_send_to():
            return 0
        if self.partial_message is None:
            s = self.upload.get_upload_chunk()
            if s is None:
                return 0
            # Merkle: send hashlist along with piece in HASHPIECE message
            index, begin, hashlist, piece = s

            if self.use_g2g:
                # ----- G2G: record who we send this to
                # NOTE(review): passes 'self' explicitly although called
                # as a method -- g2g_sent_piece_part is defined further
                # down; verify its signature expects the connection arg.
                self.g2g_sent_piece_part( self, index, begin, hashlist, piece )

                # ---- G2G: we are uploading len(piece) data of piece #index
                for c in self.connecter.connections.itervalues():
                    if not c.use_g2g:
                        continue

                    # include sending to self, because it should not be excluded from the statistics

                    c.queue_g2g_piece_xfer( index, begin, piece )

            if self.connecter.merkle_torrent:
                hashpiece_msg_id = self.his_extend_msg_name_to_id(EXTEND_MSG_HASHPIECE)
                bhashlist = bencode(hashlist)
                if hashpiece_msg_id is None:
                    # old Tribler <= 4.5.2 style
                    self.partial_message = ''.join((
                                    tobinary(1+4+4+4+len(bhashlist)+len(piece)), HASHPIECE,
                                    tobinary(index), tobinary(begin), tobinary(len(bhashlist)), bhashlist, piece.tostring() ))
                else:
                    # Merkle BEP
                    self.partial_message = ''.join((
                                    tobinary(2+4+4+4+len(bhashlist)+len(piece)), EXTEND, hashpiece_msg_id,
                                    tobinary(index), tobinary(begin), tobinary(len(bhashlist)), bhashlist, piece.tostring() ))
                    
            else:
                self.partial_message = ''.join((
                            tobinary(len(piece) + 9), PIECE, 
                            tobinary(index), tobinary(begin), piece.tostring()))
            if DEBUG_NORMAL_MSGS:
                print >>sys.stderr,'sending chunk: '+str(index)+': '+str(begin)+'-'+str(begin+len(piece))

        # Not everything fits: send a slice and keep the rest pending.
        if bytes < len(self.partial_message):
            self.connection.send_message_raw(self.partial_message[:bytes])
            self.partial_message = self.partial_message[bytes:]
            return bytes

        # The remainder fits: send it plus any queued control messages,
        # honouring a CHOKE that was deferred during the transfer.
        q = [self.partial_message]
        self.partial_message = None
        if self.send_choke_queued:
            self.send_choke_queued = False
            self.outqueue.append(tobinary(1)+CHOKE)
            self.upload.choke_sent()
            self.just_unchoked = 0
        q.extend(self.outqueue)
        self.outqueue = []
        q = ''.join(q)
        self.connection.send_message_raw(q)
        return len(q)
+
    def get_upload(self):
        # The Uploader attached to this connection.
        return self.upload

    def get_download(self):
        # The Downloader attached to this connection.
        return self.download

    def set_download(self, download):
        self.download = download

    def backlogged(self):
        # True while the transport's send buffer is not yet flushed.
        return not self.connection.is_flushed()

    def got_request(self, i, p, l):
        # Peer requested l bytes at offset p of piece i; if this is the
        # first request after an optimistic unchoke, report the peer's
        # reaction time to the rate limiter.
        self.upload.got_request(i, p, l)
        if self.just_unchoked:
            self.connecter.ratelimiter.ping(clock() - self.just_unchoked)
            self.just_unchoked = 0
+
+    #
+    # Extension protocol support
+    #
+    def supports_extend_msg(self,msg_name):
+        if 'm' in self.extend_hs_dict:
+            return msg_name in self.extend_hs_dict['m']
+        else:
+            return False
+    
+    def got_extend_handshake(self,d):
+        if DEBUG:
+            print >>sys.stderr,"connecter: Got EXTEND handshake:",d
+        if 'm' in d:
+            if type(d['m']) != DictType:
+                raise ValueError('Key m does not map to a dict')
+            m = d['m']
+            newm = {}
+            for key,val in m.iteritems():
+                if type(val) != IntType:
+                    # Fix for BitTorrent 4.27.2e
+                    if type(val) == StringType:
+                        newm[key]= ord(val)
+                        continue
+                    else:
+                        raise ValueError('Message ID in m-dict not int')
+                newm[key]= val
+
+            if not 'm' in self.extend_hs_dict:
+                self.extend_hs_dict['m'] = {}
+            # Note: we store the dict without converting the msg IDs to bytes.
+            self.extend_hs_dict['m'].update(newm)
+            if self.connecter.overlay_enabled and EXTEND_MSG_OVERLAYSWARM in self.extend_hs_dict['m']:
+                # This peer understands our overlay swarm extension
+                if self.connection.locally_initiated:
+                    if DEBUG:
+                        print >>sys.stderr,"connecter: Peer supports Tr_OVERLAYSWARM, attempt connection"
+                    self.connect_overlay()
+                    
+            if EXTEND_MSG_CS in self.extend_hs_dict['m']:
+                self.remote_supports_cs = True
+                self.cs_status_supported.inc()
+                if DEBUG_CS:
+                    print >>sys.stderr,"connecter: Peer supports Closed swarms"
+
+                if self.is_closed_swarm and self.connection.locally_initiated:
+                    if DEBUG_CS:
+                        print >>sys.stderr,"connecter: Initiating Closed swarm handshake"
+                    self.start_cs_handshake()
+            else:
+                self.remote_supports_cs = False
+                self.cs_status_not_supported.inc()
+                if DEBUG_CS:
+                    print >>sys.stderr,"connecter: conn: Remote node does not support CS, flagging CS as done"
+                self.connecter.cs_handshake_completed()
+                status = Status.get_status_holder("LivingLab")
+                status.add_event(self.cs_status)
+                self.cs_status = status.create_event("CS_protocol")
+                
+                
+            if self.connecter.use_g2g and (EXTEND_MSG_G2G_V1 in self.extend_hs_dict['m'] or EXTEND_MSG_G2G_V2 in self.extend_hs_dict['m']):
+                # Both us and the peer want to use G2G
+                if self.connection.locally_initiated:
+                    if DEBUG:
+                        print >>sys.stderr,"connecter: Peer supports Tr_G2G"
+
+                self.use_g2g = True
+                if EXTEND_MSG_G2G_V2 in self.extend_hs_dict['m']:
+                    self.g2g_version = EXTEND_MSG_G2G_V2
+                else:
+                    self.g2g_version = EXTEND_MSG_G2G_V1
+            
+            # LIVEHACK
+            if KICK_OLD_CLIENTS:
+                peerhaslivekey = False
+                for key in self.extend_hs_dict['m']:
+                    if key.startswith(EXTEND_MSG_LIVE_PREFIX):
+                        peerhaslivekey = True
+                        livever = int(key[len(EXTEND_MSG_LIVE_PREFIX):])
+                        if livever < CURRENT_LIVE_VERSION:
+                            raise ValueError("Too old LIVE VERSION "+livever)
+                        else:
+                            print >>sys.stderr,"Connecter: live: Keeping connection to up-to-date peer v",livever,self.get_ip()
+                        
+                if not peerhaslivekey:
+                    if self.get_ip() == self.connecter.tracker_ip:
+                        # Keep connection to tracker / source
+                        print >>sys.stderr,"Connecter: live: Keeping connection to SOURCE",self.connecter.tracker_ip 
+                    else:
+                        raise ValueError("Kicking old LIVE peer "+self.get_ip())
+
+        # 'p' is peer's listen port, 'v' is peer's version, all optional
+        # 'e' is used by uTorrent to show it prefers encryption (whatever that means)
+        # See http://www.bittorrent.org/beps/bep_0010.html
+        for key in ['p','e', 'yourip','ipv4','ipv6','reqq']:
+            if key in d:
+                self.extend_hs_dict[key] = d[key]
+        
+        #print >>sys.stderr,"connecter: got_extend_hs: keys",d.keys()
+
+        # If he tells us our IP, record this and see if we get a majority vote on it
+        if 'yourip' in d:
+            try:
+                yourip = decompact_ip(d['yourip'])
+
+                try:
+                    from BaseLib.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
+                    dmh = DialbackMsgHandler.getInstance()
+                    dmh.network_btengine_extend_yourip(yourip)
+                except:
+                    if DEBUG:
+                        print_exc()
+                    pass
+                
+                if 'same_nat_try_internal' in self.connecter.config and self.connecter.config['same_nat_try_internal']:
+                    if 'ipv4' in d:
+                        self.na_check_for_same_nat(yourip)
+            except:
+                print_exc()
+        
+        # RePEX: Tell repexer we have received an extended handshake
+        repexer = self.connecter.repexer
+        if repexer:
+            try:
+                version = d.get('v',None)
+                repexer.got_extend_handshake(self, version)
+            except:
+                print_exc()
+
+    def his_extend_msg_name_to_id(self,ext_name):
+        """ returns the message id (byte) for the given message name or None """
+        val = self.extend_hs_dict['m'].get(ext_name)
+        if val is None:
+            return val
+        else:
+            return chr(val)
+
+    def get_extend_encryption(self):
+        return self.extend_hs_dict.get('e',0)
+    
+    def get_extend_listenport(self):
+        return self.extend_hs_dict.get('p')
+
+    def is_tribler_peer(self):
+        client, version = decodePeerID(self.connection.id)
+        return client == TRIBLER_PEERID_LETTER
+
+    def send_extend_handshake(self):
+        """ Compose and send our BEP-10 EXTEND handshake to this peer.
+
+        Advertises our supported extension messages ('m'), listen port ('p'),
+        client version ('v'), encryption preference ('e') and the peer's
+        external address as we see it ('yourip'); optionally our internal
+        IPv4 address and the ut_metadata size.
+        """
+
+        # NETWORK AWARE
+        hisip = self.connection.get_ip(real=True)
+        ipv4 = None
+        if self.connecter.config.get('same_nat_try_internal',0):
+            is_tribler_peer = self.is_tribler_peer()
+            print >>sys.stderr,"connecter: send_extend_hs: Peer is Tribler client",is_tribler_peer
+            if is_tribler_peer:
+                # If we're connecting to a Tribler peer, show our internal IP address
+                # as 'ipv4'.
+                ipv4 = self.get_ip(real=True)
+        
+        # See: http://www.bittorrent.org/beps/bep_0010.html
+        d = {}
+        d['m'] = self.connecter.EXTEND_HANDSHAKE_M_DICT
+        d['p'] = self.connecter.mylistenport
+        ver = version_short.replace('-',' ',1)
+        d['v'] = ver
+        d['e'] = 0  # Apparently this means we don't like uTorrent encryption
+        d['yourip'] = compact_ip(hisip)
+        if ipv4 is not None:
+            # Only send IPv4 when necessary, we prefer this peer to use this addr.
+            d['ipv4'] = compact_ip(ipv4) 
+        if self.connecter.ut_metadata_enabled:
+            # todo: set correct size if known
+            d['metadata_size'] = self.connecter.ut_metadata_size
+            
+        self._send_message(EXTEND + EXTEND_MSG_HANDSHAKE_ID + bencode(d))
+        if DEBUG:
+            print >>sys.stderr,'connecter: sent extend: id=0+',d,"yourip",hisip,"ipv4",ipv4
+
+    #
+    # ut_pex support
+    #
+    def got_ut_pex(self,d):
+        """ Handle an incoming uTorrent PEX message.
+
+        d is the bdecoded PEX dict. When a repexer is attached the raw dict
+        is handed to it and processing stops there. Otherwise at most
+        ut_pex_max_addrs_from_peer of the advertised peers are passed to the
+        Encoder as new connection candidates, with a bias towards Tribler
+        peers when the sender itself is a Tribler client.
+        """
+        if DEBUG_UT_PEX:
+            print >>sys.stderr,"connecter: Got uTorrent PEX:",d
+        (same_added_peers,added_peers,dropped_peers) = check_ut_pex(d)
+        
+        # RePEX: increase counter
+        self.pex_received += 1
+        
+        # RePEX: for now, we pass the whole PEX dict to the repexer and
+        # let it decode it. The reason is that check_ut_pex's interface
+        # has recently changed, currently returning a triple to prefer
+        # Tribler peers. The repexer, however, might have different 
+        # interests (e.g., storinng all flags). To cater to both interests,
+        # check_ut_pex needs to be rewritten. 
+        repexer = self.connecter.repexer
+        if repexer:
+            try:
+                repexer.got_ut_pex(self, d)
+            except:
+                print_exc()
+            return
+        
+        # DoS protection: we're accepting IP addresses from 
+        # an untrusted source, so be a bit careful
+        mx = self.connecter.ut_pex_max_addrs_from_peer
+        if DEBUG_UT_PEX:
+            print >>sys.stderr,"connecter: Got",len(added_peers),"peers via uTorrent PEX, using max",mx
+            
+        # for now we have a strong bias towards Tribler peers
+        if self.is_tribler_peer():
+            # Tribler sender: keep same-NAT peers at the front of the sample.
+            shuffle(same_added_peers)
+            shuffle(added_peers)
+            sample_peers = same_added_peers
+            sample_peers.extend(added_peers)
+        else:
+            # Unknown sender: shuffle everything together, no ordering bias.
+            sample_peers = same_added_peers
+            sample_peers.extend(added_peers)
+            shuffle(sample_peers)
+        
+        # Take random sample of mx peers
+        sample_added_peers_with_id = []
+
+        # Put the sample in the format desired by Encoder.start_connections()
+        for dns in sample_peers[:mx]:
+            peer_with_id = (dns, 0)
+            sample_added_peers_with_id.append(peer_with_id)
+        if len(sample_added_peers_with_id) > 0:
+            if DEBUG_UT_PEX:
+                print >>sys.stderr,"connecter: Starting ut_pex conns to",len(sample_added_peers_with_id)
+            self.connection.Encoder.start_connections(sample_added_peers_with_id)
+
+    def send_extend_ut_pex(self,payload):
+        msg = EXTEND+self.his_extend_msg_name_to_id(EXTEND_MSG_UTORRENT_PEX)+payload
+        self._send_message(msg)
+            
+    def first_ut_pex(self):
+        if self.ut_pex_first_flag:
+            self.ut_pex_first_flag = False
+            return True
+        else:
+            return False
+
+    def _send_cs_message(self, cs_list):
+        blist = bencode(cs_list)
+        self._send_message(EXTEND + self.his_extend_msg_name_to_id(EXTEND_MSG_CS) + blist)
+        
+    def got_cs_message(self, cs_list):
+        """ Drive the ClosedSwarm handshake state machine.
+
+        cs_list is the bdecoded message; cs_list[0] selects the step
+        (challenge A/B, POA exchange A/B). Errors are recorded on
+        self.cs_status rather than closing the connection. When the protocol
+        reports completion, the connecter is notified and cs_complete is set.
+        """
+        if not self.is_closed_swarm:
+            raise Exception("Got ClosedSwarm message, but this swarm is not closed")
+
+        # Process incoming closed swarm messages
+        t = cs_list[0]
+        if t == CS_CHALLENGE_A:
+            if DEBUG_CS:
+                print >>sys.stderr,"connecter: conn: CS: Got initial challenge"
+            # Got a challenge to authenticate to participate in a closed swarm
+            try:
+                response = self.closed_swarm_protocol.b_create_challenge(cs_list)
+                self._send_cs_message(response)
+            except Exception,e:
+                self.cs_status.add_value("CS_bad_initial_challenge")
+                if DEBUG_CS:
+                    print >>sys.stderr,"connecter: conn: CS: Bad initial challenge:",e
+        elif t == CS_CHALLENGE_B:
+            if DEBUG_CS:
+                print >>sys.stderr,"connecter: conn: CS: Got return challenge"
+            try:
+                response = self.closed_swarm_protocol.a_provide_poa_message(cs_list)
+                if DEBUG_CS and not response:
+                    print >> sys.stderr, "connecter: I'm not intererested in data"
+                self._send_cs_message(response)
+            except Exception,e:
+                self.cs_status.add_value("CS_bad_return_challenge")
+                if DEBUG_CS:
+                    print >>sys.stderr,"connecter: conn: CS: Bad return challenge",e
+                print_exc()
+                
+        elif t == CS_POA_EXCHANGE_A:
+            if DEBUG_CS:
+               print >>sys.stderr,"connecter: conn: CS:Got POA from A"
+            try:
+                response = self.closed_swarm_protocol.b_provide_poa_message(cs_list)
+                self.remote_is_authenticated = self.closed_swarm_protocol.is_remote_node_authorized()
+                if DEBUG_CS:
+                    print >>sys.stderr,"connecter: conn: CS: Remote node authorized:",self.remote_is_authenticated
+                if response:
+                    self._send_cs_message(response)
+            except Exception,e:
+                self.cs_status.add_value("CS_bad_POA_EXCHANGE_A")
+                if DEBUG_CS:
+                   print >>sys.stderr,"connecter: conn: CS: Bad POA from A:",e
+                
+        elif t == CS_POA_EXCHANGE_B:
+            try:
+                self.closed_swarm_protocol.a_check_poa_message(cs_list)
+                self.remote_is_authenticated = self.closed_swarm_protocol.is_remote_node_authorized()
+                if DEBUG_CS:
+                   print >>sys.stderr,"connecter: conn: CS: Remote node authorized:",self.remote_is_authenticated
+            except Exception,e:
+                self.cs_status.add_value("CS_bad_POA_EXCHANGE_B")
+                if DEBUG_CS:
+                   print >>sys.stderr,"connecter: conn: CS: Bad POA from B:",e
+
+        if not self.closed_swarm_protocol.is_incomplete():
+            self.connecter.cs_handshake_completed()
+            # NOTE(review): 'status' is fetched here but never used afterwards --
+            # confirm whether an event should be recorded or the call removed.
+            status = Status.get_status_holder("LivingLab")
+            self.cs_complete = True  # Flag CS as completed
+            # Don't need to add successful CS event
+
+    #
+    # Give-2-Get
+    #
+    def g2g_sent_piece_part( self, c, index, begin, hashlist, piece ):
+        """ Keeps a record of the fact that we sent piece index[begin:begin+chunk]. """
+
+        wegaveperc = float(len(piece))/float(self.connecter.piece_size)
+        if index in self.perc_sent:
+            self.perc_sent[index] = self.perc_sent[index] + wegaveperc 
+        else:
+            self.perc_sent[index] = wegaveperc
+    
+    
+    def queue_g2g_piece_xfer(self,index,begin,piece):
+        """ Queue the fact that we sent piece index[begin:begin+chunk] for
+        tranmission to peers 
+        """
+        if self.g2g_version == EXTEND_MSG_G2G_V1:
+            self.send_g2g_piece_xfer_v1(index,begin,piece)
+            return
+        
+        perc = float(len(piece))/float(self.connecter.piece_size)
+        if index in self.last_perc_sent:
+            self.last_perc_sent[index] = self.last_perc_sent[index] + perc 
+        else:
+            self.last_perc_sent[index] = perc
+
+    def dequeue_g2g_piece_xfer(self):
+        """ Send queued information about pieces we sent to peers. Called
+        periodically.
+        """ 
+        psf = float(self.connecter.piece_size)
+        ppdict = {}
+        
+        #print >>sys.stderr,"connecter: g2g dq: orig",self.last_perc_sent
+        
+        for index,perc in self.last_perc_sent.iteritems():
+            # due to rerequests due to slow pieces the sum can be above 1.0
+            capperc = min(1.0,perc) 
+            percb = chr(int((100.0 * capperc)))
+            # bencode can't deal with int keys
+            ppdict[str(index)] = percb
+        self.last_perc_sent = {}
+        
+        #print >>sys.stderr,"connecter: g2g dq: dest",ppdict
+        
+        if len(ppdict) > 0:
+            self.send_g2g_piece_xfer_v2(ppdict)
+
+    def send_g2g_piece_xfer_v1(self,index,begin,piece):
+        """ Send fact that we sent piece index[begin:begin+chunk] to a peer
+        to all peers (G2G V1).
+        """
+        # NOTE(review): unlike send_g2g_piece_xfer_v2 this does NOT prepend
+        # EXTEND before the extension-message id -- confirm whether V1 peers
+        # really expect the id byte as a bare BT message.
+        self._send_message(self.his_extend_msg_name_to_id(EXTEND_MSG_G2G_V1) + tobinary(index) + tobinary(begin) + tobinary(len(piece)))
+
+    def send_g2g_piece_xfer_v2(self,ppdict):
+        """ Send list of facts that we sent pieces to all peers (G2G V2). """
+        blist = bencode(ppdict)
+        self._send_message(EXTEND + self.his_extend_msg_name_to_id(EXTEND_MSG_G2G_V2) + blist)
+
+    def got_g2g_piece_xfer_v1(self,index,begin,length):
+        """ Got a G2G_PIECE_XFER message in V1 format. """
+        hegaveperc = float(length)/float(self.connecter.piece_size)
+        self.g2g_peer_forwarded_piece_part(index,hegaveperc)
+
+    def got_g2g_piece_xfer_v2(self,ppdict):
+        """ Got a G2G_PIECE_XFER message in V2 format. """
+        for indexstr,hegavepercb in ppdict.iteritems():
+            index = int(indexstr)
+            hegaveperc = float(ord(hegavepercb))/100.0
+            self.g2g_peer_forwarded_piece_part(index,hegaveperc)
+
+    def g2g_peer_forwarded_piece_part(self,index,hegaveperc):
+        """ Processes this peer forwarding piece i[begin:end] to a grandchild. """
+        # Reward for forwarding data in general
+        length = ceil(hegaveperc * float(self.connecter.piece_size))
+        self.forward_speeds[1].update_rate(length)
+
+        if index not in self.perc_sent:
+            # piece came from disk
+            return
+
+        # Extra reward if its data we sent
+        wegaveperc = self.perc_sent[index]
+        overlapperc = wegaveperc * hegaveperc
+        overlap = ceil(overlapperc * float(self.connecter.piece_size))
+        if overlap > 0:
+            self.forward_speeds[0].update_rate( overlap )
+
+    def g2g_score( self ):
+        return [x.get_rate() for x in self.forward_speeds]
+
+
+    #
+    # SecureOverlay support
+    #
+    def connect_overlay(self):
+        if DEBUG:
+            print >>sys.stderr,"connecter: Initiating overlay connection"
+        if not self.initiated_overlay:
+            from BaseLib.Core.Overlay.SecureOverlay import SecureOverlay
+            
+            self.initiated_overlay = True
+            so = SecureOverlay.getInstance()
+            so.connect_dns(self.connection.dns,self.network_connect_dns_callback)
+
+    def network_connect_dns_callback(self,exc,dns,permid,selversion):
+        # WARNING: WILL BE CALLED BY NetworkThread
+        if exc is not None:
+            print >>sys.stderr,"connecter: peer",dns,"said he supported overlay swarm, but we can't connect to him",exc
+
+    def start_cs_handshake(self):
+        try:
+            if DEBUG_CS:
+                print >>sys.stderr,"connecter: conn: CS: Initiating Closed Swarm Handshake"
+            challenge = self.closed_swarm_protocol.a_create_challenge()
+            self._send_cs_message(challenge)
+        except Exception,e:
+            print >>sys.stderr,"connecter: conn: CS: Bad initial challenge:",e
+        
+
+    #
+    # NETWORK AWARE
+    #
+    def na_check_for_same_nat(self,yourip):
+        """ See if peer is local, e.g. behind same NAT, same AS or something.
+        If so, try to optimize:
+        - Same NAT -> reconnect to use internal network 
+
+        yourip is the external address this peer reported for us in his
+        EXTEND handshake ('yourip').
+        """
+        hisip = self.connection.get_ip(real=True)
+        if hisip == yourip:
+            # Do we share the same NAT?
+            myextip = self.connecter.get_extip_func(unknowniflocal=True)
+            myintip = self.get_ip(real=True)
+
+            if DEBUG:
+                print >>sys.stderr,"connecter: na_check_for_same_nat: his",hisip,"myext",myextip,"myint",myintip
+            
+            if hisip != myintip or hisip == '127.0.0.1': # to allow testing
+                # He can't fake his source addr, so we're not running on the
+                # same machine,
+
+                # He may be quicker to determine we should have a local
+                # conn, so prepare for his connection in advance.
+                #
+                if myextip is None:
+                    # I don't known my external IP and he's not on the same
+                    # machine as me. yourip could be our real external IP, test.
+                    if DEBUG:
+                        print >>sys.stderr,"connecter: na_check_same_nat: Don't know my ext ip, try to loopback to",yourip,"to see if that's me"
+                    self.na_start_loopback_connection(yourip)
+                elif hisip == myextip:
+                    # Same NAT. He can't fake his source addr.
+                    # Attempt local network connection
+                    if DEBUG:
+                        print >>sys.stderr,"connecter: na_check_same_nat: Yes, trying to connect via internal"
+                    self.na_start_internal_connection()
+                else: 
+                    # hisip != myextip
+                    # He claims we share the same IP, but I think my ext IP
+                    # is something different. Either he is lying or I'm
+                    # mistaken, test
+                    if DEBUG:
+                        print >>sys.stderr,"connecter: na_check_same_nat: Maybe, me thinks not, try to loopback to",yourip
+                    self.na_start_loopback_connection(yourip)
+                
+    def na_start_loopback_connection(self,yourip):
+        """ Peer claims my external IP is "yourip". Try to connect back to myself """
+        if DEBUG:
+            print >>sys.stderr,"connecter: na_start_loopback: Checking if my ext ip is",yourip
+        self.na_candidate_ext_ip = yourip
+        
+        dns = (yourip,self.connecter.mylistenport)
+        self.connection.Encoder.start_connection(dns,0,forcenew=True)
+
+    def na_got_loopback(self,econnection):
+        """ Got a connection with my peer ID. Check that this is indeed me looping
+        back to myself. No man-in-the-middle attacks protection. This is complex
+        if we're also connecting to ourselves because of a stale tracker 
+        registration. Window of opportunity is small. 
+        """
+        himismeip = econnection.get_ip(real=True)
+        if DEBUG:
+            print >>sys.stderr,"connecter: conn: na_got_loopback:",himismeip,self.na_candidate_ext_ip
+        if self.na_candidate_ext_ip == himismeip:
+            self.na_start_internal_connection()
+                    
+
+    def na_start_internal_connection(self):
+        """ Reconnect to peer using internal network.
+
+        Uses the internal address ('ipv4') and listen port ('p') the peer
+        advertised in his EXTEND handshake. Only the non-initiating side
+        reconnects, so both sides don't race to open the internal conn.
+        """
+        if DEBUG:
+            print >>sys.stderr,"connecter: na_start_internal_connection"
+        
+        # Doesn't really matter who initiates. Letting other side do it makes
+        # testing easier.
+        if not self.is_locally_initiated():
+            
+            hisip = decompact_ip(self.extend_hs_dict['ipv4'])
+            hisport = self.extend_hs_dict['p']
+            
+            # For testing, see Tribler/Test/test_na_extend_hs.py
+            if hisip == '224.4.8.1' and hisport == 4810:
+                hisip = '127.0.0.1'
+                hisport = 4811
+                
+            # Record where we expect his internal connection to come from
+            # (read by the Encoder layer -- NOTE(review): confirm consumer).
+            self.connection.na_want_internal_conn_from = hisip        
+            
+            hisdns = (hisip,hisport)
+            if DEBUG:
+                print >>sys.stderr,"connecter: na_start_internal_connection to",hisdns
+            self.connection.Encoder.start_connection(hisdns,0)
+
+    def na_get_address_distance(self):
+        return self.connection.na_get_address_distance()
+
+    def is_live_source(self):
+        if self.connecter.live_streaming:
+            if self.get_ip() == self.connecter.tracker_ip:
+                return True
+        return False
+            
+
+class Connecter:
+# 2fastbt_
+    def __init__(self, metadata, make_upload, downloader, choker, numpieces, piece_size,
+            totalup, config, ratelimiter, merkle_torrent, sched = None, 
+            coordinator = None, helper = None, get_extip_func = lambda: None, mylistenport = None, use_g2g = False, infohash=None, tracker=None, live_streaming = False):
+
+        self.downloader = downloader
+        self.make_upload = make_upload
+        self.choker = choker
+        self.numpieces = numpieces
+        self.piece_size = piece_size
+        self.config = config
+        self.ratelimiter = ratelimiter
+        self.rate_capped = False
+        self.sched = sched
+        self.totalup = totalup
+        self.rate_capped = False
+        self.connections = {}
+        self.external_connection_made = 0
+        self.merkle_torrent = merkle_torrent
+        self.use_g2g = use_g2g
+        # 2fastbt_
+        self.coordinator = coordinator
+        self.helper = helper
+        self.round = 0
+        self.get_extip_func = get_extip_func
+        self.mylistenport = mylistenport
+        self.infohash = infohash
+        self.live_streaming = live_streaming
+        self.tracker = tracker
+        self.tracker_ip = None
+        if self.live_streaming:
+            try:
+                (scheme, netloc, path, pars, query, _fragment) = urlparse.urlparse(self.tracker)
+                host = netloc.split(':')[0] 
+                self.tracker_ip = socket.getaddrinfo(host,None)[0][4][0]
+            except:
+                print_exc()
+                self.tracker_ip = None
+            #print >>sys.stderr,"Connecter: live: source/tracker is",self.tracker_ip
+        self.overlay_enabled = 0
+        if self.config['overlay']:
+            self.overlay_enabled = True
+
+        if DEBUG:
+            if self.overlay_enabled:
+                print >>sys.stderr,"connecter: Enabling overlay"
+            else:
+                print >>sys.stderr,"connecter: Disabling overlay"
+            
+        self.ut_pex_enabled = 0
+        if 'ut_pex_max_addrs_from_peer' in self.config:
+            self.ut_pex_max_addrs_from_peer = self.config['ut_pex_max_addrs_from_peer']
+            self.ut_pex_enabled = self.ut_pex_max_addrs_from_peer > 0
+        self.ut_pex_previous_conns = [] # last value of 'added' field for all peers
+
+        self.ut_metadata_enabled = self.config["magnetlink"]
+        if self.ut_metadata_enabled:
+            # metadata (or self.responce as its called in download_bt1) is
+            # a dic containing the metadata.  Ut_metadata shares the
+            # bencoded 'info' part of this metadata in 16kb pieces.
+            infodata = bencode(metadata["info"])
+            self.ut_metadata_size = len(infodata)
+            self.ut_metadata_list = [infodata[index:index+16*1024] for index in xrange(0, len(infodata), 16*1024)]
+            # history is a list containing previous request served (to
+            # limit our bandwidth usage)
+            self.ut_metadata_history = []
+            if DEBUG: print >> sys.stderr,"connecter.__init__: Enable ut_metadata"
+        
+        if DEBUG_UT_PEX:
+            if self.ut_pex_enabled:
+                print >>sys.stderr,"connecter: Enabling uTorrent PEX",self.ut_pex_max_addrs_from_peer
+            else:
+                print >>sys.stderr,"connecter: Disabling uTorrent PEX"
+
+        # The set of messages we support. Note that the msg ID is an int not a byte in 
+        # this dict.
+        self.EXTEND_HANDSHAKE_M_DICT = {}
+        
+        # Say in the EXTEND handshake that we support Closed swarms
+        if DEBUG:
+            print >>sys.stderr,"connecter: I support Closed Swarms"
+        d = {EXTEND_MSG_CS:ord(CS_CHALLENGE_A)}
+        self.EXTEND_HANDSHAKE_M_DICT.update(d)
+
+        if self.overlay_enabled:
+            # Say in the EXTEND handshake we support the overlay-swarm ext.
+            d = {EXTEND_MSG_OVERLAYSWARM:ord(CHALLENGE)}
+            self.EXTEND_HANDSHAKE_M_DICT.update(d)
+        if self.ut_pex_enabled:
+            # Say in the EXTEND handshake we support uTorrent's peer exchange ext.
+            d = {EXTEND_MSG_UTORRENT_PEX:ord(EXTEND_MSG_UTORRENT_PEX_ID)}
+            self.EXTEND_HANDSHAKE_M_DICT.update(d)
+            self.sched(self.ut_pex_callback,6)
+        if self.use_g2g:
+            # Say in the EXTEND handshake we want to do G2G.
+            d = {EXTEND_MSG_G2G_V2:ord(G2G_PIECE_XFER)}
+            self.EXTEND_HANDSHAKE_M_DICT.update(d)
+            self.sched(self.g2g_callback,G2G_CALLBACK_INTERVAL)
+        if self.merkle_torrent:
+            d = {EXTEND_MSG_HASHPIECE:ord(HASHPIECE)}
+            self.EXTEND_HANDSHAKE_M_DICT.update(d)
+        if self.ut_metadata_enabled:
+            d = {EXTEND_MSG_METADATA:ord(EXTEND_MSG_METADATA_ID)}
+            self.EXTEND_HANDSHAKE_M_DICT.update(d)
+            
+            
+        # LIVEHACK
+        livekey = EXTEND_MSG_LIVE_PREFIX+str(CURRENT_LIVE_VERSION)
+        d = {livekey:ord(LIVE_FAKE_MESSAGE_ID)}
+        self.EXTEND_HANDSHAKE_M_DICT.update(d)
+
+        if DEBUG:
+            print >>sys.stderr,"Connecter: EXTEND: my dict",self.EXTEND_HANDSHAKE_M_DICT
+
+        # BarterCast
+        if config['overlay']:
+            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+            
+            self.overlay_bridge = OverlayThreadingBridge.getInstance()
+        else:
+            self.overlay_bridge = None
+            
+        # RePEX
+        self.repexer = None # Should this be called observer instead?
+            
+        # Closed Swarm stuff
+        self.is_closed_swarm = False
+        self.cs_post_func = None
+        if 'cs_keys' in self.config:
+            if self.config['cs_keys'] != None:
+                if len(self.config['cs_keys']) == 0:
+                    if DEBUG_CS:
+                        print >>sys.stderr, "connecter: cs_keys is empty"
+                else:
+                    if DEBUG_CS:
+                       print >>sys.stderr, "connecter: This is a closed swarm  - has cs_keys"
+                    self.is_closed_swarm = True
+
+
+    def how_many_connections(self):
+        return len(self.connections)
+
+    def connection_made(self, connection):
+        """ A new BT connection was established: wrap it in a Connection,
+        notify the repexer, send the EXTEND handshake (or initiate an overlay
+        connection for pre-3.5.0 Tribler peers), and attach upload/download
+        and choker state. For closed swarms the choker is only freed after
+        the CS handshake completes. Returns the Connection wrapper. """
+
+        assert connection
+        c = Connection(connection, self)
+        self.connections[connection] = c
+        
+        # RePEX: Inform repexer connection is made
+        repexer = self.repexer
+        if repexer:
+            try:
+                repexer.connection_made(c,connection.supports_extend_messages())
+                if c.closed:
+                    # The repexer can close the connection in certain cases.
+                    # If so, we abort further execution of this function.
+                    return c
+            except:
+                print_exc()
+        
+        if connection.supports_extend_messages():
+            # The peer either supports our overlay-swarm extension or 
+            # the utorrent extended protocol.
+            
+            [client,version] = decodePeerID(connection.id)
+            
+            if DEBUG:
+                print >>sys.stderr,"connecter: Peer is client",client,"version",version,c.get_ip(),c.get_port()
+            
+            if self.overlay_enabled and client == TRIBLER_PEERID_LETTER and version <= '3.5.0' and connection.locally_initiated:
+                # Old Tribler, establish overlay connection<
+                if DEBUG:
+                    print >>sys.stderr,"connecter: Peer is previous Tribler version, attempt overlay connection"
+                c.connect_overlay()
+            elif self.ut_pex_enabled:
+                # EXTEND handshake must be sent just after BT handshake, 
+                # before BITFIELD even
+                c.send_extend_handshake()
+                
+        #TODO: overlay swarm also needs upload and download to control transferring rate
+        # If this is a closed swarm, don't do this now - will be done on completion of the CS protocol!
+        c.upload = self.make_upload(c, self.ratelimiter, self.totalup)
+        c.download = self.downloader.make_download(c)
+        if not self.is_closed_swarm:
+            if DEBUG_CS:
+               print >>sys.stderr,"connecter: connection_made: Freeing choker!"
+            self.choker.connection_made(c)
+        else:
+            if DEBUG_CS:
+                print >>sys.stderr,"connecter: connection_made: Will free choker later"
+            self.choker.add_connection(c)
+            #self.cs_post_func = lambda:self.choker.connection_made(c)
+            #self.cs_post_func = lambda:self.choker.start_connection(c)
+            self.cs_post_func = lambda:self._cs_completed(c)
+
+        return c
+
+    def connection_lost(self, connection):
+        """ A BT connection was closed: inform the repexer, push BarterCast
+        up/download totals to the overlay thread, and detach downloader and
+        choker state for this connection. """
+        c = self.connections[connection]
+
+        # RePEX: inform repexer of closed connection
+        repexer = self.repexer
+        if repexer:
+            try:
+                repexer.connection_closed(c)
+            except:
+                print_exc()
+
+        ######################################
+        # BarterCast
+        if self.overlay_bridge is not None:
+            ip = c.get_ip(False)       
+            port = c.get_port(False)   
+            down_kb = int(c.total_downloaded / 1024)
+            up_kb = int(c.total_uploaded / 1024)
+            
+            if DEBUG:
+                print >> sys.stderr, "bartercast: attempting database update, adding olthread"
+            
+            # Database write must happen on the overlay thread, not here.
+            olthread_bartercast_conn_lost_lambda = lambda:olthread_bartercast_conn_lost(ip,port,down_kb,up_kb)
+            self.overlay_bridge.add_task(olthread_bartercast_conn_lost_lambda,0)
+        else:
+            if DEBUG:
+                print >> sys.stderr, "bartercast: no overlay bridge found"
+            
+        #########################
+        
+        if DEBUG:
+            if c.get_ip() == self.tracker_ip:
+                print >>sys.stderr,"connecter: connection_lost: live: WAAH2 closing SOURCE"
+            
+        del self.connections[connection]
+        if c.download:
+            c.download.disconnected()
+        self.choker.connection_lost(c)
+
+    def connection_flushed(self, connection):
+        conn = self.connections[connection]
+        if conn.next_upload is None and (conn.partial_message is not None
+               or conn.upload.buffer):
+            self.ratelimiter.queue(conn)
+
+    def got_piece(self, i):
+        for co in self.connections.values():
+            co.send_have(i)
+
+    def our_extend_msg_id_to_name(self,ext_id):
+        """ find the name for the given message id (byte) """
+        for key,val in self.EXTEND_HANDSHAKE_M_DICT.iteritems():
+            if val == ord(ext_id):
+                return key
+        return None
+
+    def get_ut_pex_conns(self):
+        conns = []
+        for conn in self.connections.values():
+            if conn.get_extend_listenport() is not None:
+                conns.append(conn)
+        return conns
+            
+    def get_ut_pex_previous_conns(self):
+        return self.ut_pex_previous_conns
+
+    def set_ut_pex_previous_conns(self,conns):
+        self.ut_pex_previous_conns = conns
+
+    def ut_pex_callback(self):
+        """ Periodically send info about the peers you know to the other peers.
+
+        Diffs the current PEX-capable connection set against the previous
+        round; first-time recipients get the full set, others get the
+        added/dropped diff. Reschedules itself every 60 seconds.
+        """
+        if DEBUG_UT_PEX:
+            print >>sys.stderr,"connecter: Periodic ut_pex update"
+            
+        currconns = self.get_ut_pex_conns()
+        (addedconns,droppedconns) = ut_pex_get_conns_diff(currconns,self.get_ut_pex_previous_conns())
+        self.set_ut_pex_previous_conns(currconns)
+        if DEBUG_UT_PEX:
+            for conn in addedconns:
+                print >>sys.stderr,"connecter: ut_pex: Added",conn.get_ip(),conn.get_extend_listenport()
+            for conn in droppedconns:
+                print >>sys.stderr,"connecter: ut_pex: Dropped",conn.get_ip(),conn.get_extend_listenport()
+            
+        for c in currconns:
+            if c.supports_extend_msg(EXTEND_MSG_UTORRENT_PEX):
+                try:
+                    if DEBUG_UT_PEX:
+                        print >>sys.stderr,"connecter: ut_pex: Creating msg for",c.get_ip(),c.get_extend_listenport()
+                    if c.first_ut_pex():
+                        aconns = currconns
+                        dconns = []
+                    else:
+                        aconns = addedconns
+                        dconns = droppedconns
+                    payload = create_ut_pex(aconns,dconns,c)    
+                    c.send_extend_ut_pex(payload)
+                except:
+                    print_exc()
+        self.sched(self.ut_pex_callback,60)
+
+    def g2g_callback(self):
+        try:
+            self.sched(self.g2g_callback,G2G_CALLBACK_INTERVAL)
+            for c in self.connections.itervalues():
+                if not c.use_g2g:
+                    continue
+    
+                c.dequeue_g2g_piece_xfer()
+        except:
+            print_exc()
+
+    def got_ut_metadata(self, connection, dic, message):
+        """
+        Handle a ut_metadata extension message (request/data/reject).
+
+        CONNECTION: The connection instance where we received this message
+        DIC: The bdecoded dictionary
+        MESSAGE: The entire message: <EXTEND-ID><METADATA-ID><BENCODED-DIC><OPTIONAL-DATA>
+
+        Raises ValueError for malformed or protocol-violating messages
+        (the caller closes the connection on exception).
+        """
+        if DEBUG: print >> sys.stderr, "connecter.got_ut_metadata:", dic
+
+        # Validate msg_type and piece before acting on either.
+        msg_type = dic.get("msg_type", None)
+        if not type(msg_type) in (int, long):
+            raise ValueError("Invalid ut_metadata.msg_type")
+        piece = dic.get("piece", None)
+        if not type(piece) in (int, long):
+            raise ValueError("Invalid ut_metadata.piece type")
+        if not 0 <= piece < len(self.ut_metadata_list):
+            raise ValueError("Invalid ut_metadata.piece value")
+
+        if msg_type == 0: # request
+            if DEBUG: print >> sys.stderr, "connecter.got_ut_metadata: Received request for piece", piece
+
+            # our flood protection policy is to upload all metadata
+            # once every n minutes.
+            now = time.time()
+            deadline = now - UT_METADATA_FLOOD_PERIOD
+            # remove old history
+            self.ut_metadata_history = [timestamp for timestamp in self.ut_metadata_history if timestamp > deadline]
+
+            if len(self.ut_metadata_history) > UT_METADATA_FLOOD_FACTOR * len(self.ut_metadata_list):
+                # refuse to upload at this time (msg_type 2 == reject)
+                reply = bencode({"msg_type":2, "piece":piece})
+            else:
+                # msg_type 1 == data reply carrying the requested piece
+                reply = bencode({"msg_type":1, "piece":piece, "data":self.ut_metadata_list[piece]})
+                self.ut_metadata_history.append(now)
+            connection._send_message(EXTEND + connection.his_extend_msg_name_to_id(EXTEND_MSG_METADATA) + reply)
+        
+        elif msg_type == 1: # data
+            # at this point in the code we must assume that the
+            # metadata is already there, everything is designed in
+            # such a way that metadata is required.  data replies can
+            # therefore never occur.
+            raise ValueError("Invalid ut_metadata: we did not request data")
+
+        elif msg_type == 2: # reject
+            # at this point in the code we must assume that the
+            # metadata is already there, everything is designed in
+            # such a way that metadata is required.  rejects can
+            # therefore never occur.
+            raise ValueError("Invalid ut_metadata: we did not request data that can be rejected")
+
+        else:
+            raise ValueError("Invalid ut_metadata.msg_type value")
+
+    def got_hashpiece(self, connection, message):
+        """ Process Merkle hashpiece message. Note: EXTEND byte has been 
+        stripped, it starts with peer's Tr_hashpiece id for historic reasons ;-)
+
+        Wire layout after the id byte:
+        <index:4><begin:4><len_hashlist:4><bencoded hashlist><piece data>.
+        Any parse/validation failure closes the connection.
+        """
+        try:
+            c = self.connections[connection]
+            
+            if len(message) <= 13:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad HASHPIECE: msg len"
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad HASHPIECE: index out of range"
+                connection.close()
+                return
+            begin = toint(message[5:9])
+            len_hashlist = toint(message[9:13])
+            bhashlist = message[13:13+len_hashlist]
+            hashlist = bdecode(bhashlist)
+            if not isinstance(hashlist, list):
+                raise AssertionError, "hashlist not list"
+            # Each entry must be [offset:int, hash:20-byte str].
+            # NB: the trailing backslash below line-joins the "raise" onto the
+            # "if" suite — odd but syntactically valid; left byte-identical.
+            for oh in hashlist:
+                if not isinstance(oh,list) or \
+                not (len(oh) == 2) or \
+                not isinstance(oh[0],int) or \
+                not isinstance(oh[1],str) or \
+                not ((len(oh[1])==20)): \
+                    raise AssertionError, "hashlist entry invalid"
+            piece = message[13+len_hashlist:]
+
+            if DEBUG_NORMAL_MSGS:
+                print >>sys.stderr,"connecter: Got HASHPIECE",i,begin
+
+            if c.download.got_piece(i, begin, hashlist, piece):
+                self.got_piece(i)
+        except Exception,e:
+            if DEBUG:
+                print >>sys.stderr,"Close on bad HASHPIECE: exception",str(e)
+                print_exc()
+            connection.close()
+
+    # NETWORK AWARE
+    def na_got_loopback(self,econnection):
+        if DEBUG:
+            print >>sys.stderr,"connecter: na_got_loopback: Got connection from",econnection.get_ip(),econnection.get_port()
+        for c in self.connections.itervalues():
+            ret = c.na_got_loopback(econnection)
+            if ret is not None:
+                return ret
+        return False
+
+    def na_got_internal_connection(self,origconn,newconn):
+        """ This is called only at the initiator side of the internal conn.
+        Doesn't matter, only one is enough to close the original connection.
+        """
+        if DEBUG:
+            print >>sys.stderr,"connecter: na_got_internal: From",newconn.get_ip(),newconn.get_port()
+        
+        # The internal connection replaces the external one.
+        origconn.close()
+
+
+    def got_message(self, connection, message):
+        """ Main BitTorrent message dispatcher: the first byte of MESSAGE
+        selects the handler. Malformed messages (bad length, out-of-range
+        piece index) close the connection. EXTEND messages are delegated
+        to got_extend_message() before any other bookkeeping. """
+        # connection: Encrypter.Connection; c: Connecter.Connection
+        c = self.connections[connection]    
+        t = message[0]
+        # EXTEND handshake will be sent just after BT handshake, 
+        # before BITFIELD even
+
+        if DEBUG_MESSAGE_HANDLING:
+            st = time.time()
+
+        if DEBUG_NORMAL_MSGS:
+            print >>sys.stderr,"connecter: Got",getMessageName(t),connection.get_ip()
+        
+        if t == EXTEND:
+            self.got_extend_message(connection,c,message,self.ut_pex_enabled)
+            return
+
+        # If this is a closed swarm and we have not authenticated the 
+        # remote node, we must NOT GIVE IT ANYTHING!
+        #if self.is_closed_swarm and c.closed_swarm_protocol.is_incomplete():
+            #print >>sys.stderr, "connecter: Remote node not authorized, ignoring it"
+            #return
+
+        # NOTE(review): resetting got_anything for authorized closed-swarm
+        # peers re-allows a BITFIELD after authentication — the original
+        # author's question below suggests this was never fully verified.
+        if self.is_closed_swarm and c.can_send_to():
+            c.got_anything = False # Is this correct or does it break something?
+            
+        # BITFIELD is only legal as the very first message.
+        if t == BITFIELD and c.got_anything:
+            if DEBUG:
+                print >>sys.stderr,"Close on BITFIELD"
+            connection.close()
+            return
+        c.got_anything = True
+        # These four message types carry no payload beyond the type byte.
+        if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and 
+                len(message) != 1):
+            if DEBUG:
+                print >>sys.stderr,"Close on bad (UN)CHOKE/(NOT_)INTERESTED",t
+            connection.close()
+            return
+        if t == CHOKE:
+            if DEBUG_NORMAL_MSGS:
+                print >>sys.stderr,"connecter: Got CHOKE from",connection.get_ip()
+            c.download.got_choke()
+        elif t == UNCHOKE:
+            if DEBUG_NORMAL_MSGS:
+                print >>sys.stderr,"connecter: Got UNCHOKE from",connection.get_ip()
+            c.download.got_unchoke()
+        elif t == INTERESTED:
+            if DEBUG_NORMAL_MSGS:
+                print >>sys.stderr,"connecter: Got INTERESTED from",connection.get_ip()
+            if c.upload is not None:
+                c.upload.got_interested()
+        elif t == NOT_INTERESTED:
+            c.upload.got_not_interested()
+        elif t == HAVE:
+            # HAVE payload: 4-byte piece index.
+            if len(message) != 5:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad HAVE: msg len"
+                connection.close()
+                return
+            i = toint(message[1:])
+            if i >= self.numpieces:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad HAVE: index out of range"
+                connection.close()
+                return
+            if DEBUG_NORMAL_MSGS:
+                print >>sys.stderr,"connecter: Got HAVE(",i,") from",connection.get_ip()
+            c.download.got_have(i)
+        elif t == BITFIELD:
+            if DEBUG_NORMAL_MSGS:
+                print >>sys.stderr,"connecter: Got BITFIELD from",connection.get_ip()
+            try:
+                b = Bitfield(self.numpieces, message[1:],calcactiveranges=self.live_streaming)
+            except ValueError:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad BITFIELD"
+                connection.close()
+                return
+            if c.download is not None:
+                c.download.got_have_bitfield(b)
+        elif t == REQUEST:
+            # Closed swarm: never serve data to unauthenticated peers.
+            if not c.can_send_to():
+                c.cs_status_unauth_requests.inc()
+                print >> sys.stderr,"Got REQUEST but remote node is not authenticated"
+                return # TODO: Do this better
+
+            # REQUEST payload: <index:4><begin:4><length:4>.
+            if len(message) != 13:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad REQUEST: msg len"
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad REQUEST: index out of range"
+                connection.close()
+                return
+            if DEBUG_NORMAL_MSGS:
+                print >>sys.stderr,"connecter: Got REQUEST(",i,") from",connection.get_ip()
+            c.got_request(i, toint(message[5:9]), toint(message[9:]))
+        elif t == CANCEL:
+            # CANCEL payload mirrors REQUEST: <index:4><begin:4><length:4>.
+            if len(message) != 13:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad CANCEL: msg len"
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad CANCEL: index out of range"
+                connection.close()
+                return
+            c.upload.got_cancel(i, toint(message[5:9]), 
+                toint(message[9:]))
+        elif t == PIECE:
+            # PIECE payload: <index:4><begin:4><data>.
+            if len(message) <= 9:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad PIECE: msg len"
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad PIECE: msg len"
+                connection.close()
+                return
+            if DEBUG_NORMAL_MSGS: # or connection.get_ip().startswith("192"):
+                print >>sys.stderr,"connecter: Got PIECE(",i,") from",connection.get_ip()
+            #if connection.get_ip().startswith("192"):
+            #    print >>sys.stderr,"@",
+            try:
+                if c.download.got_piece(i, toint(message[5:9]), [], message[9:]):
+                    self.got_piece(i)
+            except Exception,e:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad PIECE: exception",str(e)
+                    print_exc()
+                connection.close()
+                return
+            
+        elif t == HASHPIECE:
+            # Merkle: Handle pieces with hashes, old Tribler<= 4.5.2 style
+            self.got_hashpiece(connection,message)
+            
+        elif t == G2G_PIECE_XFER: 
+            # EXTEND_MSG_G2G_V1 only, V2 is proper EXTEND msg 
+            if len(message) <= 12:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad G2G_PIECE_XFER: msg len"
+                connection.close()
+                return
+            if not c.use_g2g:
+                if DEBUG:
+                    print >>sys.stderr,"Close on receiving G2G_PIECE_XFER over non-g2g connection"
+                connection.close()
+                return
+
+            index = toint(message[1:5])
+            begin = toint(message[5:9])
+            length = toint(message[9:13])
+            c.got_g2g_piece_xfer_v1(index,begin,length)
+
+        else:
+            # Unknown message type: drop the peer.
+            connection.close()
+
+        if DEBUG_MESSAGE_HANDLING:
+            et = time.time()
+            diff = et - st
+            if diff > 0.1:
+                print >>sys.stderr,"connecter: $$$$$$$$$$$$",getMessageName(t),"took",diff
+
+
+    def got_extend_message(self,connection,c,message,ut_pex_enabled):
+        # connection: Encrypter.Connection; c: Connecter.Connection
+        if DEBUG:
+            print >>sys.stderr,"connecter: Got EXTEND message, len",len(message)
+            print >>sys.stderr,"connecter: his handshake",c.extend_hs_dict,c.get_ip()
+            
+        try:
+            if len(message) < 4:
+                if DEBUG:
+                    print >>sys.stderr,"Close on bad EXTEND: msg len"
+                connection.close()
+                return
+            ext_id = message[1]
+            if DEBUG:
+                print >>sys.stderr,"connecter: Got EXTEND message, id",ord(ext_id)
+            if ext_id == EXTEND_MSG_HANDSHAKE_ID: 
+                # Message is Handshake
+                d = bdecode(message[2:])
+                if type(d) == DictType:
+                    c.got_extend_handshake(d)
+                else:
+                    if DEBUG:
+                        print >>sys.stderr,"Close on bad EXTEND: payload of handshake is not a bencoded dict"
+                    connection.close()
+                    return
+            else:
+                # Message is regular message e.g ut_pex
+                ext_msg_name = self.our_extend_msg_id_to_name(ext_id)
+                if ext_msg_name is None:
+                    if DEBUG:
+                        print >>sys.stderr,"Close on bad EXTEND: peer sent ID we didn't define in handshake"
+                    connection.close()
+                    return
+                elif ext_msg_name == EXTEND_MSG_OVERLAYSWARM:
+                    if DEBUG:
+                        print >>sys.stderr,"Not closing EXTEND+CHALLENGE: peer didn't read our spec right, be liberal"
+                elif ext_msg_name == EXTEND_MSG_UTORRENT_PEX and ut_pex_enabled:
+                    d = bdecode(message[2:])
+                    if type(d) == DictType:
+                        c.got_ut_pex(d)
+                    else:
+                        if DEBUG:
+                            print >>sys.stderr,"Close on bad EXTEND: payload of ut_pex is not a bencoded dict"
+                        connection.close()
+                        return
+                elif ext_msg_name == EXTEND_MSG_METADATA:
+                    if DEBUG:
+                        print >> sys.stderr, "Connecter.got_extend_message() ut_metadata"
+                    # bdecode sloppy will make bdecode ignore the data
+                    # in message that is placed -after- the bencoded
+                    # data (this is the case for a data message)
+                    d = bdecode(message[2:], sloppy=1)
+                    if type(d) == DictType:
+                        self.got_ut_metadata(c, d, message)
+                    else:
+                        if DEBUG:
+                            print >> sys.stderr, "Connecter.got_extend_message() close on bad ut_metadata message"
+                        connection.close()
+                        return
+                elif ext_msg_name == EXTEND_MSG_G2G_V2 and self.use_g2g:
+                    ppdict = bdecode(message[2:])
+                    if type(ppdict) != DictType:
+                        if DEBUG:
+                            print >>sys.stderr,"Close on bad EXTEND+G2G: payload not dict"
+                        connection.close()
+                        return
+                    for k,v in ppdict.iteritems():
+                        if type(k) != StringType or type(v) != StringType:
+                            if DEBUG:
+                                print >>sys.stderr,"Close on bad EXTEND+G2G: key,value not of type int,char"
+                            connection.close()
+                            return
+                        try:
+                            int(k)
+                        except:
+                            if DEBUG:
+                                print >>sys.stderr,"Close on bad EXTEND+G2G: key not int"
+                            connection.close()
+                            return
+                        if ord(v) > 100:
+                            if DEBUG:
+                                print >>sys.stderr,"Close on bad EXTEND+G2G: value too big",ppdict,v,ord(v)
+                            connection.close()
+                            return
+                            
+                    c.got_g2g_piece_xfer_v2(ppdict)
+                    
+                elif ext_msg_name == EXTEND_MSG_HASHPIECE and self.merkle_torrent:
+                    # Merkle: Handle pieces with hashes, Merkle BEP
+                    oldmsg = message[1:]
+                    self.got_hashpiece(connection,oldmsg)
+                    
+                elif ext_msg_name == EXTEND_MSG_CS:
+                    cs_list = bdecode(message[2:])
+                    c.got_cs_message(cs_list)
+                    
+                else:
+                    if DEBUG:
+                        print >>sys.stderr,"Close on bad EXTEND: peer sent ID that maps to name we don't support",ext_msg_name,`ext_id`,ord(ext_id)
+                    connection.close()
+                    return
+            return
+        except Exception,e:
+            if not DEBUG:
+                print >>sys.stderr,"Close on bad EXTEND: exception:",str(e),`message[2:]`
+                print_exc()
+            connection.close()
+            return
+
+    def _cs_completed(self, connection):
+        """
+        When completed, this is a callback function to reset the connection
+        after the closed-swarm handshake: flag completion, send our current
+        bitfield, and hand the connection to the choker.
+        """
+        connection.cs_complete = True # Flag CS as completed
+
+        try:
+            # Can't send bitfield here, must loop and send a bunch of HAVEs
+            # Get the bitfield from the uploader
+            have_list = connection.upload.storage.get_have_list()
+            bitfield = Bitfield(self.numpieces, have_list)
+            connection.send_bitfield(bitfield.tostring())
+            # Re-arm the BITFIELD-must-be-first check for this connection.
+            connection.got_anything = False
+            self.choker.start_connection(connection)
+        except Exception,e:
+            print >> sys.stderr,"connecter: CS: Error restarting after CS handshake:",e
+        
+    def cs_handshake_completed(self):
+        if DEBUG_CS:
+            print >>sys.stderr,"connecter: Closed swarm handshake completed!"
+        if self.cs_post_func:
+            self.cs_post_func()
+        elif DEBUG_CS:
+            print >>sys.stderr,"connecter: CS: Woops, don't have post function"
+
+
+def olthread_bartercast_conn_lost(ip,port,down_kb,up_kb):
+    """ Called by OverlayThread to store information about the peer to
+    whom the connection was just closed in the (slow) databases.
+
+    ip/port identify the peer; down_kb/up_kb are KBs exchanged over the
+    connection's lifetime. Known peers (resolvable PermID) are credited
+    individually; unknown peers are pooled under 'non-tribler'. """
+    
+    from BaseLib.Core.CacheDB.CacheDBHandler import PeerDBHandler, BarterCastDBHandler
+    
+    peerdb = PeerDBHandler.getInstance()
+    bartercastdb = BarterCastDBHandler.getInstance()
+    
+    if bartercastdb:
+    
+        permid = peerdb.getPermIDByIP(ip)
+        my_permid = bartercastdb.my_permid
+    
+        if DEBUG:
+            print >> sys.stderr, "bartercast: (Connecter): Up %d down %d peer %s:%s (PermID = %s)" % (up_kb, down_kb, ip, port, `permid`)
+    
+        # Save exchanged KBs in BarterCastDB
+        # (commit=False on increments; one commit at the end below)
+        changed = False
+        if permid is not None:
+            #name = bartercastdb.getName(permid)
+            
+            if down_kb > 0:
+                new_value = bartercastdb.incrementItem((my_permid, permid), 'downloaded', down_kb, commit=False)
+                changed = True
+     
+            if up_kb > 0:
+                new_value = bartercastdb.incrementItem((my_permid, permid), 'uploaded', up_kb, commit=False)
+                changed = True
+     
+        # For the record: save KBs exchanged with non-tribler peers
+        else:
+            if down_kb > 0:
+                new_value = bartercastdb.incrementItem((my_permid, 'non-tribler'), 'downloaded', down_kb, commit=False)
+                changed = True
+     
+            if up_kb > 0:
+                new_value = bartercastdb.incrementItem((my_permid, 'non-tribler'), 'uploaded', up_kb, commit=False)
+                changed = True
+                
+        if changed:
+            bartercastdb.commit()
+
+    else:
+        if DEBUG:
+            print >> sys.stderr, "BARTERCAST: No bartercastdb instance"
+            
+
+            
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Downloader.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Downloader.py
new file mode 100644 (file)
index 0000000..35e79c1
--- /dev/null
@@ -0,0 +1,1196 @@
+# Written by Bram Cohen and Pawel Garbacki, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+import time
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+from random import shuffle
+from base64 import b64encode
+from BaseLib.Core.BitTornado.clock import clock
+from BaseLib.Core.Statistics.Status.Status import get_status_holder
+
+#ProxyService_
+#
+try:
+    from BaseLib.Core.ProxyService.Helper import SingleDownloadHelperInterface
+except ImportError:
+    # ProxyService not available in this build: provide an empty stand-in
+    # so SingleDownload below still has its expected base class.
+    class SingleDownloadHelperInterface:
+        
+        def __init__(self):
+            pass
+#
+#_ProxyService
+
+# Pre-Python-2.3 compatibility: define boolean constants if missing.
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+DEBUGBF = False
+DEBUG_CHUNKS = False # set DEBUG_CHUNKS in PiecePickerStreaming to True
+EXPIRE_TIME = 60 * 60
+
+# only define the following functions in __debug__. And only import
+# them in this case. They are to expensive to have, and have no
+# purpose, outside debug mode.
+#
+# Arno, 2009-06-15: Win32 binary versions have __debug__ True apparently, workaround.
+#
+if DEBUG_CHUNKS:
+    _ident_letters = {}
+    _ident_letter_pool = None
+    def get_ident_letter(download):
+        if not download.ip in _ident_letters:
+            global _ident_letter_pool
+            if not _ident_letter_pool:
+                _ident_letter_pool = [chr(c) for c in range(ord("a"), ord("z")+1)] + [chr(c) for c in range(ord("A"), ord("Z")+1)]
+            _ident_letters[download.ip] = _ident_letter_pool.pop(0)
+        return _ident_letters[download.ip]
+
+    def print_chunks(downloader, pieces, before=(), after=(), compact=True):
+        """
+        Print a line summery indicating completed/outstanding/non-requested chunks
+
+        When COMPACT is True one character will represent one piece.
+        #   --> downloaded
+        -   --> no outstanding requests
+        1-9 --> the number of outstanding requests (max 9)
+
+        When COMPACT is False one character will requests one chunk.
+        #   --> downloaded
+        -   --> no outstanding requests
+        a-z --> requested at peer with that character (also capitals, duplicates may occur)
+        1-9 --> requested multipile times (at n peers)
+        """
+        if pieces:
+            do_I_have = downloader.storage.do_I_have
+            do_I_have_requests = downloader.storage.do_I_have_requests
+            inactive_requests = downloader.storage.inactive_requests
+            piece_size = downloader.storage.piece_length
+            chunk_size = downloader.storage.request_size
+            chunks_per_piece = int(piece_size / chunk_size)
+
+            if compact:
+                request_map = {}
+                for download in downloader.downloads:
+                    for piece, begin, length in download.active_requests:
+                        if not piece in request_map:
+                            request_map[piece] = 0
+                        request_map[piece] += 1
+
+                def print_chunks_helper(piece_id):
+                    if do_I_have(piece_id): return "#"
+                    if do_I_have_requests(piece_id): return "-"
+                    if piece_id in request_map: return str(min(9, request_map[piece_id]))
+                    return "?"
+
+            else:
+                request_map = {}
+                for download in downloader.downloads:
+                    
+                    for piece, begin, length in download.active_requests:
+                        if not piece in request_map:
+                            request_map[piece] = ["-"] * chunks_per_piece
+                        index = int(begin/chunk_size)
+                        if request_map[piece][index] == "-":
+                            request_map[piece][index] = get_ident_letter(download)
+                        elif type(request_map[piece][index]) is str:
+                            request_map[piece][index] = 2
+                        else:
+                            request_map[piece][index] += 1
+                        request_map[piece][int(begin/chunk_size)] = get_ident_letter(download)
+
+                def print_chunks_helper(piece_id):
+                    if do_I_have(piece_id): return "#" * chunks_per_piece
+#                    if do_I_have_requests(piece_id): return "-" * chunks_per_piece
+                    if piece_id in request_map:
+                        if piece_id in inactive_requests and type(inactive_requests[piece_id]) is list:
+                            for begin, length in inactive_requests[piece_id]:
+                                request_map[piece_id][int(begin/chunk_size)] = " "
+                        return "".join([str(c) for c in request_map[piece_id]])
+                    return "-" * chunks_per_piece
+
+            if before:
+                s_before = before[0]
+            else:
+                s_before = ""
+
+            if after:
+                s_after = after[-1]
+            else:
+                s_after = ""
+
+            print >>sys.stderr, "Outstanding %s:%d:%d:%s [%s|%s|%s]" % (s_before, pieces[0], pieces[-1], s_after, "".join(map(print_chunks_helper, before)), "".join(map(print_chunks_helper, pieces)), "".join(map(print_chunks_helper, after)))
+
+        else:
+            print >>sys.stderr, "Outstanding 0:0 []"
+
+else:
+    def print_chunks(downloader, pieces, before=(), after=(), compact=True):
+        pass
+
+
+class PerIPStats:  
+    # Per-IP bookkeeping aggregated across all connections from one address.
+    def __init__(self, ip):
+        self.numgood = 0          # pieces received intact from this IP
+        self.bad = {}             # piece index -> count of bad chunks
+        self.numconnections = 0   # open connections from this IP
+        self.lastdownload = None  # most recent SingleDownload for this IP
+        self.peerid = None
+
+class BadDataGuard:
+    def __init__(self, download):
+        self.download = download
+        self.ip = download.ip
+        self.downloader = download.downloader
+        self.stats = self.downloader.perip[self.ip]
+        self.lastindex = None
+
+    def failed(self, index, bump = False):
+        self.stats.bad.setdefault(index, 0)
+        self.downloader.gotbaddata[self.ip] = 1
+        self.stats.bad[index] += 1
+        if len(self.stats.bad) > 1:
+            if self.download is not None:
+                self.downloader.try_kick(self.download)
+            elif self.stats.numconnections == 1 and self.stats.lastdownload is not None:
+                self.downloader.try_kick(self.stats.lastdownload)
+        if len(self.stats.bad) >= 3 and len(self.stats.bad) > int(self.stats.numgood/30):
+            self.downloader.try_ban(self.ip)
+        elif bump:
+            self.downloader.picker.bump(index)
+
+    def good(self, index):
+        # lastindex is a hack to only increase numgood by one for each good
+        # piece, however many chunks come from the connection(s) from this IP
+        if index != self.lastindex:
+            self.stats.numgood += 1
+            self.lastindex = index
+
+# 2fastbt_
+class SingleDownload(SingleDownloadHelperInterface):
+# _2fastbt
+    def __init__(self, downloader, connection):
+        # Download-side state for one peer connection.
+# 2fastbt_
+        SingleDownloadHelperInterface.__init__(self)
+# _2fastbt
+        self.downloader = downloader
+        self.connection = connection
+        self.choked = True              # peer starts by choking us
+        self.interested = False         # we start not interested
+        self.active_requests = []       # outstanding (index, begin, length)
+        self.measure = Measure(downloader.max_rate_period)
+        self.peermeasure = Measure(downloader.max_rate_period)
+        self.have = Bitfield(downloader.numpieces)  # pieces the peer has
+        self.last = -1000               # clock() of last received chunk
+        self.last2 = -1000              # clock() of last unchoke/chunk
+        self.example_interest = None
+        self.backlog = 2                # request pipeline depth, see _backlog
+        self.ip = connection.get_ip()
+        self.guard = BadDataGuard(self)
+# 2fastbt_
+        self.helper = downloader.picker.helper
+        self.proxy_have = Bitfield(downloader.numpieces)
+# _2fastbt
+
+        # boudewijn: VOD needs a download measurement that is not
+        # averaged over a 'long' period. downloader.max_rate_period is
+        # (by default) 20 seconds because this matches the unchoke
+        # policy.
+        self.short_term_measure = Measure(5)
+
+        # boudewijn: each download maintains a counter for the number
+        # of high priority piece requests that did not get any
+        # responce within x seconds.
+        self.bad_performance_counter = 0
+
+    def _backlog(self, just_unchoked):
+        # Compute the request pipeline depth for this peer: grows with the
+        # measured download rate (in chunks), bounded by the global queue
+        # limit plus a small allowance for just-unchoked peers.
+        self.backlog = int(min(
+            2+int(4*self.measure.get_rate()/self.downloader.chunksize),
+            (2*just_unchoked)+self.downloader.queue_limit() ))
+        if self.backlog > 50:
+            # NOTE(review): max(50, backlog*0.075) matches upstream
+            # BitTornado; it shrinks oversized backlogs toward 50 — confirm
+            # the intent before changing.
+            self.backlog = int(max(50, self.backlog * 0.075))
+        return self.backlog
+    
+    def disconnected(self):
+        # Peer connection closed: notify the downloader, remember seeds in
+        # endgame, release outstanding requests, and detach the guard.
+        self.downloader.lost_peer(self)
+
+        """ JD: obsoleted -- moved to picker.lost_peer
+
+        if self.have.complete():
+            self.downloader.picker.lost_seed()
+        else:
+            for i in xrange(len(self.have)):
+                if self.have[i]:
+                    self.downloader.picker.lost_have(i)
+        """
+
+        if self.have.complete() and self.downloader.storage.is_endgame():
+            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+        self._letgo()
+        # Break the guard's back-reference so per-IP stats outlive us safely.
+        self.guard.download = None
+
+    def _letgo(self):
+        # Return this peer's outstanding requests to storage and let other
+        # unchoked peers pick them up; re-interest choked peers that have
+        # the released pieces.
+        if self.downloader.queued_out.has_key(self):
+            del self.downloader.queued_out[self]
+        if not self.active_requests:
+            return
+        if self.downloader.endgamemode:
+            # Endgame: requests are duplicated everywhere, nothing to return.
+            self.active_requests = []
+            return
+        lost = {}
+        for index, begin, length in self.active_requests:
+            self.downloader.storage.request_lost(index, begin, length)
+            lost[index] = 1
+        lost = lost.keys()
+        self.active_requests = []
+        if self.downloader.paused:
+            return
+        # Shuffle so the released work spreads fairly over unchoked peers.
+        ds = [d for d in self.downloader.downloads if not d.choked]
+        shuffle(ds)
+        for d in ds:
+            d._request_more()
+        for d in self.downloader.downloads:
+            if d.choked and not d.interested:
+                for l in lost:
+                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
+                        d.send_interested()
+                        break
+
+    def got_choke(self):
+        if not self.choked:
+            self.choked = True
+            self._letgo()
+
+    def got_unchoke(self):
+        if self.choked:
+            self.choked = False
+            if self.interested:
+                self._request_more(new_unchoke = True)
+            self.last2 = clock()
+
+    def is_choked(self):
+        # True while the peer is choking us.
+        return self.choked
+
+    def is_interested(self):
+        # True while we have declared interest in this peer's pieces.
+        return self.interested
+
+    def send_interested(self):
+        if not self.interested:
+            self.interested = True
+            self.connection.send_interested()
+
+    def send_not_interested(self):
+        # Send NOT_INTERESTED once; idempotent if already uninterested.
+        if self.interested:
+            self.interested = False
+            self.connection.send_not_interested()
+
+    def got_piece(self, index, begin, hashlist, piece):
+        """
+        Process one received block of data for piece `index` at offset `begin`.
+        Returns True if the whole piece is now complete.
+        Note that in this case a -piece- means a chunk!
+        """
+
+        if self.bad_performance_counter:
+            self.bad_performance_counter -= 1
+            if DEBUG: print >>sys.stderr, "decreased bad_performance_counter to", self.bad_performance_counter
+
+        length = len(piece)
+        #if DEBUG:
+        #    print >> sys.stderr, 'Downloader: got piece of length %d' % length
+        try:
+            self.active_requests.remove((index, begin, length))
+        except ValueError:
+            # We never requested this chunk (or cancelled it): count as waste.
+            self.downloader.discarded += length
+            return False
+        if self.downloader.endgamemode:
+            self.downloader.all_requests.remove((index, begin, length))
+            if DEBUG: print >>sys.stderr, "Downloader: got_piece: removed one request from all_requests", len(self.downloader.all_requests), "remaining"
+
+        # Update activity timestamps (snub detection) and rate measurements.
+        self.last = clock()
+        self.last2 = clock()
+        self.measure.update_rate(length)
+        # Update statistic gatherer
+        status = get_status_holder("LivingLab")
+        s_download = status.get_or_create_status_element("downloaded",0)
+        s_download.inc(length)
+
+        self.short_term_measure.update_rate(length)
+        self.downloader.measurefunc(length)
+        # Storage validates/stores the chunk; a flunked piece means bad data.
+        if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard):
+            self.downloader.piece_flunked(index)
+            return False
+
+        # boudewijn: we need more accurate (if possibly invalid)
+        # measurements on current download speed
+        self.downloader.picker.got_piece(index, begin, length)
+
+#        print "Got piece=", index, "begin=", begin, "len=", length
+        if self.downloader.storage.do_I_have(index):
+            self.downloader.picker.complete(index)
+
+        if self.downloader.endgamemode:
+            # In endgame every peer may have requested this chunk: cancel the
+            # duplicates and let each peer refill its request queue.
+            for d in self.downloader.downloads:
+                if d is not self:
+                    if d.interested:
+                        if d.choked:
+                            assert not d.active_requests
+                            d.fix_download_endgame()
+                        else:
+                            try:
+                                d.active_requests.remove((index, begin, length))
+                            except ValueError:
+                                continue
+                            d.connection.send_cancel(index, begin, length)
+                            d.fix_download_endgame()
+                    else:
+                        assert not d.active_requests
+        self._request_more()
+        self.downloader.check_complete(index)
+
+        # BarterCast counter
+        self.connection.total_downloaded += length
+
+        return self.downloader.storage.do_I_have(index)
+
+# 2fastbt_
+    def helper_forces_unchoke(self):
+        # ProxyService/2fast hook: forcibly mark this connection unchoked.
+        self.choked = False
+# _2fastbt
+
+    def _request_more(self, new_unchoke = False, slowpieces = []):
+        """
+        Fill this connection's request pipeline up to the backlog limit by
+        asking the PiecePicker for interesting pieces and issuing block
+        requests. `slowpieces` are pieces the picker should avoid for this
+        (slow) peer. NOTE: slowpieces=[] is a shared mutable default; it is
+        only read here, but should not be mutated.
+        """
+# 2fastbt_
+        if DEBUG:
+            print >>sys.stderr,"Downloader: _request_more()"
+        if self.helper is not None and self.is_frozen_by_helper():
+            if DEBUG:
+                print >>sys.stderr,"Downloader: _request_more: blocked, returning"
+            return
+# _2fastbt    
+        if self.choked:
+            if DEBUG:
+                print >>sys.stderr,"Downloader: _request_more: choked, returning"
+            return
+# 2fastbt_
+        # do not download from coordinator
+        if self.connection.connection.is_coordinator_con():
+            if DEBUG:
+                print >>sys.stderr,"Downloader: _request_more: coordinator conn"
+            return
+# _2fastbt
+        if self.downloader.endgamemode:
+            self.fix_download_endgame(new_unchoke)
+            if DEBUG:
+                print >>sys.stderr,"Downloader: _request_more: endgame mode, returning"
+            return
+        if self.downloader.paused:
+            if DEBUG:
+                print >>sys.stderr,"Downloader: _request_more: paused, returning"
+            return
+        if len(self.active_requests) >= self._backlog(new_unchoke):
+            if DEBUG:
+                print >>sys.stderr,"Downloader: more req than unchoke (active req: %d >= backlog: %d)" % (len(self.active_requests), self._backlog(new_unchoke))
+            # Jelle: Schedule _request more to be called in some time. Otherwise requesting and receiving packages
+            # may stop, if they arrive to quickly
+            if self.downloader.download_rate:
+                wait_period = self.downloader.chunksize / self.downloader.download_rate / 2.0
+
+                # Boudewijn: when wait_period is 0.0 this will cause
+                # the the _request_more method to be scheduled
+                # multiple times (recursively), causing severe cpu
+                # problems.
+                #
+                # Therefore, only schedule _request_more to be called
+                # if the call will be made in the future. The minimal
+                # wait_period should be tweaked.
+                if wait_period > 1.0:
+                    if DEBUG:
+                        print >>sys.stderr,"Downloader: waiting for %f s to call _request_more again" % wait_period
+                    self.downloader.scheduler(self._request_more, wait_period)
+
+            if not (self.active_requests or self.backlog):
+                self.downloader.queued_out[self] = 1
+            return
+
+        #if DEBUG:
+        #    print >>sys.stderr,"Downloader: _request_more: len act",len(self.active_requests),"back",self.backlog
+
+        # Pieces that became un-requestable while we drained them; used below
+        # to re-evaluate other peers' interest.
+        lost_interests = []
+        while len(self.active_requests) < self.backlog:
+            #if DEBUG:
+            #    print >>sys.stderr,"Downloader: Looking for interesting piece"
+            #st = time.time()
+            #print "DOWNLOADER self.have=", self.have.toboollist()
+            
+            # This is the PiecePicker call is the current client is a Coordinator
+            interest = self.downloader.picker.next(self.have,
+                               self.downloader.storage.do_I_have_requests,
+                               self,
+                               self.downloader.too_many_partials(),
+                               self.connection.connection.is_helper_con(),
+                               slowpieces = slowpieces, connection = self.connection, proxyhave = self.proxy_have)
+            #et = time.time()
+            #diff = et-st
+            diff=-1
+            if DEBUG:
+                print >>sys.stderr,"Downloader: _request_more: next() returned",interest,"took %.5f" % (diff)                               
+            if interest is None:
+                break
+            
+            if self.helper and self.downloader.storage.inactive_requests[interest] is None:
+                # The current node is a helper and received a request from a coordinator for a piece it has already downloaded
+                # Should send a Have message to the coordinator
+                self.connection.send_have(interest)
+                break
+
+            if self.helper and self.downloader.storage.inactive_requests[interest] == []:
+                # The current node is a helper and received a request from a coordinator for a piece that is downloading
+                # (all blocks are requested to the swarm, and have not arrived yet)
+                break
+
+            
+            self.example_interest = interest
+            self.send_interested()
+            # Issue as many block requests for this piece as the backlog allows.
+            loop = True
+            while len(self.active_requests) < self.backlog and loop:
+                
+                begin, length = self.downloader.storage.new_request(interest)
+                
+                if DEBUG:
+                    print >>sys.stderr,"Downloader: new_request",interest,begin,length,"to",self.connection.connection.get_ip(),self.connection.connection.get_port()
+                
+                self.downloader.picker.requested(interest, begin, length)
+                self.active_requests.append((interest, begin, length))
+                self.connection.send_request(interest, begin, length)
+                self.downloader.chunk_requested(length)
+                if not self.downloader.storage.do_I_have_requests(interest):
+                    loop = False
+                    lost_interests.append(interest)
+        if not self.active_requests:
+            self.send_not_interested()
+        if lost_interests:
+            # Other idle-but-interested peers whose only interesting pieces we
+            # just exhausted must re-check whether anything is left for them.
+            for d in self.downloader.downloads:
+                if d.active_requests or not d.interested:
+                    continue
+                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
+                    continue
+                for lost in lost_interests:
+                    if d.have[lost]:
+                        break
+                else:
+                    continue
+# 2fastbt_
+                #st = time.time()
+                interest = self.downloader.picker.next(d.have,
+                                   self.downloader.storage.do_I_have_requests,
+                                   self, # Arno, 2008-05-22; self -> d? Original Pawel code
+                                   self.downloader.too_many_partials(),
+                                   self.connection.connection.is_helper_con(), willrequest=False,connection=self.connection, proxyhave = self.proxy_have)
+                #et = time.time()
+                #diff = et-st
+                diff=-1
+                if DEBUG:                                   
+                    print >>sys.stderr,"Downloader: _request_more: next()2 returned",interest,"took %.5f" % (diff)
+
+                if interest is not None:
+                    # The helper has at least one piece that the coordinator requested 
+                    if self.helper and self.downloader.storage.inactive_requests[interest] is None:
+                        # The current node is a helper and received a request from a coordinator for a piece it has already downloaded
+                        # Should send a Have message to the coordinator
+                        self.connection.send_have(interest)
+                        break
+                    if self.helper and self.downloader.storage.inactive_requests[interest] == []:
+                        # The current node is a helper and received a request from a coordinator for a piece that is downloading
+                        # (all blocks are requested to the swarm, and have not arrived yet)
+                        break
+
+# _2fastbt
+                if interest is None:
+                    d.send_not_interested()
+                else:
+                    d.example_interest = interest
+                    
+        # Arno: LIVEWRAP: no endgame
+        if not self.downloader.endgamemode and \
+           self.downloader.storage.is_endgame() and \
+           not (self.downloader.picker.videostatus and self.downloader.picker.videostatus.live_streaming):
+            self.downloader.start_endgame()
+
+
+    def fix_download_endgame(self, new_unchoke = False):
+        """
+        Endgame-mode variant of _request_more(): request (possibly duplicate)
+        outstanding blocks from downloader.all_requests that this peer has and
+        we have not yet asked it for.
+        """
+# 2fastbt_
+        # do not download from coordinator
+        if self.downloader.paused or self.connection.connection.is_coordinator_con():
+            if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: paused", self.downloader.paused, "or is_coordinator_con", self.connection.connection.is_coordinator_con()
+            return
+# _2fastbt
+
+        if len(self.active_requests) >= self._backlog(new_unchoke):
+            if not (self.active_requests or self.backlog) and not self.choked:
+                self.downloader.queued_out[self] = 1
+            if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: returned"
+            return
+# 2fastbt_
+        # Candidate requests: globally outstanding, peer has the piece, not
+        # already asked of this peer, and not ignored by the helper (2fast).
+        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests and (self.helper is None or self.connection.connection.is_helper_con() or not self.helper.is_ignored(a[0]))]
+# _2fastbt
+        if not (self.active_requests or want):
+            self.send_not_interested()
+            if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: not interested"
+            return
+        if want:
+            self.send_interested()
+        if self.choked:
+            if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: choked"
+            return
+        # Randomize and trim to the remaining backlog capacity.
+        shuffle(want)
+        del want[self.backlog - len(self.active_requests):]
+        self.active_requests.extend(want)
+        for piece, begin, length in want:
+# 2fastbt_
+            if self.helper is None or self.connection.connection.is_helper_con() or self.helper.reserve_piece(piece,self):
+                self.connection.send_request(piece, begin, length)
+                self.downloader.chunk_requested(length)
+# _2fastbt
+
+    def got_have(self, index):
+        """
+        Handle a HAVE message: record that the peer owns piece `index`,
+        update rate accounting, and express interest / request more if the
+        piece is useful to us.
+        """
+#        print >>sys.stderr,"Downloader: got_have",index
+        if DEBUG:
+            print >>sys.stderr,"Downloader: got_have",index
+        # The last piece may be shorter than piece_length; account accordingly.
+        if index == self.downloader.numpieces-1:
+            self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
+            self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
+        else:
+            self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length)
+            self.peermeasure.update_rate(self.downloader.storage.piece_length)
+
+        # Arno: LIVEWRAP
+        if not self.downloader.picker.is_valid_piece(index):
+            if DEBUG:
+                print >>sys.stderr,"Downloader: got_have",index,"is invalid piece"
+            return # TODO: should we request_more()? 
+        
+        if self.have[index]:
+            return
+        
+        self.have[index] = True
+        self.downloader.picker.got_have(index,self.connection)
+        # ProxyService_
+        #
+        # Aggregate the haves bitfields and send them to the coordinator
+        # If I am a coordinator, i will exit shortly
+        self.downloader.aggregate_and_send_haves()
+        #
+        # _ProxyService
+        
+        if self.have.complete():
+            self.downloader.picker.became_seed()
+            if self.downloader.picker.am_I_complete():
+                # Both sides are seeds: no point staying connected.
+                self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+                self.connection.close()
+                return
+        if self.downloader.endgamemode:
+            self.fix_download_endgame()
+        elif ( not self.downloader.paused
+               and not self.downloader.picker.is_blocked(index)
+               and self.downloader.storage.do_I_have_requests(index) ):
+            if not self.choked:
+                self._request_more()
+            else:
+                self.send_interested()
+
+    def _check_interests(self):
+        # Re-evaluate interest: if any piece the peer has is unblocked and
+        # still requestable (or we are in endgame), declare interest.
+        if self.interested or self.downloader.paused:
+            return
+        for i in xrange(len(self.have)):
+            if ( self.have[i] and not self.downloader.picker.is_blocked(i)
+                 and ( self.downloader.endgamemode
+                       or self.downloader.storage.do_I_have_requests(i) ) ):
+                self.send_interested()
+                return
+
+    def got_have_bitfield(self, have):
+        """
+        Handle the initial BITFIELD message: feed the peer's piece ownership
+        into the PiecePicker, filtering to the valid/live range when needed,
+        then update our interest or endgame state.
+        """
+        if self.downloader.picker.am_I_complete() and have.complete():
+            # Arno: If we're both seeds
+            if self.downloader.super_seeding:
+                self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too
+            self.connection.close()
+            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+            return
+
+        if DEBUGBF:
+            st = time.time()
+
+        if have.complete():
+            # Arno: He is seed
+            self.downloader.picker.got_seed()
+        else:
+            # Arno: pass on HAVE knowledge to PiecePicker and if LIVEWRAP: 
+            # filter out valid pieces
+            
+            # STBSPEED: if we haven't hooked in yet, don't iterate over whole range
+            # just over the active ranges in the received Bitfield
+            activerangeiterators = []
+            if self.downloader.picker.videostatus and self.downloader.picker.videostatus.live_streaming and self.downloader.picker.videostatus.get_live_startpos() is None:
+                # Not hooked in
+                activeranges = have.get_active_ranges()
+                
+                if len(activeranges) == 0:
+                    # Bug, fallback to whole range
+                    activerangeiterators = [self.downloader.picker.get_valid_range_iterator()]
+                else:
+                    # Create iterators for the active ranges
+                    for (s,e) in activeranges:
+                        activerangeiterators.append(xrange(s,e+1))
+            else:
+                # Hooked in, use own valid range as active range
+
+                # Arno, 2010-04-20: Not correct for VOD with seeking, then we
+                # should store the HAVE info for things before playback too.
+                
+                activerangeiterators = [self.downloader.picker.get_valid_range_iterator()]
+
+            if DEBUGBF:
+                print >>sys.stderr,"Downloader: got_have_field: live: Filtering bitfield",activerangeiterators 
+
+            if not self.downloader.picker.videostatus or self.downloader.picker.videostatus.live_streaming:
+                if DEBUGBF:
+                    print >>sys.stderr,"Downloader: got_have_field: live or normal filter"
+                # Transfer HAVE knowledge to PiecePicker and filter pieces if live
+                validhave = Bitfield(self.downloader.numpieces)
+                for iterator in activerangeiterators:
+                    for i in iterator:
+                        if have[i]:
+                            validhave[i] = True
+                            self.downloader.picker.got_have(i,self.connection)
+            else: # VOD
+                if DEBUGBF:
+                    print >>sys.stderr,"Downloader: got_have_field: VOD filter" 
+                validhave = Bitfield(self.downloader.numpieces)
+                (first,last) = self.downloader.picker.videostatus.download_range()
+                for i in xrange(first,last):
+                    if have[i]:
+                        validhave[i] = True
+                        self.downloader.picker.got_have(i,self.connection)
+            # ProxyService_
+            #
+            # Aggregate the haves bitfields and send them to the coordinator
+            # ARNOPS: Shouldn't this be done after have = validhave?
+            self.downloader.aggregate_and_send_haves()
+            #
+            # _ProxyService
+
+            """
+            # SANITY CHECK
+            checkhave = Bitfield(self.downloader.numpieces)
+            for i in self.downloader.picker.get_valid_range_iterator():
+                if have[i]:
+                    checkhave[i] = True
+
+            assert validhave.tostring() == checkhave.tostring()
+            """
+                    
+            # Store filtered bitfield instead of received one
+            have = validhave
+
+        if DEBUGBF:
+            et = time.time()
+            diff = et - st
+            print >>sys.stderr,"Download: got_have_field: took",diff
+
+                
+        self.have = have
+        
+        #print >>sys.stderr,"Downloader: got_have_bitfield: valid",`have.toboollist()`
+                    
+        if self.downloader.endgamemode and not self.downloader.paused:
+            # In endgame: interested if the peer has any outstanding piece.
+            for piece, begin, length in self.downloader.all_requests:
+                if self.have[piece]:
+                    self.send_interested()
+                    break
+            return
+        self._check_interests()
+
+    def get_rate(self):
+        # Long-term download rate from this peer (bytes/sec, per Measure).
+        return self.measure.get_rate()
+
+    def get_short_term_rate(self):
+        # Short-window download rate from this peer.
+        return self.short_term_measure.get_rate()
+
+    def is_snubbed(self):
+        """
+        Return True if this peer has sent nothing for longer than snub_time.
+        Side effect: if the secondary timer (last2) also expired, cancel all
+        outstanding requests and treat the connection as choked.
+        """
+# 2fastbt_
+        if not self.choked and clock() - self.last2 > self.downloader.snub_time and \
+            not self.connection.connection.is_helper_con() and \
+            not self.connection.connection.is_coordinator_con():
+# _2fastbt
+            for index, begin, length in self.active_requests:
+                self.connection.send_cancel(index, begin, length)
+            self.got_choke()    # treat it just like a choke
+        return clock() - self.last > self.downloader.snub_time
+
+    def peer_is_complete(self):
+        # True when the peer's (filtered) bitfield says it is a seed.
+        return self.have.complete()
+
+class Downloader:
+    def __init__(self, infohash, storage, picker, backlog, max_rate_period,
+                 numpieces, chunksize, measurefunc, snub_time,
+                 kickbans_ok, kickfunc, banfunc, scheduler = None):
+        # Per-torrent download coordinator: owns all SingleDownload peer
+        # objects, the global rate limit, kick/ban policy and endgame state.
+        self.infohash = infohash
+        self.b64_infohash = b64encode(infohash)  # for event reporting
+        self.storage = storage
+        self.picker = picker
+        self.backlog = backlog          # max outstanding requests per peer
+        self.max_rate_period = max_rate_period
+        self.measurefunc = measurefunc
+        self.totalmeasure = Measure(max_rate_period*storage.piece_length/storage.request_size)
+        self.numpieces = numpieces
+        self.chunksize = chunksize
+        self.snub_time = snub_time
+        self.kickfunc = kickfunc
+        self.banfunc = banfunc
+        self.disconnectedseeds = {}     # readable peer id -> last-seen clock()
+        self.downloads = []             # active SingleDownload instances
+        self.perip = {}                 # ip -> PerIPStats
+        self.gotbaddata = {}
+        self.kicked = {}                # ip -> peer id of kicked peers
+        self.banned = {}                # ip -> peer id of banned peers
+        self.kickbans_ok = kickbans_ok
+        self.kickbans_halted = False
+        self.super_seeding = False
+        self.endgamemode = False
+        self.endgame_queued_pieces = []
+        self.all_requests = []          # endgame: all outstanding requests
+        self.discarded = 0L             # bytes received but never requested
+        self.download_rate = 0          # 0 = unlimited (see queue_limit)
+#        self.download_rate = 25000  # 25K/s test rate
+        self.bytes_requested = 0
+        self.last_time = clock()
+        self.queued_out = {}            # peers waiting for queue space
+        self.requeueing = False
+        self.paused = False
+        self.scheduler = scheduler
+
+        # hack: we should not import this since it is not part of the
+        # core nor should we import here, but otherwise we will get
+        # import errors
+        #
+        # _event_reporter stores events that are logged somewhere...
+        # from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
+        # self._event_reporter = get_reporter_instance()
+        self._event_reporter = get_status_holder("LivingLab")
+
+        # check periodicaly
+        self.scheduler(self.dlr_periodic_check, 1)
+
+    def dlr_periodic_check(self):
+        # Periodic (1 s, self-rescheduling) maintenance: let the picker expire
+        # stale requests, then let each unchoked peer refill its pipeline.
+        self.picker.check_outstanding_requests(self.downloads)
+
+        ds = [d for d in self.downloads if not d.choked]
+        shuffle(ds)
+        for d in ds:
+            d._request_more()
+
+        self.scheduler(self.dlr_periodic_check, 1)
+
+    def set_download_rate(self, rate):
+        # Set the download cap; `rate` is in KB/s (stored as bytes/s).
+        self.download_rate = rate * 1000
+        self.bytes_requested = 0
+        
+    def queue_limit(self):
+        """
+        Return how many chunks may currently be requested, derived from the
+        configured download rate via a token-bucket style budget
+        (bytes_requested is drained over time, capped at 5s worth of rate).
+        """
+        if not self.download_rate:
+            return 10e10    # that's a big queue!
+        t = clock()
+        self.bytes_requested -= (t - self.last_time) * self.download_rate
+        self.last_time = t
+        # Budget freed up: give queued-out peers another chance (guard against
+        # re-entrancy via the requeueing flag).
+        if not self.requeueing and self.queued_out and self.bytes_requested < 0:
+            self.requeueing = True
+            q = self.queued_out.keys()
+            shuffle(q)
+            self.queued_out = {}
+            for d in q:
+                d._request_more()
+            self.requeueing = False
+        if -self.bytes_requested > 5*self.download_rate:
+            self.bytes_requested = -5*self.download_rate
+        ql = max(int(-self.bytes_requested/self.chunksize), 0)
+        # if DEBUG:
+        #     print >> sys.stderr, 'Downloader: download_rate: %s, bytes_requested: %s, chunk: %s -> queue limit: %d' % \
+        #         (self.download_rate, self.bytes_requested, self.chunksize, ql)
+        return ql
+
+    def chunk_requested(self, size):
+        # Charge `size` bytes against the rate-limit budget (see queue_limit).
+        self.bytes_requested += size
+
+    # External transfers count against the same budget.
+    external_data_received = chunk_requested
+
+    def make_download(self, connection):
+        # Create and register a SingleDownload for a new peer connection,
+        # updating per-IP statistics; returns the new SingleDownload.
+        ip = connection.get_ip()
+        if self.perip.has_key(ip):
+            perip = self.perip[ip]
+        else:
+            perip = self.perip.setdefault(ip, PerIPStats(ip))
+        perip.peerid = connection.get_readable_id()
+        perip.numconnections += 1
+        d = SingleDownload(self, connection)
+        perip.lastdownload = d
+        self.downloads.append(d)
+        self._event_reporter.create_and_add_event("connection-established", [self.b64_infohash, str(ip)])
+        return d
+
+    def piece_flunked(self, index):
+        # A piece failed its hash check: re-queue its blocks and nudge peers
+        # so the piece gets re-downloaded.
+        if self.paused:
+            return
+        if self.endgamemode:
+            if self.downloads:
+                # Put the failed piece's blocks back into the endgame pool.
+                while self.storage.do_I_have_requests(index):
+                    nb, nl = self.storage.new_request(index)
+                    self.all_requests.append((index, nb, nl))
+                for d in self.downloads:
+                    d.fix_download_endgame()
+                return
+            self._reset_endgame()
+            return
+        ds = [d for d in self.downloads if not d.choked]
+        shuffle(ds)
+        for d in ds:
+            d._request_more()
+        # Peers that have this piece but were uninterested are now useful.
+        ds = [d for d in self.downloads if not d.interested and d.have[index]]
+        for d in ds:
+            d.example_interest = index
+            d.send_interested()
+
+    def has_downloaders(self):
+        # Number of active peer connections (truthy when any exist).
+        return len(self.downloads)
+
+    def lost_peer(self, download):
+        # Unregister a disconnected peer; leave endgame if it was the last one.
+        ip = download.ip
+        self.perip[ip].numconnections -= 1
+        if self.perip[ip].lastdownload == download:
+            self.perip[ip].lastdownload = None
+        self.downloads.remove(download)
+        if self.endgamemode and not self.downloads: # all peers gone
+            self._reset_endgame()
+
+        self._event_reporter.create_and_add_event("connection-upload", [self.b64_infohash, ip, download.connection.total_uploaded])
+        self._event_reporter.create_and_add_event("connection-download", [self.b64_infohash, ip, download.connection.total_downloaded])
+        self._event_reporter.create_and_add_event("connection-lost", [self.b64_infohash, ip])
+        
+    def _reset_endgame(self):            
+        # Leave endgame mode, returning all outstanding requests to storage.
+        if DEBUG: print >>sys.stderr, "Downloader: _reset_endgame"
+        self.storage.reset_endgame(self.all_requests)
+        self.endgamemode = False
+        self.all_requests = []
+        self.endgame_queued_pieces = []
+
+    def add_disconnected_seed(self, id):
+        # Remember (by readable peer id) that a seed was seen, with timestamp
+        # for later expiry in num_disconnected_seeds().
+#        if not self.disconnectedseeds.has_key(id):
+#            self.picker.seed_seen_recently()
+        self.disconnectedseeds[id]=clock()
+
+#   def expire_disconnected_seeds(self):
+
+    def num_disconnected_seeds(self):
+        # Count recently-seen disconnected seeds, expiring stale entries.
+        # first expire old ones
+        expired = []
+        for id, t in self.disconnectedseeds.items():
+            if clock() - t > EXPIRE_TIME:     #Expire old seeds after so long
+                expired.append(id)
+        for id in expired:
+#            self.picker.seed_disappeared()
+            del self.disconnectedseeds[id]
+        return len(self.disconnectedseeds)
+        # if this isn't called by a stats-gathering function
+        # it should be scheduled to run every minute or two.
+
+    def _check_kicks_ok(self):
+        # Kicking/banning is allowed only while bad-data incidents are rare
+        # (<=10) and we would keep more than 2 connections afterwards.
+        if len(self.gotbaddata) > 10:
+            self.kickbans_ok = False
+            self.kickbans_halted = True
+        return self.kickbans_ok and len(self.downloads) > 2
+
+    def try_kick(self, download):
+        # Kick a misbehaving peer, if policy allows (_check_kicks_ok).
+        if self._check_kicks_ok():
+            download.guard.download = None
+            ip = download.ip
+            id = download.connection.get_readable_id()
+            self.kicked[ip] = id
+            self.perip[ip].peerid = id
+            self.kickfunc(download.connection)
+        
+    def try_ban(self, ip):
+        # Ban an IP, if policy allows; a ban supersedes any kick record.
+        if self._check_kicks_ok():
+            self.banfunc(ip)
+            self.banned[ip] = self.perip[ip].peerid
+            if self.kicked.has_key(ip):
+                del self.kicked[ip]
+
+    def set_super_seed(self):
+        # Enable super-seeding behaviour (see got_have_bitfield).
+        self.super_seeding = True
+
+    def check_complete(self, index):
+        """
+        Called after piece `index` finished: leave endgame if nothing is
+        outstanding, and on full-torrent completion close seed connections
+        and report events. Returns True when the torrent is complete.
+        """
+        if self.endgamemode and not self.all_requests:
+            self.endgamemode = False
+        if self.endgame_queued_pieces and not self.endgamemode:
+            self.requeue_piece_download()
+        if self.picker.am_I_complete():
+            assert not self.all_requests
+            assert not self.endgamemode
+
+            for download in self.downloads:
+                if download.have.complete():
+                    download.connection.send_have(index)   # be nice, tell the other seed you completed
+                    self.add_disconnected_seed(download.connection.get_readable_id())
+                    download.connection.close()
+
+                    self._event_reporter.create_and_add_event("connection-seed", [self.b64_infohash, download.ip, download.connection.total_uploaded])
+                else:
+                    self._event_reporter.create_and_add_event("connection-upload", [self.b64_infohash, download.ip, download.connection.total_uploaded])
+                    self._event_reporter.create_and_add_event("connection-download", [self.b64_infohash, download.ip, download.connection.total_downloaded])
+
+            self._event_reporter.create_and_add_event("complete", [self.b64_infohash])
+            # self._event_reporter.flush()
+                    
+            return True
+        return False
+
+    def too_many_partials(self):
+        # True when more than half as many pieces are partially downloaded
+        # as there are peers; used by the picker to prefer finishing pieces.
+        return len(self.storage.dirty) > (len(self.downloads)/2)
+
+    def cancel_requests(self, requests, allowrerequest=True):
+        """
+        Cancel the given (piece, begin, length) requests on every peer.
+        With allowrerequest=False the pieces are treated as "slow" and the
+        affected peers are steered to other pieces instead.
+        """
+
+        # todo: remove duplicates
+        slowpieces = [piece_id for piece_id, _, _ in requests]
+
+        if self.endgamemode:
+            if self.endgame_queued_pieces:
+                for piece_id, _, _ in requests:
+                    if not self.storage.do_I_have(piece_id):
+                        try:
+                            self.endgame_queued_pieces.remove(piece_id)
+                        except:
+                            pass
+
+            # remove the items in requests from self.all_requests
+            if not allowrerequest:
+                self.all_requests = [request for request in self.all_requests if not request in requests]
+                if DEBUG: print >>sys.stderr, "Downloader: cancel_requests: all_requests", len(self.all_requests), "remaining"
+
+        for download in self.downloads:
+            hit = False
+            for request in download.active_requests:
+                if request in requests:
+                    hit = True
+                    if DEBUG: print >>sys.stderr, "Downloader:cancel_requests: canceling", request, "on", download.ip
+                    download.connection.send_cancel(*request)
+                    if not self.endgamemode:
+                        self.storage.request_lost(*request)
+            if hit:
+                download.active_requests = [request for request in download.active_requests if not request in requests]
+                # Arno: VOD: all these peers were slow for their individually 
+                # assigned pieces. These pieces have high priority, so don't
+                # retrieve any of theses pieces from these slow peers, just
+                # give them something further in the future.
+                if allowrerequest:
+                    download._request_more()
+                else:
+                    # Arno: ALT is to just kick peer. Good option if we have lots (See Encryper.to_connect() queue
+                    #print >>sys.stderr,"Downloader: Kicking slow peer",d.ip
+                    #d.connection.close() # bye bye, zwaai zwaai
+                    download._request_more(slowpieces=slowpieces)
+
+            if not self.endgamemode and download.choked:
+                download._check_interests()
+
    def cancel_piece_download(self, pieces, allowrerequest=True):
        """Cancel every outstanding request belonging to any piece in
        `pieces`, on every peer.

        pieces         -- iterable of piece indices to stop downloading
        allowrerequest -- when True peers may re-request immediately; when
                          False the pieces are marked "slow" for each peer
                          and pruned from self.all_requests in endgame mode.
        """
        if self.endgamemode:
            if self.endgame_queued_pieces:
                # Best-effort removal from the endgame queue; pieces not in
                # the queue are silently ignored.
                for piece in pieces:
                    try:
                        self.endgame_queued_pieces.remove(piece)
                    except:
                        pass

            if allowrerequest:
                for index, nb, nl in self.all_requests:
                    if index in pieces:
                        self.storage.request_lost(index, nb, nl)

            else:
                # Same as above, but also rebuild all_requests without the
                # canceled pieces so they are not re-requested in endgame.
                new_all_requests = []
                for index, nb, nl in self.all_requests:
                    if index in pieces:
                        self.storage.request_lost(index, nb, nl)
                    else:
                        new_all_requests.append((index, nb, nl))
                self.all_requests = new_all_requests
                if DEBUG: print >>sys.stderr, "Downloader: cancel_piece_download: all_requests", len(self.all_requests), "remaining"

        for d in self.downloads:
            hit = False
            for index, nb, nl in d.active_requests:
                if index in pieces:
                    hit = True
                    d.connection.send_cancel(index, nb, nl)
                    if not self.endgamemode:
                        self.storage.request_lost(index, nb, nl)
            if hit:
                d.active_requests = [ r for r in d.active_requests
                                      if r[0] not in pieces ]
                # Arno: VOD: all these peers were slow for their individually 
                # assigned pieces. These pieces have high priority, so don't
                # retrieve any of theses pieces from these slow peers, just
                # give them something further in the future.
                if not allowrerequest:
                    # Arno: ALT is to just kick peer. Good option if we have lots (See Encryper.to_connect() queue
                    #print >>sys.stderr,"Downloader: Kicking slow peer",d.ip
                    #d.connection.close() # bye bye, zwaai zwaai
                    d._request_more(slowpieces=pieces)
                else:
                    d._request_more()
            if not self.endgamemode and d.choked:
                d._check_interests()
+
+    def requeue_piece_download(self, pieces = []):
+        if self.endgame_queued_pieces:
+            for piece in pieces:
+                if not piece in self.endgame_queued_pieces:
+                    self.endgame_queued_pieces.append(piece)
+            pieces = self.endgame_queued_pieces
+        if self.endgamemode:
+            if self.all_requests:
+                self.endgame_queued_pieces = pieces
+                return
+            self.endgamemode = False
+            self.endgame_queued_pieces = None
+           
+        ds = [d for d in self.downloads]
+        shuffle(ds)
+        for d in ds:
+            if d.choked:
+                d._check_interests()
+            else:
+                d._request_more()
+
    def start_endgame(self):
        """Enter endgame mode: collect every peer's outstanding requests
        into self.all_requests so each remaining block can be requested
        from multiple peers simultaneously."""
        assert not self.endgamemode
        self.endgamemode = True
        assert not self.all_requests
        for d in self.downloads:
            if d.active_requests:
                # A peer with pending requests must be unchoked and interested.
                assert d.interested and not d.choked
            for request in d.active_requests:
                assert not request in self.all_requests
                self.all_requests.append(request)
        for d in self.downloads:
            d.fix_download_endgame()
        if DEBUG: print >>sys.stderr, "Downloader: start_endgame: we have", len(self.all_requests), "requests remaining"
+
+    def pause(self, flag):
+        self.paused = flag
+        if flag:
+            for d in self.downloads:
+                for index, begin, length in d.active_requests:
+                    d.connection.send_cancel(index, begin, length)
+                d._letgo()
+                d.send_not_interested()
+            if self.endgamemode:
+                self._reset_endgame()
+        else:
+            shuffle(self.downloads)
+            for d in self.downloads:
+                d._check_interests()
+                if d.interested and not d.choked:
+                    d._request_more()
+
+    def live_invalidate(self,piece,mevirgin=False): # Arno: LIVEWRAP
+        #print >>sys.stderr,"Downloader: live_invalidate",piece
+        for d in self.downloads:
+            d.have[piece] = False
+        # STBSPEED: If I have no pieces yet, no need to loop to invalidate them.
+        if not mevirgin:
+            self.storage.live_invalidate(piece)
+        
    def live_invalidate_ranges(self,toinvalidateranges,toinvalidateset):
        """ STBSPEED: Faster version of live_invalidate that copies have arrays
        rather than iterate over them for clearing
        """
        # assumes toinvalidateranges holds 1 or 2 inclusive (start, end)
        # index pairs -- TODO confirm against caller.
        if len(toinvalidateranges) == 1:
            (s,e) = toinvalidateranges[0]
            # Build a cleared slice covering [s, e] and splice it in.
            emptyrange = [False for piece in xrange(s,e+1)]
            assert len(emptyrange) == e+1-s
            
            for d in self.downloads:
                newhave = d.have[0:s] + emptyrange + d.have[e+1:]

                #oldhave = d.have
                d.have = Bitfield(length=len(newhave),fromarray=newhave)
                #assert oldhave.tostring() == d.have.tostring()
                """
                for piece in toinvalidateset:
                    d.have[piece] = False
                print >>sys.stderr,"d len",len(d.have)
                print >>sys.stderr,"new len",len(newhave)
                    
                for i in xrange(0,len(newhave)):
                    if d.have[i] != newhave[i]:
                        print >>sys.stderr,"newhave diff",i
                        assert False
                """
                
        else:
            # Two ranges: clear both ends, keep only the middle slice
            # (e1+1 .. s2-1) of the old bitfield.
            (s1,e1) = toinvalidateranges[0]
            (s2,e2) = toinvalidateranges[1]
            emptyrange1 = [False for piece in xrange(s1,e1+1)]
            emptyrange2 = [False for piece in xrange(s2,e2+1)]
            
            assert len(emptyrange1) == e1+1-s1
            assert len(emptyrange2) == e2+1-s2
            
            for d in self.downloads:
                newhave = emptyrange1 + d.have[e1+1:s2] + emptyrange2
                
                #oldhave = d.have
                d.have = Bitfield(length=len(newhave),fromarray=newhave)
                #assert oldhave.tostring() == d.have.tostring()
                """
                for piece in toinvalidateset:
                    d.have[piece] = False
                print >>sys.stderr,"d len",len(d.have)
                print >>sys.stderr,"new len",len(newhave)
                for i in xrange(0,len(newhave)):
                    if d.have[i] != newhave[i]:
                        print >>sys.stderr,"newhave diff",i
                        assert False
                """
+                
+    # ProxyService_
+    #
+    def aggregate_and_send_haves(self):
+        """ Aggregates the information from the haves bitfields for all the active connections,
+        then calls the helper class to send the aggregated information as a PROXY_HAVE message 
+        """
+        if self.picker.helper:
+            # The current node is a coordinator
+            if DEBUG:
+                print >> sys.stderr,"Downloader: aggregate_and_send_haves: helper None or helper conn"
+            
+            # haves_vector is a matrix, having on each line a Bitfield
+            haves_vector = [None] * len(self.downloads)
+            for i in range(0, len(self.downloads)):
+                haves_vector[i] = self.downloads[i].have
+            
+            #Calculate the aggregated haves
+            aggregated_haves = Bitfield(self.numpieces)
+            for piece in range (0, self.numpieces):
+                aggregated_value = False
+                # For every column in the haves_vector matrix
+                for d in range(0, len(self.downloads)):
+                    # For every active connection
+                    aggregated_value = aggregated_value or haves_vector[d][piece] # Logical OR operation 
+                aggregated_haves[piece] = aggregated_value
+            
+            self.picker.helper.send_proxy_have(aggregated_haves)
+    #
+    # _ProxyService
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/DownloaderFeedback.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/DownloaderFeedback.py
new file mode 100644 (file)
index 0000000..b5804b6
--- /dev/null
@@ -0,0 +1,224 @@
# Written by Bram Cohen
# see LICENSE.txt for license information

from threading import Event

# Compatibility shim for ancient Python interpreters that lack the True /
# False builtins: referencing True raises NameError there, so fall back to
# plain integers.
try:
    True
except:
    True = 1
    False = 0
+
class DownloaderFeedback:
    """Collects per-torrent download/upload statistics and delivers them
    either on demand (gather()) or periodically via a display callback.

    The heavy lifting is delegated to the callables passed to __init__
    (rate measurers, choker, HTTP downloaders); this class only assembles
    their output into the stats/spew dictionaries consumed by
    SingleDownload / DownloadState.
    """
    def __init__(self, choker, ghttpdl, hhttpdl, add_task, upfunc, downfunc,
            ratemeasure, leftfunc, file_length, finflag, sp, statistics,
            statusfunc = None, interval = None, infohash = None, voddownload=None):
        # choker: owner of the active peer connection list (read in _rotate/spews)
        self.choker = choker
        # ghttpdl / hhttpdl: presumably the GetRight- and BitTornado-style
        # HTTP seed downloaders -- TODO confirm; only get_downloads() is used.
        self.ghttpdl = ghttpdl
        self.hhttpdl = hhttpdl
        # add_task: scheduler callback used for the periodic autodisplay loop
        self.add_task = add_task
        # upfunc / downfunc: return current up/down rates when called
        self.upfunc = upfunc
        self.downfunc = downfunc
        self.ratemeasure = ratemeasure
        # leftfunc: returns (obtained, desired, have) byte counts
        self.leftfunc = leftfunc
        self.file_length = file_length
        # finflag: threading.Event set once the download is complete
        self.finflag = finflag
        self.sp = sp
        self.statistics = statistics
        self.lastids = []
        self.spewdata = None
        self.infohash = infohash
        # voddownload: optional video-on-demand transporter; when present,
        # VOD playability stats are included in gather()'s result.
        self.voddownload = voddownload
        # doneprocessing guards against overlapping display() invocations.
        self.doneprocessing = Event()
        self.doneprocessing.set()
        if statusfunc:
            self.autodisplay(statusfunc, interval)
        

    def _rotate(self):
        """Return the connection list rotated so that it continues from the
        first connection remembered in self.lastids (stable spew paging)."""
        cs = self.choker.connections
        for id in self.lastids:
            for i in xrange(len(cs)):
                if cs[i].get_id() == id:
                    return cs[i:] + cs[:i]
        return cs

    def spews(self):
        """Build the per-peer statistics list ('spew'): one dict per BT
        connection, plus one entry per active URL/HTTP seed."""
        l = []
        cs = self._rotate()
        self.lastids = [c.get_id() for c in cs]
        for c in cs:    # c: Connecter.Connection
            a = {}
            a['id'] = c.get_readable_id()
            a['ip'] = c.get_ip()
            if c.is_locally_initiated():
                a['port'] = c.get_port()
            else:
                a['port'] = 0
            try:
                # The optimistically-unchoked peer is first in the choker list.
                a['optimistic'] = (c is self.choker.connections[0])
            except:
                a['optimistic'] = False
            if c.is_locally_initiated():
                a['direction'] = 'L'
            else:
                a['direction'] = 'R'
            ##a['unauth_permid'] = c.get_unauth_permid()
            u = c.get_upload()
            a['uprate'] = int(u.measure.get_rate())
            a['uinterested'] = u.is_interested()
            a['uchoked'] = u.is_choked()
            d = c.get_download()
            a['downrate'] = int(d.measure.get_rate())
            a['dinterested'] = d.is_interested()
            a['dchoked'] = d.is_choked()
            a['snubbed'] = d.is_snubbed()
            a['utotal'] = d.connection.upload.measure.get_total()
            a['dtotal'] = d.connection.download.measure.get_total()
            if d.connection.download.have:
                # Fraction of pieces the remote peer has.
                a['completed'] = float(len(d.connection.download.have)-d.connection.download.have.numfalse)/float(len(d.connection.download.have))
            else:
                a['completed'] = 1.0
            # The total download speed of the peer as measured from its
            # HAVE messages.
            a['speed'] = d.connection.download.peermeasure.get_rate()
            a['g2g'] = c.use_g2g
            a['g2g_score'] = c.g2g_score()

            # RePEX: include number of pex messages in the stats
            a['pex_received'] = c.pex_received 
            
            l.append(a)                                               

        # URL-list (GetRight-style) HTTP seeds.
        for dl in self.ghttpdl.get_downloads():
            if dl.goodseed:
                a = {}
                a['id'] = 'url list'
                a['ip'] = dl.baseurl
                a['optimistic'] = False
                a['direction'] = 'L'
                a['uprate'] = 0
                a['uinterested'] = False
                a['uchoked'] = False
                a['downrate'] = int(dl.measure.get_rate())
                a['dinterested'] = True
                a['dchoked'] = not dl.active
                a['snubbed'] = not dl.active
                a['utotal'] = None
                a['dtotal'] = dl.measure.get_total()
                a['completed'] = 1.0
                a['speed'] = None

                l.append(a)
        # Classic HTTP seeds.
        for dl in self.hhttpdl.get_downloads():
            if dl.goodseed:
                a = {}
                a['id'] = 'http seed'
                a['ip'] = dl.baseurl
                a['optimistic'] = False
                a['direction'] = 'L'
                a['uprate'] = 0
                a['uinterested'] = False
                a['uchoked'] = False
                a['downrate'] = int(dl.measure.get_rate())
                a['dinterested'] = True
                a['dchoked'] = not dl.active
                a['snubbed'] = not dl.active
                a['utotal'] = None
                a['dtotal'] = dl.measure.get_total()
                a['completed'] = 1.0
                a['speed'] = None

                l.append(a)
        return l


    def gather(self, displayfunc = None, getpeerlist=False):
        """ Called by SingleDownload to obtain download statistics to become the
        DownloadStates for each Download """
        s = {'stats': self.statistics.update()}
        if getpeerlist:
            s['spew'] = self.spews()
        else:
            s['spew'] = None
        s['up'] = self.upfunc()
        if self.finflag.isSet():
            # Finished: report static "complete" values.
            s['done'] = self.file_length
            s['down'] = 0.0
            s['frac'] = 1.0
            s['wanted'] = 0
            s['time'] = 0
            s['vod'] = False
            s['vod_prebuf_frac'] = 1.0
            s['vod_playable'] = True
            s['vod_playable_after'] = 0.0
            s['vod_stats'] = {'harry':1}
            if self.voddownload is not None:
                #s['vod'] = True
                s['vod_stats'] = self.voddownload.get_stats()

#            if self.voddownload:
#                s['vod_duration'] = self.voddownload.get_duration()
#            else:
#                s['vod_duration'] = None
            return s
        s['down'] = self.downfunc()
        obtained, desired, have = self.leftfunc()
        s['done'] = obtained
        s['wanted'] = desired
        if desired > 0:
            s['frac'] = float(obtained)/desired
        else:
            s['frac'] = 1.0
        if desired == obtained:
            s['time'] = 0
        else:
            s['time'] = self.ratemeasure.get_time_left(desired-obtained)
            
        if self.voddownload is not None:
            s['vod_prebuf_frac'] = self.voddownload.get_prebuffering_progress()
            s['vod_playable'] = self.voddownload.is_playable()
            s['vod_playable_after'] = self.voddownload.get_playable_after()
            s['vod'] = True
            s['vod_stats'] = self.voddownload.get_stats()
#            s['vod_duration'] = self.voddownload.get_duration()
        else:
            s['vod_prebuf_frac'] = 0.0
            s['vod_playable'] = False
            s['vod_playable_after'] = float(2 ** 31)
            s['vod'] = False
            s['vod_stats'] = {}
#            s['vod_duration'] = None
        return s        


    def display(self, displayfunc):
        """Gather stats and hand them to `displayfunc`; skipped entirely if
        a previous invocation has not signalled doneprocessing yet."""
        if not self.doneprocessing.isSet():
            return
        self.doneprocessing.clear()
        stats = self.gather()
        if self.finflag.isSet():
            displayfunc(dpflag = self.doneprocessing, 
                upRate = stats['up'], 
                statistics = stats['stats'], spew = stats['spew'])
        elif stats['time'] is not None:
            displayfunc(dpflag = self.doneprocessing, 
                fractionDone = stats['frac'], sizeDone = stats['done'], 
                downRate = stats['down'], upRate = stats['up'], 
                statistics = stats['stats'], spew = stats['spew'], 
                timeEst = stats['time'])
        else:
            displayfunc(dpflag = self.doneprocessing, 
                fractionDone = stats['frac'], sizeDone = stats['done'], 
                downRate = stats['down'], upRate = stats['up'], 
                statistics = stats['stats'], spew = stats['spew'])


    def autodisplay(self, displayfunc, interval):
        """Start calling `displayfunc` every `interval` seconds."""
        self.displayfunc = displayfunc
        self.interval = interval
        self._autodisplay()

    def _autodisplay(self):
        """Periodic tick: reschedule first, then run one display pass."""
        self.add_task(self._autodisplay, self.interval)
        self.display(self.displayfunc)
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Encrypter.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Encrypter.py
new file mode 100644 (file)
index 0000000..396e59f
--- /dev/null
@@ -0,0 +1,771 @@
+# Written by Bram Cohen, Pawel Garbacki, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+from base64 import b64encode
+from cStringIO import StringIO
+from binascii import b2a_hex
+from socket import error as socketerror
+from urllib import quote
+from struct import unpack
+from time import time
+from sets import Set
+from traceback import print_exc
+
+from BaseLib.Core.BitTornado.BT1.MessageID import protocol_name,option_pattern
+from BaseLib.Core.BitTornado.BT1.convert import toint
+from BaseLib.Core.Statistics.Status.Status import get_status_holder
+from BaseLib.Core.ProxyService.ProxyServiceUtil import *
+
# Compatibility shim for ancient Python interpreters that lack the True /
# False builtins: referencing True raises NameError there, so fall back to
# plain integers.
try:
    True
except:
    True = 1
    False = 0

# Module-wide debug switch; diagnostics are printed to stderr when enabled.
DEBUG = False
+
if sys.platform == 'win32':
    # Arno: On windows XP SP2 there is a limit on "the number of concurrent, 
    # incomplete outbound TCP connection attempts. When the limit is reached, 
    # subsequent connection attempts are put in a queue and resolved at a fixed 
    # rate so that there are only a limited number of connections in the 
    # incomplete state. During normal operation, when programs are connecting 
    # to available hosts at valid IP addresses, no limit is imposed on the 
    # number of connections in the incomplete state. When the number of 
    # incomplete connections exceeds the limit, for example, as a result of 
    # programs connecting to IP addresses that are not valid, connection-rate 
    # limitations are invoked, and this event is logged." 
    # Source: http://go.microsoft.com/fwlink/events.asp and fill in 
    # Product: "Windows Operating System"
    # Event: 4226
    # Which directs to:
    # http://www.microsoft.com/technet/support/ee/transform.aspx?ProdName=Windows%20Operating%20System&ProdVer=5.2&EvtID=4226&EvtSrc=Tcpip&LCID=1033
    #
    # The ABC/BitTornado people felt the need to therefore impose a rate limit
    # themselves. Normally, I would be against this, because the kernel usually
    # does a better job at this than some app programmers. But here it makes 
    # somewhat sense because it appears that when the Win32 "connection-rate 
    # limitations" are triggered, this causes socket timeout
    # errors. For ABC/BitTornado this should not be a big problem, as none of 
    # the TCP connections it initiates are really vital that they proceed 
    # quickly.
    #
    # For Tribler, we have one very important TCP connection at the moment,
    # that is when the VideoPlayer/VLC tries to connect to our HTTP-based
    # VideoServer on 127.0.0.1 to play the video. We have actually seen these
    # connections timeout when we set MAX_INCOMPLETE to > 10.
    #
    # So we keep this app-level rate limit mechanism FOR NOW and add a security
    # margin. To support our SwarmPlayer that wants quick startup of many
    # connections we decrease the autoclosing timeout, such that bad conns
    # get removed from this rate-limit admin faster. 
    #
    # Windows die die die.
    #
    # Update, 2009-10-21:
    # This limiting has been disabled starting Vista SP2 and beyond:
    # http://support.microsoft.com/kb/969710
    #
    # Go Vista?! 
    #

    # [E1101] Module 'sys' has no 'getwindowsversion' member
    # pylint: disable-msg=E1101
    winvertuple = sys.getwindowsversion()
    # pylint: enable-msg=E1101
    spstr = winvertuple[4]
    # Precedence note: 'and' binds tighter than 'or', so the limit applies on
    # all NT 5.x (2000/XP/2003), and on NT 6 only before "Service Pack 2".
    if winvertuple[0] == 5 or winvertuple[0] == 6 and spstr < "Service Pack 2":
        MAX_INCOMPLETE = 8 # safety margin. Even 9 gives video socket timeout, 10 is official limit
    else:
        MAX_INCOMPLETE = 1024 # inf
else:
    MAX_INCOMPLETE = 32
+
def make_readable(s):
    """Render a peer/message id for humans: empty input yields '', printable
    ids are returned quoted, and binary ids are shown as uppercase hex."""
    if not s:
        return ''
    # If URL-quoting would have to escape anything, treat the id as binary
    # and show it in hex instead of raw characters.
    if '%' in quote(s):
        return b2a_hex(s).upper()
    return '"' + s + '"'
+
def show(s):
    # Hex-dump helper used by the debug prints below (e.g. reserved bits).
    return b2a_hex(s)
+
class IncompleteCounter:
    """Tally of outgoing TCP connection attempts that have not completed
    their handshake yet; used to stay under the Win32 half-open limit
    (MAX_INCOMPLETE, a module-level constant)."""

    def __init__(self):
        # Public counter; incremented when a locally initiated connection
        # starts, decremented when it completes or is severed.
        self.c = 0

    def increment(self):
        self.c = self.c + 1

    def decrement(self):
        #print_stack()
        self.c = self.c - 1

    def toomany(self):
        """True when no further connection attempts should be started."""
        #print >>sys.stderr,"IncompleteCounter: c",self.c
        return self.c >= MAX_INCOMPLETE
+
# Arno: This is a global counter!!!!
# Shared by every Encoder/Connection in the process, so the half-open
# connection limit is enforced process-wide, not per torrent.
incompletecounter = IncompleteCounter()
+
+
+# header, reserved, download id, my id, [length, message]
+
+class Connection:
+# 2fastbt_
    def __init__(self, Encoder, connection, id, ext_handshake = False, 
                  locally_initiated = None, dns = None, coord_con = False, challenge = None):
        """Wrap a raw socket in the BT handshake state machine.

        Encoder        -- owning Encoder (provides my_id, download_id, raw_server)
        connection     -- SocketHandler.SingleSocket transport
        id             -- remote peer id if known, else None/0
        ext_handshake  -- True when the remote side already sent its handshake
        coord_con      -- ProxyService: connection to/from a coordinator; a
                          challenge-encoding peer id is used on such links
        challenge      -- challenge sent by the coordinator to identify the Helper
        """
# _2fastbt
        self.Encoder = Encoder
        self.connection = connection    # SocketHandler.SingleSocket
        self.connecter = Encoder.connecter
        self.id = id
        self.readable_id = make_readable(id)
        self.coord_con = coord_con
        # Challenge sent by the coordinator to identify the Helper
        self.challenge = challenge
        if locally_initiated is not None:
            self.locally_initiated = locally_initiated
        elif coord_con:
            self.locally_initiated = True
        else:
            # Fall back: having the remote id up front implies we initiated.
            self.locally_initiated = (id != None)
# _2fastbt
        self.complete = False
        self.keepalive = lambda: None
        self.closed = False
        self.buffer = StringIO()
# overlay        
        self.dns = dns
        self.support_extend_messages = False
        self.connecter_conn = None
# _overlay
        self.support_merklehash= False
        self.na_want_internal_conn_from = None
        self.na_address_distance = None
        
        if self.locally_initiated:
            # Count towards the process-wide half-open connection limit.
            incompletecounter.increment()
# 2fastbt_
        self.create_time = time()
# _2fastbt
        
        if self.locally_initiated or ext_handshake:
            if DEBUG:
                print >>sys.stderr,"Encoder.Connection: writing protname + options + infohash"
            self.connection.write(chr(len(protocol_name)) + protocol_name + 
                option_pattern + self.Encoder.download_id)
        if ext_handshake:
            if DEBUG:
                print >>sys.stderr,"Encoder.Connection: writing my peer-ID"
            if coord_con:
                # on the helper-coordinator BT communication a special peer id is used
                proxy_peer_id = encode_challenge_in_peerid(self.Encoder.my_id, self.challenge)
                self.connection.write(proxy_peer_id)
            else:
                self.connection.write(self.Encoder.my_id)
            self.next_len, self.next_func = 20, self.read_peer_id
        else:
            self.next_len, self.next_func = 1, self.read_header_len
        # Give the handshake a bounded time to complete.
        self.Encoder.raw_server.add_task(self._auto_close, AUTOCLOSE_TIMEOUT)
+        
    # --- Simple accessors; the get_* calls delegate to the underlying
    # SingleSocket, the rest expose handshake state recorded above. ---

    def get_ip(self, real=False):
        return self.connection.get_ip(real)

    def get_port(self, real=False):
        return self.connection.get_port(real)

    def get_myip(self, real=False):
        return self.connection.get_myip(real)
    
    def get_myport(self, real=False):
        return self.connection.get_myport(real)

    def get_id(self):
        # Remote peer id (see read_peer_id for how/when it is set).
        return self.id

    def get_readable_id(self):
        return self.readable_id

    def is_locally_initiated(self):
        return self.locally_initiated

    def is_flushed(self):
        return self.connection.is_flushed()

    def supports_merklehash(self):
        # Advertised via reserved handshake bits; see set_options.
        return self.support_merklehash

    def supports_extend_messages(self):
        # Advertised via reserved handshake bits; see set_options.
        return self.support_extend_messages
+
    def set_options(self, s):
        """Record which protocol extensions the peer advertises, based on
        the 8 reserved bytes `s` of its handshake (byte 5 is inspected)."""
# overlay_
        r = unpack("B", s[5])
        if r[0] & 0x10:    # left + 43 bit
            self.support_extend_messages = True
            if DEBUG:
                print >>sys.stderr,"encoder: Peer supports EXTEND"
        if r[0] & 0x20:    # left + 42 bit
            self.support_merklehash= True
            if DEBUG:
                print >>sys.stderr,"encoder: Peer supports Merkle hashes"
# _overlay
+
+    def read_header_len(self, s):
+        if ord(s) != len(protocol_name):
+            return None
+        return len(protocol_name), self.read_header
+
+    def read_header(self, s):
+        if s != protocol_name:
+            return None
+        return 8, self.read_reserved
+
    def read_reserved(self, s):
        """Handshake step 3: parse the 8 reserved option bytes, then expect
        the 20-byte infohash."""
        if DEBUG:
            print >>sys.stderr,"encoder: Reserved bits:", show(s)
            print >>sys.stderr,"encoder: Reserved bits=", show(option_pattern)
        self.set_options(s)
        return 20, self.read_download_id
+
    def read_download_id(self, s):
        """Handshake step 4: `s` is the remote infohash; it must match ours.
        For remotely initiated connections, reply with our full handshake
        now that we know which swarm is meant."""
        if s != self.Encoder.download_id:
            return None
        if not self.locally_initiated:
            self.Encoder.connecter.external_connection_made += 1
            if self.coord_con:
                # on the helper-coordinator BT communication a special peer id is used
                proxy_peer_id = encode_challenge_in_peerid(self.Encoder.my_id, self.challenge)
                self.connection.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id + proxy_peer_id)
            else:
                self.connection.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id + self.Encoder.my_id)

        return 20, self.read_peer_id
+
    def read_peer_id(self, s):
# 2fastbt_
        """ In the scenario of locally initiating: 
        - I may or may not (normally not) get the remote peerid from a tracker before connecting. 
        - If I've gotten the remote peerid, set it as self.id, otherwise set self.id as 0.
        - I send handshake message without my peerid.
        - After I received peer's handshake message, if self.id isn't 0 (i.e., I had the remote peerid), 
        check the remote peerid, otherwise set self.id as the remote id. If the check is failed, drop the connection.
        - Then I send self.Encoder.my_id to the remote peer. 
        - The remote peer will record self.Encoder.id as my peerid.
        - Anyway, self.id should be the same with the remote id if handshake is ok.
        
        Note self.Encoder.id is a unique id to each swarm I have. 
        Normally self.id isn't equal to self.Encoder.my_id.
        
        In the scenario of remotely initiating:
        - I don't have remote id
        - I received the handshake message to join a swarm. 
        - Before I read the remote id, I send my handshake with self.Encoder.my_id, my unique id of the swarm.
        - I read the remote id and set it as my.id
        
        before read_peer_id(), self.id = 0 if locally init without remote id
                               self.id = remote id if locally init with remote id
                               self.id = None if remotely init
        after read_peer_id(),  self.id = remote id if locally init
                               self.id = remote id if remotely init
        """
# _2fastbt        
        if DEBUG:
            print >>sys.stderr,"Encoder.Connection: read_peer_id"

        if not self.id:    # remote init or local init without remote peer's id or remote init
            self.id = s
            self.readable_id = make_readable(s)
        else:    # local init with remote id
            if s != self.id:
                if DEBUG:
                    print >>sys.stderr,"Encoder.Connection: read_peer_id: s != self.id, returning None"
                return None
        # got_id() may reject the peer (e.g. self-connect, duplicate).
        self.complete = self.Encoder.got_id(self)
        
        if DEBUG:
            print >>sys.stderr,"Encoder.Connection: read_peer_id: complete is",self.complete
        
        
        if not self.complete:
            if DEBUG:
                print >>sys.stderr,"Encoder.Connection: read_peer_id: self not complete!!!, returning None"
            return None
        if self.locally_initiated:
            if self.coord_con:
                # on the helper-coordinator BT communication a special peer id is used
                proxy_peer_id = encode_challenge_in_peerid(self.Encoder.my_id, self.challenge)
                self.connection.write(proxy_peer_id)
            else:
                self.connection.write(self.Encoder.my_id)
            # Handshake done: this attempt no longer counts as half-open.
            incompletecounter.decrement()
            # Arno: open new conn from queue if at limit. Faster than RawServer task
            self.Encoder._start_connection_from_queue(sched=False)
            
        c = self.Encoder.connecter.connection_made(self)
        self.keepalive = c.send_keepalive
        # Switch to the steady-state message loop: 4-byte length prefix next.
        return 4, self.read_len
+
+    def read_len(self, s):
+        l = toint(s)
+        if l > self.Encoder.max_len:
+            return None
+        return l, self.read_message
+
+    def read_message(self, s):
+        if s != '':
+            self.connecter.got_message(self, s)
+        #else:
+        #    print >>sys.stderr,"encoder: got keepalive from",s.getpeername()
+        return 4, self.read_len
+
+    def read_dead(self, s):
+        return None
+
    def _auto_close(self):
        """Scheduled AUTOCLOSE_TIMEOUT seconds after creation: drop the
        connection if the handshake never completed (coordinator links are
        exempt)."""
        if not self.complete and not self.is_coordinator_con():
            if DEBUG:
                print >>sys.stderr,"encoder: autoclosing ",self.get_myip(),self.get_myport(),"to",self.get_ip(),self.get_port()

            self.Encoder._event_reporter.create_and_add_event("connection-timeout", [b64encode(self.Encoder.connecter.infohash), self.get_ip(), self.get_port()])

            # RePEX: inform repexer of timeout
            repexer = self.Encoder.repexer
            if repexer and not self.closed:
                try:
                    repexer.connection_timeout(self)
                except:
                    print_exc()
            self.close()
+
+    def close(self,closeall=False):
+        if DEBUG:
+            print >>sys.stderr,"encoder: closing connection",self.get_ip()
+            #print_stack()
+        
+        if not self.closed:
+            self.connection.close()
+            self.sever(closeall=closeall)
+            
+
    def sever(self,closeall=False):
        """Bookkeeping after the socket is gone: deregister from the
        Encoder, notify the repexer/connecter, and release the half-open
        slot for incomplete locally initiated attempts."""
        self.closed = True
        if self.Encoder.connections.has_key(self.connection):
            self.Encoder.admin_close(self.connection)
        
        # RePEX: inform repexer of closed connection
        repexer = self.Encoder.repexer
        if repexer and not self.complete:
            try:
                repexer.connection_closed(self)
            except:
                print_exc()
            
        if self.complete:
            self.connecter.connection_lost(self)
        elif self.locally_initiated:
            incompletecounter.decrement()
            # Arno: open new conn from queue if at limit. Faster than RawServer task
            if not closeall:
                self.Encoder._start_connection_from_queue(sched=False)
+
+    def send_message_raw(self, message):  # write already-encoded bytes straight to the socket (no framing added)
+        if not self.closed:
+            self.connection.write(message)    # SingleSocket
+
+    def data_came_in(self, connection, s):  # pump incoming bytes through the (next_len, next_func) state machine
+        self.Encoder.measurefunc(len(s))  # account downloaded bytes
+        while 1:
+            if self.closed:
+                return
+            i = self.next_len - self.buffer.tell()  # bytes still needed to complete the current item
+            if i > len(s):
+                self.buffer.write(s)  # not enough yet: stash and wait for the next chunk
+                return
+            self.buffer.write(s[:i])
+            s = s[i:]
+            m = self.buffer.getvalue()  # one complete item (length prefix or message body)
+            self.buffer.reset()
+            self.buffer.truncate()
+            try:
+                x = self.next_func(m)  # returns (next_len, next_func), or None to drop the connection
+            except:
+                print_exc()
+                self.next_len, self.next_func = 1, self.read_dead  # swallow any further input
+                raise
+            if x is None:  # handler rejected the data: close and stop
+                if DEBUG:
+                    print >>sys.stderr,"encoder: function failed",self.next_func
+                self.close()
+                return
+            self.next_len, self.next_func = x
+
+    def connection_flushed(self, connection):  # send buffer drained; forwarded only once the handshake is complete
+        if self.complete:
+            self.connecter.connection_flushed(self)
+
+    def connection_lost(self, connection):  # RawServer callback: the remote side dropped the socket
+        if self.Encoder.connections.has_key(connection):
+            self.sever()
+# 2fastbt_
+    def is_coordinator_con(self):  # True if this conn is marked as, or goes to, the 2fast/ProxyService coordinator
+        #if DEBUG:
+        #    print >>sys.stderr,"encoder: is_coordinator_con: coordinator is ",self.Encoder.coordinator_ip
+        if self.coord_con:
+            return True
+        elif self.get_ip() == self.Encoder.coordinator_ip and self.get_ip() != '127.0.0.1': # Arno: for testing
+            return True
+        else:
+            return False
+
+    def is_helper_con(self):  # True if the peer's IP belongs to one of our ProxyService helpers
+        coordinator = self.connecter.coordinator
+        if coordinator is None:  # we are not coordinating a download: nobody is our helper
+            return False
+        return coordinator.is_helper_ip(self.get_ip())
+# _2fastbt
+
+    # NETWORK AWARE
+    def na_set_address_distance(self):
+        """ Calc address distance. Currently simple: if same /24 then 0
+        else 1. TODO: IPv6
+        """
+        hisip = self.get_ip(real=True)
+        myip = self.get_myip(real=True)
+        
+        a = hisip.split(".")
+        b = myip.split(".")
+        if a[0] == b[0] and a[1] == b[1] and a[2] == b[2]:  # first three octets equal == same /24 (assumes dotted-quad IPv4)
+            if DEBUG:
+                print >>sys.stderr,"encoder.connection: na: Found peer on local LAN",self.get_ip()
+            self.na_address_distance = 0
+        else:
+            self.na_address_distance = 1
+        
+    def na_get_address_distance(self):  # 0 = peer shares our /24, 1 = elsewhere (set by na_set_address_distance)
+        return self.na_address_distance
+    
+
+
+
+
+class Encoder:  # owns all Encrypter.Connections of one swarm: connection table, outgoing queue, bans, keepalives
+    def __init__(self, connecter, raw_server, my_id, max_len,
+            schedulefunc, keepalive_delay, download_id, 
+            measurefunc, config):
+        self.raw_server = raw_server
+        self.connecter = connecter
+        self.my_id = my_id  # our peer id; used to detect connections to ourselves (see got_id)
+        self.max_len = max_len  # upper bound on message length accepted by Connection.read_len
+        self.schedulefunc = schedulefunc
+        self.keepalive_delay = keepalive_delay
+        self.download_id = download_id
+        self.measurefunc = measurefunc  # download-rate accounting hook, fed from Connection.data_came_in
+        self.config = config
+        self.connections = {}  # low-level socket object -> Connection
+        self.banned = {}  # ip -> 1; see ban()
+        self.to_connect = Set()  # pending (dns, id) peers awaiting an outgoing attempt
+        self.trackertime = 0  # timestamp of the last peer batch (see start_connections / admin_close)
+        self.paused = False
+        if self.config['max_connections'] == 0:  # 0 is the "unlimited" sentinel
+            self.max_connections = 2 ** 30
+        else:
+            self.max_connections = self.config['max_connections']
+        """
+        In r529 there was a problem when a single Windows client 
+        would connect to our text-based seeder (i.e. btlaunchmany) 
+        with no other clients present. Apparently both the seeder 
+        and client would connect to eachother simultaneously, but 
+        not end up with a good connection, halting the client.
+
+        Arno, 2006-03-10: Reappears in ~r890, fixed in r892. It 
+        appears to be a problem of writing to a nonblocking socket 
+        before it signalled it is ready for writing, although the 
+        evidence is inconclusive. 
+
+        Arno: 2006-12-15: Reappears in r2319. There is some weird
+        socket problem here. Using Python 2.4.4 doesn't solve it.
+        The problem I see here is that as soon as we register
+        at the tracker, the single seeder tries to connect to
+        us. He succeeds, but after a short while the connection
+        appears to be closed by him. We then wind up with no
+        connection at all and have to wait until we recontact
+        the tracker.
+
+        My workaround is to refuse these initial connections from
+        the seeder and wait until I've started connecting to peers
+        based on the info I got from the tracker before accepting
+        remote connections.
+        
+        Arno: 2007-02-16: I think I finally found it. The Tribler 
+        tracker (BitTornado/BT1/track.py) will do a NAT check
+        (BitTornado/BT1/NATCheck) by default, which consists of
+        initiating a connection and then closing it after a good 
+        BT handshake was received.
+        
+        The solution now is to make sure we check IP and port to
+        identify existing connections. I already added that 2006-12-15,
+        so I just removed the restriction on initial connections, 
+        which are superfluous.
+        """
+        self.rerequest = None  # tracker Rerequester; injected later via set_rerequester()
+# 2fastbt_
+        self.toofast_banned = {}
+        self.coordinator_ip = None  # ProxyService coordinator address, set via set_coordinator_ip()
+# _2fastbt        
+
+        # hack: we should not import this since it is not part of the
+        # core nor should we import here, but otherwise we will get
+        # import errors
+        #
+        # _event_reporter stores events that are logged somewhere...
+        # from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
+        self._event_reporter = get_status_holder("LivingLab")
+
+        # the addresses that have already been reported
+        self._known_addresses = {}
+
+        schedulefunc(self.send_keepalives, keepalive_delay)  # start the periodic keepalive task
+        
+        # RePEX: added repexer field.
+        # Note: perhaps call it observer in the future and make the 
+        # download engine more observable?
+        self.repexer = None
+        
+    def send_keepalives(self):  # periodic task: reschedules itself, then pings every live connection
+        self.schedulefunc(self.send_keepalives, self.keepalive_delay)
+        if self.paused:
+            return
+        for c in self.connections.values():
+            c.keepalive()
+
+    def start_connections(self, dnsidlist):
+        """ Arno: dnsidlist is a list of tuples (dns,id) where dns is a (ip,port) tuple
+        and id is apparently always 0. It must be unequal to None at least,
+        because Encrypter.Connection used the id to see if a connection is
+        locally initiated?! """
+        
+        if DEBUG:
+            print >>sys.stderr,"encoder: adding",len(dnsidlist),"peers to queue, current len",len(self.to_connect)
+        if not self.to_connect:  # queue was empty: (re)start the drain task
+            self.raw_server.add_task(self._start_connection_from_queue)
+
+        # all reported addresses are stored in self._known_addresses
+        # to prevent duplicated addresses being send
+        new_addresses = []
+        known_addresses = self._known_addresses
+        for dns, _ in dnsidlist:
+            address = "%s:%s" % dns
+            if not address in known_addresses:
+                known_addresses[address] = True
+                new_addresses.append(address)
+
+        if new_addresses:
+            self._event_reporter.create_and_add_event("known-hosts", [b64encode(self.connecter.infohash), ";".join(new_addresses)])
+
+        # bound the memory used by the duplicate-address cache
+        if len(known_addresses) > 2500:
+            known_addresses.clear()
+
+        self.to_connect.update(dnsidlist)
+        # make sure addrs from various sources, like tracker, ut_pex and DHT are mixed
+        # TODO: or not? For Tribler Supported we may want the tracker to
+        # be more authoritative, such that official seeders found fast. Nah.
+        
+        #random.shuffle(self.to_connect) 
+        #Jelle: Since objects are already placed in the Set in pseudo random order, they don't have to 
+        # be shuffled (and a Set cannot be shuffled).
+        
+        self.trackertime = int(time()) 
+
+    def _start_connection_from_queue(self,sched=True):  # drain at most one queued peer; reschedule while work remains
+        try:
+            if not self.to_connect:
+                return
+            
+            if self.connecter.external_connection_made:
+                max_initiate = self.config['max_initiate']
+            else:
+                max_initiate = int(self.config['max_initiate']*1.5)  # no incoming conns yet: allow more outgoing
+            cons = len(self.connections)
+            
+            if DEBUG:
+                print >>sys.stderr,"encoder: conns",cons,"max conns",self.max_connections,"max init",max_initiate
+            
+            if cons >= self.max_connections or cons >= max_initiate:
+                delay = 60.0  # at capacity: back off a minute
+            elif self.paused or incompletecounter.toomany():
+                delay = 1.0  # paused or too many half-open connections: retry shortly
+            else:
+                delay = 0.0
+                dns, id = self.to_connect.pop()
+                self.start_connection(dns, id)
+            if self.to_connect and sched:
+                if DEBUG:
+                    print >>sys.stderr,"encoder: start_from_queue delay",delay
+                self.raw_server.add_task(self._start_connection_from_queue, delay)
+        except:
+            print_exc()
+            raise
+
+    def start_connection(self, dns, id, coord_con = False, forcenew = False, challenge = None):
+        """ Locally initiated connection """
+        if DEBUG:
+            print >>sys.stderr,"encoder: start_connection:",dns
+            print >>sys.stderr,"encoder: start_connection: qlen",len(self.to_connect),"nconns",len(self.connections),"maxi",self.config['max_initiate'],"maxc",self.config['max_connections']
+        
+        if ( self.paused
+             or len(self.connections) >= self.max_connections
+             or id == self.my_id
+             or self.banned.has_key(dns[0]) ) and not forcenew:
+            if DEBUG:
+                print >>sys.stderr,"encoder: start_connection: we're paused or too busy"
+            return True
+        for v in self.connections.values():    # avoid duplicated connection from a single ip
+            if v is None:
+                continue
+            if id and v.id == id and not forcenew:
+                if DEBUG:
+                    print >>sys.stderr,"encoder: start_connection: already connected to peer",`id`
+                return True
+            ip = v.get_ip(True)
+            port = v.get_port(False)
+            
+            if DEBUG:
+                print >>sys.stderr,"encoder: start_connection: candidate",ip,port,"want",dns[0],dns[1]
+
+            if self.config['security'] and ip != 'unknown' and ip == dns[0] and port == dns[1] and not forcenew:
+                if DEBUG:
+                    print >>sys.stderr,"encoder: start_connection: using existing",ip,"want port",dns[1],"existing port",port,"id",`id`
+                return True
+        try:
+            if DEBUG:
+                print >>sys.stderr,"encoder: start_connection: Setting up new to peer", dns,"id",`id`
+            c = self.raw_server.start_connection(dns)  # non-blocking TCP connect
+            con = Connection(self, c, id, dns = dns, coord_con = coord_con, challenge = challenge)
+            self.connections[c] = con
+            c.set_handler(con)  # route socket events to the new Connection
+        except socketerror:
+            if DEBUG:
+                print >>sys.stderr,"Encoder.connection failed"
+            return False
+        return True
+
+    def _start_connection(self, dns, id):  # defer a start_connection via schedulefunc (0 delay)
+        def foo(self=self, dns=dns, id=id):
+            self.start_connection(dns, id)
+       
+        self.schedulefunc(foo, 0)
+
+    def got_id(self, connection):
+        """ check if the connection can be accepted """
+        
+        if connection.id == self.my_id:  # handshake came back with our own id: loopback or NAT bounce
+            # NETWORK AWARE
+            ret = self.connecter.na_got_loopback(connection)
+            if DEBUG:
+                print >>sys.stderr,"encoder: got_id: connection to myself? keep",ret
+            if ret == False:
+                self.connecter.external_connection_made -= 1
+            return ret
+        
+        ip = connection.get_ip(True)
+        port = connection.get_port(False)
+        
+        # NETWORK AWARE
+        connection.na_set_address_distance()
+        
+        if self.config['security'] and self.banned.has_key(ip):
+            if DEBUG:
+                print >>sys.stderr,"encoder: got_id: security ban on IP"
+            return False
+        for v in self.connections.values():
+            if connection is not v:
+                # NETWORK AWARE
+                if DEBUG:
+                    print >>sys.stderr,"encoder: got_id: new internal conn from peer? ids",connection.id,v.id
+                if connection.id == v.id:  # same peer already connected on another socket
+                    if DEBUG:
+                        print >>sys.stderr,"encoder: got_id: new internal conn from peer? addrs",v.na_want_internal_conn_from,ip
+                    if v.na_want_internal_conn_from == ip:
+                        # We were expecting a connection from this peer that shares
+                        # a NAT with us via the internal network. This is it.
+                        self.connecter.na_got_internal_connection(v,connection)
+                        return True  
+                    elif v.create_time < connection.create_time:
+                        if DEBUG:
+                            print >>sys.stderr,"encoder: got_id: create time bad?!"
+                    return False
+                # don't allow multiple connections from the same ip if security is set.
+                if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True) and port == v.get_port(False):
+                    print >>sys.stderr,"encoder: got_id: closing duplicate connection"
+                    v.close()
+        return True
+
+    def external_connection_made(self, connection):
+        """ Remotely initiated connection """
+        if DEBUG:
+            print >>sys.stderr,"encoder: external_conn_made",connection.get_ip()
+        if self.paused or len(self.connections) >= self.max_connections:
+            print >>sys.stderr,"encoder: external_conn_made: paused or too many"
+            connection.close()
+            return False
+        con = Connection(self, connection, None)  # id None: handshake not read yet
+        self.connections[connection] = con
+        connection.set_handler(con)
+        return True
+
+    def externally_handshaked_connection_made(self, connection, options, msg_remainder):  # caller already consumed the handshake
+        if DEBUG:
+            print >>sys.stderr,"encoder: external_handshaked_conn_made",connection.get_ip()
+        # 2fastbt_
+        if self.paused or len(self.connections) >= self.max_connections:
+            connection.close()
+            return False
+
+        con = Connection(self, connection, None, True)
+        con.set_options(options)
+        # before: connection.handler = Encoder
+        # Don't forget to count the external conns!
+        self.connections[connection] = con
+        connection.set_handler(con)
+        # after: connection.handler = Encrypter.Connecter
+
+        if msg_remainder:  # bytes that arrived after the handshake: feed them through normally
+            con.data_came_in(con, msg_remainder)
+        return True
+
+    def close_all(self):  # tear everything down; closeall=True keeps sever() from refilling from the queue
+        if DEBUG:
+            print >>sys.stderr,"encoder: closing all connections"
+        copy = self.connections.values()[:]  # copy: close() mutates self.connections via admin_close
+        for c in copy:
+            c.close(closeall=True)
+        self.connections = {}
+
+    def ban(self, ip):  # blacklist an IP; enforced in start_connection/got_id when config['security'] is set
+        self.banned[ip] = 1
+
+    def pause(self, flag):  # True suspends keepalives and new connections (checked throughout)
+        self.paused = flag
+
+# 2fastbt_
+    def set_coordinator_ip(self,ip):  # record the ProxyService coordinator for is_coordinator_con()
+        self.coordinator_ip = ip
+# _2fastbt    
+
+    def set_rerequester(self,rerequest):  # late injection of the tracker Rerequester
+        self.rerequest = rerequest
+
+    def admin_close(self,conn):  # drop conn from the table; detect "all tracker peers died immediately"
+        del self.connections[conn]
+        now = int(time())
+        if DEBUG:
+            print >>sys.stderr,"encoder: admin_close: now-tt is",now-self.trackertime
+        if len(self.connections) == 0 and (now-self.trackertime) < 20:  # lost every peer within 20s of the last batch
+            #if DEBUG:
+            #    print >>sys.stderr,"encoder: admin_close: Recontacting tracker, last request got just dead peers: TEMP DISABLED, ARNO WORKING ON IT"
+            ###self.rerequest.encoder_wants_new_peers()
+            pass
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/FileSelector.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/FileSelector.py
new file mode 100644 (file)
index 0000000..e9bbfa0
--- /dev/null
@@ -0,0 +1,243 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from random import shuffle
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+
+class FileSelector:  # maps per-file download priorities onto per-piece priorities for the picker and storage
+    def __init__(self, files, piece_length, bufferdir,
+                 storage, storagewrapper, sched, failfunc):
+        self.files = files
+
+        # JD: Store piece length
+        self.piece_length = piece_length
+
+        self.storage = storage
+        self.storagewrapper = storagewrapper
+        self.sched = sched  # scheduler used to defer set_priorities() work
+        self.failfunc = failfunc  # called with a message on fatal I/O failures
+        self.downloader = None
+        self.picker = None  # piece picker, attached later by tie_in()
+
+        storage.set_bufferdir(bufferdir)
+        
+        self.numfiles = len(files)
+        self.priority = [1] * self.numfiles  # per-file: -1 disabled, 0 highest, 1 normal, 2 lowest
+        self.new_priority = None  # staged priorities, applied by tie_in()/set_priorities_now()
+        self.new_partials = None  # staged partially-downloaded pieces from a resume dict
+        self.filepieces = []  # per-file tuple of piece indices the file overlaps
+        total = 0L
+        for file, length in files:
+            if not length:
+                self.filepieces.append(())  # zero-length file touches no pieces
+            else:
+                pieces = range( int(total/piece_length),
+                                int((total+length-1)/piece_length)+1 )
+                self.filepieces.append(tuple(pieces))
+                total += length
+        self.numpieces = int((total+piece_length-1)/piece_length)
+        self.piece_priority = [1] * self.numpieces
+        
+
+
+    def init_priority(self, new_priority):  # validate + stage priorities from resume data; disable -1 files
+        try:
+            assert len(new_priority) == self.numfiles
+            for v in new_priority:
+                assert type(v) in (type(0), type(0L))
+                assert v >= -1
+                assert v <= 2
+        except:
+#           print_exc()
+            return False
+        try:
+            for f in xrange(self.numfiles):
+                if new_priority[f] < 0:
+                    self.storage.disable_file(f)
+            self.new_priority = new_priority
+        except (IOError, OSError), e:
+            self.failfunc("can't open partial file for "
+                          + self.files[f][0] + ': ' + str(e))
+            return False
+        return True
+
+    '''
+    d['priority'] = [file #1 priority [,file #2 priority...] ]
+                    a list of download priorities for each file.
+                    Priority may be -1, 0, 1, 2.  -1 = download disabled,
+                    0 = highest, 1 = normal, 2 = lowest.
+    Also see Storage.pickle and StorageWrapper.pickle for additional keys.
+    '''
+    def unpickle(self, d):  # restore state written by pickle(); stages partials for tie_in()
+        if d.has_key('priority'):
+            if not self.init_priority(d['priority']):
+                return
+        pieces = self.storage.unpickle(d)
+        if not pieces:  # don't bother, nothing restoreable
+            return
+        new_piece_priority = self._get_piece_priority_list(self.new_priority)
+        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
+        self.new_partials = self.storagewrapper.unpickle(d, pieces)
+
+
+    def tie_in(self, picker, cancelfunc, requestmorefunc):  # attach the piece picker and apply staged state
+        self.picker = picker
+        self.cancelfunc = cancelfunc
+        self.requestmorefunc = requestmorefunc
+
+        if self.new_priority:
+            self.priority = self.new_priority
+            self.new_priority = None
+            self.new_piece_priority = self._set_piece_priority(self.priority)
+
+        if self.new_partials:
+            shuffle(self.new_partials)  # randomize so partial pieces aren't requested in file order
+            for p in self.new_partials:
+                self.picker.requested(p)
+        self.new_partials = None
+        
+
+    def _set_files_disabled(self, old_priority, new_priority):  # toggle storage files, preserving shared piece data
+        old_disabled = [p == -1 for p in old_priority]
+        new_disabled = [p == -1 for p in new_priority]
+        data_to_update = []
+        for f in xrange(self.numfiles):
+            if new_disabled[f] != old_disabled[f]:
+                data_to_update.extend(self.storage.get_piece_update_list(f))
+        buffer = []
+        for piece, start, length in data_to_update:  # snapshot affected piece data before toggling files
+            if self.storagewrapper.has_data(piece):
+                data = self.storagewrapper.read_raw(piece, start, length)
+                if data is None:
+                    return False
+                buffer.append((piece, start, data))
+
+        files_updated = False        
+        try:
+            for f in xrange(self.numfiles):
+                if new_disabled[f] and not old_disabled[f]:
+                    self.storage.disable_file(f)
+                    files_updated = True
+                if old_disabled[f] and not new_disabled[f]:
+                    self.storage.enable_file(f)
+                    files_updated = True
+        except (IOError, OSError), e:
+            if new_disabled[f]:
+                msg = "can't open partial file for "
+            else:
+                msg = 'unable to open '
+            self.failfunc(msg + self.files[f][0] + ': ' + str(e))
+            return False
+        if files_updated:
+            self.storage.reset_file_status()
+
+        changed_pieces = {}
+        for piece, start, data in buffer:  # write the snapshot back, then re-verify the touched pieces
+            if not self.storagewrapper.write_raw(piece, start, data):
+                return False
+            data.release()
+            changed_pieces[piece] = 1
+        if not self.storagewrapper.doublecheck_data(changed_pieces):
+            return False
+
+        return True        
+
+
+    def _get_piece_priority_list(self, file_priority_list):  # piece priority = min over enabled covering files; -1 if none
+        l = [-1] * self.numpieces
+        for f in xrange(self.numfiles):
+            if file_priority_list[f] == -1:
+                continue
+            for i in self.filepieces[f]:
+                if l[i] == -1:
+                    l[i] = file_priority_list[f]
+                    continue
+                l[i] = min(l[i], file_priority_list[f])
+        return l
+        
+
+    def _set_piece_priority(self, new_priority):  # push per-piece priorities into the picker; (un)block pieces
+        new_piece_priority = self._get_piece_priority_list(new_priority)
+        pieces = range(self.numpieces)
+        shuffle(pieces)  # randomize update order
+        new_blocked = []
+        new_unblocked = []
+        for piece in pieces:
+            self.picker.set_priority(piece, new_piece_priority[piece])
+            o = self.piece_priority[piece] == -1
+            n = new_piece_priority[piece] == -1
+            if n and not o:
+                new_blocked.append(piece)
+            if o and not n:
+                new_unblocked.append(piece)
+        if new_blocked:
+            self.cancelfunc(new_blocked)  # cancel outstanding requests for newly disabled pieces
+        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
+        if new_unblocked:
+            self.requestmorefunc(new_unblocked)  # start requesting newly enabled pieces
+
+        return new_piece_priority        
+
+
+    def set_priorities_now(self, new_priority = None):  # apply staged (or explicitly given) priorities immediately
+        if not new_priority:
+            new_priority = self.new_priority
+            self.new_priority = None    # potential race condition
+            if not new_priority:
+                return
+        old_priority = self.priority
+        self.priority = new_priority
+        if not self._set_files_disabled(old_priority, new_priority):
+            return
+        self.piece_priority = self._set_piece_priority(new_priority)
+
+    def set_priorities(self, new_priority):  # stage priorities, apply them later via the scheduler
+        self.new_priority = new_priority
+        def s(self=self):
+            self.set_priorities_now()
+        self.sched(s)
+        
+    def set_priority(self, f, p):  # change a single file's priority
+        new_priority = self.get_priorities()
+        new_priority[f] = p
+        self.set_priorities(new_priority)
+
+    def get_priorities(self):  # copy of the effective per-file priority list (staged values win)
+        priority = self.new_priority
+        if not priority:
+            priority = self.priority    # potential race condition
+        return [i for i in priority]
+
+    def __setitem__(self, index, val):
+        self.set_priority(index, val)
+
+    def __getitem__(self, index):
+        try:
+            return self.new_priority[index]
+        except:
+            return self.priority[index]
+
+
+    def finish(self):  # end-of-download hook; deleting disabled files is intentionally left off
+        pass
+#        for f in xrange(self.numfiles):
+#            if self.priority[f] == -1:
+#                self.storage.delete_file(f)
+
+    def pickle(self):  # snapshot resume state; best-effort (I/O errors yield a partial dict)
+        d = {'priority': self.priority}
+        try:
+            s = self.storage.pickle()
+            sw = self.storagewrapper.pickle()
+            for k in s.keys():
+                d[k] = s[k]
+            for k in sw.keys():
+                d[k] = sw[k]
+        except (IOError, OSError):
+            pass
+        return d
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Filter.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Filter.py
new file mode 100644 (file)
index 0000000..a564efb
--- /dev/null
@@ -0,0 +1,15 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+class Filter:  # tracker request filter stub: check() always returns None (no rejection)
+    def __init__(self, callback):
+        self.callback = callback  # stored for subclasses; unused by this stub
+
+    def check(self, ip, paramslist, headers):
+
+        def params(key, default = None, l = paramslist):  # lookup helper for overriding implementations; unused here
+            if l.has_key(key):
+                return l[key][0]
+            return default
+
+        return None  # None == request allowed; presumably a string would be a rejection reason -- confirm in track.py
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/GetRightHTTPDownloader.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/GetRightHTTPDownloader.py
new file mode 100644 (file)
index 0000000..6d1e4ea
--- /dev/null
@@ -0,0 +1,415 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+# Patched by Diego Andres Rabaioli.
+# This is the HTTPDownloader class that implements the GetRight
+# style WebSeeding technique. Compared to John Hoffman's style it
+# doesn't require any web server support. However, the biggest gap (see
+# http://www.bittorrent.org/beps/bep_0019.html) is not taken into
+# account when requesting pieces.
+
+import sys
+from random import randint
+from urlparse import urlparse
+from httplib import HTTPConnection
+import urllib
+from threading import Thread,currentThread,Lock
+from traceback import print_exc, print_stack 
+
+from BaseLib.Core.BitTornado.__init__ import product_name,version_short
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from BaseLib.Core.Utilities.timeouturlopen import find_proxy
+
+# ProxyService_
+#
+try:
+    from BaseLib.Core.ProxyService.Helper import SingleDownloadHelperInterface
+except ImportError:
+    class SingleDownloadHelperInterface:
+        
+        def __init__(self):
+            pass
+#
+# _ProxyService
+
+DEBUG = False
+
+EXPIRE_TIME = 60 * 60
+
+VERSION = product_name+'/'+version_short
+
+class haveComplete:
+    def complete(self):
+        return True
+    def __getitem__(self, x):
+        return True
+haveall = haveComplete()
+
+class SingleDownload(SingleDownloadHelperInterface):
+
+    def __init__(self, downloader, url):
+        SingleDownloadHelperInterface.__init__(self)
+        self.downloader = downloader
+        self.baseurl = url
+        
+        try:
+            (self.scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
+        except:
+            self.downloader.errorfunc('cannot parse http seed address: '+url)
+            return
+        if self.scheme != 'http':
+            self.downloader.errorfunc('http seed url not http: '+url)
+            return
+
+        # Arno, 2010-03-08: Make proxy aware
+        self.proxyhost = find_proxy(url)
+        try:
+            if self.proxyhost is None:
+                self.connection = HTTPConnection(self.netloc)
+            else:
+                self.connection = HTTPConnection(self.proxyhost)
+        except:
+            self.downloader.errorfunc('cannot connect to http seed: '+url)
+            return
+        
+        self.seedurl = path
+        self.measure = Measure(downloader.max_rate_period)
+        self.index = None
+        self.piece_size = self.downloader.storage._piecelen( 0 )
+        self.total_len = self.downloader.storage.total_length
+        self.url = ''
+        self.requests = []
+        self.request_size = 0
+        self.endflag = False
+        self.error = None
+        self.retry_period = 30
+        self._retry_period = None
+        self.errorcount = 0
+        self.goodseed = False
+        self.active = False
+        self.cancelled = False
+        # HTTP Video Support
+        self.request_lock = Lock()
+        self.video_support_policy     = True  # TODO : get from constructor parameters
+        self.video_support_enabled    = False # Don't start immediately with support
+        self.video_support_speed      = 0.0   # Start with the faster rescheduling speed
+        self.video_support_slow_start = False # If enabled delay the first request (give chance to peers to give bandwidth)
+        # Arno, 2010-04-07: Wait 1 second before using HTTP seed. TODO good policy
+        # If the video support policy is not enabled then use the HTTP seed normally
+        if not self.video_support_policy:
+            self.resched(1)
+
+
+    def resched(self, len = None):
+        if self.video_support_policy:
+            if ( not self.video_support_enabled ) or self.video_support_slow_start:
+                return
+        if len is None:
+            len = self.retry_period
+        if self.errorcount > 3:
+            len = min(1.0,len) * (self.errorcount - 2)
+
+        # Arno, 2010-04-07: If immediately, don't go via queue. Actual work is
+        # done by other thread, so no worries of hogging NetworkThread. 
+        if len > 0: 
+            self.downloader.rawserver.add_task(self.download, len)
+        else:
+            self.download() 
+
+    def _want(self, index):
+        if self.endflag:
+            return self.downloader.storage.do_I_have_requests(index)
+        else:
+            return self.downloader.storage.is_unstarted(index)
+
+    def download(self):
+        from BaseLib.Core.Session import Session
+        session = Session.get_instance()
+        session.uch.perform_usercallback(self._download)
+
+    def _download(self):
+# 2fastbt_
+        self.request_lock.acquire()
+        if DEBUG:
+            print "http-sdownload: download()"
+        if self.is_frozen_by_helper():
+            if DEBUG:
+                print "http-sdownload: blocked, rescheduling"
+            self.resched(1)
+            return
+# _2fastbt    
+        self.cancelled = False
+        if self.downloader.picker.am_I_complete():
+            self.downloader.downloads.remove(self)
+            return
+        self.index = self.downloader.picker.next(haveall, self._want, self)
+# 2fastbt_
+        if self.index is None and self.frozen_by_helper:
+            self.resched(0.01)
+            return
+# _2fastbt
+        if ( self.index is None and not self.endflag
+                     and not self.downloader.peerdownloader.has_downloaders() ):
+            self.endflag = True
+            self.index = self.downloader.picker.next(haveall, self._want, self)
+        if self.index is None:
+            self.endflag = True
+            self.resched()
+        else:
+            self.url = self.seedurl
+            start = self.piece_size * self.index
+            end   = start + self.downloader.storage._piecelen( self.index ) - 1
+            self.request_range = '%d-%d' % ( start, end )
+            self._get_requests()
+            # Just overwrite other blocks and don't ask for ranges.
+            self._request()
+            # Diego : 2010-05-19 : Moving thread creation on _download and not on
+            # _request anymore. One Lock handles sync problems between threads performing
+            # new requests before the previous response is read.
+            """
+            # Arno, 2010-04-07: Use threads from pool to Download, more efficient
+            # than creating a new one for every piece.
+            from BaseLib.Core.Session import Session
+            session = Session.get_instance()
+            session.uch.perform_usercallback(self._request)
+            # Diego
+            rq = Thread(target = self._request)
+            rq.setName( "GetRightHTTPDownloader"+rq.getName() )
+            rq.setDaemon(True)
+            rq.start()
+            """
+            self.active = True
+
+    def _request(self):
+        import encodings.ascii
+        import encodings.punycode
+        import encodings.idna
+        
+        self.error = None
+        self.received_data = None
+        try:
+            #print >>sys.stderr, 'HTTP piece ', self.index
+            if self.proxyhost is None:
+                realurl = self.url
+            else: 
+                realurl = self.scheme+'://'+self.netloc+self.url
+
+            self.connection.request( 'GET', realurl, None,
+                                {'Host': self.netloc, 'User-Agent': VERSION, 'Range' : 'bytes=%s' % self.request_range } )
+
+            r = self.connection.getresponse()
+            self.connection_status = r.status
+            self.received_data = r.read()
+            
+        except Exception, e:
+            print_exc()
+            
+            self.error = 'error accessing http seed: '+str(e)
+            try:
+                self.connection.close()
+            except:
+                pass
+            try:
+                self.connection = HTTPConnection(self.netloc)
+            except:
+                self.connection = None  # will cause an exception and retry next cycle
+        self.downloader.rawserver.add_task(self.request_finished)
+
+    def request_finished(self):
+        self.active = False
+        if self.error is not None:
+            if self.goodseed:
+                self.downloader.errorfunc(self.error)
+            self.errorcount += 1
+        if self.received_data:
+            self.errorcount = 0
+            if not self._got_data():
+                self.received_data = None
+        if not self.received_data:
+            self._release_requests()
+            self.downloader.peerdownloader.piece_flunked(self.index)
+        self.request_lock.release()
+        if self._retry_period is not None:
+            self.resched(self._retry_period)
+            self._retry_period = None
+            return
+        self.resched()
+
+    def _got_data(self):
+        if self.connection_status == 503:   # seed is busy
+            try:
+                self.retry_period = max(int(self.received_data), 5)
+            except:
+                pass
+            return False
+        
+        if self.connection_status != 200 and self.connection_status != 206: # 206 = partial download OK
+            self.errorcount += 1
+            return False
+        # Arno,  2010-04-07: retry_period set to 0 for faster DL speeds
+        # Diego, 2010-04-16: retry_period set depending on the level of support asked by the MovieOnDemandTransporter
+        self._retry_period = self.video_support_speed
+
+        if len(self.received_data) != self.request_size:
+            if self.goodseed:
+                self.downloader.errorfunc('corrupt data from http seed - redownloading')
+            return False
+        self.measure.update_rate(len(self.received_data))
+        self.downloader.measurefunc(len(self.received_data))
+        if self.cancelled:
+            return False
+        if not self._fulfill_requests():
+            return False
+        if not self.goodseed:
+            self.goodseed = True
+            self.downloader.seedsfound += 1
+        if self.downloader.storage.do_I_have(self.index):
+            self.downloader.picker.complete(self.index)
+            self.downloader.peerdownloader.check_complete(self.index)
+            self.downloader.gotpiecefunc(self.index)
+        return True
+    
+    def _get_requests(self):
+        self.requests = []
+        self.request_size = 0L
+        while self.downloader.storage.do_I_have_requests(self.index):
+            r = self.downloader.storage.new_request(self.index)
+            self.requests.append(r)
+            self.request_size += r[1]
+        self.requests.sort()
+
+    def _fulfill_requests(self):
+        start = 0L
+        success = True
+        while self.requests:
+            begin, length = self.requests.pop(0)
+# 2fastbt_
+            if not self.downloader.storage.piece_came_in(self.index, begin, [],
+                            self.received_data[start:start+length], length):
+# _2fastbt
+                success = False
+                break
+            start += length
+        return success
+
+    def _release_requests(self):
+        for begin, length in self.requests:
+            self.downloader.storage.request_lost(self.index, begin, length)
+        self.requests = []
+
+    def _request_ranges(self):
+        s = ''
+        begin, length = self.requests[0]
+        for begin1, length1 in self.requests[1:]:
+            if begin + length == begin1:
+                length += length1
+                continue
+            else:
+                if s:
+                    s += ','
+                s += str(begin)+'-'+str(begin+length-1)
+                begin, length = begin1, length1
+        if s:
+            s += ','
+        s += str(begin)+'-'+str(begin+length-1)
+        return s
+
+# 2fastbt_
+    def helper_forces_unchoke(self):
+        pass
+
+    def helper_set_freezing(self,val):
+        self.frozen_by_helper = val
+# _2fastbt
+
+    def slow_start_wake_up( self ):
+        self.video_support_slow_start = False
+        self.resched(0)
+
+    def is_slow_start( self ):
+        return self.video_support_slow_start
+
+    def start_video_support( self, level = 0.0, sleep_time = None ):
+        '''
+        Level indicates how fast a new request is scheduled and therefore the level of support required.
+        0 = maximum support. (immediate rescheduling)
+        1 ~= 0.01 seconds between each request
+        2 ~= 0.1 seconds between each request
+        and so on... at the moment just level 0 is asked. To be noted that level is a float!
+        '''
+        
+        if DEBUG:
+            print >>sys.stderr,"GetRightHTTPDownloader: START"
+        self.video_support_speed = 0.001 * ( ( 10 ** level ) - 1 )
+        if not self.video_support_enabled:
+            self.video_support_enabled = True
+            if sleep_time:
+                if not self.video_support_slow_start:
+                    self.video_support_slow_start = True
+                    self.downloader.rawserver.add_task( self.slow_start_wake_up, sleep_time )
+            else:
+                self.resched( self.video_support_speed )
+
+    def stop_video_support( self ):
+        if DEBUG:
+            print >>sys.stderr,"GetRightHTTPDownloader: STOP"
+        if not self.video_support_enabled:
+            return
+        self.video_support_enabled = False
+
+    def is_video_support_enabled( self ):
+        return self.video_support_enabled
+
+    
+class GetRightHTTPDownloader:
+    def __init__(self, storage, picker, rawserver,
+                 finflag, errorfunc, peerdownloader,
+                 max_rate_period, infohash, measurefunc, gotpiecefunc):
+        self.storage = storage
+        self.picker = picker
+        self.rawserver = rawserver
+        self.finflag = finflag
+        self.errorfunc = errorfunc
+        self.peerdownloader = peerdownloader
+        self.infohash = infohash
+        self.max_rate_period = max_rate_period
+        self.gotpiecefunc = gotpiecefunc
+        self.measurefunc = measurefunc
+        self.downloads = []
+        self.seedsfound = 0
+        self.video_support_enabled = False
+
+    def make_download(self, url):
+        self.downloads.append(SingleDownload(self, url))
+        return self.downloads[-1]
+
+    def get_downloads(self):
+        if self.finflag.isSet():
+            return []
+        return self.downloads
+
+    def cancel_piece_download(self, pieces):
+        for d in self.downloads:
+            if d.active and d.index in pieces:
+                d.cancelled = True
+
+    # Diego : wrap each single http download
+    def start_video_support( self, level = 0.0, sleep_time = None ):
+        for d in self.downloads:
+            d.start_video_support( level, sleep_time )
+        self.video_support_enabled = True
+
+    def stop_video_support( self ):
+        for d in self.downloads:
+            d.stop_video_support()
+        self.video_support_enabled = False
+
+    def is_video_support_enabled( self ):
+        return self.video_support_enabled
+
+    def is_slow_start( self ):
+        for d in self.downloads:
+            if d.is_slow_start():
+                return True
+        return False
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/HTTPDownloader.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/HTTPDownloader.py
new file mode 100644 (file)
index 0000000..edcc917
--- /dev/null
@@ -0,0 +1,299 @@
+# Written by John Hoffman, George Milescu
+# see LICENSE.txt for license information
+
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from random import randint
+from urlparse import urlparse
+from httplib import HTTPConnection
+from urllib import quote
+from threading import Thread
+from BaseLib.Core.BitTornado.__init__ import product_name,version_short
+
+# ProxyService_
+#
+try:
+    from BaseLib.Core.ProxyService.Helper import SingleDownloadHelperInterface
+except ImportError:
+    class SingleDownloadHelperInterface:
+        
+        def __init__(self):
+            pass
+#
+# _ProxyService
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+# 2fastbt_
+DEBUG = False
+# _2fastbt
+
+EXPIRE_TIME = 60 * 60
+
+VERSION = product_name+'/'+version_short
+
+class haveComplete:
+    def complete(self):
+        return True
+    def __getitem__(self, x):
+        return True
+haveall = haveComplete()
+
+# 2fastbt_
+class SingleDownload(SingleDownloadHelperInterface):
+# _2fastbt
+    def __init__(self, downloader, url):
+# 2fastbt_
+        SingleDownloadHelperInterface.__init__(self)
+# _2fastbt
+        self.downloader = downloader
+        self.baseurl = url
+        try:
+            (scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
+        except:
+            self.downloader.errorfunc('cannot parse http seed address: '+url)
+            return
+        if scheme != 'http':
+            self.downloader.errorfunc('http seed url not http: '+url)
+            return
+        try:
+            self.connection = HTTPConnection(self.netloc)
+        except:
+            self.downloader.errorfunc('cannot connect to http seed: '+url)
+            return
+        self.seedurl = path
+        if pars:
+            self.seedurl += ';'+pars
+        self.seedurl += '?'
+        if query:
+            self.seedurl += query+'&'
+        self.seedurl += 'info_hash='+quote(self.downloader.infohash)
+
+        self.measure = Measure(downloader.max_rate_period)
+        self.index = None
+        self.url = ''
+        self.requests = []
+        self.request_size = 0
+        self.endflag = False
+        self.error = None
+        self.retry_period = 30
+        self._retry_period = None
+        self.errorcount = 0
+        self.goodseed = False
+        self.active = False
+        self.cancelled = False
+        self.resched(randint(2, 10))
+
+    def resched(self, len = None):
+        if len is None:
+            len = self.retry_period
+        if self.errorcount > 3:
+            len = len * (self.errorcount - 2)
+        self.downloader.rawserver.add_task(self.download, len)
+
+    def _want(self, index):
+        if self.endflag:
+            return self.downloader.storage.do_I_have_requests(index)
+        else:
+            return self.downloader.storage.is_unstarted(index)
+
+    def download(self):
+# 2fastbt_
+        if DEBUG:
+            print "http-sdownload: download()"
+        if self.is_frozen_by_helper():
+            if DEBUG:
+                print "http-sdownload: blocked, rescheduling"
+            self.resched(1)
+            return
+# _2fastbt    
+        self.cancelled = False
+        if self.downloader.picker.am_I_complete():
+            self.downloader.downloads.remove(self)
+            return
+        self.index = self.downloader.picker.next(haveall, self._want, self)
+# 2fastbt_
+        if self.index is None and self.frozen_by_helper:
+            self.resched(0.01)
+            return
+# _2fastbt
+        if ( self.index is None and not self.endflag
+                     and not self.downloader.peerdownloader.has_downloaders() ):
+            self.endflag = True
+            self.index = self.downloader.picker.next(haveall, self._want, self)
+        if self.index is None:
+            self.endflag = True
+            self.resched()
+        else:
+            self.url = ( self.seedurl+'&piece='+str(self.index) )
+            self._get_requests()
+            if self.request_size < self.downloader.storage._piecelen(self.index):
+                self.url += '&ranges='+self._request_ranges()
+            rq = Thread(target = self._request)
+            rq.setName( "HTTPDownloader"+rq.getName() )
+            rq.setDaemon(True)
+            rq.start()
+            self.active = True
+
+    def _request(self):
+        import encodings.ascii
+        import encodings.punycode
+        import encodings.idna
+        
+        self.error = None
+        self.received_data = None
+        try:
+            self.connection.request('GET', self.url, None, 
+                                {'User-Agent': VERSION})
+            r = self.connection.getresponse()
+            self.connection_status = r.status
+            self.received_data = r.read()
+        except Exception, e:
+            self.error = 'error accessing http seed: '+str(e)
+            try:
+                self.connection.close()
+            except:
+                pass
+            try:
+                self.connection = HTTPConnection(self.netloc)
+            except:
+                self.connection = None  # will cause an exception and retry next cycle
+        self.downloader.rawserver.add_task(self.request_finished)
+
+    def request_finished(self):
+        self.active = False
+        if self.error is not None:
+            if self.goodseed:
+                self.downloader.errorfunc(self.error)
+            self.errorcount += 1
+        if self.received_data:
+            self.errorcount = 0
+            if not self._got_data():
+                self.received_data = None
+        if not self.received_data:
+            self._release_requests()
+            self.downloader.peerdownloader.piece_flunked(self.index)
+        if self._retry_period:
+            self.resched(self._retry_period)
+            self._retry_period = None
+            return
+        self.resched()
+
+    def _got_data(self):
+        if self.connection_status == 503:   # seed is busy
+            try:
+                self.retry_period = max(int(self.received_data), 5)
+            except:
+                pass
+            return False
+        if self.connection_status != 200:
+            self.errorcount += 1
+            return False
+        self._retry_period = 1
+        if len(self.received_data) != self.request_size:
+            if self.goodseed:
+                self.downloader.errorfunc('corrupt data from http seed - redownloading')
+            return False
+        self.measure.update_rate(len(self.received_data))
+        self.downloader.measurefunc(len(self.received_data))
+        if self.cancelled:
+            return False
+        if not self._fulfill_requests():
+            return False
+        if not self.goodseed:
+            self.goodseed = True
+            self.downloader.seedsfound += 1
+        if self.downloader.storage.do_I_have(self.index):
+            self.downloader.picker.complete(self.index)
+            self.downloader.peerdownloader.check_complete(self.index)
+            self.downloader.gotpiecefunc(self.index)
+        return True
+    
+    def _get_requests(self):
+        self.requests = []
+        self.request_size = 0L
+        while self.downloader.storage.do_I_have_requests(self.index):
+            r = self.downloader.storage.new_request(self.index)
+            self.requests.append(r)
+            self.request_size += r[1]
+        self.requests.sort()
+
+    def _fulfill_requests(self):
+        start = 0L
+        success = True
+        while self.requests:
+            begin, length = self.requests.pop(0)
+# 2fastbt_
+            if not self.downloader.storage.piece_came_in(self.index, begin, [],
+                            self.received_data[start:start+length], length):
+# _2fastbt
+                success = False
+                break
+            start += length
+        return success
+
+    def _release_requests(self):
+        for begin, length in self.requests:
+            self.downloader.storage.request_lost(self.index, begin, length)
+        self.requests = []
+
+    def _request_ranges(self):
+        s = ''
+        begin, length = self.requests[0]
+        for begin1, length1 in self.requests[1:]:
+            if begin + length == begin1:
+                length += length1
+                continue
+            else:
+                if s:
+                    s += ','
+                s += str(begin)+'-'+str(begin+length-1)
+                begin, length = begin1, length1
+        if s:
+            s += ','
+        s += str(begin)+'-'+str(begin+length-1)
+        return s
+
+# 2fastbt_
+    def helper_forces_unchoke(self):
+        pass
+
+    def helper_set_freezing(self,val):
+        self.frozen_by_helper = val
+# _2fastbt
+
+
+    
+class HTTPDownloader:
+    def __init__(self, storage, picker, rawserver,
+                 finflag, errorfunc, peerdownloader,
+                 max_rate_period, infohash, measurefunc, gotpiecefunc):
+        self.storage = storage
+        self.picker = picker
+        self.rawserver = rawserver
+        self.finflag = finflag
+        self.errorfunc = errorfunc
+        self.peerdownloader = peerdownloader
+        self.infohash = infohash
+        self.max_rate_period = max_rate_period
+        self.gotpiecefunc = gotpiecefunc
+        self.measurefunc = measurefunc
+        self.downloads = []
+        self.seedsfound = 0
+
+    def make_download(self, url):
+        self.downloads.append(SingleDownload(self, url))
+        return self.downloads[-1]
+
+    def get_downloads(self):
+        if self.finflag.isSet():
+            return []
+        return self.downloads
+
+    def cancel_piece_download(self, pieces):
+        for d in self.downloads:
+            if d.active and d.index in pieces:
+                d.cancelled = True
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/HoffmanHTTPDownloader.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/HoffmanHTTPDownloader.py
new file mode 100644 (file)
index 0000000..0ecce37
--- /dev/null
@@ -0,0 +1,311 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+import sys
+from random import randint
+from urlparse import urlparse
+from httplib import HTTPConnection
+import urllib
+from threading import Thread
+from traceback import print_exc 
+
+from BaseLib.Core.BitTornado.__init__ import product_name,version_short
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from BaseLib.Core.Utilities.timeouturlopen import find_proxy
+
+# ProxyService_
+#
+try:
+    from BaseLib.Core.ProxyService.Helper import SingleDownloadHelperInterface
+except ImportError:
+    class SingleDownloadHelperInterface:
+        
+        def __init__(self):
+            pass
+#
+# _ProxyService
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+# 2fastbt_
+DEBUG = False
+# _2fastbt
+
+EXPIRE_TIME = 60 * 60
+
+VERSION = product_name+'/'+version_short
+
+class haveComplete:
+    def complete(self):
+        return True
+    def __getitem__(self, x):
+        return True
+haveall = haveComplete()
+
+# 2fastbt_
+class SingleDownload(SingleDownloadHelperInterface):
+# _2fastbt
+    def __init__(self, downloader, url):
+# 2fastbt_
+        SingleDownloadHelperInterface.__init__(self)
+# _2fastbt
+        self.downloader = downloader
+        self.baseurl = url
+        try:
+            (self.scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
+        except:
+            self.downloader.errorfunc('cannot parse http seed address: '+url)
+            return
+        if self.scheme != 'http':
+            self.downloader.errorfunc('http seed url not http: '+url)
+            return
+
+        # Arno, 2010-03-08: Make proxy aware
+        self.proxyhost = find_proxy(url)
+        try:
+            if self.proxyhost is None:
+                self.connection = HTTPConnection(self.netloc)
+            else:
+                self.connection = HTTPConnection(self.proxyhost)
+        except:
+            self.downloader.errorfunc('cannot connect to http seed: '+url)
+            return
+        self.seedurl = path
+        if pars:
+            self.seedurl += ';'+pars
+        self.seedurl += '?'
+        if query:
+            self.seedurl += query+'&'
+        self.seedurl += 'info_hash='+urllib.quote(self.downloader.infohash)
+
+        self.measure = Measure(downloader.max_rate_period)
+        self.index = None
+        self.url = ''
+        self.requests = []
+        self.request_size = 0
+        self.endflag = False
+        self.error = None
+        self.retry_period = 30
+        self._retry_period = None
+        self.errorcount = 0
+        self.goodseed = False
+        self.active = False
+        self.cancelled = False
+        self.resched(randint(2, 10))
+
+    def resched(self, len = None):
+        if len is None:
+            len = self.retry_period
+        if self.errorcount > 3:
+            len = len * (self.errorcount - 2)
+        self.downloader.rawserver.add_task(self.download, len)
+
+    def _want(self, index):
+        if self.endflag:
+            return self.downloader.storage.do_I_have_requests(index)
+        else:
+            return self.downloader.storage.is_unstarted(index)
+
+    def download(self):
+# 2fastbt_
+        if DEBUG:
+            print "http-sdownload: download()"
+        if self.is_frozen_by_helper():
+            if DEBUG:
+                print "http-sdownload: blocked, rescheduling"
+            self.resched(1)
+            return
+# _2fastbt    
+        self.cancelled = False
+        if self.downloader.picker.am_I_complete():
+            self.downloader.downloads.remove(self)
+            return
+        self.index = self.downloader.picker.next(haveall, self._want, self)
+# 2fastbt_
+        if self.index is None and self.frozen_by_helper:
+            self.resched(0.01)
+            return
+# _2fastbt
+        if ( self.index is None and not self.endflag
+                     and not self.downloader.peerdownloader.has_downloaders() ):
+            self.endflag = True
+            self.index = self.downloader.picker.next(haveall, self._want, self)
+        if self.index is None:
+            self.endflag = True
+            self.resched()
+        else:
+            self.url = ( self.seedurl+'&piece='+str(self.index) )
+            self._get_requests()
+            if self.request_size < self.downloader.storage._piecelen(self.index):
+                self.url += '&ranges='+self._request_ranges()
+            rq = Thread(target = self._request)
+            rq.setName( "HoffmanHTTPDownloader"+rq.getName() )
+            rq.setDaemon(True)
+            rq.start()
+            self.active = True
+
+    def _request(self):
+        import encodings.ascii
+        import encodings.punycode
+        import encodings.idna
+        
+        self.error = None
+        self.received_data = None
+        try:
+            self.connection.request('GET', self.url, None, 
+                                {'User-Agent': VERSION})
+            r = self.connection.getresponse()
+            self.connection_status = r.status
+            self.received_data = r.read()
+        except Exception, e:
+            print_exc()
+            
+            self.error = 'error accessing http seed: '+str(e)
+            try:
+                self.connection.close()
+            except:
+                pass
+            try:
+                self.connection = HTTPConnection(self.netloc)
+            except:
+                self.connection = None  # will cause an exception and retry next cycle
+        self.downloader.rawserver.add_task(self.request_finished)
+
+    def request_finished(self):
+        self.active = False
+        if self.error is not None:
+            if self.goodseed:
+                self.downloader.errorfunc(self.error)
+            self.errorcount += 1
+        if self.received_data:
+            self.errorcount = 0
+            if not self._got_data():
+                self.received_data = None
+        if not self.received_data:
+            self._release_requests()
+            self.downloader.peerdownloader.piece_flunked(self.index)
+        if self._retry_period:
+            self.resched(self._retry_period)
+            self._retry_period = None
+            return
+        self.resched()
+
+    def _got_data(self):
+        if self.connection_status == 503:   # seed is busy
+            try:
+                self.retry_period = max(int(self.received_data), 5)
+            except:
+                pass
+            return False
+        if self.connection_status != 200:
+            self.errorcount += 1
+            return False
+        self._retry_period = 1
+        if len(self.received_data) != self.request_size:
+            if self.goodseed:
+                self.downloader.errorfunc('corrupt data from http seed - redownloading')
+            return False
+        self.measure.update_rate(len(self.received_data))
+        self.downloader.measurefunc(len(self.received_data))
+        if self.cancelled:
+            return False
+        if not self._fulfill_requests():
+            return False
+        if not self.goodseed:
+            self.goodseed = True
+            self.downloader.seedsfound += 1
+        if self.downloader.storage.do_I_have(self.index):
+            self.downloader.picker.complete(self.index)
+            self.downloader.peerdownloader.check_complete(self.index)
+            self.downloader.gotpiecefunc(self.index)
+        return True
+    
+    def _get_requests(self):
+        self.requests = []
+        self.request_size = 0L
+        while self.downloader.storage.do_I_have_requests(self.index):
+            r = self.downloader.storage.new_request(self.index)
+            self.requests.append(r)
+            self.request_size += r[1]
+        self.requests.sort()
+
+    def _fulfill_requests(self):
+        start = 0L
+        success = True
+        while self.requests:
+            begin, length = self.requests.pop(0)
+# 2fastbt_
+            if not self.downloader.storage.piece_came_in(self.index, begin, [],
+                            self.received_data[start:start+length], length):
+# _2fastbt
+                success = False
+                break
+            start += length
+        return success
+
+    def _release_requests(self):
+        for begin, length in self.requests:
+            self.downloader.storage.request_lost(self.index, begin, length)
+        self.requests = []
+
+    def _request_ranges(self):
+        s = ''
+        begin, length = self.requests[0]
+        for begin1, length1 in self.requests[1:]:
+            if begin + length == begin1:
+                length += length1
+                continue
+            else:
+                if s:
+                    s += ','
+                s += str(begin)+'-'+str(begin+length-1)
+                begin, length = begin1, length1
+        if s:
+            s += ','
+        s += str(begin)+'-'+str(begin+length-1)
+        return s
+
+# 2fastbt_
+    def helper_forces_unchoke(self):
+        pass
+
+    def helper_set_freezing(self,val):
+        self.frozen_by_helper = val
+# _2fastbt
+
+
+    
+class HoffmanHTTPDownloader:
+    def __init__(self, storage, picker, rawserver,
+                 finflag, errorfunc, peerdownloader,
+                 max_rate_period, infohash, measurefunc, gotpiecefunc):
+        self.storage = storage
+        self.picker = picker
+        self.rawserver = rawserver
+        self.finflag = finflag
+        self.errorfunc = errorfunc
+        self.peerdownloader = peerdownloader
+        self.infohash = infohash
+        self.max_rate_period = max_rate_period
+        self.gotpiecefunc = gotpiecefunc
+        self.measurefunc = measurefunc
+        self.downloads = []
+        self.seedsfound = 0
+
+    def make_download(self, url):
+        self.downloads.append(SingleDownload(self, url))
+        return self.downloads[-1]
+
+    def get_downloads(self):
+        if self.finflag.isSet():
+            return []
+        return self.downloads
+
+    def cancel_piece_download(self, pieces):
+        for d in self.downloads:
+            if d.active and d.index in pieces:
+                d.cancelled = True
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/MessageID.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/MessageID.py
new file mode 100644 (file)
index 0000000..8d37030
--- /dev/null
@@ -0,0 +1,249 @@
+# Written by Jie Yang, Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# All message IDs in BitTorrent Protocol and our extensions 
+#  
+#    Arno: please don't define stuff until the spec is ready
+#
+
+protocol_name = 'BitTorrent protocol'
+# Enable Tribler extensions:
+# Left-most bit = Azureus Enhanced Messaging Protocol (AEMP)
+# Left+42 bit = Tribler Simple Merkle Hashes extension v0. Outdated, but still sent for compatibility.
+# Left+43 bit = Tribler Overlay swarm extension
+#               AND uTorrent extended protocol, conflicting. See EXTEND message
+# Right-most bit = BitTorrent DHT extension
+#option_pattern = chr(0)*8
+# The 0x30 in byte 5 sets bits 42 and 43 counted from the left
+# (Merkle hashes + overlay swarm / EXTEND), matching the comments above.
+option_pattern = '\x00\x00\x00\x00\x00\x30\x00\x00'
+
+
+# Core BitTorrent wire-protocol message IDs (BEP 3):
+CHOKE = chr(0)
+UNCHOKE = chr(1)
+INTERESTED = chr(2)
+NOT_INTERESTED = chr(3)
+
+# index
+HAVE = chr(4)
+# index, bitfield
+BITFIELD = chr(5)
+# index, begin, length
+REQUEST = chr(6)
+# index, begin, piece
+PIECE = chr(7)
+# index, begin, length  (CANCEL carries the same triple as REQUEST per BEP 3;
+# the previous "index, begin, piece" comment was inaccurate)
+CANCEL = chr(8)
+# 2-byte port
+PORT = chr(9)
+
+# uTorrent and Bram's BitTorrent now support an extended protocol
+EXTEND = chr(20)
+
+
+#
+# Tribler specific message IDs
+#
+
+# IDs 255 and 254 are reserved. Tribler extensions number downwards
+
+## PermID /Overlay Swarm Extension
+# ctxt
+CHALLENGE = chr(253)
+# rdata1
+RESPONSE1 = chr(252)
+# rdata2
+RESPONSE2 = chr(251)
+
+## Merkle Hash Extension
+# Merkle: PIECE message with hashes
+HASHPIECE = chr(250)
+
+## Buddycast Extension
+# payload is beencoded dict
+BUDDYCAST = chr(249)
+
+# bencoded torrent_hash (Arno,2007-08-14: shouldn't be bencoded, but is)
+GET_METADATA = chr(248)
+# {'torrent_hash', 'metadata', ... }
+METADATA = chr(247)
+
+## ProxyService extension, reused from Cooperative Download (2fast)
+# For connectability test
+DIALBACK_REQUEST = chr(244)
+DIALBACK_REPLY = chr(243)
+# torrent_hash
+ASK_FOR_HELP = chr(246)
+# torrent_hash
+STOP_HELPING = chr(245)
+# torrent_hash + bencode([piece num,...])
+REQUEST_PIECES = chr(242)
+# torrent_hash + bencode([piece num,...])
+CANCEL_PIECE = chr(241)
+# torrent_hash
+JOIN_HELPERS = chr(224)
+# torrent_hash
+RESIGN_AS_HELPER = chr(223)
+# torrent_hash + bencode([piece num,...])
+DROPPED_PIECE = chr(222)
+# piece-availability updates sent by a proxy/helper
+PROXY_HAVE = chr(221)
+PROXY_UNHAVE = chr(220)
+
+
+# SecureOverlay empty payload
+KEEP_ALIVE = chr(240)
+
+## Social-Network feature 
+SOCIAL_OVERLAP = chr(239)
+
+# Remote query extension
+QUERY = chr(238)
+QUERY_REPLY = chr(237)
+
+# Bartercast, payload is bencoded dict
+BARTERCAST = chr(236)
+
+# g2g info (uplink statistics, etc)
+G2G_PIECE_XFER = chr(235)
+
+# Friendship messages
+FRIENDSHIP = chr(234)
+
+# Generic Crawler messages
+CRAWLER_REQUEST = chr(232)
+CRAWLER_REPLY = chr(231)
+
+VOTECAST = chr(226)
+CHANNELCAST = chr(225)
+
+GET_SUBS = chr(230)
+SUBS = chr(229)
+
+####### FREE ID = 227/228 + < 220
+# NOTE(review): 227/228 are listed as free here but are assigned to
+# CS_CHALLENGE_A/B below; presumably fine because those are EXTEND
+# sub-message IDs in a separate namespace — confirm before reusing.
+
+
+#
+# EXTEND_MSG_CS sub-messages
+#
+# Closed swarms
+# CS  : removed, unused. Using CS_CHALLENGE_A message ID in extend handshake
+# NOTE(review): CS_POA_EXCHANGE_A/B reuse 229/230, which equal SUBS and
+# GET_SUBS above; assumed harmless since these travel inside EXTEND
+# messages rather than as overlay message IDs — TODO confirm.
+CS_CHALLENGE_A = chr(227)
+CS_CHALLENGE_B = chr(228)
+CS_POA_EXCHANGE_A = chr(229)
+CS_POA_EXCHANGE_B = chr(230)
+
+#
+# Crawler sub-messages
+#
+# Sent as the second byte of a two-byte CRAWLER_REQUEST/CRAWLER_REPLY id
+# (see the two-byte keys in message_map below).
+CRAWLER_DATABASE_QUERY = chr(1)
+CRAWLER_SEEDINGSTATS_QUERY = chr(2)
+CRAWLER_NATCHECK = chr(3)
+CRAWLER_FRIENDSHIP_STATS = chr(4)
+CRAWLER_NATTRAVERSAL = chr(5)
+CRAWLER_VIDEOPLAYBACK_INFO_QUERY = chr(6)
+CRAWLER_VIDEOPLAYBACK_EVENT_QUERY = chr(7)
+CRAWLER_REPEX_QUERY = chr(8) # RePEX: query a peer's SwarmCache history
+CRAWLER_PUNCTURE_QUERY = chr(9)
+CRAWLER_CHANNEL_QUERY = chr(10)
+
+#
+# Summaries
+#
+
+# Groupings of message IDs by subsystem.
+PermIDMessages = [CHALLENGE, RESPONSE1, RESPONSE2]
+BuddyCastMessages = [CHANNELCAST, VOTECAST, BARTERCAST, BUDDYCAST, KEEP_ALIVE]
+MetadataMessages = [GET_METADATA, METADATA]
+DialbackMessages = [DIALBACK_REQUEST,DIALBACK_REPLY]
+HelpCoordinatorMessages = [ASK_FOR_HELP,STOP_HELPING,REQUEST_PIECES,CANCEL_PIECE]
+HelpHelperMessages = [JOIN_HELPERS,RESIGN_AS_HELPER,DROPPED_PIECE,PROXY_HAVE,PROXY_UNHAVE]
+SocialNetworkMessages = [SOCIAL_OVERLAP]
+RemoteQueryMessages = [QUERY,QUERY_REPLY]
+VoDMessages = [G2G_PIECE_XFER]
+FriendshipMessages = [FRIENDSHIP]
+CrawlerMessages = [CRAWLER_REQUEST, CRAWLER_REPLY]
+SubtitleMessages = [GET_SUBS, SUBS]
+
+# All overlay-swarm messages
+# NOTE(review): Dialback, VoD, Friendship and Subtitle groups are NOT
+# included here — presumably intentional; verify against the overlay
+# dispatch code before relying on this list being exhaustive.
+OverlaySwarmMessages = PermIDMessages + BuddyCastMessages + MetadataMessages + HelpCoordinatorMessages + HelpHelperMessages + SocialNetworkMessages + RemoteQueryMessages + CrawlerMessages
+
+
+#
+# Printing
+#
+
+message_map = {
+    CHOKE:"CHOKE",
+    UNCHOKE:"UNCHOKE",
+    INTERESTED:"INTEREST",
+    NOT_INTERESTED:"NOT_INTEREST",
+    HAVE:"HAVE",
+    BITFIELD:"BITFIELD",
+    REQUEST:"REQUEST",
+    CANCEL:"CANCEL",
+    PIECE:"PIECE",
+    PORT:"PORT",
+    EXTEND:"EXTEND",
+    
+    CHALLENGE:"CHALLENGE",
+    RESPONSE1:"RESPONSE1",
+    RESPONSE2:"RESPONSE2",
+    HASHPIECE:"HASHPIECE",
+    BUDDYCAST:"BUDDYCAST",
+    GET_METADATA:"GET_METADATA",
+    METADATA:"METADATA",
+    ASK_FOR_HELP:"ASK_FOR_HELP",
+    STOP_HELPING:"STOP_HELPING",
+    REQUEST_PIECES:"REQUEST_PIECES",
+    CANCEL_PIECE:"CANCEL_PIECE",
+    JOIN_HELPERS:"JOIN_HELPERS",
+    RESIGN_AS_HELPER:"RESIGN_AS_HELPER",
+    DROPPED_PIECE:"DROPPED_PIECE",
+    PROXY_HAVE:"PROXY_HAVE",
+    PROXY_UNHAVE:"PROXY_UNHAVE",
+    DIALBACK_REQUEST:"DIALBACK_REQUEST",
+    DIALBACK_REPLY:"DIALBACK_REPLY",
+    KEEP_ALIVE:"KEEP_ALIVE",
+    SOCIAL_OVERLAP:"SOCIAL_OVERLAP",
+    QUERY:"QUERY",
+    QUERY_REPLY:"QUERY_REPLY",
+    VOTECAST:"VOTECAST",
+    BARTERCAST:"BARTERCAST",
+    G2G_PIECE_XFER: "G2G_PIECE_XFER",
+    FRIENDSHIP:"FRIENDSHIP",
+    VOTECAST:"VOTECAST",
+    CHANNELCAST:"CHANNELCAST",
+    
+    CRAWLER_REQUEST:"CRAWLER_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_DATABASE_QUERY:"CRAWLER_DATABASE_QUERY_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_SEEDINGSTATS_QUERY:"CRAWLER_SEEDINGSTATS_QUERY_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_NATCHECK:"CRAWLER_NATCHECK_QUERY_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_NATTRAVERSAL:"CRAWLER_NATTRAVERSAL_QUERY_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_VIDEOPLAYBACK_INFO_QUERY:"CRAWLER_VIDEOPLAYBACK_INFO_QUERY_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_VIDEOPLAYBACK_EVENT_QUERY:"CRAWLER_VIDEOPLAYBACK_EVENT_QUERY_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_REPEX_QUERY:"CRAWLER_REPEX_QUERY_REQUEST",  # RePEX: query a peer's SwarmCache history
+    CRAWLER_REQUEST+CRAWLER_PUNCTURE_QUERY:"CRAWLER_PUNCTURE_QUERY_REQUEST",
+    CRAWLER_REQUEST+CRAWLER_CHANNEL_QUERY:"CRAWLER_CHANNEL_QUERY_REQUEST",
+
+    CRAWLER_REPLY:"CRAWLER_REPLY",
+    CRAWLER_REPLY+CRAWLER_DATABASE_QUERY:"CRAWLER_DATABASE_QUERY_REPLY",
+    CRAWLER_REPLY+CRAWLER_SEEDINGSTATS_QUERY:"CRAWLER_SEEDINGSTATS_QUERY_REPLY",
+    CRAWLER_REPLY+CRAWLER_NATCHECK:"CRAWLER_NATCHECK_QUERY_REPLY",
+    CRAWLER_REPLY+CRAWLER_NATTRAVERSAL:"CRAWLER_NATTRAVERSAL_QUERY_REPLY",
+    CRAWLER_REPLY+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS",
+    CRAWLER_REPLY+CRAWLER_FRIENDSHIP_STATS:"CRAWLER_FRIENDSHIP_STATS_REPLY",
+    CRAWLER_REPLY+CRAWLER_VIDEOPLAYBACK_INFO_QUERY:"CRAWLER_VIDEOPLAYBACK_INFO_QUERY_REPLY",
+    CRAWLER_REPLY+CRAWLER_VIDEOPLAYBACK_EVENT_QUERY:"CRAWLER_VIDEOPLAYBACK_EVENT_QUERY_REPLY",
+    CRAWLER_REPLY+CRAWLER_REPEX_QUERY:"CRAWLER_REPEX_QUERY_REPLY",  # RePEX: query a peer's SwarmCache history
+    CRAWLER_REPLY+CRAWLER_PUNCTURE_QUERY:"CRAWLER_PUNCTURE_QUERY_REPLY",
+    CRAWLER_REPLY+CRAWLER_CHANNEL_QUERY:"CRAWLER_CHANNEL_QUERY_REPLY"
+}
+
+def getMessageName(s):
+    """
+    Return the message name for message id s. This may be either a one
+    or a two byte string. Unknown ids yield
+    "Unknown_MessageID_<ord>_<ord>...".
+    """
+    if s in message_map:
+        return message_map[s]
+    else:
+        return "Unknown_MessageID_" + "_".join([str(ord(c)) for c in s])
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/NatCheck.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/NatCheck.py
new file mode 100644 (file)
index 0000000..a44379a
--- /dev/null
@@ -0,0 +1,94 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from socket import error as socketerror
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+protocol_name = 'BitTorrent protocol'
+
+# header, reserved, download id, my id, [length, message]
+
+class NatCheck:
+    """
+    Connectability (NAT) check: open a raw connection to (ip, port), send a
+    BitTorrent handshake, and validate the reply with a small state machine
+    (read self.next_len bytes, feed them to self.next_func). The outcome is
+    reported exactly once via resultfunc(result, downloadid, peerid, ip, port).
+    """
+    def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver):
+        self.resultfunc = resultfunc
+        self.downloadid = downloadid
+        self.peerid = peerid
+        self.ip = ip
+        self.port = port
+        self.closed = False
+        # Accumulates partial reads until next_len bytes are available.
+        self.buffer = StringIO()
+        self.next_len = 1
+        self.next_func = self.read_header_len
+        try:
+            self.connection = rawserver.start_connection((ip, port), self)
+            # Handshake: <pstrlen><pstr><8 reserved zero bytes><infohash>.
+            self.connection.write(chr(len(protocol_name)) + protocol_name +
+                (chr(0) * 8) + downloadid)
+        except socketerror:
+            self.answer(False)
+        except IOError:
+            self.answer(False)
+
+    def answer(self, result):
+        # Report the outcome and tear down the connection (if ever opened).
+        self.closed = True
+        try:
+            self.connection.close()
+        except AttributeError:
+            # start_connection raised, so self.connection was never set.
+            pass
+        self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)
+
+    # Each read_* handler validates one handshake field and returns
+    # (bytes_expected_next, next_handler), or None to abort the check.
+    def read_header_len(self, s):
+        if ord(s) != len(protocol_name):
+            return None
+        return len(protocol_name), self.read_header
+
+    def read_header(self, s):
+        if s != protocol_name:
+            return None
+        return 8, self.read_reserved
+
+    def read_reserved(self, s):
+        # Reserved/extension bits are ignored for the NAT check.
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):
+        # The peer must be serving the same torrent (infohash match).
+        if s != self.downloadid:
+            return None
+        return 20, self.read_peer_id
+
+    def read_peer_id(self, s):
+        # Final field: the peer id must match the advertised one.
+        if s != self.peerid:
+            return None
+        self.answer(True)
+        return None
+
+    def data_came_in(self, connection, s):
+        # Feed incoming bytes through the state machine, next_len at a time.
+        while 1:
+            if self.closed:
+                return
+            i = self.next_len - self.buffer.tell()
+            if i > len(s):
+                # Not enough data for the current field yet; wait for more.
+                self.buffer.write(s)
+                return
+            self.buffer.write(s[:i])
+            s = s[i:]
+            m = self.buffer.getvalue()
+            self.buffer.reset()
+            self.buffer.truncate()
+            x = self.next_func(m)
+            if x is None:
+                # Handshake mismatch, or success was already reported.
+                if not self.closed:
+                    self.answer(False)
+                return
+            self.next_len, self.next_func = x
+
+    def connection_lost(self, connection):
+        # Peer dropped the connection before the handshake completed: fail.
+        if not self.closed:
+            self.closed = True
+            self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port)
+
+    def connection_flushed(self, connection):
+        # Rawserver callback; nothing to do for a one-shot probe.
+        pass
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/PiecePicker.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/PiecePicker.py
new file mode 100644 (file)
index 0000000..fa4d9d2
--- /dev/null
@@ -0,0 +1,799 @@
+# Written by Bram Cohen and Pawel Garbacki, George Milescu
+# see LICENSE.txt for license information
+
+from random import randrange, shuffle
+from BaseLib.Core.BitTornado.clock import clock
+# 2fastbt_
+from traceback import extract_tb,print_stack
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+import sys
+import time
+# _2fastbt
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+"""
+  rarest_first_cutoff = number of downloaded pieces at which to switch from random to rarest first.
+  rarest_first_priority_cutoff = number of peers which need to have a piece before other partials
+                                 take priority over rarest first.
+"""
+
+class PiecePicker:
+# 2fastbt_
+    def __init__(self, numpieces,
+                 rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3,
+                 priority_step = 20, helper = None, coordinator = None, rate_predictor = None):
+# TODO: fix PiecePickerSVC and PiecePickerVOD __init calls
+# _2fastbt
+        """
+        Track piece availability across peers and pick the next piece to
+        request (random-first, then rarest-first).
+
+        @param numpieces: total number of pieces in the torrent
+        @param rarest_first_cutoff: number of downloaded pieces at which
+            to switch from random to rarest-first selection
+        @param rarest_first_priority_cutoff: number of peers which need to
+            have a piece before other partials take priority
+        @param priority_step: interest levels reserved per priority class
+        @param helper/coordinator/rate_predictor: ProxyService (2fast)
+            collaborators; None when the feature is unused
+        """
+        # If we have less than the cutoff pieces, choose pieces at random. Otherwise,
+        # go for rarest first.
+        self.rarest_first_cutoff = rarest_first_cutoff
+
+        self.priority_step = priority_step
+
+        # cutoff = number of non-seeds which need to have a piece before other
+        #          partials take priority over rarest first. In effect, equal to:
+        #              rarest_first_priority_cutoff + priority_step - #seeds
+        #          before a seed is discovered, it is equal to (as set here):
+        #              rarest_first_priority_cutoff
+        #
+        # This cutoff is used as an interest level (see below). When in random piece
+        # mode, asking for really rare pieces is disfavoured.
+        self.rarest_first_priority_cutoff = rarest_first_priority_cutoff + priority_step
+        self.cutoff = rarest_first_priority_cutoff
+
+        # total number of pieces
+        self.numpieces = numpieces
+
+        # pieces we have started to download (in transit)
+        self.started = []
+
+        # !!! the following statistics involve peers, and exclude seeds !!!
+
+        # total number of pieces owned by peers
+        self.totalcount = 0
+
+        # how many peers (non-seeder peers) have a certain piece
+        self.numhaves = [0] * numpieces
+
+        # priority of each piece; -1 to avoid downloading it
+        self.priority = [1] * numpieces
+
+        # pieces removed from interests while partially downloaded
+        self.removed_partials = {}
+
+        # self.crosscount[x] = the number of pieces owned by x peers
+        # (inverse of self.numhaves)
+        self.crosscount = [numpieces]
+
+        # self.crosscount2[x] = the number of pieces owned by x peers and me
+        # (inverse of self.numhaves[x]+self.has[x])
+        self.crosscount2 = [numpieces]
+
+        # whether we have a certain piece
+        self.has = [0] * numpieces
+
+        # number of (complete) pieces we got
+        self.numgot = 0
+
+        # whether we're done downloading
+        self.done = False
+
+        # peer information
+        self.peer_connections = {}
+
+        # seeding information
+        self.seed_connections = {}
+        self.seed_time = None
+        self.superseed = False
+        self.seeds_connected = 0
+
+# 2fastbt_
+        self.helper = helper
+        self.coordinator = coordinator
+        self.rate_predictor = rate_predictor
+        self.videostatus = None
+# _2fastbt
+        # Arno, 2010-08-11: STBSPEED, moved to fast_initialize()
+        # self._init_interests()
+
+    def _init_interests(self):
+        """
+        Interests are sets of pieces ordered by priority (0 = high). The
+        priority to the outside world is coarse-grained and is fine-tuned
+        by the number of peers owning a piece.
+
+        The interest level of a piece is self.level_in_interests[piece],
+        which is equal to:
+
+          self.priority[piece] * self.priority_step + self.numhaves[piece].
+
+        Every level is a subset of <peers?> pieces. The placement in the subset
+        with self.pos_in_interests[piece], so
+
+          piece == self.interests
+                     [self.level_in_interests[piece]]
+                     [self.pos_in_interests[piece]]
+
+        holds. Pieces within the same subset are kept shuffled.
+        """
+
+        # Start with priority_step empty high-priority levels, then one
+        # bottom level initially holding every piece in random order.
+        self.interests = [[] for x in xrange(self.priority_step)]
+        self.level_in_interests = [self.priority_step] * self.numpieces
+        interests = range(self.numpieces)
+        shuffle(interests)
+        self.pos_in_interests = [0] * self.numpieces
+        for i in xrange(self.numpieces):
+            self.pos_in_interests[interests[i]] = i
+        self.interests.append(interests)
+
+    def got_piece(self, piece, begin, length):
+        """
+        Used by the streaming piece picker for additional information.
+        """
+        # Intentional no-op in the base picker.
+        pass
+
+    def check_outstanding_requests(self, downloads):
+        """
+        Used by the streaming piece picker to cancel slow requests.
+        """
+        # Intentional no-op in the base picker.
+        pass
+
+    def got_have(self, piece, connection = None):
+        """ A peer reports to have the given piece.
+
+        Updates availability statistics and the interest levels.
+        @return: True if we already own the piece (only reached when not
+            superseeding), else False.
+        """
+
+        self.totalcount+=1
+        numint = self.numhaves[piece]
+        self.numhaves[piece] += 1
+        self.crosscount[numint] -= 1
+        if numint+1==len(self.crosscount):
+            # Grow the histogram when a new maximum owner-count appears.
+            self.crosscount.append(0)
+        self.crosscount[numint+1] += 1
+        if not self.done:
+            numintplus = numint+self.has[piece]
+            self.crosscount2[numintplus] -= 1
+            if numintplus+1 == len(self.crosscount2):
+                self.crosscount2.append(0)
+            self.crosscount2[numintplus+1] += 1
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] += 1
+        if self.superseed:
+            # NOTE(review): self.seed_got_haves is not initialised in
+            # __init__; presumably set up when superseeding is enabled —
+            # TODO confirm. Also, if superseed while not done, the level
+            # is incremented twice but only shifted once below — verify.
+            self.seed_got_haves[piece] += 1
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] += 1
+        elif self.has[piece]:
+            return True
+        elif self.priority[piece] == -1:
+            # Filtered piece: it lives outside the interests lists.
+            return False
+        if numint == len(self.interests) - 1:
+            self.interests.append([])
+        self._shift_over(piece, self.interests[numint], self.interests[numint + 1])
+        return False
+
+    # ProxyService_
+    #
+    def redirect_haves_to_coordinator(self, connection = None, helper_con = False, piece = None):
+        """ The method is called by the Downloader when a HAVE message is received.
+        
+        If the current node is a helper, it will send the HAVE information to the coordinator.
+        
+        @param connection: the connection for which the HAVE message was received
+        @param helper_con: True if it is a connection to a helper
+        @param piece: the received piece
+        """
+
+        if self.helper :
+            # The current node is a coordinator
+            if DEBUG:
+                print >> sys.stderr,"PiecePicker: proxy_got_have: sending haves to coordinator"
+            
+            # Create the piece list - a copy of numhaves for simplicity
+            piece_list = self.numhaves
+            print "sending piece_list=", piece_list
+            
+            # Send the bitfield
+            self.helper.send_proxy_have(piece_list)
+        else:
+            # if the node is a helper or a non-proxy node, do nothing
+            return
+    #
+    # _ProxyService
+
+
+    def lost_have(self, piece):
+        """ We lost a peer owning the given piece (inverse of got_have). """
+        self.totalcount-=1
+        numint = self.numhaves[piece]
+        self.numhaves[piece] -= 1
+        self.crosscount[numint] -= 1
+        self.crosscount[numint-1] += 1
+        if not self.done:
+            numintplus = numint+self.has[piece]
+            self.crosscount2[numintplus] -= 1
+            self.crosscount2[numintplus-1] += 1
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] -= 1
+        if self.superseed:
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] -= 1
+        elif self.has[piece] or self.priority[piece] == -1:
+            # Owned or filtered pieces are not in the interests lists.
+            return
+        self._shift_over(piece, self.interests[numint], self.interests[numint - 1])
+
+
+    # Arno: LIVEWRAP
+    def is_valid_piece(self, piece):
+        # Base picker: every piece index is valid (presumably overridden
+        # by live-streaming pickers for wrap-around — see LIVEWRAP).
+        return True
+
+    def get_valid_range_iterator(self):
+        # Iterate over all piece indices; live pickers restrict this range.
+        return xrange(0,len(self.has))
+
+    def invalidate_piece(self,piece):
+        """ A piece ceases to exist at the neighbours. Required for LIVEWRAP.
+
+        Clears our own copy (if any), reinserts the piece into the interests
+        structure, and zeroes its availability counters.
+        """
+
+        if self.has[piece]:
+            self.has[piece] = 0
+            #print >>sys.stderr,"PiecePicker: Clearing piece",piece
+            self.numgot -= 1
+
+            # undo self._remove_from_interests(piece); ripped from set_priority
+
+            # reinsert into interests
+            p = self.priority[piece]
+            level = self.numhaves[piece] + (self.priority_step * p)
+            self.level_in_interests[piece] = level
+            while len(self.interests) < level+1:
+                self.interests.append([])
+
+            # insert at a random spot in the list at the current level
+            # (same swap-with-last scheme as _shift_over, to keep the
+            # level shuffled)
+            l2 = self.interests[level]
+            parray = self.pos_in_interests
+            newp = randrange(len(l2)+1)
+            if newp == len(l2):
+                parray[piece] = len(l2)
+                l2.append(piece)
+            else:
+                old = l2[newp]
+                parray[old] = len(l2)
+                l2.append(old)
+                l2[newp] = piece
+                parray[piece] = newp
+
+        # modelled after lost_have
+
+        #assert not self.done
+        #assert not self.seeds_connected
+
+        numint = self.numhaves[piece]
+        if numint == 0:
+            return
+
+        # set numhaves to 0
+        self.totalcount -= numint
+        self.numhaves[piece] = 0
+        self.crosscount[numint] -= 1
+        self.crosscount[0] += 1
+        numintplus = numint+0
+        self.crosscount2[numintplus] -= 1
+        self.crosscount2[0] += 1
+        numint = self.level_in_interests[piece]
+        self.level_in_interests[piece] = 0
+        self._shift_over(piece, self.interests[numint], self.interests[0])
+
+    def set_downloader(self,dl):
+        # Late-bound reference to the Downloader that owns this picker.
+        self.downloader = dl
+
+    def _shift_over(self, piece, l1, l2):
+        """ Moves 'piece' from interests list l1 to l2. """
+
+        assert self.superseed or (not self.has[piece] and self.priority[piece] >= 0)
+        parray = self.pos_in_interests
+
+        # remove piece from l1 (O(1): overwrite with the last element)
+        p = parray[piece]
+        assert l1[p] == piece
+        q = l1[-1]
+        l1[p] = q
+        parray[q] = p
+        del l1[-1]
+
+        # add piece to a random place in l2, keeping the level shuffled
+        newp = randrange(len(l2)+1)
+        if newp == len(l2):
+            parray[piece] = len(l2)
+            l2.append(piece)
+        else:
+            # Displace the element at the chosen slot to the end.
+            old = l2[newp]
+            parray[old] = len(l2)
+            l2.append(old)
+            l2[newp] = piece
+            parray[piece] = newp
+
+    def got_seed(self):
+        # A connected peer turns out to be a seed: relax the partial-first
+        # cutoff (see the __init__ comments on rarest_first_priority_cutoff).
+        self.seeds_connected += 1
+        self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected, 0)
+
+    def became_seed(self):
+        """ A peer just became a seed. """
+
+        self.got_seed()
+        # Seeds are excluded from the peer statistics: subtract one owner
+        # from every piece and drop the now-empty lowest buckets.
+        self.totalcount -= self.numpieces
+        self.numhaves = [i-1 for i in self.numhaves]
+        if self.superseed or not self.done:
+            self.level_in_interests = [i-1 for i in self.level_in_interests]
+            del self.interests[0]
+        del self.crosscount[0]
+        if not self.done:
+            del self.crosscount2[0]
+
+    def lost_seed(self):
+        # A seed disconnected; restore the cutoff accordingly.
+        self.seeds_connected -= 1
+        self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected, 0)
+
+    # boudewijn: for VOD we need additional information. added BEGIN
+    # and LENGTH parameter
+    def requested(self, piece, begin=None, length=None):
+        """ Given piece has been requested or a partial of it is on disk. """
+        # begin/length are accepted for the VOD picker (see note above)
+        # and ignored here.
+        if piece not in self.started:
+            self.started.append(piece)
+
+    def _remove_from_interests(self, piece, keep_partial = False):
+        # Remove `piece` from its interest level (O(1) swap-with-last) and
+        # from the in-transit list.
+        l = self.interests[self.level_in_interests[piece]]
+        p = self.pos_in_interests[piece]
+        assert l[p] == piece
+        q = l[-1]
+        l[p] = q
+        self.pos_in_interests[q] = p
+        del l[-1]
+        try:
+            self.started.remove(piece)
+            if keep_partial:
+                # Remember partially-downloaded pieces so they can resume.
+                self.removed_partials[piece] = 1
+        except ValueError:
+            # Piece was never started; nothing to forget.
+            pass
+
+    def complete(self, piece):
+        """ Successfully received the given piece. """
+        assert not self.has[piece]
+        self.has[piece] = 1
+        self.numgot += 1
+        
+        if self.numgot == self.numpieces:
+            self.done = True
+            # Once complete, the peers+me histogram collapses onto crosscount.
+            self.crosscount2 = self.crosscount
+        else:
+            numhaves = self.numhaves[piece]
+            self.crosscount2[numhaves] -= 1
+            if numhaves+1 == len(self.crosscount2):
+                self.crosscount2.append(0)
+            self.crosscount2[numhaves+1] += 1
+        self._remove_from_interests(piece)
+
+    # ProxyService_
+    #
+    def _proxynext(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None, proxyhave=None, lookatstarted=False, onlystarted=False):
+        """ Determine which piece to download next from a peer. _proxynext has three extra arguments compared to _next 
+        
+        @param haves: set of pieces owned by that peer
+        @param wantfunc: custom piece filter
+        @param complete_first: whether to complete partial pieces first 
+        @param helper_con: True for Coordinator, False for Helper
+        @param willrequest: 
+        @param connection:
+        @param proxyhave: a bitfield with the pieces that the helper "sees" in the swarm
+        @param lookatstarted: if True, the picker will search in the already started pieces first, and then in the available pieces
+        @param onlystarted: if True, the picker will only search in the already started pieces
+        @return: a piece number or None
+        """
+
+        # First few (rarest_first_cutoff) pieces are selected at random
+        # and completed. Subsequent pieces are downloaded rarest-first.
+
+        # cutoff = True:  random mode
+        #          False: rarest-first mode
+        cutoff = self.numgot < self.rarest_first_cutoff
+
+        # whether to complete existing partials first -- do so before the
+        # cutoff, or if forced by complete_first, but not for seeds.
+        complete_first = (complete_first or cutoff) and not haves.complete()
+
+        # most interesting piece
+        best = None
+
+        # interest level of best piece
+        bestnum = 2 ** 30
+
+        # select piece we started to download with best interest index.
+        if lookatstarted:
+            # No active requested (started) pieces will be rerequested
+            for i in self.started:
+                # A piece counts as available if the peer HAVEs it or the
+                # proxy reports seeing it in the swarm (proxyhave).
+                if proxyhave == None:
+                    proxyhave_i = False
+                else:
+                    proxyhave_i = proxyhave[i]
+                if (haves[i] or proxyhave_i) and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)):
+                    if self.level_in_interests[i] < bestnum:
+                        best = i
+                        bestnum = self.level_in_interests[i]
+
+        if best is not None:
+            # found a piece -- return it if we are completing partials first
+            # or if there is a cutoff
+            if complete_first or (cutoff and len(self.interests) > self.cutoff):
+                return best
+        
+        if onlystarted:
+            # Only look at started downloads - used by the helper
+            return best
+
+        if haves.complete():
+            # peer has all pieces - look for any more interesting piece
+            r = [ (0, min(bestnum, len(self.interests))) ]
+        elif cutoff and len(self.interests) > self.cutoff:
+            # no best piece - start looking for low-priority pieces first
+            r = [ (self.cutoff, min(bestnum, len(self.interests))),
+                      (0, self.cutoff) ]
+        else:
+            # look for the most interesting piece
+            r = [ (0, min(bestnum, len(self.interests))) ]
+#        print "piecepicker: r=", r
+
+        # select first acceptable piece, best interest index first.
+        # r is an interest-range
+        for lo, hi in r:
+            for i in xrange(lo, hi):
+                # Randomize the list of pieces in the interest level i
+                # (copy, so self.interests itself stays untouched)
+                random_interests = []
+                random_interests.extend(self.interests[i])
+                shuffle(random_interests)
+                for j in random_interests:
+                    if proxyhave == None:
+                        proxyhave_j = False
+                    else:
+                        proxyhave_j = proxyhave[j]
+                    if (haves[j] or proxyhave_j) and wantfunc(j) and (self.helper is None or helper_con or not self.helper.is_ignored(j)):
+                        return j
+
+        if best is not None:
+            return best
+        return None
+    #
+    # _ProxyService
+
+# 2fastbt_
+    def _next(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None):
+# _2fastbt
+        """ Determine which piece to download next from a peer.
+
+        Same selection strategy as _proxynext, but without the proxyhave /
+        started-list options: started pieces are always considered first.
+        
+        @param haves: set of pieces owned by that peer
+        @param wantfunc: custom piece filter
+        @param complete_first: whether to complete partial pieces first 
+        @param helper_con: True for Coordinator, False for Helper
+        @param willrequest: 
+        @param connection: the connection object on which the returned piece will be requested
+        @return: a piece number or None
+        """
+
+        # First few (rarest_first_cutoff) pieces are selected at random
+        # and completed. Subsequent pieces are downloaded rarest-first.
+
+        # cutoff = True:  random mode
+        #          False: rarest-first mode
+        cutoff = self.numgot < self.rarest_first_cutoff
+
+        # whether to complete existing partials first -- do so before the
+        # cutoff, or if forced by complete_first, but not for seeds.
+        complete_first = (complete_first or cutoff) and not haves.complete()
+
+        # most interesting piece
+        best = None
+
+        # interest level of best piece
+        bestnum = 2 ** 30
+
+        # select piece we started to download with best interest index.
+        for i in self.started:
+# 2fastbt_
+            if haves[i] and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)):
+# _2fastbt
+                if self.level_in_interests[i] < bestnum:
+                    best = i
+                    bestnum = self.level_in_interests[i]
+
+        if best is not None:
+            # found a piece -- return it if we are completing partials first
+            # or if there is a cutoff
+            if complete_first or (cutoff and len(self.interests) > self.cutoff):
+                return best
+
+        if haves.complete():
+            # peer has all pieces - look for any more interesting piece
+            r = [ (0, min(bestnum, len(self.interests))) ]
+        elif cutoff and len(self.interests) > self.cutoff:
+            # no best piece - start looking for low-priority pieces first
+            r = [ (self.cutoff, min(bestnum, len(self.interests))),
+                      (0, self.cutoff) ]
+        else:
+            # look for the most interesting piece
+            r = [ (0, min(bestnum, len(self.interests))) ]
+
+        # select first acceptable piece, best interest index first.
+        # r is an interest-range
+        for lo, hi in r:
+            for i in xrange(lo, hi):
+                for j in self.interests[i]:
+# 2fastbt_
+                    if haves[j] and wantfunc(j) and (self.helper is None or helper_con or not self.helper.is_ignored(j)):
+# _2fastbt
+                        return j
+
+        if best is not None:
+            return best
+        return None
+
+# 2fastbt_
+    def next(self, haves, wantfunc, sdownload, complete_first = False, helper_con = False, slowpieces= [], willrequest = True, connection = None,  proxyhave = None):
+        """ Return the next piece number to be downloaded
+        
+        @param haves: set of pieces owned by that peer
+        @param wantfunc: custom piece filter
+        @param sdownload: 
+        @param complete_first: whether to complete partial pieces first
+        @param helper_con: True for Coordinator, False for Helper
+        @param slowpieces: 
+        @param willrequest: 
+        @param connection: the connection object on which the returned piece will be requested
+        @param proxyhave: a bitfield with the pieces that the helper "sees" in the swarm
+        @return: a piece number or None 
+        """
+#        try:
+        # Helper connection (helper_con) is true for coordinator
+        # Helper connection (helper_con) is false for helpers 
+        # self.helper is None for Coordinator and is notNone for Helper
+        while True:
+#            print "started =", self.started
+            if helper_con :
+                # The current node is a coordinator
+
+                # First try to request a piece that the peer advertised via a HAVE message
+                piece = self._proxynext(haves, wantfunc, complete_first, helper_con, willrequest = willrequest, connection = connection, proxyhave = None, lookatstarted=False)
+
+                # If no piece could be requested, try to find a piece that the node advertised via a PROXY_HAVE message
+                if piece is None:
+                    piece = self._proxynext(haves, wantfunc, complete_first, helper_con, willrequest = willrequest, connection = connection, proxyhave = proxyhave, lookatstarted=False)
+
+                    if piece is None:
+                        # The piece picker failed to return a piece
+                        if DEBUG:
+                            print >> sys.stderr,"PiecePicker: next: _next returned no pieces for proxyhave!",
+                        break
+                
+                if DEBUG:
+                    print >> sys.stderr,"PiecePicker: next: helper None or helper conn, returning", piece
+                    print >> sys.stderr,"PiecePicker: next: haves[", piece, "]=", haves[piece]
+                    print >> sys.stderr,"PiecePicker: next: proxyhave[", piece, "]=", proxyhave[piece]
+                if not haves[piece]:
+                    # If the piece was not advertised with a BT HAVE message, send a proxy request for it
+                    # Reserve the piece to one of the helpers
+                    self.coordinator.send_request_pieces(piece, connection.get_id())
+                    return None
+                else:
+                    # The piece was advertised with a BT HAVE message 
+                    # Return the selected piece
+                    return piece
+
+            if self.helper is not None:
+                # The current node is a helper
+                
+                # Look into the pieces that are already downloading
+                piece = self._proxynext(haves, wantfunc, complete_first, helper_con, willrequest = willrequest, connection = connection, proxyhave = None, lookatstarted=True, onlystarted=True)
+                if piece is not None:
+                    if DEBUG:
+                        print >> sys.stderr,"PiecePicker: next: helper: continuing already started download for", requested_piece
+                    return piece
+                
+                # If no already started downloads, look at new coordinator requests
+                requested_piece = self.helper.next_request()
+                if requested_piece is not None:
+                    if DEBUG:
+                        print >> sys.stderr,"PiecePicker: next: helper: got request from coordinator for", requested_piece
+                    return requested_piece
+                else:
+                    # There is no pending requested piece from the coordinator
+                    if DEBUG:
+                        print >> sys.stderr,"PiecePicker: next: helper: no piece pending"
+                    return None
+    
+            # The current node not a helper, neither a coordinator
+            # First try to request a piece that the peer advertised via a HAVE message
+            piece = self._next(haves, wantfunc, complete_first, helper_con, willrequest = willrequest, connection = connection)
+
+            if piece is None:
+                # The piece picker failed to return a piece
+                if DEBUG:
+                    print >> sys.stderr,"PiecePicker: next: _next returned no pieces!",
+                break
+
+            # We should never get here
+            if DEBUG:
+                print >> sys.stderr,"PiecePicker: next: helper: an error occurred. Returning piece",piece
+            return piece
+
+        # Arno, 2008-05-20: 2fast code: if we got capacity to DL something,
+        # ask coordinator what new pieces to dl for it.
+        if self.rate_predictor and self.rate_predictor.has_capacity():
+            return self._next(haves, wantfunc, complete_first, True, willrequest = willrequest, connection = connection)
+        else:
+            return None
+
+    def set_rate_predictor(self, rate_predictor):
+        # Store the predictor consulted by next() (has_capacity()) to decide
+        # whether to fall back to a regular _next() pick.
+        self.rate_predictor = rate_predictor
+# _2fastbt
+
+    def am_I_complete(self):
+        """ Return True when every piece has been obtained (self.done). """
+        return self.done
+    
+    def bump(self, piece):
+        """ Piece was received but contained bad data? """
+
+        l = self.interests[self.level_in_interests[piece]]
+        pos = self.pos_in_interests[piece]
+        del l[pos]
+        l.append(piece)
+        for i in range(pos, len(l)):
+            self.pos_in_interests[l[i]] = i
+        try:
+            self.started.remove(piece)
+        except:
+            pass
+
+    def set_priority(self, piece, p):
+        """ Define the priority with which a piece needs to be downloaded.
+            A priority of -1 means 'do not download'.
+            Returns True when the caller should go back to the downloader and
+            try requesting more (see inline comment below); False otherwise. """
+
+        if self.superseed:
+            return False    # don't muck with this if you're a superseed
+        oldp = self.priority[piece]
+        if oldp == p:
+            return False
+        self.priority[piece] = p
+        if p == -1:
+            # when setting priority -1,
+            # make sure to cancel any downloads for this piece
+            if not self.has[piece]:
+                self._remove_from_interests(piece, True)
+            return True
+        if oldp == -1:
+            # Piece re-enabled: compute its interest level from its rarity
+            # (numhaves) plus the priority offset, and reinsert it at a
+            # random position in that level's bucket.
+            level = self.numhaves[piece] + (self.priority_step * p)
+            self.level_in_interests[piece] = level
+            if self.has[piece]:
+                return True
+            while len(self.interests) < level+1:
+                self.interests.append([])
+            l2 = self.interests[level]
+            parray = self.pos_in_interests
+            newp = randrange(len(l2)+1)
+            if newp == len(l2):
+                parray[piece] = len(l2)
+                l2.append(piece)
+            else:
+                # Swap trick: move the occupant of the chosen slot to the end
+                # and put the reinserted piece in its place.
+                old = l2[newp]
+                parray[old] = len(l2)
+                l2.append(old)
+                l2[newp] = piece
+                parray[piece] = newp
+            if self.removed_partials.has_key(piece):
+                del self.removed_partials[piece]
+                self.started.append(piece)
+            # now go to downloader and try requesting more
+            return True
+        # Priority changed between two enabled values: shift the piece by the
+        # corresponding number of interest levels.
+        numint = self.level_in_interests[piece]
+        newint = numint + ((p - oldp) * self.priority_step)
+        self.level_in_interests[piece] = newint
+        if self.has[piece]:
+            return False
+        while len(self.interests) < newint+1:
+            self.interests.append([])
+        self._shift_over(piece, self.interests[numint], self.interests[newint])
+        return False
+
+    def is_blocked(self, piece):
+        """ Return True when the piece has priority -1 ('do not download'). """
+        return self.priority[piece] < 0
+
+
+    def set_superseed(self):
+        """ Enter superseed mode; only valid once the download is complete. """
+        assert self.done
+        self.superseed = True
+        # Tracks, per piece, how many connections reported HAVE for a piece
+        # we revealed; consumed by next_have().
+        self.seed_got_haves = [0] * self.numpieces
+        self._init_interests()  # assume everyone is disconnected
+
+    def next_have(self, connection, looser_upload):
+        """ Superseed mode: choose which piece to reveal (via HAVE) to this
+            connection.
+
+            Returns a piece number, None when nothing should be revealed yet,
+            or -1 as a signal to the caller to close the connection. """
+        if self.seed_time is None:
+            self.seed_time = clock()
+            return None
+        if clock() < self.seed_time+10:  # wait 10 seconds after seeing the first peers
+            return None                  # to give time to grab have lists
+        if not connection.upload.super_seeding:
+            return None
+        if connection in self.seed_connections:
+            if looser_upload:
+                num = 1     # send a new have even if it hasn't spread that piece elsewhere
+            else:
+                num = 2
+            if self.seed_got_haves[self.seed_connections[connection]] < num:
+                return None
+            if not connection.upload.was_ever_interested:   # it never downloaded it?
+                connection.upload.skipped_count += 1
+                if connection.upload.skipped_count >= 3:    # probably another stealthed seed
+                    return -1                               # signal to close it
+        # Reveal the rarest piece (earliest interest tier) this peer lacks.
+        for tier in self.interests:
+            for piece in tier:
+                if not connection.download.have[piece]:
+                    seedint = self.level_in_interests[piece]
+                    self.level_in_interests[piece] += 1  # tweak it up one, so you don't duplicate effort
+                    if seedint == len(self.interests) - 1:
+                        self.interests.append([])
+                    self._shift_over(piece, 
+                                self.interests[seedint], self.interests[seedint + 1])
+                    self.seed_got_haves[piece] = 0       # reset this
+                    self.seed_connections[connection] = piece
+                    connection.upload.seed_have_list.append(piece)
+                    return piece
+        return -1       # something screwy; terminate connection
+
+    def got_peer(self, connection):
+        # Register a new peer connection for bookkeeping.
+        self.peer_connections[connection] = { "connection": connection }
+
+    def lost_peer(self, connection):
+        if connection.download.have.complete():
+            self.lost_seed()
+        else:
+            has = connection.download.have
+            for i in xrange(0, self.numpieces):
+                if has[i]:
+                    self.lost_have(i)
+
+        if connection in self.seed_connections:
+            del self.seed_connections[connection]
+        del self.peer_connections[connection]
+
+
+    def fast_initialize(self,completeondisk):
+        if completeondisk:
+            self.has = [1] * self.numpieces 
+            self.numgot = self.numpieces
+            self.done = True
+            self.interests = [[] for x in xrange(self.priority_step)]
+            self.interests.append([])
+            self.level_in_interests = [self.priority_step] * self.numpieces
+            self.pos_in_interests = [0] * self.numpieces # Incorrect, but shouldn't matter
+        else:
+            self._init_interests()    
+
+    def print_complete(self):
+        """ Debug helper: dump the complete picker state to stderr. """
+        print >>sys.stderr,"pp: self.numpieces",`self.numpieces`
+        print >>sys.stderr,"pp: self.started",`self.started`
+        print >>sys.stderr,"pp: self.totalcount",`self.totalcount`
+        print >>sys.stderr,"pp: self.numhaves",`self.numhaves`
+        print >>sys.stderr,"pp: self.priority",`self.priority`
+        print >>sys.stderr,"pp: self.removed_partials",`self.removed_partials`
+        print >>sys.stderr,"pp: self.crosscount",`self.crosscount`
+        print >>sys.stderr,"pp: self.crosscount2",`self.crosscount2`
+        print >>sys.stderr,"pp: self.has",`self.has`
+        print >>sys.stderr,"pp: self.numgot",`self.numgot`
+        print >>sys.stderr,"pp: self.done",`self.done`
+        print >>sys.stderr,"pp: self.peer_connections",`self.peer_connections`
+        print >>sys.stderr,"pp: self.seed_connections",`self.seed_connections`
+        print >>sys.stderr,"pp: self.seed_time",`self.seed_time`
+        print >>sys.stderr,"pp: self.superseed",`self.superseed`
+        print >>sys.stderr,"pp: self.seeds_connected",`self.seeds_connected`
+        print >>sys.stderr,"pp: self.interests",`self.interests`
+        print >>sys.stderr,"pp: self.level_in_interests",`self.level_in_interests`
+        print >>sys.stderr,"pp: self.pos_in_interests",`self.pos_in_interests`
+        
+        
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Rerequester.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Rerequester.py
new file mode 100644 (file)
index 0000000..0ed1d9b
--- /dev/null
@@ -0,0 +1,606 @@
+# Written by Bram Cohen
+# modified for multitracker operation by John Hoffman
+# modified for mainline DHT support by Fabian van der Werf
+# Modified by Raul Jimenez to integrate KTH DHT
+# see LICENSE.txt for license information
+
+import sys
+from BaseLib.Core.BitTornado.zurllib import urlopen
+from urllib import quote
+from btformats import check_peers
+from BaseLib.Core.BitTornado.bencode import bdecode
+from threading import Thread, Lock, currentThread
+from cStringIO import StringIO
+from traceback import print_exc,print_stack
+from socket import error, gethostbyname, inet_aton, inet_ntoa
+from random import shuffle
+from BaseLib.Core.Utilities.Crypto import sha
+from time import time
+from struct import pack, unpack
+import binascii
+from BaseLib.Core.simpledefs import *
+
+
+import BaseLib.Core.DecentralizedTracking.mainlineDHT as mainlineDHT
+if mainlineDHT.dht_imported:
+    from BaseLib.Core.DecentralizedTracking.kadtracker.identifier import Id, IdError
+    
+
+try:
+    from os import getpid
+except ImportError:
+    def getpid():
+        return 1
+    
+try:
+    True
+except:
+    True = 1
+    False = 0
+    
+DEBUG = False
+DEBUG_DHT = False
+
+mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
+keys = {}
+basekeydata = str(getpid()) + repr(time()) + 'tracker'
+
+def add_key(tracker):
+    key = ''
+    for i in sha(basekeydata+tracker).digest()[-6:]:
+        key += mapbase64[ord(i) & 0x3F]
+    keys[tracker] = key
+
+def get_key(tracker):
+    try:
+        return "&key="+keys[tracker]
+    except:
+        add_key(tracker)
+        return "&key="+keys[tracker]
+
+class fakeflag:
+    """ Minimal stand-in for a threading.Event with a fixed state. """
+    def __init__(self, state=False):
+        self.state = state
+    def wait(self):
+        # Returns immediately; a real Event would block until set.
+        pass
+    def isSet(self):
+        return self.state
+
+class Rerequester:
+    def __init__(self, trackerlist, interval, sched, howmany, minpeers, 
+            connect, externalsched, amount_left, up, down, 
+            port, ip, myid, infohash, timeout, errorfunc, excfunc, 
+            maxpeers, doneflag, upratefunc, downratefunc, 
+            unpauseflag = fakeflag(True), config=None):
+        # NOTE(review): the default unpauseflag is a single shared fakeflag
+        # instance (mutable default argument) — harmless only because
+        # fakeflag is never mutated; confirm before changing fakeflag.
+
+        self.excfunc = excfunc
+        # Shuffle each tier of trackers once, per the multitracker spec.
+        newtrackerlist = []        
+        for tier in trackerlist:
+            if len(tier) > 1:
+                shuffle(tier)
+            newtrackerlist += [tier]
+        self.trackerlist = newtrackerlist
+        self.lastsuccessful = ''
+        self.rejectedmessage = 'rejected by tracker - '
+        self.port = port
+        
+        if DEBUG:
+            print >>sys.stderr,"Rerequest tracker: infohash is",`infohash`,"port is",self.port,"myid",`myid`,"quoted id",quote(myid)
+
+        # Base query string; per-announce parameters are appended later.
+        self.url = ('?info_hash=%s&peer_id=%s&port=%s' %
+            (quote(infohash), quote(myid), str(port)))
+        self.ip = ip
+        self.interval = interval
+        self.last = None
+        self.trackerid = None
+        self.announce_interval = 1 * 60
+        self.sched = sched
+        self.howmany = howmany
+        self.minpeers = minpeers
+        self.connect = connect
+        self.externalsched = externalsched
+        self.amount_left = amount_left
+        self.up = up
+        self.down = down
+        self.timeout = timeout
+        self.errorfunc = errorfunc
+        self.maxpeers = maxpeers
+        self.doneflag = doneflag
+        self.upratefunc = upratefunc
+        self.downratefunc = downratefunc
+        self.unpauseflag = unpauseflag
+        self.last_failed = True
+        self.never_succeeded = True
+        self.errorcodes = {}
+        self.lock = SuccessLock()
+        self.special = None
+        self.started = False
+        self.stopped = False
+        self.schedid = 'arno481'
+        self.infohash = infohash
+        self.dht = mainlineDHT.dht
+        self.config = config
+        self.notifiers = [] # Diego : warn who is interested about peers returned (only) by tracker
+
+
+    def start(self):
+        """ Kick off the periodic (c) and event (d) announce loops, once. """
+        if not self.started:
+            self.started = True
+            # First periodic reannounce after half an interval; event
+            # announce (d with event 0 = 'started') immediately.
+            self.sched(self.c, self.interval/2)
+            self.d(0)
+
+    def c(self):
+        """ Periodic reannounce tick; reschedules itself via _c. """
+        if self.stopped:
+            return
+        # NOTE(review): announces only while unpauseflag is NOT set and we
+        # are short of peers — verify the 'not' against the intended pause
+        # semantics before changing.
+        if not self.unpauseflag.isSet() and self.howmany() < self.minpeers:
+            self.announce(3, self._c)
+        else:
+            self._c()
+
+    def _c(self):
+        # Reschedule the periodic tick.
+        self.sched(self.c, self.interval)
+
+    def d(self, event = 3):
+        """ Event announce (0=started, 1=completed, 2=stopped, 3=none);
+            reschedules itself via _d. """
+        if self.stopped:
+            return
+        if not self.unpauseflag.isSet():
+            # Paused: skip the announce but keep the loop alive.
+            self._d()
+            return
+        self.announce(event, self._d)
+
+    def _d(self):
+        # Reschedule the event-announce loop; back off to 60s retries until
+        # the first announce ever succeeds.
+        if self.never_succeeded:
+            self.sched(self.d, 60)  # retry in 60 seconds
+        else:
+            self.sched(self.d, self.announce_interval)
+
+    def encoder_wants_new_peers(self):
+        """ The list of peers we gave to the encoder via self.connect()
+        did not give any live connections, reconnect to get some more.
+        Officially we should cancel the outstanding
+            self.sched(self.d,self.announce_interval)
+        """
+        # Re-announce immediately with event 0 ('started').
+        self.d(0)
+
+    def announce(self, event = 3, callback = lambda: None, specialurl = None):
+        """ Build the announce query string for this event and hand it to
+            rerequest(). specialurl targets a single specific tracker and
+            skips the upload/download statistics. """
+        # IPVSIX: Azureus 3.1.1.0 used as Ubuntu IPv6 tracker doesn't support BEP 7
+        if ':' in self.ip:
+            compact = 0
+        else:
+            compact = 1
+
+        if specialurl is not None:
+            s = self.url+'&uploaded=0&downloaded=0&left=1'   # don't add to statistics
+            if self.howmany() >= self.maxpeers:
+                s += '&numwant=0'
+            else:
+                s += '&no_peer_id=1'
+                if compact:
+                    s+= '&compact=1'
+            self.last_failed = True         # force true, so will display an error
+            self.special = specialurl
+            self.rerequest(s, callback)
+            return
+        
+        else:
+            s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
+                (self.url, str(self.up()), str(self.down()), 
+                str(self.amount_left())))
+        if self.last is not None:
+            s += '&last=' + quote(str(self.last))
+        if self.trackerid is not None:
+            s += '&trackerid=' + quote(str(self.trackerid))
+        if self.howmany() >= self.maxpeers:
+            s += '&numwant=0'
+        else:
+                # (over-indented in the original but still valid Python)
+                s += '&no_peer_id=1'
+                if compact:
+                    s+= '&compact=1'
+        if event != 3:
+            s += '&event=' + ['started', 'completed', 'stopped'][event]
+        if event == 2:
+            # 'stopped': no further announces after this one.
+            self.stopped = True
+        self.rerequest(s, callback)
+
+
+    def snoop(self, peers, callback = lambda: None):  # tracker call support
+        # One-shot scrape-like query: announce 'stopped' with zero stats and
+        # ask for the given number of peers without joining the swarm.
+        self.rerequest(self.url
+            +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
+            +str(peers), callback)
+
+
+    def rerequest(self, s, callback):
+        """ Serialize announce attempts through self.lock and run the actual
+            network work (_rerequest) on a daemon thread. """
+        # ProxyService_
+        #
+        proxy_mode = self.config.get('proxy_mode',0)
+        # NOTE(review): the PROXY_MODE_PRIVATE early return below is nested
+        # under 'if DEBUG:', so in a non-debug build private proxy mode does
+        # NOT suppress the tracker contact. This looks like mangled
+        # instrumentation — confirm the intended gating before relying on it.
+        if DEBUG:
+            if proxy_mode == PROXY_MODE_PRIVATE:
+                if True:
+                    print "_rerequest exited."# + str(proxy_mode)
+                return
+            else:
+                if True:
+                    print "_rerequest did not exit"# + str(proxy_mode) 
+        #
+        # _ProxyService
+
+        if not self.lock.isfinished():  # still waiting for prior cycle to complete??
+            def retry(self = self, s = s, callback = callback):
+                self.rerequest(s, callback)
+            self.sched(retry, 5)         # retry in 5 seconds
+            return
+        self.lock.reset()
+        rq = Thread(target = self._rerequest, args = [s, callback])
+        rq.setName( "TrackerRerequestA"+rq.getName() )
+        # Arno: make this a daemon thread so the client closes sooner.
+        rq.setDaemon(True)
+        rq.start()
+
+    def _rerequest(self, s, callback):
+        """ Worker-thread body: append our IP field, fire the DHT lookup, and
+            walk the tracker tiers until one announce succeeds; otherwise
+            schedule the failure path. """
+        try:
+            def fail(self = self, callback = callback):
+                self._fail(callback)
+            if self.ip:
+                try:
+                    # IPVSIX
+                    if ':' in self.ip:
+                        # TODO: support for ipv4= field
+                        urlip = "["+self.ip+"]" # URL encoding for IPv6, see RFC3986
+                        field = "ipv6"
+                    else:
+                        urlip = self.ip
+                        field = "ip"
+                        
+                    s += '&' + field + '=' + urlip  
+                except:
+                    self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
+                    self.externalsched(fail)
+            # NOTE(review): this reset wipes the 'troublecode' set in the
+            # except branch just above — verify whether that is intended.
+            self.errorcodes = {}
+            if self.special is None:
+
+                #Do dht request
+                if self.dht:
+                    self._dht_rerequest()
+                elif DEBUG_DHT:
+                    print >>sys.stderr,"Rerequester: No DHT support loaded"
+
+                for t in range(len(self.trackerlist)):
+                    for tr in range(len(self.trackerlist[t])):
+                        tracker  = self.trackerlist[t][tr]
+                        # Arno: no udp support yet
+                        if tracker.startswith( 'udp:' ):
+                            if DEBUG:
+                                print >>sys.stderr,"Rerequester: Ignoring tracker",tracker
+                            continue
+                        #elif DEBUG:
+                        #    print >>sys.stderr,"Rerequester: Trying tracker",tracker
+                        if self.rerequest_single(tracker, s, callback):
+                            # Promote the responsive tracker to the front of
+                            # its tier, per the multitracker spec.
+                            if not self.last_failed and tr != 0:
+                                del self.trackerlist[t][tr]
+                                self.trackerlist[t] = [tracker] + self.trackerlist[t]
+                            return
+            else:
+                # One-shot announce to a specific tracker (see announce()).
+                tracker = self.special
+                self.special = None
+                if self.rerequest_single(tracker, s, callback):
+                    return
+            # no success from any tracker
+            self.externalsched(fail)
+        except:
+            self.exception(callback)
+
+
+    def _fail(self, callback):
+        """ All trackers failed: surface the most specific recorded error
+            (unless we are transferring briskly mid-download), mark failure,
+            release the lock and run the caller's callback. """
+        if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
+             or not self.amount_left() ):
+            # Report in order of specificity.
+            for f in ['rejected', 'bad_data', 'troublecode']:
+                if self.errorcodes.has_key(f):
+                    r = self.errorcodes[f]
+                    break
+            else:
+                r = 'Problem connecting to tracker - unspecified error:'+`self.errorcodes`
+            self.errorfunc(r)
+
+        self.last_failed = True
+        self.lock.give_up()
+        self.externalsched(callback)
+
+
+    def rerequest_single(self, t, s, callback):
+        """ Try one tracker on a daemon thread and block on the SuccessLock
+            until it trips. Returns True if it wants rerequest() to exit
+            (success, or a silent failure after a prior success on t). """
+        l = self.lock.set()
+        rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
+        rq.setName( "TrackerRerequestB"+rq.getName() )
+        # Arno: make this a daemon thread so the client closes sooner.
+        rq.setDaemon(True)
+        rq.start()
+        self.lock.wait()
+        if self.lock.success:
+            self.lastsuccessful = t
+            self.last_failed = False
+            self.never_succeeded = False
+            return True
+        if not self.last_failed and self.lastsuccessful == t:
+            # if the last tracker hit was successful, and you've just tried the tracker
+            # you'd contacted before, don't go any further, just fail silently.
+            self.last_failed = True
+            self.externalsched(callback)
+            self.lock.give_up()
+            return True
+        return False    # returns true if it wants rerequest() to exit
+
+
+    def _rerequest_single(self, t, s, l, callback):
+        """ Thread body for one tracker announce: fetch t+s with a scheduled
+            timeout, bdecode and validate the response, record error codes,
+            and trip the SuccessLock ticket l accordingly. The decoded
+            response is processed via postrequest() even if the attempt
+            timed out (the callback is then suppressed). """
+        try:        
+            closer = [None]
+            def timedout(self = self, l = l, closer = closer):
+                # Timeout path: mark trouble (if we are first) and force-close
+                # the HTTP handle to unblock the read.
+                if self.lock.trip(l):
+                    self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
+                    self.lock.unwait(l)
+                try:
+                    closer[0]()
+                except:
+                    pass
+                    
+            self.externalsched(timedout, self.timeout)
+
+            err = None
+            try:
+                if DEBUG:
+                    print >>sys.stderr,"Rerequest tracker:"
+                    print >>sys.stderr,t+s
+                h = urlopen(t+s)
+                closer[0] = h.close
+                data = h.read()
+            except (IOError, error), e:
+                err = 'Problem connecting to tracker - ' + str(e)
+                if DEBUG:
+                    print_exc()
+            except:
+                err = 'Problem connecting to tracker'
+                if DEBUG:
+                    print_exc()
+                    
+                    
+            #if DEBUG:
+            #    print >>sys.stderr,"rerequest: Got data",data
+                    
+            try:
+                h.close()
+            except:
+                pass
+            if err:        
+                if self.lock.trip(l):
+                    self.errorcodes['troublecode'] = err
+                    self.lock.unwait(l)
+                return
+
+            if not data:
+                if self.lock.trip(l):
+                    self.errorcodes['troublecode'] = 'no data from tracker'
+                    self.lock.unwait(l)
+                return
+            
+            try:
+                r = bdecode(data, sloppy=1)
+                if DEBUG:
+                    print >>sys.stderr,"Rerequester: Tracker returns:", r
+                check_peers(r)
+                
+                #print >>sys.stderr,"Rerequester: Tracker returns, post check done"
+                
+            except ValueError, e:
+                if DEBUG:
+                    print_exc()
+                if self.lock.trip(l):
+                    self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
+                    self.lock.unwait(l)
+                return
+            
+            if r.has_key('failure reason'):
+                if self.lock.trip(l):
+                    self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
+                    self.lock.unwait(l)
+                return
+                
+            if self.lock.trip(l, True):     # success!
+                self.lock.unwait(l)
+            else:
+                callback = lambda: None     # attempt timed out, don't do a callback
+
+            # even if the attempt timed out, go ahead and process data
+            def add(self = self, r = r, callback = callback):
+                #print >>sys.stderr,"Rerequester: add: postprocessing",r
+                self.postrequest(r, callback, self.notifiers)
+                
+            #print >>sys.stderr,"Rerequester: _request_single: scheduling processing of returned",r
+            self.externalsched(add)
+        except:
+            
+            print_exc()
+            
+            self.exception(callback)
+
+    def _dht_rerequest(self):
+        """ Look up peers for self.infohash in the mainline DHT; also announce
+            our port when the dialback check says we are connectable. """
+        if DEBUG_DHT:
+            print >>sys.stderr,"Rerequester: _dht_rerequest",`self.infohash`
+        try:
+            info_hash_id = Id(self.infohash)
+        except (IdError):
+            print >>sys.stderr,"Rerequester: _dht_rerequest: self.info_hash is not a valid identifier"
+            return
+                
+        if 'dialback' in self.config and self.config['dialback']:
+            from BaseLib.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
+            
+            if DialbackMsgHandler.getInstance().isConnectable():
+                if DEBUG_DHT:
+                    print >>sys.stderr,"Rerequester: _dht_rerequest: get_peers AND announce"
+                self.dht.get_peers(info_hash_id, self._dht_got_peers, self.port)
+                return
+                #raul: I added this return so when the peer is NOT connectable
+                # it does a get_peers lookup but it does not announce
+        if DEBUG_DHT:
+            print >>sys.stderr,"Rerequester: _dht_rerequest: JUST get_peers, DO NOT announce"
+        self.dht.get_peers(info_hash_id, self._dht_got_peers)
+
+
+    def _dht_got_peers(self, peers):
+        # DHT callback: wrap (ip, port) tuples into a tracker-style response
+        # dict and feed it through the normal postrequest() path (no
+        # notifiers — those are tracker-only by design, see self.notifiers).
+        if DEBUG_DHT:
+            print >>sys.stderr,"Rerequester: DHT: Received",len(peers),"peers",currentThread().getName()
+        """
+        raul: This is quite weird but I leave as it is.
+        """
+        p = [{'ip': peer[0],'port': peer[1]} for peer in peers]
+        if p:
+            r = {'peers':p}
+            def add(self = self, r = r):
+                self.postrequest(r, lambda : None)
+            self.externalsched(add)
+
+
+    def add_notifier( self, cb ):
+        # Register a callback invoked with the peer list returned by the
+        # tracker (not by the DHT); see postrequest().
+        self.notifiers.append( cb )
+
+    def postrequest(self, r, callback, notifiers = []):
+        try:
+            if r.has_key('warning message'):
+                self.errorfunc('warning from tracker - ' + r['warning message'])
+            self.announce_interval = r.get('interval', self.announce_interval)
+            self.interval = r.get('min interval', self.interval)
+            
+            if DEBUG:
+                print >> sys.stderr,"Rerequester: announce min is",self.announce_interval,self.interval
+            
+            self.trackerid = r.get('tracker id', self.trackerid)
+            self.last = r.get('last', self.last)
+    #        ps = len(r['peers']) + self.howmany()
+            peers = []
+            p = r.get('peers')
+            if p is not None:
+                if type(p) == type(''):
+                    for x in xrange(0, len(p), 6):
+                        ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
+                        port = (ord(p[x+4]) << 8) | ord(p[x+5])
+                        peers.append(((ip, port), 0)) # Arno: note: not just (ip,port)!!!
+                else:
+                    # IPVSIX: Azureus 3.1.1.0 used as Ubuntu IPv6 tracker 
+                    # doesn't support BEP 7. Hence these may be IPv6.
+                    #
+                    for x in p:
+                        peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0)))
+            else:
+                # IPv6 Tracker Extension, http://www.bittorrent.org/beps/bep_0007.html
+                p = r.get('peers6')
+                if type(p) == type(''):
+                    for x in xrange(0, len(p), 18):
+                        #ip = '.'.join([str(ord(i)) for i in p[x:x+16]])
+                        hexip = binascii.b2a_hex(p[x:x+16])
+                        ip = ''
+                        for i in xrange(0,len(hexip),4):
+                            ip += hexip[i:i+4]
+                            if i+4 != len(hexip):
+                                ip += ':'
+                        port = (ord(p[x+16]) << 8) | ord(p[x+17])
+                        peers.append(((ip, port), 0)) # Arno: note: not just (ip,port)!!!
+                else:
+                    for x in p:
+                        peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0)))
+            
+            
+                # Arno, 2009-04-06: Need more effort to support IPv6, e.g.
+                # see SocketHandler.SingleSocket.get_ip(). The getsockname()
+                # + getpeername() calls should be make to accept IPv6 returns.
+                # Plus use inet_ntop() instead of inet_ntoa(), but former only
+                # supported on UNIX :-( See new ipaddr module in Python 2.7
+                #
+                print >>sys.stderr,"Rerequester: Got IPv6 peer addresses, not yet supported, ignoring."
+                peers = []
+            
+            if DEBUG:
+                print >>sys.stderr,"Rerequester: postrequest: Got peers",peers
+            ps = len(peers) + self.howmany()
+            if ps < self.maxpeers:
+                if self.doneflag.isSet():
+                    if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
+                        self.last = None
+                else:
+                    if r.get('num peers', 1000) > ps * 1.2:
+                        self.last = None
+
+
+            if peers:
+                shuffle(peers)
+                self.connect(peers)    # Encoder.start_connections(peers)
+                for notifier in notifiers:
+                    notifier( peers )
+                
+            callback()
+        except:
+            print >>sys.stderr,"Rerequester: Error in postrequest"
+            import traceback
+            traceback.print_exc()
+
+    def exception(self, callback):
+        """ Capture the current traceback and report it on the network thread
+            via excfunc (or stdout), then run the callback. """
+        data = StringIO()
+        print_exc(file = data)
+        def r(s = data.getvalue(), callback = callback):
+            if self.excfunc:
+                self.excfunc(s)
+            else:
+                print s
+            callback()
+        self.externalsched(r)
+
+
+class SuccessLock:
+    """ Coordinates one announce cycle across worker threads.
+
+    set() hands out a monotonically increasing ticket (self.code); trip(code)
+    is honoured only for the current ticket, so stale workers are ignored.
+    wait()/unwait() implement the rendezvous via the 'pause' lock. The exact
+    acquire/release ordering here is load-bearing — do not reorder.
+    """
+    def __init__(self):
+        self.lock = Lock()
+        self.pause = Lock()
+        self.code = 0L
+        self.success = False
+        self.finished = True
+
+    def reset(self):
+        # Begin a new announce cycle.
+        self.success = False
+        self.finished = False
+
+    def set(self):
+        # Issue a new ticket; ensure 'pause' is held so wait() will block.
+        self.lock.acquire()
+        if not self.pause.locked():
+            self.pause.acquire()
+        self.first = True
+        self.code += 1L
+        self.lock.release()
+        return self.code
+
+    def trip(self, code, s = False):
+        # Returns True only for the first trip of the current ticket while
+        # the cycle is unfinished; s=True marks the cycle successful.
+        self.lock.acquire()
+        try:
+            if code == self.code and not self.finished:
+                r = self.first
+                self.first = False
+                if s:
+                    self.finished = True
+                    self.success = True
+                return r
+        finally:
+            self.lock.release()
+
+    def give_up(self):
+        # End the cycle as a failure.
+        self.lock.acquire()
+        self.success = False
+        self.finished = True
+        self.lock.release()
+
+    def wait(self):
+        # Block until some worker calls unwait() with the current ticket.
+        self.pause.acquire()
+
+    def unwait(self, code):
+        if code == self.code and self.pause.locked():
+            self.pause.release()
+
+    def isfinished(self):
+        self.lock.acquire()
+        x = self.finished
+        self.lock.release()
+        return x    
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Statistics.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Statistics.py
new file mode 100644 (file)
index 0000000..0ee040e
--- /dev/null
@@ -0,0 +1,182 @@
+# Written by Edward Keyes
+# see LICENSE.txt for license information
+
+from threading import Event
+# Compatibility shim: define True/False as ints on ancient Pythons (< 2.2.1)
+# that lack the builtin bool constants.
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+class Statistics_Response:
+    # Plain attribute bag returned by Statistics.update(); all fields are
+    # assigned dynamically there (upTotal, downTotal, shareRating, ...).
+    pass    # empty class
+
+
+class Statistics:
+    # Aggregates live transfer statistics (rates, totals, per-file progress,
+    # storage state) from the download machinery into a Statistics_Response
+    # snapshot; see update().
+    def __init__(self, upmeasure, downmeasure, connecter, ghttpdl, hhttpdl,
+                 ratelimiter, rerequest_lastfailed, fdatflag):
+        self.upmeasure = upmeasure
+        self.downmeasure = downmeasure
+        self.connecter = connecter
+        self.ghttpdl = ghttpdl          # GET-based HTTP downloader stats source
+        self.hhttpdl = hhttpdl          # hoffman-style HTTP downloader stats source
+        self.ratelimiter = ratelimiter
+        self.downloader = connecter.downloader
+        self.picker = connecter.downloader.picker
+        self.storage = connecter.downloader.storage
+        self.torrentmeasure = connecter.downloader.totalmeasure
+        self.rerequest_lastfailed = rerequest_lastfailed
+        self.fdatflag = fdatflag
+        self.fdatactive = False
+        # piecescomplete stays None for single-file torrents; set_dirstats()
+        # initializes it for multi-file torrents (update() keys off this).
+        self.piecescomplete = None
+        self.placesopen = None
+        self.storage_totalpieces = len(self.storage.hashes)
+
+
+    def set_dirstats(self, files, piece_length):
+        # Build per-file piece bookkeeping so per-file progress can be
+        # reported (multi-file torrents only).
+        self.piecescomplete = 0
+        self.placesopen = 0
+        self.filelistupdated = Event()
+        self.filelistupdated.set()
+        frange = xrange(len(files))
+        self.filepieces = [[] for x in frange]    # pieces still missing, per file
+        self.filepieces2 = [[] for x in frange]   # immutable baseline: all pieces per file
+        self.fileamtdone = [0.0 for x in frange]
+        self.filecomplete = [False for x in frange]
+        self.fileinplace = [False for x in frange]
+        start = 0L
+        for i in frange:
+            l = files[i][1]
+            if l == 0:
+                # zero-length files are trivially complete and "in place"
+                self.fileamtdone[i] = 1.0
+                self.filecomplete[i] = True
+                self.fileinplace[i] = True
+            else:
+                fp = self.filepieces[i]
+                fp2 = self.filepieces2[i]
+                for piece in range(int(start/piece_length), 
+                                   int((start+l-1)/piece_length)+1):
+                    fp.append(piece)
+                    fp2.append(piece)
+                start += l
+
+
+    def update(self):
+        # Snapshot all statistics into a fresh Statistics_Response.
+        s = Statistics_Response()
+        s.upTotal = self.upmeasure.get_total()
+        s.downTotal = self.downmeasure.get_total()
+        s.last_failed = self.rerequest_lastfailed()
+        s.external_connection_made = self.connecter.external_connection_made
+        if s.downTotal > 0:
+            s.shareRating = float(s.upTotal)/s.downTotal
+        elif s.upTotal == 0:
+            s.shareRating = 0.0
+        else:
+            # uploaded something while having downloaded nothing: sentinel
+            s.shareRating = -1.0
+        s.torrentRate = self.torrentmeasure.get_rate()
+        s.torrentTotal = self.torrentmeasure.get_total()
+        s.numSeeds = self.picker.seeds_connected
+        s.numOldSeeds = self.downloader.num_disconnected_seeds()
+        s.numPeers = len(self.downloader.downloads)-s.numSeeds
+        # numCopies: number of distributed full copies of the torrent among
+        # connected peers, computed from the picker's availability histogram.
+        s.numCopies = 0.0
+        for i in self.picker.crosscount:
+            if i==0:
+                s.numCopies+=1
+            else:
+                s.numCopies+=1-float(i)/self.picker.numpieces
+                break
+        if self.picker.done:
+            s.numCopies2 = s.numCopies + 1
+        else:
+            s.numCopies2 = 0.0
+            for i in self.picker.crosscount2:
+                if i==0:
+                    s.numCopies2+=1
+                else:
+                    s.numCopies2+=1-float(i)/self.picker.numpieces
+                    break
+        s.discarded = self.downloader.discarded
+        # fold HTTP-seed counts into the peer-seed totals
+        s.numSeeds += self.ghttpdl.seedsfound
+        s.numSeeds += self.hhttpdl.seedsfound
+        s.numOldSeeds += self.ghttpdl.seedsfound
+        s.numOldSeeds += self.hhttpdl.seedsfound
+        if s.numPeers == 0 or self.picker.numpieces == 0:
+            s.percentDone = 0.0
+        else:
+            # average completeness of connected (non-seed) peers, in percent
+            s.percentDone = 100.0*(float(self.picker.totalcount)/self.picker.numpieces)/s.numPeers
+
+        s.backgroundallocating = self.storage.bgalloc_active
+        s.storage_totalpieces = len(self.storage.hashes)
+        s.storage_active = len(self.storage.stat_active)
+        s.storage_new = len(self.storage.stat_new)
+        s.storage_dirty = len(self.storage.dirty)
+        numdownloaded = self.storage.stat_numdownloaded
+        s.storage_justdownloaded = numdownloaded
+        s.storage_numcomplete = self.storage.stat_numfound + numdownloaded
+        s.storage_numflunked = self.storage.stat_numflunked
+        s.storage_isendgame = self.downloader.endgamemode
+
+        s.peers_kicked = self.downloader.kicked.items()
+        s.peers_banned = self.downloader.banned.items()
+
+        try:
+            s.upRate = int(self.ratelimiter.upload_rate/1000)
+            # sanity cap: treat absurd readings as "unknown" (0)
+            assert s.upRate < 5000
+        except:
+            s.upRate = 0
+        s.upSlots = self.ratelimiter.slots
+
+        s.have = self.storage.get_have_copy()
+
+        if self.piecescomplete is None:     # not a multi-file torrent
+            return s
+        
+        if self.fdatflag.isSet():
+            if not self.fdatactive:
+                self.fdatactive = True
+        else:
+            self.fdatactive = False
+
+        # Recompute per-file progress only when the number of completed
+        # pieces changed since the last snapshot.
+        if self.piecescomplete != self.picker.numgot:
+            for i in xrange(len(self.filecomplete)):
+                if self.filecomplete[i]:
+                    continue
+                oldlist = self.filepieces[i]
+                newlist = [ piece
+                            for piece in oldlist
+                            if not self.storage.have[piece] ]
+                if len(newlist) != len(oldlist):
+                    self.filepieces[i] = newlist
+                    self.fileamtdone[i] = (
+                        (len(self.filepieces2[i])-len(newlist))
+                         /float(len(self.filepieces2[i])) )
+                    if not newlist:
+                        self.filecomplete[i] = True
+                    self.filelistupdated.set()
+
+            self.piecescomplete = self.picker.numgot
+
+        # Once a file is complete, check whether all its pieces landed in
+        # their final places; if so, mark it read-only on disk.
+        if ( self.filelistupdated.isSet()
+                 or self.placesopen != len(self.storage.places) ):
+            for i in xrange(len(self.filecomplete)):
+                if not self.filecomplete[i] or self.fileinplace[i]:
+                    continue
+                while self.filepieces2[i]:
+                    piece = self.filepieces2[i][-1]
+                    if self.storage.places[piece] != piece:
+                        break
+                    del self.filepieces2[i][-1]
+                if not self.filepieces2[i]:
+                    self.fileinplace[i] = True
+                    self.storage.set_file_readonly(i)
+                    self.filelistupdated.set()
+
+            self.placesopen = len(self.storage.places)
+
+        s.fileamtdone = self.fileamtdone
+        s.filecomplete = self.filecomplete
+        s.fileinplace = self.fileinplace
+        s.filelistupdated = self.filelistupdated
+
+        return s
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Storage.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Storage.py
new file mode 100644 (file)
index 0000000..7a0f971
--- /dev/null
@@ -0,0 +1,597 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BaseLib.Core.BitTornado.piecebuffer import BufferPool
+from threading import Lock
+from time import strftime, localtime
+import os
+from os.path import exists, getsize, getmtime as getmtime_, basename
+from traceback import print_exc
+try:
+    from os import fsync
+except ImportError:
+    fsync = lambda x: None
+from bisect import bisect
+import sys
+    
+# Compatibility shim: define True/False as ints on ancient Pythons (< 2.2.1)
+# that lack the builtin bool constants.
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+# Maximum bytes read from disk per read() iteration.
+#MAXREADSIZE = 32768
+MAXREADSIZE = 2 ** 16 # Arno: speed opt
+# msvcrt.locking() works on byte ranges; lock in 1GB chunks ...
+MAXLOCKSIZE = 1000000000L
+MAXLOCKRANGE = 3999999999L   # only lock first 4 gig of file
+
+# Shared buffer pool; PieceBuffer() hands out reusable piece buffers.
+_pool = BufferPool()
+PieceBuffer = _pool.new
+
+def getmtime(path):
+    # On some OS's, getmtime returns a float
+    # Truncate to int so stored mtimes compare consistently across platforms
+    # (callers compare with a +/-1 second tolerance; see Storage._open).
+    return int(getmtime_(path))
+
+def dummy_status(fractionDone = None, activity = None):
+    # No-op status callback used when the caller supplies none.
+    pass
+
+class Storage:
+    # Maps the torrent's linear byte space onto the underlying files on disk,
+    # managing file handles (with an optional LRU cap), OS-level file locks,
+    # per-file enable/disable with partial-piece buffer files, and
+    # pickle/unpickle of resume state.
+    def __init__(self, files, piece_length, doneflag, config, 
+                 disabled_files = None):
+        # can raise IOError and ValueError
+        # files: list of (path, length) pairs in torrent order.
+        self.files = files
+        self.piece_length = piece_length
+        self.doneflag = doneflag
+        self.disabled = [False] * len(files)
+        self.file_ranges = []       # per file: (begin, end, offset, path) or None for empty files
+        self.disabled_ranges = []   # per file: lazy cache filled by _get_disabled_ranges()
+        self.working_ranges = []
+        numfiles = 0
+        total = 0L
+        self.so_far = 0L            # bytes already present on disk at startup
+        self.handles = {}           # path -> open file object
+        self.whandles = {}          # path -> 1 for handles opened writable
+        self.tops = {}              # path -> known on-disk size
+        self.sizes = {}             # path -> target size
+        self.mtimes = {}            # path -> mtime at last close (tamper check)
+        if config.get('lock_files', True):
+            self.lock_file, self.unlock_file = self._lock_file, self._unlock_file
+        else:
+            self.lock_file, self.unlock_file = lambda x1, x2: None, lambda x1, x2: None
+        self.lock_while_reading = config.get('lock_while_reading', False)
+        self.lock = Lock()
+
+        if not disabled_files:
+            disabled_files = [False] * len(files)
+
+        for i in xrange(len(files)):
+            # NOTE(review): 'file' and (below) 'range' shadow builtins;
+            # kept for historical consistency with the rest of the module.
+            file, length = files[i]
+            if doneflag.isSet():    # bail out if doneflag is set
+                return
+            self.disabled_ranges.append(None)
+            if length == 0:
+                self.file_ranges.append(None)
+                self.working_ranges.append([])
+            else:
+                range = (total, total + length, 0, file)
+                self.file_ranges.append(range)
+                self.working_ranges.append([range])
+                numfiles += 1
+                total += length
+                if disabled_files[i]:
+                    l = 0
+                else:
+                    if exists(file):
+                        l = getsize(file)
+                        if l > length:
+                            # existing file is too long: truncate to spec
+                            h = open(file, 'rb+')
+                            h.truncate(length)
+                            h.flush()
+                            h.close()
+                            l = length
+                    else:
+                        # create an empty placeholder file
+                        l = 0
+                        h = open(file, 'wb+')
+                        h.flush()
+                        h.close()
+                    self.mtimes[file] = getmtime(file)
+                self.tops[file] = l
+                self.sizes[file] = length
+                self.so_far += l
+
+        self.total_length = total
+        self._reset_ranges()
+
+        # Cap on simultaneously open handles; 0 or fewer files than the cap
+        # disables the LRU buffer entirely (handlebuffer is None).
+        self.max_files_open = config['max_files_open']
+        if self.max_files_open > 0 and numfiles > self.max_files_open:
+            self.handlebuffer = []
+        else:
+            self.handlebuffer = None
+
+
+    # Platform-specific whole-file locking, selected at class definition
+    # time: msvcrt range locks on Windows, flock on POSIX, no-ops elsewhere.
+    if os.name == 'nt':
+        def _lock_file(self, name, f):
+            import msvcrt
+            for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE):
+                f.seek(p)
+                msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, 
+                               min(MAXLOCKSIZE, self.sizes[name]-p))
+
+        def _unlock_file(self, name, f):
+            import msvcrt
+            for p in range(0, min(self.sizes[name], MAXLOCKRANGE), MAXLOCKSIZE):
+                f.seek(p)
+                msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, 
+                               min(MAXLOCKSIZE, self.sizes[name]-p))
+
+    elif os.name == 'posix':
+        def _lock_file(self, name, f):
+            import fcntl
+            fcntl.flock(f.fileno(), fcntl.LOCK_EX)
+
+        def _unlock_file(self, name, f):
+            import fcntl
+            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
+
+    else:
+        def _lock_file(self, name, f):
+            pass
+        def _unlock_file(self, name, f):
+            pass
+
+    # Arno, 2010-04-16: STBSPEED
+    def get_length_initial_content(self):
+        # Bytes found on disk when this Storage was constructed.
+        return self.so_far
+
+    def was_preallocated(self, pos, length):
+        # True iff every file covering [pos, pos+length) already extends far
+        # enough on disk.
+        for file, begin, end in self._intervals(pos, length):
+            if self.tops.get(file, 0) < end:
+                return False
+        return True
+
+
+    def _sync(self, file):
+        # Close (which flushes) and drop the handle from the LRU buffer.
+        self._close(file)
+        if self.handlebuffer:
+            self.handlebuffer.remove(file)
+
+    def sync(self):
+        # may raise IOError or OSError
+        for file in self.whandles.keys():
+            self._sync(file)
+
+
+    def set_readonly(self, f=None):
+        # Flush/close writable handle(s): all of them if f is None, else the
+        # handle for file index f.
+        if f is None:
+            self.sync()
+            return
+        file = self.files[f][0]
+        if self.whandles.has_key(file):
+            self._sync(file)
+            
+
+    def get_total_length(self):
+        return self.total_length
+
+
+    def _open(self, file, mode):
+        # Open with a tamper check: if we closed this file earlier, its size
+        # and mtime must still match (mtime within +/-1 s, for FAT-style
+        # timestamp granularity); otherwise assume outside modification.
+        if self.mtimes.has_key(file):
+            try:
+                if self.handlebuffer is not None:
+                    assert getsize(file) == self.tops[file]
+                    newmtime = getmtime(file)
+                    oldmtime = self.mtimes[file]
+                    assert newmtime <= oldmtime+1
+                    assert newmtime >= oldmtime-1
+            except:
+                if DEBUG:
+                    print( file+' modified: '
+                           +strftime('(%x %X)', localtime(self.mtimes[file]))
+                           +strftime(' != (%x %X) ?', localtime(getmtime(file))) )
+                raise IOError('modified during download')
+        try:
+            return open(file, mode)
+        except:
+            if DEBUG:
+                print_exc()
+            raise
+
+
+    def _close(self, file):
+        # Close the handle; for writable handles, flush, unlock and record
+        # the resulting size/mtime for the next _open() tamper check.
+        f = self.handles[file]
+        del self.handles[file]
+        if self.whandles.has_key(file):
+            del self.whandles[file]
+            f.flush()
+            self.unlock_file(file, f)
+            f.close()
+            self.tops[file] = getsize(file)
+            self.mtimes[file] = getmtime(file)
+        else:
+            if self.lock_while_reading:
+                self.unlock_file(file, f)
+            f.close()
+
+
+    def _close_file(self, file):
+        if not self.handles.has_key(file):
+            return
+        self._close(file)
+        if self.handlebuffer:
+            self.handlebuffer.remove(file)
+        
+
+    def _get_file_handle(self, file, for_write):
+        # Return an open handle, reopening read-only handles as writable when
+        # needed, and maintain handlebuffer as an LRU list (most recently
+        # used at the end) bounded by max_files_open.
+        if self.handles.has_key(file):
+            if for_write and not self.whandles.has_key(file):
+                self._close(file)
+                try:
+                    f = self._open(file, 'rb+')
+                    self.handles[file] = f
+                    self.whandles[file] = 1
+                    self.lock_file(file, f)
+                except (IOError, OSError), e:
+                    if DEBUG:
+                        print_exc()
+                    raise IOError('unable to reopen '+file+': '+str(e))
+
+            if self.handlebuffer:
+                if self.handlebuffer[-1] != file:
+                    # move to most-recently-used position
+                    self.handlebuffer.remove(file)
+                    self.handlebuffer.append(file)
+            elif self.handlebuffer is not None:
+                self.handlebuffer.append(file)
+        else:
+            try:
+                if for_write:
+                    f = self._open(file, 'rb+')
+                    self.handles[file] = f
+                    self.whandles[file] = 1
+                    self.lock_file(file, f)
+                else:
+                    f = self._open(file, 'rb')
+                    self.handles[file] = f
+                    if self.lock_while_reading:
+                        self.lock_file(file, f)
+            except (IOError, OSError), e:
+                if DEBUG:
+                    print_exc()
+                raise IOError('unable to open '+file+': '+str(e))
+            
+            if self.handlebuffer is not None:
+                self.handlebuffer.append(file)
+                if len(self.handlebuffer) > self.max_files_open:
+                    # evict the least recently used handle
+                    self._close(self.handlebuffer.pop(0))
+
+        return self.handles[file]
+
+
+    def _reset_ranges(self):
+        # Rebuild the flat ranges list and the sorted list of range starts
+        # used by _intervals()'s bisect.
+        self.ranges = []
+        for l in self.working_ranges:
+            self.ranges.extend(l)
+            # NOTE(review): begins is rebuilt on every loop iteration and is
+            # never set at all when working_ranges is empty -- _intervals()
+            # would then raise AttributeError.
+            self.begins = [i[0] for i in self.ranges]
+
+    def _intervals(self, pos, amount):
+        # Translate torrent byte span [pos, pos+amount) into a list of
+        # (path, file_start, file_end) tuples.
+        r = []
+        stop = pos + amount
+        p = bisect(self.begins, pos) - 1
+        while p < len(self.ranges):
+            begin, end, offset, file = self.ranges[p]
+            if begin >= stop:
+                break
+            r.append(( file, 
+                       offset + max(pos, begin) - begin, 
+                       offset + min(end, stop) - begin ))
+            p += 1
+        return r
+
+
+    def read(self, pos, amount, flush_first = False):
+        # Read a torrent byte span into a PieceBuffer, chunked by MAXREADSIZE.
+        r = PieceBuffer()
+        for file, pos, end in self._intervals(pos, amount):
+            if DEBUG:
+                print >>sys.stderr,'reading '+file+' from '+str(pos)+' to '+str(end)+' amount '+str(amount)
+            try:
+                self.lock.acquire()
+                h = self._get_file_handle(file, False)
+                if flush_first and self.whandles.has_key(file):
+                    h.flush()
+                    fsync(h)
+                h.seek(pos)
+                while pos < end:
+                    length = min(end-pos, MAXREADSIZE)
+                    data = h.read(length)
+                    if len(data) != length:
+                        raise IOError('error reading data from '+ file)
+                    r.append(data)
+                    pos += length
+                self.lock.release()
+            except:
+                # NOTE(review): masks the original exception with a generic
+                # IOError, and if acquire() itself failed this release() will
+                # raise; a try/finally would be safer.
+                self.lock.release()
+                raise IOError('error reading data from '+ file)
+        return r
+
+    def write(self, pos, s):
+        # might raise an IOError
+        # NOTE(review): no try/finally around the lock -- an exception from
+        # _get_file_handle/seek/write leaves self.lock held (read() at least
+        # releases it).  Same pattern in top_off() and flush() below.
+        total = 0
+        for file, begin, end in self._intervals(pos, len(s)):
+            if DEBUG:
+                print 'writing '+file+' from '+str(pos)+' to '+str(end)
+            self.lock.acquire()
+            h = self._get_file_handle(file, True)
+            h.seek(begin)
+            h.write(s[total: total + end - begin])
+            self.lock.release()
+            total += end - begin
+
+    def top_off(self):
+        # Extend each file to its full length by writing one byte at the end
+        # (forces allocation without writing the whole file).
+        for begin, end, offset, file in self.ranges:
+            l = offset + end - begin
+            if l > self.tops.get(file, 0):
+                self.lock.acquire()
+                h = self._get_file_handle(file, True)
+                h.seek(l-1)
+                h.write(chr(0xFF))
+                self.lock.release()
+
+    def flush(self):
+        # may raise IOError or OSError
+        for file in self.whandles.keys():
+            self.lock.acquire()
+            self.handles[file].flush()
+            self.lock.release()
+
+    def close(self):
+        # Best-effort unlock and close of every open handle; errors ignored.
+        for file, f in self.handles.items():
+            try:
+                self.unlock_file(file, f)
+            except:
+                pass
+            try:
+                f.close()
+            except:
+                pass
+        self.handles = {}
+        self.whandles = {}
+        self.handlebuffer = None
+
+
+    def _get_disabled_ranges(self, f):
+        # Compute (and cache) how file index f is covered while disabled:
+        # returns (working_range, update_pieces, disabled_files), where the
+        # partial pieces at the file's edges are redirected into buffer files
+        # under self.bufferdir (set via set_bufferdir() beforehand).
+        if not self.file_ranges[f]:
+            return ((), (), ())
+        r = self.disabled_ranges[f]
+        if r:
+            return r
+        start, end, offset, file = self.file_ranges[f]
+        if DEBUG:
+            print 'calculating disabled range for '+self.files[f][0]
+            print 'bytes: '+str(start)+'-'+str(end)
+            print 'file spans pieces '+str(int(start/self.piece_length))+'-'+str(int((end-1)/self.piece_length)+1)
+        pieces = range(int(start/self.piece_length), 
+                        int((end-1)/self.piece_length)+1)
+        offset = 0
+        disabled_files = []
+        if len(pieces) == 1:
+            if ( start % self.piece_length == 0
+                 and end % self.piece_length == 0 ):   # happens to be a single,
+                                                       # perfect piece
+                working_range = [(start, end, offset, file)]
+                update_pieces = []
+            else:
+                # whole file fits inside one piece: buffer it entirely
+                midfile = os.path.join(self.bufferdir, str(f))
+                working_range = [(start, end, 0, midfile)]
+                disabled_files.append((midfile, start, end))
+                length = end - start
+                self.sizes[midfile] = length
+                piece = pieces[0]
+                update_pieces = [(piece, start-(piece*self.piece_length), length)]
+        else:
+            update_pieces = []
+            if start % self.piece_length != 0:  # doesn't begin on an even piece boundary
+                end_b = pieces[1]*self.piece_length
+                startfile = os.path.join(self.bufferdir, str(f)+'b')
+                working_range_b = [ ( start, end_b, 0, startfile ) ]
+                disabled_files.append((startfile, start, end_b))
+                length = end_b - start
+                self.sizes[startfile] = length
+                offset = length
+                piece = pieces.pop(0)
+                update_pieces.append((piece, start-(piece*self.piece_length), length))
+            else:
+                working_range_b = []
+            if f  != len(self.files)-1 and end % self.piece_length != 0:
+                                                # doesn't end on an even piece boundary
+                start_e = pieces[-1] * self.piece_length
+                endfile = os.path.join(self.bufferdir, str(f)+'e')
+                working_range_e = [ ( start_e, end, 0, endfile ) ]
+                disabled_files.append((endfile, start_e, end))
+                length = end - start_e
+                self.sizes[endfile] = length
+                piece = pieces.pop(-1)
+                update_pieces.append((piece, 0, length))
+            else:
+                working_range_e = []
+            if pieces:
+                # the fully-aligned middle pieces stay in the real file
+                working_range_m = [ ( pieces[0]*self.piece_length,
+                                      (pieces[-1]+1)*self.piece_length,
+                                      offset, file ) ]
+            else:
+                working_range_m = []
+            working_range = working_range_b + working_range_m + working_range_e
+
+        if DEBUG:            
+            print str(working_range)
+            print str(update_pieces)
+        r = (tuple(working_range), tuple(update_pieces), tuple(disabled_files))
+        self.disabled_ranges[f] = r
+        return r
+        
+
+    def set_bufferdir(self, dir):
+        # Directory for partial-piece buffer files of disabled files.
+        self.bufferdir = dir
+
+    def enable_file(self, f):
+        # Re-enable file index f: recreate it if missing and restore its
+        # normal working range.
+        if not self.disabled[f]:
+            return
+        self.disabled[f] = False
+        r = self.file_ranges[f]
+        if not r:
+            return
+        file = r[3]
+        if not exists(file):
+            h = open(file, 'wb+')
+            h.flush()
+            h.close()
+        if not self.tops.has_key(file):
+            self.tops[file] = getsize(file)
+        if not self.mtimes.has_key(file):
+            self.mtimes[file] = getmtime(file)
+        self.working_ranges[f] = [r]
+
+    def disable_file(self, f):
+        # Disable file index f: route its edge pieces into buffer files
+        # (created on demand under bufferdir).
+        if self.disabled[f]:
+            return
+        self.disabled[f] = True
+        r = self._get_disabled_ranges(f)
+        if not r:
+            return
+        for file, begin, end in r[2]:
+            if not os.path.isdir(self.bufferdir):
+                os.makedirs(self.bufferdir)
+            if not exists(file):
+                h = open(file, 'wb+')
+                h.flush()
+                h.close()
+            if not self.tops.has_key(file):
+                self.tops[file] = getsize(file)
+            if not self.mtimes.has_key(file):
+                self.mtimes[file] = getmtime(file)
+        self.working_ranges[f] = r[0]
+
+    reset_file_status = _reset_ranges
+
+
+    def get_piece_update_list(self, f):
+        return self._get_disabled_ranges(f)[1]
+
+
+    def delete_file(self, f):
+        # Best-effort removal of file index f from disk.
+        try:
+            os.remove(self.files[f][0])
+        except:
+            pass
+
+
+    '''
+    Pickled data format:
+
+    d['files'] = [ file #, size, mtime {, file #, size, mtime...} ]
+                    file # in torrent, and the size and last modification
+                    time for those files.  Missing files are either empty
+                    or disabled.
+    d['partial files'] = [ name, size, mtime... ]
+                    Names, sizes and last modification times of files containing
+                    partial piece data.  Filenames go by the following convention:
+                    {file #, 0-based}{nothing, "b" or "e"}
+                    eg: "0e" "3" "4b" "4e"
+                    Where "b" specifies the partial data for the first piece in
+                    the file, "e" the last piece, and no letter signifying that
+                    the file is disabled but is smaller than one piece, and that
+                    all the data is cached inside so adjacent files may be
+                    verified.
+    '''
+    def pickle(self):
+        # Serialize resume state per the format documented above.
+        files = []
+        pfiles = []
+        for i in xrange(len(self.files)):
+            if not self.files[i][1]:    # length == 0
+                continue
+            if self.disabled[i]:
+                for file, start, end in self._get_disabled_ranges(i)[2]:
+                    pfiles.extend([basename(file), getsize(file), getmtime(file)])
+                continue
+            file = self.files[i][0]
+            files.extend([i, getsize(file), getmtime(file)])
+        return {'files': files, 'partial files': pfiles}
+
+
+    def unpickle(self, data):
+        # assume all previously-disabled files have already been disabled
+        # Validate pickled resume state against the files currently on disk
+        # and return the list of piece indices still believed valid (empty
+        # list on any parse/validation error).
+        try:
+            files = {}
+            pfiles = {}
+            l = data['files']
+            assert len(l) % 3 == 0
+            l = [l[x:x+3] for x in xrange(0, len(l), 3)]
+            for f, size, mtime in l:
+                files[f] = (size, mtime)
+            l = data.get('partial files', [])
+            assert len(l) % 3 == 0
+            l = [l[x:x+3] for x in xrange(0, len(l), 3)]
+            for file, size, mtime in l:
+                pfiles[file] = (size, mtime)
+
+            # start by assuming every piece of every enabled file is valid
+            valid_pieces = {}
+            for i in xrange(len(self.files)):
+                if self.disabled[i]:
+                    continue
+                r = self.file_ranges[i]
+                if not r:
+                    continue
+                start, end, offset, file = r
+                if DEBUG:
+                    print 'adding '+file
+                for p in xrange( int(start/self.piece_length),
+                                 int((end-1)/self.piece_length)+1 ):
+                    valid_pieces[p] = 1
+
+            if DEBUG:
+                print valid_pieces.keys()
+            
+            def test(old, size, mtime):
+                # size must match exactly; mtime within +/-1 s tolerance
+                oldsize, oldmtime = old
+                if size != oldsize:
+                    return False
+                if mtime > oldmtime+1:
+                    return False
+                if mtime < oldmtime-1:
+                    return False
+                return True
+
+            # knock out pieces whose backing (or buffer) files changed
+            for i in xrange(len(self.files)):
+                if self.disabled[i]:
+                    for file, start, end in self._get_disabled_ranges(i)[2]:
+                        f1 = basename(file)
+                        if ( not pfiles.has_key(f1)
+                             or not test(pfiles[f1],getsize(file),getmtime(file)) ):
+                            if DEBUG:
+                                print 'removing '+file
+                            for p in xrange( int(start/self.piece_length),
+                                             int((end-1)/self.piece_length)+1 ):
+                                if valid_pieces.has_key(p):
+                                    del valid_pieces[p]
+                    continue
+                file, size = self.files[i]
+                if not size:
+                    continue
+                if ( not files.has_key(i)
+                     or not test(files[i], getsize(file), getmtime(file)) ):
+                    start, end, offset, file = self.file_ranges[i]
+                    if DEBUG:
+                        print 'removing '+file
+                    for p in xrange( int(start/self.piece_length),
+                                     int((end-1)/self.piece_length)+1 ):
+                        if valid_pieces.has_key(p):
+                            del valid_pieces[p]
+        except:
+            if DEBUG:
+                print_exc()
+            return []
+
+        if DEBUG:
+            print valid_pieces.keys()                        
+        return valid_pieces.keys()
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/StorageWrapper.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/StorageWrapper.py
new file mode 100644 (file)
index 0000000..e33f746
--- /dev/null
@@ -0,0 +1,1357 @@
+# Written by Bram Cohen, Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+from traceback import print_exc
+from random import randrange
+from copy import deepcopy
+import pickle
+import traceback
+import time
+
+from BaseLib.Core.Merkle.merkle import MerkleTree
+from BaseLib.Core.Utilities.Crypto import sha
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+from BaseLib.Core.BitTornado.clock import clock
+from BaseLib.Core.BitTornado.bencode import bencode
+
+# Compatibility shim: define True/False on very old Pythons (pre-2.2.1)
+# that do not have them as builtins.
+try:
+    True
+except:
+    True = 1
+    False = 0
+from bisect import insort
+
+DEBUG = False
+
+# Seconds between progress-callback updates during the blocking init path.
+STATS_INTERVAL = 0.2
+RARE_RAWSERVER_TASKID = -481  # This must be a rawserver task ID that is never valid.
+
+
+def dummy_status(fractionDone = None, activity = None):
+    # No-op progress callback, used when the caller supplies no statusfunc.
+    pass
+
+class Olist:
+    # Ordered set of integers backed by a dict (pre-dates the 'set' builtin).
+    # Supports membership test, add/extend/remove and popping the minimum,
+    # maximum or n-th smallest element.
+    def __init__(self, l = []):
+        # NOTE(review): mutable default argument is safe here - 'l' is only
+        # iterated, never stored or mutated.
+        self.d = {}
+        for i in l:
+            self.d[i] = 1
+    def __len__(self):
+        return len(self.d)
+    def includes(self, i):
+        # Membership test.
+        return self.d.has_key(i)
+    def add(self, i):
+        self.d[i] = 1
+    def extend(self, l):
+        for i in l:
+            self.d[i] = 1
+    def pop(self, n=0):
+        # Pop and return the n-th smallest element; n=0 -> minimum,
+        # n=-1 -> maximum.  Assumes the set is non-empty.
+        # assert self.d
+        k = self.d.keys()
+        if n == 0:
+            i = min(k)
+        elif n == -1:
+            i = max(k)
+        else:
+            k.sort()
+            i = k[n]
+        del self.d[i]
+        return i
+    def remove(self, i):
+        # Remove i if present; silently ignores a missing element.
+        if self.d.has_key(i):
+            del self.d[i]
+
+class fakeflag:
+    # Minimal stand-in for threading.Event with a fixed state: wait() never
+    # blocks and isSet() always returns the constructor-supplied state.
+    def __init__(self, state=False):
+        self.state = state
+    def wait(self):
+        pass
+    def isSet(self):
+        return self.state
+
+
+class StorageWrapper:
+    def __init__(self, videoinfo, storage, request_size, hashes, 
+            piece_size, root_hash, finished, failed, 
+            statusfunc = dummy_status, flag = fakeflag(), check_hashes = True, 
+            data_flunked = lambda x: None, 
+            piece_from_live_source_func = lambda i,d: None, 
+            backfunc = None, 
+            config = {}, unpauseflag = fakeflag(True)):
+        # Wraps 'storage' with piece-level bookkeeping: hash checking,
+        # piece placement (self.places maps piece index -> on-disk position),
+        # request tracking, and an in-memory write buffer.
+        # 'finished'/'failed' are completion/error callbacks; 'backfunc'
+        # schedules callbacks (presumably on the rawserver thread - see
+        # initialize() below).  'flag' aborts work when set; 'unpauseflag'
+        # pauses it when cleared.
+        
+        if DEBUG: 
+            print >>sys.stderr, "StorageWrapper: __init__: wrapped around", storage.files
+        self.videoinfo = videoinfo
+        self.storage = storage
+        self.request_size = long(request_size)
+        self.hashes = hashes
+        self.piece_size = long(piece_size)
+        self.piece_length = long(piece_size)
+        self.finished = finished
+        self.report_failure = failed
+        self.statusfunc = statusfunc
+        self.flag = flag
+        self.check_hashes = check_hashes
+        self.data_flunked = data_flunked
+        self.piece_from_live_source_func = piece_from_live_source_func
+        self.backfunc = backfunc
+        self.config = config
+        self.unpauseflag = unpauseflag
+
+        self.live_streaming = self.videoinfo['live']
+        
+        self.alloc_type = config.get('alloc_type', 'normal')
+        self.double_check = config.get('double_check', 0)
+        self.triple_check = config.get('triple_check', 0)
+        if self.triple_check:
+            # Triple check implies double check.
+            self.double_check = True
+        self.bgalloc_enabled = False
+        self.bgalloc_active = False
+        self.total_length = storage.get_total_length()
+        self.amount_left = self.total_length
+        # Sanity: total length must fit the advertised number of pieces.
+        if self.total_length <= self.piece_size * (len(hashes) - 1):
+            raise ValueError, 'bad data in responsefile - total too small'
+        if self.total_length > self.piece_size * len(hashes):
+            raise ValueError, 'bad data in responsefile - total too big'
+        self.numactive = [0] * len(hashes)
+        # Per piece: 1 = not yet split into block requests, None = complete,
+        # list = outstanding (begin, length) block requests.
+        self.inactive_requests = [1] * len(hashes)
+        self.amount_inactive = self.total_length
+        self.amount_obtained = 0
+        self.amount_desired = self.total_length
+        self.have = Bitfield(len(hashes))
+        self.have_cloaked_data = None
+        self.blocked = [False] * len(hashes)
+        self.blocked_holes = []
+        self.blocked_movein = Olist()
+        self.blocked_moveout = Olist()
+        self.waschecked = [False] * len(hashes)
+        self.places = {} # Arno, maps piece to actual position in files 
+        self.holes = []
+        self.stat_active = {}
+        self.stat_new = {}
+        self.dirty = {}
+        self.stat_numflunked = 0
+        self.stat_numdownloaded = 0
+        self.stat_numfound = 0
+        self.download_history = {}
+        self.failed_pieces = {}
+        self.out_of_place = 0
+        # write_buffer_size config value is in MiB.
+        self.write_buf_max = config['write_buffer_size']*1048576L
+        self.write_buf_size = 0L
+        self.write_buf = {}   # structure:  piece: [(start, data), ...]
+        self.write_buf_list = []
+        # Arno, 2010-04-23: STBSPEED: the piece that were correct on disk at start
+        self.pieces_on_disk_at_startup = []
+
+        # Merkle:
+        self.merkle_torrent = (root_hash is not None)
+        self.root_hash = root_hash
+        # STBSPEED: no hashchecking for live, so no need for this expensive op.
+        if self.live_streaming:
+            self.initial_hashes = None
+        else:
+            self.initial_hashes = deepcopy(self.hashes)
+        
+        if self.merkle_torrent:
+            self.hashes_unpickled = False
+            # Must see if we're initial seeder
+            self.check_hashes = True
+            # Fallback for if we're not an initial seeder or don't have a 
+            # Merkle tree on disk.
+            self.merkletree = MerkleTree(self.piece_size,self.total_length,self.root_hash,None)
+        else:
+            # Normal BT
+            self.hashes_unpickled = True
+
+        # Ordered init phases: (status message, fractionDone, init func,
+        # per-step func returning progress or None when done).
+        self.initialize_tasks = [
+            ['checking existing data', 0, self.init_hashcheck, self.hashcheckfunc], 
+            ['moving data', 1, self.init_movedata, self.movedatafunc], 
+            ['allocating disk space', 1, self.init_alloc, self.allocfunc] ]
+        self.initialize_done = None
+
+        # Arno: move starting of periodic _bgalloc to init_alloc
+        self.backfunc(self._bgsync, max(self.config['auto_flush']*60, 60))
+
+
+    def _bgsync(self):
+        # Periodic background flush of the write buffer; reschedules itself
+        # at least every 60 seconds (config['auto_flush'] is in minutes;
+        # 0 disables the flush but keeps the timer alive).
+        if self.config['auto_flush']:
+            self.sync()
+        self.backfunc(self._bgsync, max(self.config['auto_flush']*60, 60))
+
+
+    def old_style_init(self):
+        # Synchronous (blocking) initialization: run each task from
+        # self.initialize_tasks to completion, reporting progress via
+        # statusfunc at most every STATS_INTERVAL seconds.
+        # Returns False if aborted via self.flag, True when all tasks ran.
+        while self.initialize_tasks:
+            msg, done, init, next = self.initialize_tasks.pop(0)
+            if init():
+                self.statusfunc(activity = msg, fractionDone = done)
+                t = clock() + STATS_INTERVAL
+                x = 0
+                # next() returns a progress fraction, or None when the task
+                # is finished.
+                while x is not None:
+                    if t < clock():
+                        t = clock() + STATS_INTERVAL
+                        self.statusfunc(fractionDone = x)
+                    self.unpauseflag.wait()
+                    if self.flag.isSet():
+                        return False
+                    x = next()
+
+        self.statusfunc(fractionDone = 0)
+        return True
+
+
+    def initialize(self, donefunc, statusfunc = None):
+        # Asynchronous initialization: schedules _initialize on the
+        # rawserver via backfunc.  'donefunc' is called (with success=...)
+        # when all init tasks have completed.
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: initialize: enter, backfunc is",self.backfunc
+        
+        self.initialize_done = donefunc
+        if statusfunc is None:
+            statusfunc = self.statusfunc
+        self.initialize_status = statusfunc
+        self.initialize_next = None
+            
+        """
+        Arno: 2007-01-02:
+        This next line used to read:
+            self.backfunc(self._initialize)
+        So without the task ID. I've changed this to accomodate the
+        following situation. In video-on-demand, it may occur that
+        a torrent is stopped and then immediately after it is
+        restarted. In particular, we use this when a user selects
+        a torrent from the mainwin to be played (again). Because the
+        torrent does not necessarily use a VOD-piecepicker we have
+        to stop the current DL process and start a new one. 
+        
+        When stopping and starting a torrent quickly a problem occurs.
+        When a torrent is stopped, its infohash is registered in kill list 
+        of the (real) RawServer class. The next time the rawserver looks 
+        for tasks to execute it will first check the kill list. If it's not
+        empty it will remove all tasks that have the given infohash as taskID.
+        This mechanism ensures that when a torrent is stopped, any outstanding
+        tasks belonging to the torrent are removed from the rawserver task queue.
+        
+        It can occur that we've stopped the torrent and the
+        infohash is on the kill list, but the queue has not yet been cleared of
+        old entries because the thread that runs the rawserver did not get to
+        executing new tasks yet. This causes a problem right here, because
+        we now want to schedule a new task on behalf of the new download process.
+        If it is enqueued now, it will be removed the next time the rawserver 
+        checks its task list and because the infohash is on the kill list be
+        deleted.
+        
+        My fix is to schedule this first task of the new torrent under a 
+        different task ID. Hence, when the rawserver checks its queue it
+        will not delete it, thinking it belonged to the old download
+        process. The really clean solution is to stop using infohash as
+        taskid, and use a unique ID for a download process. This will
+        take a bit of work to ensure it works correctly, so in the mean
+        time we'll use this fix.
+        """
+        
+        # Arno, STBSPEED: potentially we can just call 
+        #       self.initialize_done(success=True)
+        # here for live, but we need to check if all variables are set correctly
+        # (also if different disk allocation policies are used, etc.
+        # 
+        self.backfunc(self._initialize, id = RARE_RAWSERVER_TASKID)
+
+    def _initialize(self):
+        # One step of the asynchronous init state machine.  Either advances
+        # the current task (self.initialize_next), or starts the next task
+        # from self.initialize_tasks, then reschedules itself.  Calls
+        # self.initialize_done(success=True) when everything is finished.
+        
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: _initialize: enter"
+        if not self.unpauseflag.isSet():
+            # Paused: poll again in 1 second.
+            self.backfunc(self._initialize, 1)
+            return
+
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: _initialize: next is",self.initialize_next
+
+        if self.initialize_next:
+            # Step function returns a progress fraction, None when done.
+            x = self.initialize_next()
+            if x is None:
+                self.initialize_next = None
+            else:
+                self.initialize_status(fractionDone = x)
+        else:
+            if not self.initialize_tasks:
+                self.initialize_done(success=True)
+                self.initialize_done = None
+                return
+            msg, done, init, next = self.initialize_tasks.pop(0)
+            if DEBUG:
+                print >>sys.stderr,"StorageWrapper: _initialize performing task",msg
+            if DEBUG:
+                st = time.time()
+            # init() returning False means the task has nothing to do and
+            # its step function is skipped entirely.
+            if init():
+                self.initialize_status(activity = msg, fractionDone = done)
+                self.initialize_next = next
+            if DEBUG:
+                et = time.time()
+                diff = et - st
+                print >>sys.stderr,"StorageWrapper: _initialize: task took",diff
+
+        self.backfunc(self._initialize)
+
+
+    def init_hashcheck(self):
+        # Prepare the hash-check phase: build self.check_list (pieces with
+        # data on disk to verify), self.check_targets (hash -> candidate
+        # piece indices for out-of-place data) and self.holes (unallocated
+        # pieces).  Returns True iff hashcheckfunc has work to do.
+
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: init_hashcheck: #hashes",len(self.hashes),"amountleft",self.amount_left
+
+        
+        if self.live_streaming:
+            # STBSPEED by Milton
+            self.set_nohashcheck()
+            return False
+
+        # Non-live streaming
+        if self.flag.isSet():
+            if DEBUG:
+                print >>sys.stderr,"StorageWrapper: init_hashcheck: FLAG IS SET"
+            return False
+        self.check_list = []
+        if not self.hashes or self.amount_left == 0:
+            self.check_total = 0
+            self.finished()
+            if DEBUG:
+                print >>sys.stderr,"StorageWrapper: init_hashcheck: Download finished"
+            return False
+
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: init_hashcheck: self.places",`self.places`
+
+        self.check_targets = {}
+        # 'got' is the set of on-disk positions already claimed by a piece
+        # (inverse of self.places); positions must be unique.
+        got = {}
+        for p, v in self.places.iteritems():
+            assert not got.has_key(v)
+            got[v] = 1
+            
+        # Arno, 2010-04-16: STBSPEED: Avoid costly calculations if new VOD
+        if len(self.places) == 0 and self.storage.get_length_initial_content() == 0L:
+            self.set_nohashcheck()
+            return False
+            
+        # STBSPEED: TODO: optimize
+        for i in xrange(len(self.hashes)):
+            # Arno, 2010-04-16: STBSPEED: Only execute if there is persistent 
+            # state (=self.places) on already hashchecked pieces.
+            if len(self.places) > 0:
+                if self.places.has_key(i):  # restored from pickled
+                    self.check_targets[self.hashes[i]] = []
+                    if self.places[i] == i:
+                        continue
+                    else:
+                        assert not got.has_key(i)
+                        self.out_of_place += 1
+                if got.has_key(i):
+                    continue
+                
+            if self._waspre(i):
+                # Arno: If there is data on disk, check it
+                if self.blocked[i]:
+                    self.places[i] = i
+                else:
+                    self.check_list.append(i)
+                continue
+            if not self.check_hashes:
+                # check_hashes=False asserts the data is complete; a hole
+                # contradicts that.  NOTE(review): relies on self.failed,
+                # defined elsewhere in this class.
+                self.failed('file supposed to be complete on start-up, but data is missing')
+                return False
+            self.holes.append(i)
+            if self.blocked[i] or self.check_targets.has_key(self.hashes[i]):
+                self.check_targets[self.hashes[i]] = [] # in case of a hash collision, discard
+            else:
+                self.check_targets[self.hashes[i]] = [i]
+        self.check_total = len(self.check_list)
+        self.check_numchecked = 0.0
+        self.lastlen = self._piecelen(len(self.hashes) - 1)
+        self.numchecked = 0.0
+        if DEBUG:
+            print "StorageWrapper: init_hashcheck: checking",self.check_list
+            print "StorageWrapper: init_hashcheck: return self.check_total > 0 is ",(self.check_total > 0)
+        return self.check_total > 0
+
+
+    def set_nohashcheck(self):
+        # Skip hash checking entirely (live streams or freshly created
+        # content): every piece becomes a hole and the check list is empty.
+        if DEBUG:
+            print "StorageWrapper: init_hashcheck: live or empty files, skipping"
+        self.places = {}
+        self.check_targets = {}
+        self.check_list = []
+        self.check_total = len(self.check_list)
+        self.check_numchecked = 0.0
+        self.lastlen = self._piecelen(len(self.hashes) - 1)
+        self.numchecked = 0.0
+        self.check_targets[self.hashes[0]] = [0]
+        self.holes = range(len(self.hashes))
+        
+        
+    # Arno, 2010-04-20: STBSPEED
+    def get_pieces_on_disk_at_startup(self):
+        """ Returns list of pieces currently on disk that were succesfully
+        hashchecked. If the file was complete on disk, this list is empty.
+        See download_bt1.py::BT1Download::startEngine for how this is dealt with.
+        """
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: get_pieces_on_disk_at_startup: self.places len",len(self.places),"on disk",len(self.pieces_on_disk_at_startup)
+        
+        return self.pieces_on_disk_at_startup
+
+
+    def _markgot(self, piece, pos):
+        # Record that 'piece' was found intact at on-disk position 'pos':
+        # update placement, have-bitfield, byte counters and stats.
+        if DEBUG:
+            print str(piece)+' at '+str(pos)
+        self.places[piece] = pos
+        self.have[piece] = True
+        self.pieces_on_disk_at_startup.append(piece)
+        # NOTE(review): 'len' shadows the builtin within this method.
+        len = self._piecelen(piece)
+        self.amount_obtained += len
+        self.amount_left -= len
+        self.amount_inactive -= len
+        self.inactive_requests[piece] = None
+        self.waschecked[piece] = self.check_hashes
+        self.stat_numfound += 1
+
+    def hashcheckfunc(self):
+        # One step of the hash-check phase: verify a single piece from
+        # self.check_list.  Returns a progress fraction, or None when done
+        # (or aborted).  Pieces whose hash matches a different index are
+        # detected via check_targets and marked as out of place.
+        try:
+            if self.live_streaming:
+                return None
+            if self.flag.isSet():
+                return None
+            if not self.check_list:
+                return None
+
+            i = self.check_list.pop(0)
+            if not self.check_hashes:
+                # Trust the data blindly.
+                self._markgot(i, i)
+            else:
+                # Hash the piece in two reads: the first self.lastlen bytes
+                # (the length of the final piece) give 'sp', used below to
+                # recognize a misplaced copy of the final piece; the full
+                # digest 's' is the normal piece hash.
+                d1 = self.read_raw(i, 0, self.lastlen)
+                if d1 is None:
+                    return None
+                sh = sha(d1[:])
+                d1.release()
+                sp = sh.digest()
+                d2 = self.read_raw(i, self.lastlen, self._piecelen(i)-self.lastlen)
+                if d2 is None:
+                    return None
+                sh.update(d2[:])
+                d2.release()
+                s = sh.digest()
+
+
+                if DEBUG:
+                    if s != self.hashes[i]:
+                        print >>sys.stderr,"StorageWrapper: hashcheckfunc: piece corrupt",i
+
+                # Merkle: If we didn't read the hashes from persistent storage then
+                # we can't check anything. Exception is the case where we are the
+                # initial seeder. In that case we first calculate all hashes, 
+                # and then compute the hash tree. If the root hash equals the
+                # root hash in the .torrent we're a seeder. Otherwise, we are
+                # client with messed up data and no (local) way of checking it.
+                #
+                if not self.hashes_unpickled:
+                    if DEBUG:
+                        print "StorageWrapper: Merkle torrent, saving calculated hash",i
+                    self.initial_hashes[i] = s
+                    self._markgot(i, i)
+                elif s == self.hashes[i]:
+                    self._markgot(i, i)
+                elif (self.check_targets.get(s)
+                       and self._piecelen(i) == self._piecelen(self.check_targets[s][-1])):
+                    # Data at position i belongs to some other piece.
+                    self._markgot(self.check_targets[s].pop(), i)
+                    self.out_of_place += 1
+                elif (not self.have[-1] and sp == self.hashes[-1]
+                       and (i == len(self.hashes) - 1
+                            or not self._waspre(len(self.hashes) - 1))):
+                    # Position i holds a copy of the (shorter) final piece.
+                    self._markgot(len(self.hashes) - 1, i)
+                    self.out_of_place += 1
+                else:
+                    self.places[i] = i
+            self.numchecked += 1
+            if self.amount_left == 0:
+                if not self.hashes_unpickled:
+                    # Merkle: The moment of truth. Are we an initial seeder?
+                    self.merkletree = MerkleTree(self.piece_size,self.total_length,None,self.initial_hashes)
+                    if self.merkletree.compare_root_hashes(self.root_hash):
+                        if DEBUG:
+                            print "StorageWrapper: Merkle torrent, initial seeder!"
+                        self.hashes = self.initial_hashes
+                    else:
+                        # Bad luck
+                        if DEBUG:
+                            print "StorageWrapper: Merkle torrent, NOT a seeder!"
+                        self.failed('download corrupted, hash tree does not compute; please delete and restart')
+                        return 1
+                self.finished()
+                
+            return (self.numchecked / self.check_total)
+
+        except Exception, e:
+            # Any I/O or logic error aborts the check; implicit None return.
+            print_exc()
+            self.failed('download corrupted: '+str(e)+'; please delete and restart')
+    
+
+    def init_movedata(self):
+        # Prepare the data-moving phase (sparse allocation only): build
+        # self.movelist with pieces whose on-disk position differs from
+        # their index.  Returns True iff movedatafunc has work to do.
+        if self.flag.isSet():
+            return False
+        if self.alloc_type != 'sparse':
+            return False
+        self.storage.top_off()  # sets file lengths to their final size
+        self.movelist = []
+        if self.out_of_place == 0:
+            # Everything already in place; claim the holes as-is.
+            for i in self.holes:
+                self.places[i] = i
+            self.holes = []
+            return False
+        self.tomove = float(self.out_of_place)
+        for i in xrange(len(self.hashes)):
+            if not self.places.has_key(i):
+                self.places[i] = i
+            elif self.places[i] != i:
+                self.movelist.append(i)
+        self.holes = []
+        return True
+
+    def movedatafunc(self):
+        # One step of the data-moving phase: relocate a single piece from
+        # self.movelist to its proper position.  Returns a progress fraction,
+        # or None when done/aborted/failed.
+        if self.flag.isSet():
+            return None
+        if not self.movelist:
+            return None
+        i = self.movelist.pop(0)
+        old = self.read_raw(self.places[i], 0, self._piecelen(i))
+        if old is None:
+            return None
+        if not self.write_raw(i, 0, old):
+            return None
+        if self.double_check and self.have[i]:
+            if self.triple_check:
+                # Re-read from disk (after flush) to verify the write itself.
+                old.release()
+                old = self.read_raw(i, 0, self._piecelen(i), 
+                                            flush_first = True)
+                if old is None:
+                    return None
+            if sha(old[:]).digest() != self.hashes[i]:
+                self.failed('download corrupted, piece on disk failed triple check; please delete and restart')
+                return None
+        old.release()
+
+        self.places[i] = i
+        self.tomove -= 1
+        return (self.tomove / self.out_of_place)
+
+
+    def init_alloc(self):
+        # Prepare the disk-allocation phase.  'pre-allocate' fills holes in
+        # the foreground (returns True so allocfunc is stepped); 'background'
+        # only enables the periodic _bgalloc timer.  Returns True iff
+        # allocfunc should be driven by the init loop.
+        if self.flag.isSet():
+            return False
+        if not self.holes:
+            return False
+        self.numholes = float(len(self.holes))
+        # Filler pattern written into allocated-but-empty pieces.
+        self.alloc_buf = chr(0xFF) * self.piece_size
+        ret = False
+        if self.alloc_type == 'pre-allocate':
+            self.bgalloc_enabled = True
+            ret = True
+        if self.alloc_type == 'background':
+            self.bgalloc_enabled = True
+        # Arno: only enable this here, eats CPU otherwise
+        if self.bgalloc_enabled:
+            self.backfunc(self._bgalloc, 0.1)
+        if ret:
+            return ret
+        if self.blocked_moveout:
+            return True
+        return False
+
+
+    def _allocfunc(self):
+        # Pop holes until one can be handed out for allocation.  Blocked
+        # holes are parked in blocked_holes or filled by moving in a piece
+        # from blocked_movein; occupied holes get their resident piece moved
+        # home first.  Returns a free hole index, or None if a move was
+        # performed (or no holes remain) this step.
+        while self.holes:
+            n = self.holes.pop(0)
+            if self.blocked[n]: # assume not self.blocked[index]
+                if not self.blocked_movein:
+                    self.blocked_holes.append(n)
+                    continue
+                if not self.places.has_key(n):
+                    b = self.blocked_movein.pop(0)
+                    oldpos = self._move_piece(b, n)
+                    self.places[oldpos] = oldpos
+                    return None
+            if self.places.has_key(n):
+                oldpos = self._move_piece(n, n)
+                self.places[oldpos] = oldpos
+                return None
+            return n
+        return None
+
+    def allocfunc(self):
+        # One step of the allocation phase: either move out a blocked piece
+        # or zero-fill ("alloc_buf") one hole.  Returns a progress fraction,
+        # or None when there is nothing (left) to do.
+        if self.flag.isSet():
+            return None
+        
+        if self.blocked_moveout:
+            self.bgalloc_active = True
+            n = self._allocfunc()
+            if n is not None:
+                if self.blocked_moveout.includes(n):
+                    self.blocked_moveout.remove(n)
+                    b = n
+                else:
+                    b = self.blocked_moveout.pop(0)
+                oldpos = self._move_piece(b, n)
+                self.places[oldpos] = oldpos
+            return len(self.holes) / self.numholes
+
+        if self.holes and self.bgalloc_enabled:
+            self.bgalloc_active = True
+            n = self._allocfunc()
+            if n is not None:
+                # Fill the hole with the 0xFF pattern and claim it.
+                self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)])
+                self.places[n] = n
+            return len(self.holes) / self.numholes
+
+        self.bgalloc_active = False
+        return None
+
+    def bgalloc(self):
+        # Externally-triggered "finish allocation": enable background
+        # allocation and, if already complete, schedule a storage flush.
+        if self.bgalloc_enabled:
+            if not self.holes and not self.blocked_moveout and self.backfunc:
+                self.backfunc(self.storage.flush)
+                # force a flush whenever the "finish allocation" button is hit
+        self.bgalloc_enabled = True
+        return False
+
+    def _bgalloc(self):
+        # Periodic background-allocation tick; reschedules itself at a rate
+        # derived from config['alloc_rate'] (MiB/s, floored at 0.1).
+        self.allocfunc()
+        if self.config.get('alloc_rate', 0) < 0.1:
+            self.config['alloc_rate'] = 0.1
+        self.backfunc(self._bgalloc, 
+              float(self.piece_size)/(self.config['alloc_rate']*1048576))
+
+    def _waspre(self, piece):
+        # True if the byte range of 'piece' is already allocated on disk.
+        return self.storage.was_preallocated(piece * self.piece_size, self._piecelen(piece))
+
+    def _piecelen(self, piece):
+        # Length in bytes of 'piece'; only the final piece may be shorter.
+        if piece < len(self.hashes) - 1:
+            return self.piece_size
+        else:
+            return self.total_length - (piece * self.piece_size)
+
+    def get_amount_left(self):
+        # Bytes still to be downloaded.
+        return self.amount_left
+
+    def do_I_have_anything(self):
+        # True once at least one byte has been obtained.
+        return self.amount_left < self.total_length
+
+    def _make_inactive(self, index):
+        """ Mark the blocks that form a piece and save that information to inactive_requests. Each block is marked with a (begin, length) pair.
+        
+        @param index: the index of the piece for which blocks are being calculated
+        """
+        length = self._piecelen(index)
+        l = []
+        x = 0
+        # Split the piece into request_size blocks; the final block carries
+        # the remainder.
+        while x + self.request_size < length:
+            l.append((x, self.request_size))
+            x += self.request_size
+        l.append((x, length - x))
+        self.inactive_requests[index] = l # Note: letter L not number 1
+
+    def is_endgame(self):
+        # Endgame: every remaining block has an outstanding request.
+        return not self.amount_inactive
+
+    def reset_endgame(self, requestlist):
+        # Return a batch of in-flight requests to the inactive pool.
+        for index, begin, length in requestlist:
+            self.request_lost(index, begin, length)
+
+    def get_have_list(self):
+        # Have-bitfield serialized for the wire (BITFIELD message payload).
+        return self.have.tostring()
+
+    def get_have_copy(self):
+        # Independent copy of the have-bitfield.
+        return self.have.copy()
+
+    def get_have_list_cloaked(self):
+        # Have-bitfield with 2-4 random pieces (from the first 32) deliberately
+        # hidden, so we do not advertise ourselves as a seed.  Returns
+        # (bitfield string, list of hidden piece indices); cached after the
+        # first call.
+        if self.have_cloaked_data is None:
+            newhave = Bitfield(copyfrom = self.have)
+            unhaves = []
+            n = min(randrange(2, 5), len(self.hashes))    # between 2-4 unless torrent is small
+            while len(unhaves) < n:
+                unhave = randrange(min(32, len(self.hashes)))    # all in first 4 bytes
+                if not unhave in unhaves:
+                    unhaves.append(unhave)
+                    newhave[unhave] = False
+            self.have_cloaked_data = (newhave.tostring(), unhaves)
+        return self.have_cloaked_data
+
+    def do_I_have(self, index):
+        # True if piece 'index' is complete and verified.
+        return self.have[index]
+
+    def do_I_have_requests(self, index):
+        # True if piece 'index' still has blocks available to request
+        # (inactive_requests entry is 1 or a non-empty list, not None/[]).
+        return not not self.inactive_requests[index]
+
+    def is_unstarted(self, index):
+        # True if no data of piece 'index' has been requested or written yet.
+        return (not self.have[index] and not self.numactive[index]
+                 and not self.dirty.has_key(index))
+
+    def get_hash(self, index):
+        # SHA1 digest expected for piece 'index'.
+        return self.hashes[index]
+
+    def get_stats(self):
+        # (bytes obtained, bytes wanted, have-bitfield).
+        return self.amount_obtained, self.amount_desired, self.have
+
+    def new_request(self, index):
+        """ Return a block mark to be downloaded from a piece
+        
+        @param index: the index of the piece for which a block will be downloaded
+        @return: a (begin, length) pair
+        """
+        
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: new_request",index,"#"
+        
+        # returns (begin, length)
+        if self.inactive_requests[index] == 1: # number 1, not letter L
+            # Lazily split the piece into block requests on first use.
+            self._make_inactive(index)
+        self.numactive[index] += 1
+        self.stat_active[index] = 1
+        if not self.dirty.has_key(index):
+            self.stat_new[index] = 1
+        rs = self.inactive_requests[index]
+#        r = min(rs)
+#        rs.remove(r)
+        # Take the first (lowest-offset) pending block.
+        r = rs.pop(0)
+        self.amount_inactive -= r[1]
+        return r
+
+
+    def request_too_slow(self,index):
+        """ Arno's addition to get pieces we requested from slow peers to be
+        back in the PiecePicker's list of candidates """
+        if self.amount_inactive == 0:
+            # all has been requested, endgame about to start, don't mess around
+            return
+        
+        # Reset the piece so its blocks will be re-derived and re-requested.
+        self.inactive_requests[index] = 1  # number 1, not letter L
+        self.amount_inactive += self._piecelen(index)
+
+
+    def write_raw(self, index, begin, data):
+        # Write 'data' at offset 'begin' inside on-disk position 'index'.
+        # Returns True on success; on IOError reports failure and returns
+        # False.
+        try:
+            self.storage.write(self.piece_size * index + begin, data)
+            return True
+        except IOError, e:
+            traceback.print_exc()
+            self.failed('IO Error: ' + str(e))
+            return False
+
+
+    def _write_to_buffer(self, piece, start, data):
+        # Buffer a block write; when the buffer exceeds write_buf_max,
+        # flush the least-recently-touched piece (LRU order is kept in
+        # write_buf_list).  Falls back to a direct write when buffering
+        # is disabled.  Returns False if an underlying write failed.
+        if not self.write_buf_max:
+            return self.write_raw(self.places[piece], start, data)
+        self.write_buf_size += len(data)
+        while self.write_buf_size > self.write_buf_max:
+            old = self.write_buf_list.pop(0)
+            if not self._flush_buffer(old, True):
+                return False
+        if self.write_buf.has_key(piece):
+            # Move the piece to the most-recent end of the LRU list.
+            self.write_buf_list.remove(piece)
+        else:
+            self.write_buf[piece] = []
+        self.write_buf_list.append(piece)
+        self.write_buf[piece].append((start, data))
+        return True
+
+    def _flush_buffer(self, piece, popped = False):
+        # Write all buffered blocks of 'piece' to disk in offset order.
+        # 'popped' means the caller already removed the piece from the LRU
+        # list.  Returns False if any underlying write failed.
+        if not self.write_buf.has_key(piece):
+            return True
+        if not popped:
+            self.write_buf_list.remove(piece)
+        l = self.write_buf[piece]
+        del self.write_buf[piece]
+        l.sort()
+        for start, data in l:
+            self.write_buf_size -= len(data)
+            if not self.write_raw(self.places[piece], start, data):
+                return False
+        return True
+
+    def sync(self):
+        # Flush all buffered pieces (in on-disk position order to keep
+        # writes roughly sequential), then sync the underlying storage.
+        spots = {}
+        for p in self.write_buf_list:
+            spots[self.places[p]] = p
+        l = spots.keys()
+        l.sort()
+        for i in l:
+            try:
+                self._flush_buffer(spots[i])
+            except:
+                # Best-effort: a failed flush of one piece must not stop
+                # the others or the final sync.
+                pass
+        try:
+            self.storage.sync()
+        except IOError, e:
+            self.failed('IO Error: ' + str(e))
+        except OSError, e:
+            self.failed('OS Error: ' + str(e))
+
+
+    def _move_piece(self, index, newpos):
+        # Relocate piece 'index' from its current on-disk position to
+        # 'newpos', optionally re-verifying the data (double/triple check).
+        # Updates the blocked_movein/blocked_moveout work lists to match the
+        # blocked-status of the new position.  Returns the vacated old
+        # position, or -1 on read/write/verify failure.
+        oldpos = self.places[index]
+        if DEBUG:
+            print 'moving '+str(index)+' from '+str(oldpos)+' to '+str(newpos)
+        assert oldpos != index
+        assert oldpos != newpos
+        assert index == newpos or not self.places.has_key(newpos)
+        old = self.read_raw(oldpos, 0, self._piecelen(index))
+        if old is None:
+            return -1
+        if not self.write_raw(newpos, 0, old):
+            return -1
+        self.places[index] = newpos
+        if self.have[index] and (
+                self.triple_check or (self.double_check and index == newpos)):
+            if self.triple_check:
+                # Re-read from disk (after flush) to verify the write itself.
+                old.release()
+                old = self.read_raw(newpos, 0, self._piecelen(index), 
+                                    flush_first = True)
+                if old is None:
+                    return -1
+            if sha(old[:]).digest() != self.hashes[index]:
+                self.failed('download corrupted, piece on disk failed triple check; please delete and restart')
+                return -1
+        old.release()
+
+        if self.blocked[index]:
+            self.blocked_moveout.remove(index)
+            if self.blocked[newpos]:
+                self.blocked_movein.remove(index)
+            else:
+                self.blocked_movein.add(index)
+        else:
+            self.blocked_movein.remove(index)
+            if self.blocked[newpos]:
+                self.blocked_moveout.add(index)
+            else:
+                self.blocked_moveout.remove(index)
+                    
+        return oldpos
+            
+    def _clear_space(self, index):
+        # Make an on-disk position available for piece 'index' by consuming
+        # a hole (moving resident/blocked pieces aside as needed).  Returns
+        # True if the caller should retry with the next hole, False when a
+        # position has been assigned (or on unrecoverable failure).
+        h = self.holes.pop(0)
+        n = h
+        if self.blocked[n]: # assume not self.blocked[index]
+            if not self.blocked_movein:
+                self.blocked_holes.append(n)
+                return True    # repeat
+            if not self.places.has_key(n):
+                b = self.blocked_movein.pop(0)
+                oldpos = self._move_piece(b, n)
+                if oldpos < 0:
+                    return False
+                n = oldpos
+        if self.places.has_key(n):
+            # The hole is occupied: send its resident piece home first and
+            # use the position it vacates.
+            oldpos = self._move_piece(n, n)
+            if oldpos < 0:
+                return False
+            n = oldpos
+        if index == n or index in self.holes:
+            if n == h:
+                # Fresh hole: initialize it with the filler pattern.
+                self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)])
+            self.places[index] = n
+            if self.blocked[n]:
+                # because n may be a spot cleared 10 lines above, it's possible
+                # for it to be blocked.  While that spot could be left cleared
+                # and a new spot allocated, this condition might occur several
+                # times in a row, resulting in a significant amount of disk I/O,
+                # delaying the operation of the engine.  Rather than do this,
+                # queue the piece to be moved out again, which will be performed
+                # by the background allocator, with which data movement is
+                # automatically limited.
+                self.blocked_moveout.add(index)
+            return False
+        # Find whichever piece currently occupies position 'index' and move
+        # it into the cleared spot n; 'index' then takes its own position.
+        # (for-else: 'p' is the loop variable found before break.)
+        for p, v in self.places.iteritems():
+            if v == index:
+                break
+        else:
+            self.failed('download corrupted; please delete and restart')
+            return False
+        self._move_piece(p, n)
+        self.places[index] = index
+        return False
+
+    ## Arno: don't think we need length here, FIXME 
+    def piece_came_in(self, index, begin, hashlist, piece, baddataguard, source = None):
+        # Store an incoming chunk 'piece' at offset 'begin' of piece 'index'
+        # (optionally with a Merkle 'hashlist'; 'source' identifies the
+        # sending peer).  When the last chunk arrives the full piece is
+        # hash-checked (or, for live streaming, authenticated) and either
+        # accepted or flunked.  Returns False when the piece failed its
+        # check or the session flag was set during allocation; True
+        # otherwise (including "stored OK but piece not yet complete").
+        assert not self.have[index]
+        # Merkle: Check that the hashes are valid using the known root_hash
+        # If so, put them in the hash tree and the normal list of hashes to
+        # allow (1) us to send this piece to others using the right hashes
+        # and (2) us to check the validity of the piece when it has been
+        # received completely.
+        #
+        if self.merkle_torrent and len(hashlist) > 0:
+            if self.merkletree.check_hashes(hashlist):
+                self.merkletree.update_hash_admin(hashlist,self.hashes)
+            else:
+                raise ValueError("bad list of hashes")
+            
+        if not self.places.has_key(index):
+            # No physical slot yet: keep clearing space until one is found.
+            while self._clear_space(index):
+                pass
+            if DEBUG:
+                print 'new place for '+str(index)+' at '+str(self.places[index])
+        if self.flag.isSet():
+            return False
+
+        if self.failed_pieces.has_key(index):
+            # A previous attempt at this piece flunked its hash check:
+            # compare the new chunk with what is on disk and remember which
+            # peer sent conflicting data so the culprit can be identified.
+            old = self.read_raw(self.places[index], begin, len(piece))
+            if old is None:
+                return True
+            if old[:].tostring() != piece:
+                try:
+                    self.failed_pieces[index][self.download_history[index][begin]] = 1
+                except:
+                    self.failed_pieces[index][None] = 1
+            old.release()
+        self.download_history.setdefault(index, {})[begin] = source
+        
+        if not self._write_to_buffer(index, begin, piece):
+            return True
+        
+        self.amount_obtained += len(piece)
+        self.dirty.setdefault(index, []).append((begin, len(piece)))
+        self.numactive[index] -= 1
+        assert self.numactive[index] >= 0
+        if not self.numactive[index]:
+            del self.stat_active[index]
+        if self.stat_new.has_key(index):
+            del self.stat_new[index]
+
+        if self.inactive_requests[index] or self.numactive[index]:
+            # Piece not yet complete; nothing more to check.
+            return True
+        
+        del self.dirty[index]
+        if not self._flush_buffer(index):
+            return True
+        
+        length = self._piecelen(index)
+        # Check hash
+        data = self.read_raw(self.places[index], 0, length, 
+                                     flush_first = self.triple_check)
+        if data is None:
+            return True
+        
+        pieceok = False
+        if self.live_streaming:
+            # LIVESOURCEAUTH
+            if self.piece_from_live_source_func(index,data[:]):
+                pieceok = True
+        else:
+            hash = sha(data[:]).digest()
+            data.release()
+            if hash == self.hashes[index]:
+                pieceok = True
+                
+        if not pieceok: 
+            # Flunked: roll back accounting, mark the whole piece for
+            # re-download, and try to blame a single sender.
+            self.amount_obtained -= length
+            self.data_flunked(length, index)
+            self.inactive_requests[index] = 1  # number 1, not letter L
+            self.amount_inactive += length
+            self.stat_numflunked += 1
+
+            self.failed_pieces[index] = {}
+            allsenders = {}
+            for d in self.download_history[index].values():
+                allsenders[d] = 1
+            if len(allsenders) == 1:
+                culprit = allsenders.keys()[0]
+                if culprit is not None:
+                    culprit.failed(index, bump = True)
+                del self.failed_pieces[index] # found the culprit already
+                
+            if self.live_streaming:
+                # TODO: figure out how to use the Download.BadDataGuard
+                # cf. the culprit business above.
+                print >>sys.stderr,"////////////////////////////////////////////////////////////// kicking peer"
+                raise ValueError("Arno quick fix: Unauth data unacceptable")
+                
+            return False
+
+        # Piece verified: mark as held, credit all senders, and clear any
+        # earlier failure bookkeeping for it.
+        self.have[index] = True
+        self.inactive_requests[index] = None
+        self.waschecked[index] = True
+        
+        self.amount_left -= length
+        self.stat_numdownloaded += 1
+
+        for d in self.download_history[index].values():
+            if d is not None:
+                d.good(index)
+        del self.download_history[index]
+        if self.failed_pieces.has_key(index):
+            for d in self.failed_pieces[index].keys():
+                if d is not None:
+                    d.failed(index)
+            del self.failed_pieces[index]
+
+        if self.amount_left == 0:
+            self.finished()
+        return True
+
+
+    def request_lost(self, index, begin, length):
+        # A pending request for chunk (begin, length) of piece 'index' was
+        # abandoned (e.g. the peer disconnected): return the chunk to the
+        # inactive-request list so it can be requested again, and update the
+        # active-piece statistics.
+        
+        if DEBUG:
+            print >>sys.stderr,"StorageWrapper: request_lost",index,"#"
+        
+        assert not (begin, length) in self.inactive_requests[index]
+        insort(self.inactive_requests[index], (begin, length))
+        self.amount_inactive += length
+        self.numactive[index] -= 1
+        if not self.numactive[index]:
+            del self.stat_active[index]
+            if self.stat_new.has_key(index):
+                del self.stat_new[index]
+
+
+    def get_piece(self, index, begin, length):
+        # Merkle: Get (sub)piece from disk and its associated hashes
+        # do_get_piece() returns PieceBuffer
+        # Returns a two-element list [PieceBuffer-or-None, hashlist]; the
+        # hashlist is only populated for Merkle torrents when the request
+        # starts at the beginning of the piece (begin == 0).
+        pb = self.do_get_piece(index,begin,length)
+        if self.merkle_torrent and pb is not None and begin == 0:
+            hashlist = self.merkletree.get_hashes_for_piece(index)
+        else:
+            hashlist = []
+        return [pb,hashlist]
+
+    def do_get_piece(self, index, begin, length):
+        # Read (begin, length) of piece 'index' from disk and return it as a
+        # buffer object, or None if the piece is not held or the range is
+        # out of bounds.  length == -1 means "from begin to end of piece".
+        # The first read of a piece after start-up re-verifies its hash
+        # (waschecked) unless live streaming is active.
+        if not self.have[index]:
+            return None
+        data = None
+        if not self.waschecked[index]:
+            data = self.read_raw(self.places[index], 0, self._piecelen(index))
+            if data is None:
+                return None
+            if not self.live_streaming and sha(data[:]).digest() != self.hashes[index]:
+                self.failed('file supposed to be complete on start-up, but piece failed hash check')
+                return None
+            self.waschecked[index] = True
+            if length == -1 and begin == 0:
+                return data     # optimization
+        if length == -1:
+            if begin > self._piecelen(index):
+                return None
+            length = self._piecelen(index)-begin
+            if begin == 0:
+                return self.read_raw(self.places[index], 0, length)
+        elif begin + length > self._piecelen(index):
+            return None
+        if data is not None:
+            # Reuse the full-piece buffer read for the start-up hash check.
+            s = data[begin:begin+length]
+            data.release()
+            return s
+        data = self.read_raw(self.places[index], begin, length)
+        if data is None:
+            return None
+        s = data.getarray()
+        data.release()
+        return s
+
+    def read_raw(self, piece, begin, length, flush_first = False):
+        # Read 'length' bytes at offset 'begin' of physical slot 'piece';
+        # returns None (after reporting failure) on IOError.
+        # NOTE(review): unlike set_file_readonly below, OSError is NOT
+        # caught here and would propagate -- confirm whether intentional.
+        try:
+            return self.storage.read(self.piece_size * piece + begin, 
+                                                     length, flush_first)
+        except IOError, e:
+            self.failed('IO Error: ' + str(e))
+            return None
+
+
+    def set_file_readonly(self, n):
+        # Mark file number 'n' in the underlying storage read-only,
+        # reporting (not raising) any I/O or OS error via self.failed.
+        try:
+            self.storage.set_readonly(n)
+        except IOError, e:
+            self.failed('IO Error: ' + str(e))
+        except OSError, e:
+            self.failed('OS Error: ' + str(e))
+
+
+    def has_data(self, index):
+        # True when physical slot 'index' holds (possibly partial) data,
+        # i.e. it is not an unallocated or blocked hole.
+        return index not in self.holes and index not in self.blocked_holes
+
+    def doublecheck_data(self, pieces_to_check):
+        # Re-verify on disk (with a flush first) every held piece whose
+        # physical slot appears in 'pieces_to_check'.  No-op unless
+        # double_check mode is on.  Returns False on read or hash failure
+        # (after reporting via self.failed), True otherwise.
+        if not self.double_check:
+            return
+        sources = []
+        for p, v in self.places.iteritems():
+            if pieces_to_check.has_key(v):
+                sources.append(p)
+        assert len(sources) == len(pieces_to_check)
+        sources.sort()
+        for index in sources:
+            if self.have[index]:
+                piece = self.read_raw(self.places[index], 0, self._piecelen(index), 
+                                       flush_first = True)
+                if piece is None:
+                    return False
+                if sha(piece[:]).digest() != self.hashes[index]:
+                    self.failed('download corrupted, piece on disk failed double check; please delete and restart')
+                    return False
+                piece.release()
+        return True
+
+
+    def reblock(self, new_blocked):
+        # Apply a new blocked-piece bitmap: adjust the desired/obtained/
+        # inactive byte counters for every piece whose blocked status
+        # changed, then rebuild the move-in/move-out queues and reclaim the
+        # blocked holes.
+        # assume downloads have already been canceled and chunks made inactive
+        for i in xrange(len(new_blocked)):
+            if new_blocked[i] and not self.blocked[i]:
+                # Piece becomes blocked: stop counting it as desired.
+                length = self._piecelen(i)
+                self.amount_desired -= length
+                if self.have[i]:
+                    self.amount_obtained -= length
+                    continue
+                if self.inactive_requests[i] == 1: # number 1, not letter L
+                    self.amount_inactive -= length
+                    continue
+                inactive = 0
+                for nb, nl in self.inactive_requests[i]:
+                    inactive += nl
+                self.amount_inactive -= inactive
+                self.amount_obtained -= length - inactive
+                
+            if self.blocked[i] and not new_blocked[i]:
+                # Piece becomes unblocked: mirror of the branch above.
+                length = self._piecelen(i)
+                self.amount_desired += length
+                if self.have[i]:
+                    self.amount_obtained += length
+                    continue
+                if self.inactive_requests[i] == 1:
+                    self.amount_inactive += length
+                    continue
+                inactive = 0
+                for nb, nl in self.inactive_requests[i]:
+                    inactive += nl
+                self.amount_inactive += inactive
+                self.amount_obtained += length - inactive
+
+        self.blocked = new_blocked
+
+        # Rebuild relocation queues from pieces stored away from home.
+        self.blocked_movein = Olist()
+        self.blocked_moveout = Olist()
+        for p, v in self.places.iteritems():
+            if p != v:
+                if self.blocked[p] and not self.blocked[v]:
+                    self.blocked_movein.add(p)
+                elif self.blocked[v] and not self.blocked[p]:
+                    self.blocked_moveout.add(p)
+
+        self.holes.extend(self.blocked_holes)    # reset holes list
+        self.holes.sort()
+        self.blocked_holes = []
+
+
+    """
+    Pickled data format:
+
+    d['pieces'] = either a string containing a bitfield of complete pieces,
+                    or the numeric value "1" signifying a seed.  If it is
+                    a seed, d['places'] and d['partials'] should be empty
+                    and needn't even exist. d['merkletree'] must exist
+                    if it's a seed and a Merkle torrent.
+    d['partials'] = [ piece, [ offset, length... ]... ]
+                    a list of partial data that had been previously
+                    downloaded, plus the given offsets.  Adjacent partials
+                    are merged so as to save space, and so that if the
+                    request size changes then new requests can be
+                    calculated more efficiently.
+    d['places'] = [ piece, place, {,piece, place ...} ]
+                    the piece index, and the place it's stored.
+                    If d['pieces'] specifies a complete piece or d['partials']
+                    specifies a set of partials for a piece which has no
+                    entry in d['places'], it can be assumed that
+                    place[index] = index.  A place specified with no
+                    corresponding data in d['pieces'] or d['partials']
+                    indicates allocated space with no valid data, and is
+                    reserved so it doesn't need to be hash-checked.
+    d['merkletree'] = pickle.dumps(self.merkletree)
+                    if we're using a Merkle torrent the Merkle tree, otherwise
+                    there is no 'merkletree' in the dictionary.
+    """
+    def pickle(self):
+        # Serialize resume-state into the dict format documented above.
+        if self.have.complete():
+            if self.merkle_torrent:
+                return {'pieces': 1, 'merkletree': pickle.dumps(self.merkletree) }
+            else:
+                return {'pieces': 1 }
+        pieces = Bitfield(len(self.hashes))
+        places = []
+        partials = []
+        for p in xrange(len(self.hashes)):
+            if self.blocked[p] or not self.places.has_key(p):
+                continue
+            h = self.have[p]
+            pieces[p] = h
+            pp = self.dirty.get(p)
+            if not h and not pp:  # no data
+                places.extend([self.places[p], self.places[p]])
+            elif self.places[p] != p:
+                places.extend([p, self.places[p]])
+            if h or not pp:
+                continue
+            # Merge adjacent dirty (begin, length) runs before recording.
+            pp.sort()
+            r = []
+            while len(pp) > 1:
+                if pp[0][0]+pp[0][1] == pp[1][0]:
+                    pp[0] = list(pp[0])
+                    pp[0][1] += pp[1][1]
+                    del pp[1]
+                else:
+                    r.extend(pp[0])
+                    del pp[0]
+            r.extend(pp[0])
+            partials.extend([p, r])
+        if self.merkle_torrent:
+            return {'pieces': pieces.tostring(), 'places': places, 'partials': partials, 'merkletree': pickle.dumps(self.merkletree) }
+        else:
+            return {'pieces': pieces.tostring(), 'places': places, 'partials': partials }
+
+
+    def unpickle(self, data, valid_places):
+        # Restore resume-state produced by pickle() above.  'valid_places'
+        # is the set of physical slots whose on-disk data is still trusted;
+        # entries outside it are dropped.  All state is rebuilt into locals
+        # first and committed atomically at the end, so any inconsistency
+        # discards everything.  Returns the list of piece indices restored
+        # as partials (empty list on invalid data).
+        got = {}
+        places = {}
+        dirty = {}
+        download_history = {}
+        stat_active = {}
+        stat_numfound = self.stat_numfound
+        amount_obtained = self.amount_obtained
+        amount_inactive = self.amount_inactive
+        amount_left = self.amount_left
+        inactive_requests = [x for x in self.inactive_requests]
+        restored_partials = []
+
+        try:
+            if data.has_key('merkletree'):
+                try:
+                    if DEBUG:
+                        print "StorageWrapper: Unpickling Merkle tree!"
+                    self.merkletree = pickle.loads(data['merkletree'])
+                    self.hashes = self.merkletree.get_piece_hashes()
+                    self.hashes_unpickled = True
+                except Exception, e:
+                    print "StorageWrapper: Exception while unpickling Merkle tree",str(e)
+                    print_exc()
+            if data['pieces'] == 1:     # a seed
+                assert not data.get('places', None)
+                assert not data.get('partials', None)
+                # Merkle: restore Merkle tree
+                have = Bitfield(len(self.hashes))
+                for i in xrange(len(self.hashes)):
+                    have[i] = True
+                assert have.complete()
+                _places = []
+                _partials = []
+            else:
+                have = Bitfield(len(self.hashes), data['pieces'])
+                # The flat [piece, place, ...] lists are stored pairwise;
+                # regroup them into [piece, place] sublists.
+                _places = data['places']
+                assert len(_places) % 2 == 0
+                _places = [_places[x:x+2] for x in xrange(0, len(_places), 2)]
+                _partials = data['partials']
+                assert len(_partials) % 2 == 0
+                _partials = [_partials[x:x+2] for x in xrange(0, len(_partials), 2)]
+                
+            for index, place in _places:
+                if place not in valid_places:
+                    continue
+                assert not got.has_key(index)
+                assert not got.has_key(place)
+                places[index] = place
+                got[index] = 1
+                got[place] = 1
+
+            for index in xrange(len(self.hashes)):
+                if DEBUG:
+                    print "StorageWrapper: Unpickle: Checking if we have piece",index
+                if have[index]:
+                    if not places.has_key(index):
+                        if index not in valid_places:
+                            have[index] = False
+                            continue
+                        assert not got.has_key(index)
+                        places[index] = index
+                        got[index] = 1
+                    length = self._piecelen(index)
+                    amount_obtained += length
+                    stat_numfound += 1
+                    amount_inactive -= length
+                    amount_left -= length
+                    inactive_requests[index] = None
+
+            for index, plist in _partials:
+                assert not dirty.has_key(index)
+                assert not have[index]
+                if not places.has_key(index):
+                    if index not in valid_places:
+                        continue
+                    assert not got.has_key(index)
+                    places[index] = index
+                    got[index] = 1
+                assert len(plist) % 2 == 0
+                plist = [plist[x:x+2] for x in xrange(0, len(plist), 2)]
+                dirty[index] = plist
+                stat_active[index] = 1
+                download_history[index] = {}
+                # invert given partials
+                length = self._piecelen(index)
+                l = []
+                if plist[0][0] > 0:
+                    l.append((0, plist[0][0]))
+                for i in xrange(len(plist)-1):
+                    end = plist[i][0]+plist[i][1]
+                    assert not end > plist[i+1][0]
+                    l.append((end, plist[i+1][0]-end))
+                end = plist[-1][0]+plist[-1][1]
+                assert not end > length
+                if end < length:
+                    l.append((end, length-end))
+                # split them to request_size
+                ll = []
+                amount_obtained += length
+                amount_inactive -= length
+                for nb, nl in l:
+                    while nl > 0:
+                        r = min(nl, self.request_size)
+                        ll.append((nb, r))
+                        amount_inactive += r
+                        amount_obtained -= r
+                        nb += self.request_size
+                        nl -= self.request_size
+                inactive_requests[index] = ll
+                restored_partials.append(index)
+
+            # Sanity: obtained + inactive must account for all desired bytes.
+            assert amount_obtained + amount_inactive == self.amount_desired
+        except:
+#            print_exc()
+            return []   # invalid data, discard everything
+
+        # Commit the rebuilt state only after everything validated.
+        self.have = have
+        self.places = places
+        self.dirty = dirty
+        self.download_history = download_history
+        self.stat_active = stat_active
+        self.stat_numfound = stat_numfound
+        self.amount_obtained = amount_obtained
+        self.amount_inactive = amount_inactive
+        self.amount_left = amount_left
+        self.inactive_requests = inactive_requests
+                
+        return restored_partials
+    
+    def failed(self,s):
+        # Arno: report failure of hash check
+        # Reports message 's' upward and, if initialization is still
+        # pending, signals the initialize callback with success=False.
+        self.report_failure(s)
+        if self.initialize_done is not None:
+            self.initialize_done(success=False)
+
+    def live_invalidate(self,piece): # Arno: LIVEWRAP
+        # Drop a piece that has fallen out of the live-streaming window:
+        # mark it not-held, make it fully re-requestable, and put its bytes
+        # back into the left/obtained accounting.
+        # Assumption: not outstanding requests
+        length = self._piecelen(piece)
+        oldhave = self.have[piece]
+        self.have[piece] = False
+        #self.waschecked[piece] = False
+        self.inactive_requests[piece] = 1
+        if oldhave: 
+            self.amount_left += length
+            self.amount_obtained -= length
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/T2T.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/T2T.py
new file mode 100644 (file)
index 0000000..2b2f857
--- /dev/null
@@ -0,0 +1,191 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from Rerequester import Rerequester
+from urllib import quote
+from threading import Event
+from random import randrange
+import __init__
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+
+def excfunc(x):
+    # Exception callback handed to Rerequester: just print the message.
+    print x
+
+class T2TConnection:
+    # One tracker-to-tracker scrape connection for a single (tracker, hash)
+    # pair.  Periodically announces to the remote tracker via a Rerequester,
+    # collects the peer lists it returns, and backs off / deactivates on
+    # rejections and errors.
+    def __init__(self, myid, tracker, hash, interval, peers, timeout,
+                     rawserver, disallow, isdisallowed):
+        self.tracker = tracker
+        self.interval = interval
+        self.hash = hash
+        self.operatinginterval = interval
+        self.peers = peers
+        self.rawserver = rawserver
+        self.disallow = disallow
+        self.isdisallowed = isdisallowed
+        self.active = True
+        self.busy = False
+        self.errors = 0
+        self.rejected = 0
+        self.trackererror = False
+        self.peerlists = []
+
+        self.rerequester = Rerequester([[tracker]], interval,
+            rawserver.add_task, lambda: 0, peers, self.addtolist, 
+            rawserver.add_task, lambda: 1, 0, 0, 0, '',
+            myid, hash, timeout, self.errorfunc, excfunc, peers, Event(),
+            lambda: 0, lambda: 0)
+
+        if self.isactive():
+            rawserver.add_task(self.refresh, randrange(int(self.interval/10), self.interval))
+                                        # stagger announces
+
+    def isactive(self):
+        # Also deactivates lazily if the tracker got disallowed meanwhile.
+        if self.isdisallowed(self.tracker):    # whoops!
+            self.deactivate()
+        return self.active
+            
+    def deactivate(self):
+        self.active = False
+
+    def refresh(self):
+        # Periodic task: announce to the tracker and gather fresh peers.
+        if not self.isactive():
+            return
+        self.lastsuccessful = True
+        self.newpeerdata = []
+        if DEBUG:
+            print 'contacting %s for info_hash=%s' % (self.tracker, quote(self.hash))
+        self.rerequester.snoop(self.peers, self.callback)
+
+    def callback(self):
+        # Announce finished (success is signalled via self.lastsuccessful,
+        # which errorfunc clears); record peers and reschedule.
+        self.busy = False
+        if self.lastsuccessful:
+            self.errors = 0
+            self.rejected = 0
+            if self.rerequester.announce_interval > (3*self.interval):
+                # I think I'm stripping from a regular tracker; boost the number of peers requested
+                self.peers = int(self.peers * (self.rerequester.announce_interval / self.interval))
+            self.operatinginterval = self.rerequester.announce_interval
+            if DEBUG:
+                print ("%s with info_hash=%s returned %d peers" %
+                        (self.tracker, quote(self.hash), len(self.newpeerdata)))
+            self.peerlists.append(self.newpeerdata)
+            self.peerlists = self.peerlists[-10:]  # keep up to the last 10 announces
+        if self.isactive():
+            self.rawserver.add_task(self.refresh, self.operatinginterval)
+
+    def addtolist(self, peers):
+        # Rerequester peer callback; peer tuples become (id, ip, port).
+        for peer in peers:
+            self.newpeerdata.append((peer[1],peer[0][0],peer[0][1]))
+        
+    def errorfunc(self, r):
+        # Tracker error handler: deactivate on 'disallowed', give up after
+        # three rejections, and lengthen the interval after repeated errors.
+        self.lastsuccessful = False
+        if DEBUG:
+            print "%s with info_hash=%s gives error: '%s'" % (self.tracker, quote(self.hash), r)
+        if r == self.rerequester.rejectedmessage + 'disallowed':   # whoops!
+            if DEBUG:
+                print ' -- disallowed - deactivating'
+            self.deactivate()
+            self.disallow(self.tracker)   # signal other torrents on this tracker
+            return
+        if r[:8].lower() == 'rejected': # tracker rejected this particular torrent
+            self.rejected += 1
+            if self.rejected == 3:     # rejected 3 times
+                if DEBUG:
+                    print ' -- rejected 3 times - deactivating'
+                self.deactivate()
+            return
+        self.errors += 1
+        if self.errors >= 3:                         # three or more errors in a row
+            self.operatinginterval += self.interval  # lengthen the interval
+            if DEBUG:
+                print ' -- lengthening interval to '+str(self.operatinginterval)+' seconds'
+
+    def harvest(self):
+        # Drain and return all peer lists collected since the last harvest.
+        x = []
+        for list in self.peerlists:
+            x += list
+        self.peerlists = []
+        return x
+
+
+class T2TList:
+    # Registry of all tracker-to-tracker connections, keyed by tracker and
+    # by torrent hash.  parse() reconciles the registry against the current
+    # allowed-torrents list; harvest() drains collected peers per torrent.
+    def __init__(self, enabled, trackerid, interval, maxpeers, timeout, rawserver):
+        self.enabled = enabled
+        self.trackerid = trackerid
+        self.interval = interval
+        self.maxpeers = maxpeers
+        self.timeout = timeout
+        self.rawserver = rawserver
+        self.list = {}
+        self.torrents = {}
+        self.disallowed = {}
+        self.oldtorrents = []
+
+    def parse(self, allowed_list):
+        # Rebuild the connection registry from 'allowed_list'
+        # ({hash: metainfo}), reusing live T2TConnections, retiring stale
+        # ones, and creating connections for new (tracker, hash) pairs.
+        if not self.enabled:
+            return
+
+        # step 1:  Create a new list with all tracker/torrent combinations in allowed_dir        
+        newlist = {}
+        for hash, data in allowed_list.items():
+            if data.has_key('announce-list'):
+                for tier in data['announce-list']:
+                    for tracker in tier:
+                        self.disallowed.setdefault(tracker, False)
+                        newlist.setdefault(tracker, {})
+                        newlist[tracker][hash] = None # placeholder
+                            
+        # step 2:  Go through and copy old data to the new list.
+        # if the new list has no place for it, then it's old, so deactivate it
+        for tracker, hashdata in self.list.items():
+            for hash, t2t in hashdata.items():
+                if not newlist.has_key(tracker) or not newlist[tracker].has_key(hash):
+                    t2t.deactivate()                # this connection is no longer current
+                    self.oldtorrents += [t2t]
+                        # keep it referenced in case a thread comes along and tries to access.
+                else:
+                    newlist[tracker][hash] = t2t
+            if not newlist.has_key(tracker):
+                self.disallowed[tracker] = False    # reset when no torrents on it left
+
+        self.list = newlist
+        newtorrents = {}
+
+        # step 3:  If there are any entries that haven't been initialized yet, do so.
+        # At the same time, copy all entries onto the by-torrent list.
+        for tracker, hashdata in newlist.items():
+            for hash, t2t in hashdata.items():
+                if t2t is None:
+                    hashdata[hash] = T2TConnection(self.trackerid, tracker, hash,
+                                        self.interval, self.maxpeers, self.timeout,
+                                        self.rawserver, self._disallow, self._isdisallowed)
+                newtorrents.setdefault(hash,[])
+                newtorrents[hash] += [hashdata[hash]]
+                
+        self.torrents = newtorrents
+
+        # structures:
+        # list = {tracker: {hash: T2TConnection, ...}, ...}
+        # torrents = {hash: [T2TConnection, ...]}
+        # disallowed = {tracker: flag, ...}
+        # oldtorrents = [T2TConnection, ...]
+
+    def _disallow(self,tracker):
+        # Callback from a connection: ban this tracker for all torrents.
+        self.disallowed[tracker] = True
+
+    def _isdisallowed(self,tracker):
+        return self.disallowed[tracker]
+
+    def harvest(self,hash):
+        # Drain peers collected for torrent 'hash' across all its trackers.
+        harvest = []
+        if self.enabled:
+            for t2t in self.torrents[hash]:
+                harvest += t2t.harvest()
+        return harvest
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Uploader.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/Uploader.py
new file mode 100644 (file)
index 0000000..cfadae7
--- /dev/null
@@ -0,0 +1,195 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from BaseLib.Core.Statistics.Status.Status import get_status_holder
+
+import sys
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+class Upload:  # upload side of one peer connection: choke state, request queue, piece serving
+    def __init__(self, connection, ratelimiter, totalup, choker, storage, 
+                 picker, config):  # config: BitTornado download config dict (max_slice_length etc.)
+        self.connection = connection
+        self.ratelimiter = ratelimiter
+        self.totalup = totalup
+        self.choker = choker
+        self.storage = storage
+        self.picker = picker
+        self.config = config
+        self.max_slice_length = config['max_slice_length']
+        self.choked = True
+        self.cleared = True
+        self.interested = False
+        self.super_seeding = False
+        self.buffer = []  # pending (index, begin, length) requests from the peer
+        self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge'])
+        self.was_ever_interested = False
+        if storage.get_amount_left() == 0:  # we are a complete seed
+            if choker.super_seed:
+                self.super_seeding = True   # flag, and don't send bitfield
+                self.seed_have_list = []    # set from piecepicker
+                self.skipped_count = 0
+            else:
+                if config['breakup_seed_bitfield']:  # disguise seed status: partial bitfield + HAVEs
+                    bitfield, msgs = storage.get_have_list_cloaked()
+                    connection.send_bitfield(bitfield)
+                    for have in msgs:
+                        connection.send_have(have)
+                else:
+                    connection.send_bitfield(storage.get_have_list())
+        else:
+            if storage.do_I_have_anything():  # partial downloader: only announce if we own pieces
+                connection.send_bitfield(storage.get_have_list())
+                
+        self.piecedl = None
+        self.piecebuf = None
+        # Merkle
+        self.hashlist = []
+
+    def send_haves(self, connection):
+        """
+        Send all pieces I have a series of HAVEs - this is done
+        by closed swarms after successfully connecting (will send blank
+        bitfields until remote node is authorized)
+        """
+        have_list = self.storage.get_have_list()
+
+        print >>sys.stderr, "Have list:",have_list  # NOTE(review): only logs -- despite the docstring no HAVE messages are sent; looks like an unfinished stub
+        
+    def send_bitfield(self, connection):
+        """
+        Send the bitfield (again)
+        """
+        if self.storage.get_amount_left() == 0:
+            if not self.super_seeding:
+                if self.config['breakup_seed_bitfield']:  # NOTE(review): next three lines use 3-space indent (legal but inconsistent with file style)
+                   bitfield, msgs = self.storage.get_have_list_cloaked()
+                   connection.send_bitfield(bitfield)
+                   for have in msgs:
+                        connection.send_have(have)
+                else:
+                    connection.send_bitfield(self.storage.get_have_list())
+        else:
+            if self.storage.do_I_have_anything():
+                connection.send_bitfield(self.storage.get_have_list())
+
+
+    def got_not_interested(self):  # peer sent NOT_INTERESTED: drop queued requests and cached piece
+        if self.interested:
+            self.interested = False
+            del self.buffer[:]
+            self.piecedl = None
+            if self.piecebuf:
+                self.piecebuf.release()
+            self.piecebuf = None
+            self.choker.not_interested(self.connection)
+
+    def got_interested(self):  # peer sent INTERESTED: notify choker so it may unchoke us
+        if not self.interested:
+            self.interested = True
+            self.was_ever_interested = True
+            self.choker.interested(self.connection)
+
+    def get_upload_chunk(self):
+        # Pop the next queued request and return (index, begin, hashlist, piece),
+        # or None when choked / queue empty / read failure (connection closed on failure).
+        if self.choked or not self.buffer:
+            return None
+        index, begin, length = self.buffer.pop(0)
+        if self.config['buffer_reads']:  # cache the whole piece so subsequent slices avoid re-reads
+            if index != self.piecedl:
+                if self.piecebuf:
+                    self.piecebuf.release()
+                self.piecedl = index
+                # Merkle
+                [ self.piecebuf, self.hashlist ] = self.storage.get_piece(index, 0, -1)
+            try:
+                piece = self.piecebuf[begin:begin+length]
+                assert len(piece) == length
+            except:     # fails if storage.get_piece returns None or if out of range
+                self.connection.close()
+                return None
+            if begin == 0:  # Merkle hashes only accompany the first slice of a piece
+                hashlist = self.hashlist
+            else:
+                hashlist = []
+        else:
+            if self.piecebuf:
+                self.piecebuf.release()
+                self.piecedl = None
+            [piece, hashlist] = self.storage.get_piece(index, begin, length)
+            if piece is None:
+                self.connection.close()
+                return None
+        self.measure.update_rate(len(piece))
+        self.totalup.update_rate(len(piece))
+
+        status = get_status_holder("LivingLab")  # LivingLab instrumentation: global uploaded-bytes counter
+        s_upload = status.get_or_create_status_element("uploaded",0)
+        s_upload.inc(len(piece))
+
+        # BarterCast counter
+        self.connection.total_uploaded += length
+        
+        return (index, begin, hashlist, piece)
+
+    def got_request(self, index, begin, length):
+        # Validate and queue a REQUEST; protocol violations close the connection.
+        if ((self.super_seeding and not index in self.seed_have_list)
+                   or (not self.connection.connection.is_coordinator_con() and not self.interested)
+                   or length > self.max_slice_length):
+            self.connection.close()
+            return
+        if not self.cleared:  # only queue after an unchoke has been sent (cleared is reset in unchoke())
+            self.buffer.append((index, begin, length))
+        if not self.choked and self.connection.next_upload is None:
+            self.ratelimiter.queue(self.connection)
+
+    def got_cancel(self, index, begin, length):  # peer sent CANCEL: silently drop if already served
+        try:
+            self.buffer.remove((index, begin, length))
+        except ValueError:
+            pass
+
+    def choke(self):  # choke the peer and release any cached piece buffer
+        if not self.choked:
+            self.choked = True
+            self.connection.send_choke()
+        self.piecedl = None
+        if self.piecebuf:
+            self.piecebuf.release()
+            self.piecebuf = None
+
+    def choke_sent(self):  # called once the CHOKE actually went on the wire: flush the request queue
+        del self.buffer[:]
+        self.cleared = True
+
+    def unchoke(self):
+        if self.choked:
+            try:
+                if self.connection.send_unchoke():  # only flip state if the message was really queued
+                    self.choked = False
+                    self.cleared = False
+            except:  # NOTE(review): blanket except silently keeps us choked on send failure -- confirm intended
+                pass
+        
+    def disconnected(self):  # connection is gone: release the cached piece buffer
+        if self.piecebuf:
+            self.piecebuf.release()
+            self.piecebuf = None
+
+    def is_choked(self):
+        return self.choked
+        
+    def is_interested(self):
+        return self.interested
+
+    def has_queries(self):  # truthy when unchoked and requests are pending
+        return not self.choked and self.buffer
+
+    def get_rate(self):  # current upload rate in bytes/sec as tracked by Measure
+        return self.measure.get_rate()
+    
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/__init__.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/__init__.py
new file mode 100644 (file)
index 0000000..1902f5a
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/btformats.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/btformats.py
new file mode 100644 (file)
index 0000000..7478791
--- /dev/null
@@ -0,0 +1,130 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+import sys
+from types import UnicodeType, StringType, LongType, IntType, ListType, DictType
+from re import compile
+
+#reg = compile(r'^[^/\\.~][^/\\]*$')
+#reg = compile(r'^[^/\\]*$')
+
+ints = (LongType, IntType)
+
+def check_info(info):  # validate a torrent metainfo 'info' dict; raises ValueError with a reason on any malformed field
+    if type(info) != DictType:
+        raise ValueError, 'bad metainfo - not a dictionary'
+
+    if info.has_key('pieces'):  # classic torrent: concatenated 20-byte SHA1 piece hashes
+        pieces = info.get('pieces')
+        if type(pieces) != StringType or len(pieces) % 20 != 0:
+            raise ValueError, 'bad metainfo - bad pieces key'
+    elif info.has_key('root hash'):
+        # Merkle
+        root_hash = info.get('root hash')
+        if type(root_hash) != StringType or len(root_hash) != 20:
+            raise ValueError, 'bad metainfo - bad root hash key'
+    piecelength = info.get('piece length')
+    if type(piecelength) not in ints or piecelength <= 0:
+        raise ValueError, 'bad metainfo - illegal piece length'
+    name = info.get('name')
+    if StringType != type(name) != UnicodeType:  # chained comparison: type must be str or unicode
+        raise ValueError, 'bad metainfo - bad name'
+    #if not reg.match(name):
+    #    raise ValueError, 'name %s disallowed for security reasons' % name
+    if info.has_key('files') == info.has_key('length'):  # exactly one of the two modes must be present
+        raise ValueError, 'single/multiple file mix'
+    if info.has_key('length'):  # single-file torrent
+        length = info.get('length')
+        if type(length) not in ints or length < 0:
+            raise ValueError, 'bad metainfo - bad length'
+    else:  # multi-file torrent
+        files = info.get('files')
+        if type(files) != ListType:
+            raise ValueError
+        for f in files:
+            if type(f) != DictType:
+                raise ValueError, 'bad metainfo - bad file value'
+            length = f.get('length')
+            if type(length) not in ints or length < 0:
+                raise ValueError, 'bad metainfo - bad length'
+            path = f.get('path')
+            if type(path) != ListType or path == []:
+                raise ValueError, 'bad metainfo - bad path'
+            for p in path:
+                if StringType != type(p) != UnicodeType:
+                    raise ValueError, 'bad metainfo - bad path dir'
+                #if not reg.match(p):
+                #    raise ValueError, 'path %s disallowed for security reasons' % p
+        for i in xrange(len(files)):  # O(n^2) duplicate-path scan; fine for typical torrent sizes
+            for j in xrange(i):
+                if files[i]['path'] == files[j]['path']:
+                    raise ValueError, 'bad metainfo - duplicate path'
+
+def check_message(message):  # validate a full decoded .torrent: info dict plus announce URL; raises ValueError
+    if type(message) != DictType:
+        raise ValueError
+    check_info(message.get('info'))
+    if StringType != type(message.get('announce')) != UnicodeType:  # announce must be str or unicode
+        raise ValueError
+
+def check_peers(message):  # validate a decoded tracker announce response; raises ValueError on malformed data
+    if type(message) != DictType:
+        raise ValueError
+    if message.has_key('failure reason'):  # failure responses carry only a reason string
+        if type(message['failure reason']) != StringType:
+            raise ValueError
+        return
+    peers = message.get('peers')
+    if peers is not None:
+        if type(peers) == ListType:  # non-compact form: list of per-peer dicts
+            for p in peers:
+                if type(p) != DictType:
+                    raise ValueError
+                if type(p.get('ip')) != StringType:
+                    raise ValueError
+                port = p.get('port')
+                if type(port) not in ints or port <= 0:  # BUGFIX: was 'p <= 0' (dict vs int is always False in Py2, so bad ports passed)
+                    raise ValueError
+                if p.has_key('peer id'):
+                    id = p['peer id']
+                    if type(id) != StringType or len(id) != 20:
+                        raise ValueError
+        elif type(peers) != StringType or len(peers) % 6 != 0:  # compact form: 6 bytes per peer (ip4+port)
+            raise ValueError
+        
+    # IPv6 Tracker extension. http://www.bittorrent.org/beps/bep_0007.html
+    peers6 = message.get('peers6')
+    if peers6 is not None:
+        if type(peers6) == ListType:
+            for p in peers6:
+                if type(p) != DictType:
+                    raise ValueError
+                if type(p.get('ip')) != StringType:
+                    raise ValueError
+                port = p.get('port')
+                if type(port) not in ints or port <= 0:  # BUGFIX: was 'p <= 0' -- same dict-vs-int comparison bug as above
+                    raise ValueError
+                if p.has_key('peer id'):
+                    id = p['peer id']
+                    if type(id) != StringType or len(id) != 20:
+                        raise ValueError
+        elif type(peers6) != StringType or len(peers6) % 18 != 0:  # compact form: 18 bytes per peer (ip6+port)
+            raise ValueError 
+        
+    interval = message.get('interval', 1)
+    if type(interval) not in ints or interval <= 0:
+        raise ValueError
+    minint = message.get('min interval', 1)
+    if type(minint) not in ints or minint <= 0:
+        raise ValueError
+    if type(message.get('tracker id', '')) != StringType:
+        raise ValueError
+    npeers = message.get('num peers', 0)
+    if type(npeers) not in ints or npeers < 0:
+        raise ValueError
+    dpeers = message.get('done peers', 0)
+    if type(dpeers) not in ints or dpeers < 0:
+        raise ValueError
+    last = message.get('last', 0)
+    if type(last) not in ints or last < 0:
+        raise ValueError
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/convert.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/convert.py
new file mode 100644 (file)
index 0000000..0f9a832
--- /dev/null
@@ -0,0 +1,12 @@
+# Written by Bram Cohen and Arno Bakker
+# see LICENSE.txt for license information
+
+from binascii import b2a_hex
+
+def toint(s):  # big-endian byte string -> long, via its hex representation
+    return long(b2a_hex(s), 16)
+
+def tobinary(i):  # int -> 4-byte big-endian string (inverse of toint for 4-byte input)
+    return (chr(i >> 24) + chr((i >> 16) & 0xFF) + 
+        chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/fakeopen.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/fakeopen.py
new file mode 100644 (file)
index 0000000..659566a
--- /dev/null
@@ -0,0 +1,87 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+class FakeHandle:  # file-like object over an in-memory char list owned by a FakeOpen
+    def __init__(self, name, fakeopen):
+        self.name = name
+        self.fakeopen = fakeopen  # owning FakeOpen; the backing list lives in fakeopen.files[name]
+        self.pos = 0
+    
+    def flush(self):  # no-op: nothing to flush for in-memory data
+        pass
+    
+    def close(self):  # no-op: handle stays usable; data lives in the FakeOpen
+        pass
+    
+    def seek(self, pos):  # absolute positioning only (no whence argument)
+        self.pos = pos
+    
+    def read(self, amount = None):  # read 'amount' chars (or all remaining); returns '' at EOF
+        old = self.pos
+        f = self.fakeopen.files[self.name]
+        if self.pos >= len(f):
+            return ''
+        if amount is None:
+            self.pos = len(f)
+            return ''.join(f[old:])
+        else:
+            self.pos = min(len(f), old + amount)
+            return ''.join(f[old:self.pos])
+    
+    def write(self, s):  # overwrite at current position, NUL-padding any gap past EOF
+        f = self.fakeopen.files[self.name]
+        while len(f) < self.pos:
+            f.append(chr(0))
+        self.fakeopen.files[self.name][self.pos : self.pos + len(s)] = list(s)
+        self.pos += len(s)
+
+class FakeOpen:  # in-memory filesystem stub: filename -> list of single chars
+    def __init__(self, initial = {}):  # NOTE(review): mutable default arg -- safe here only because it is never mutated
+        self.files = {}
+        for key, value in initial.items():
+            self.files[key] = list(value)
+    
+    def open(self, filename, mode):
+        """currently treats everything as rw - doesn't support append"""
+        self.files.setdefault(filename, [])
+        return FakeHandle(filename, self)
+
+    def exists(self, file):  # analogue of os.path.exists
+        return self.files.has_key(file)
+
+    def getsize(self, file):  # analogue of os.path.getsize; KeyError if the file was never opened
+        return len(self.files[file])
+
+def test_normal():  # self-test for FakeOpen/FakeHandle: read, overwrite, gap-free append, independent handles
+    f = FakeOpen({'f1': 'abcde'})
+    assert f.exists('f1')
+    assert not f.exists('f2')
+    assert f.getsize('f1') == 5
+    h = f.open('f1', 'rw')
+    assert h.read(3) == 'abc'
+    assert h.read(1) == 'd'
+    assert h.read() == 'e'
+    assert h.read(2) == ''  # reading past EOF yields ''
+    h.write('fpq')  # write at EOF extends the file
+    h.seek(4)
+    assert h.read(2) == 'ef'
+    h.write('ghij')  # overwrites 'pq' then extends
+    h.seek(0)
+    assert h.read() == 'abcdefghij'
+    h.seek(2)
+    h.write('p')
+    h.write('q')
+    assert h.read(1) == 'e'
+    h.seek(1)
+    assert h.read(5) == 'bpqef'
+
+    h2 = f.open('f2', 'rw')  # second file starts empty
+    assert h2.read() == ''
+    h2.write('mnop')
+    h2.seek(1)
+    assert h2.read() == 'nop'
+    
+    assert f.exists('f1')
+    assert f.exists('f2')
+    assert f.getsize('f1') == 10
+    assert f.getsize('f2') == 4
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/track.py b/instrumentation/next-share/BaseLib/Core/BitTornado/BT1/track.py
new file mode 100644 (file)
index 0000000..2ffd823
--- /dev/null
@@ -0,0 +1,1092 @@
+# Written by Bram Cohen, Arno Bakker
+# see LICENSE.txt for license information
+import sys, os
+import signal
+import re
+import pickle
+from threading import Event, Thread
+from urllib import quote, unquote
+from urlparse import urlparse
+from os.path import exists
+from cStringIO import StringIO
+from traceback import print_exc
+from time import time, gmtime, strftime, localtime
+from random import shuffle, seed
+from types import StringType, IntType, LongType, DictType
+from binascii import b2a_hex
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.BitTornado.parseargs import parseargs, formatDefinitions
+from BaseLib.Core.BitTornado.RawServer import RawServer
+from BaseLib.Core.BitTornado.HTTPHandler import HTTPHandler, months
+from BaseLib.Core.BitTornado.parsedir import parsedir
+from NatCheck import NatCheck
+from T2T import T2TList
+from Filter import Filter
+from BaseLib.Core.BitTornado.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4
+from BaseLib.Core.BitTornado.iprangeparse import IP_List as IP_Range_List
+from BaseLib.Core.BitTornado.torrentlistparse import parsetorrentlist
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode, Bencached
+from BaseLib.Core.BitTornado.zurllib import urlopen
+from BaseLib.Core.Utilities.Crypto import sha
+from BaseLib.Core.BitTornado.clock import clock
+from BaseLib.Core.BitTornado.__init__ import version_short, createPeerID
+from BaseLib.Core.simpledefs import TRIBLER_TORRENT_EXT
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG=False
+
+from BaseLib.Core.defaults import trackerdefaults
+
+defaults = []
+for k,v in trackerdefaults.iteritems():
+    defaults.append((k,v,"See triblerAPI"))
+
+
+def statefiletemplate(x):  # validate the structure of a loaded tracker state dict; raises ValueError on any malformed entry
+    if type(x) != DictType:
+        raise ValueError
+    for cname, cinfo in x.items():
+        if cname == 'peers':
+            for y in cinfo.values():      # The 'peers' key is a dictionary of SHA hashes (torrent ids)
+                if type(y) != DictType:   # ... for the active torrents, and each is a dictionary
+                    raise ValueError
+                for id, info in y.items(): # ... of client ids interested in that torrent
+                    if (len(id) != 20):
+                        raise ValueError
+                    if type(info) != DictType:  # ... each of which is also a dictionary
+                        raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
+                    if type(info.get('ip', '')) != StringType:
+                        raise ValueError
+                    port = info.get('port')
+                    if type(port) not in (IntType,LongType) or port < 0:
+                        raise ValueError
+                    left = info.get('left')
+                    if type(left) not in (IntType,LongType) or left < 0:
+                        raise ValueError
+        elif cname == 'completed':
+            if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids)
+                raise ValueError          # ... for keeping track of the total completions per torrent
+            for y in cinfo.values():      # ... each torrent has an integer value
+                if type(y) not in (IntType,LongType):
+                    raise ValueError      # ... for the number of reported completions for that torrent
+        elif cname == 'allowed':
+            if (type(cinfo) != DictType): # a list of info_hashes and included data
+                raise ValueError
+            if x.has_key('allowed_dir_files'):
+                adlist = [z[1] for z in x['allowed_dir_files'].values()]
+                for y in cinfo.keys():        # and each should have a corresponding key here
+                    if not y in adlist:
+                        raise ValueError
+        elif cname == 'allowed_dir_files':
+            if (type(cinfo) != DictType): # a list of files, their attributes and info hashes
+                raise ValueError
+            dirkeys = {}
+            for y in cinfo.values():      # each entry should have a corresponding info_hash
+                if not y[1]:
+                    continue
+                if not x['allowed'].has_key(y[1]):
+                    raise ValueError
+                if dirkeys.has_key(y[1]): # and each should have a unique info_hash
+                    raise ValueError
+                dirkeys[y[1]] = 1
+            
+
+alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'
+
+local_IPs = IP_List()
+local_IPs.set_intranet_addresses()
+
+
+def isotime(secs = None):  # format an epoch timestamp (default: now) as 'YYYY-MM-DD HH:MM UTC'
+    if secs is None:  # FIX(idiom): 'is None' instead of '== None' (PEP 8); behavior unchanged
+        secs = time()
+    return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
+
+http_via_filter = re.compile(' for ([0-9.]+)\Z')
+
+def _get_forwarded_ip(headers):  # best-effort client IP from proxy headers; returns None if no hint present
+    if headers.has_key('http_x_forwarded_for'):
+        header = headers['http_x_forwarded_for']
+        try:
+            x,y = header.split(',')  # NOTE(review): only handles exactly two entries; longer proxy chains fall back to the raw header string
+        except:
+            return header
+        if not local_IPs.includes(x):  # prefer the first non-intranet hop
+            return x
+        return y
+    if headers.has_key('http_client_ip'):
+        return headers['http_client_ip']
+    if headers.has_key('http_via'):
+        x = http_via_filter.search(headers['http_via'])  # extract trailing 'for <ip>' from the Via header
+        try:
+            return x.group(1)
+        except:  # no match -> x is None; fall through
+            pass
+    if headers.has_key('http_from'):
+        return headers['http_from']
+    return None
+
+def get_forwarded_ip(headers):  # like _get_forwarded_ip but rejects invalid and intranet addresses
+    x = _get_forwarded_ip(headers)
+    if not is_valid_ip(x) or local_IPs.includes(x):
+        return None
+    return x
+
+def compact_peer_info(ip, port):  # dotted-quad ip + port -> 6-byte compact peer string (BEP 23); '' if ip is not IPv4
+    try:
+        s = ( ''.join([chr(int(i)) for i in ip.split('.')])
+              + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )  # port appended big-endian
+        if len(s) != 6:
+            raise ValueError
+    except:
+        s = ''  # not a valid IP, must be a domain name
+    return s
+
+def compact_ip(ip):  # dotted-quad string -> 4 raw bytes (inverse of decompact_ip)
+    return ''.join([chr(int(i)) for i in ip.split('.')])
+
+def decompact_ip(cip):  # 4 raw bytes -> dotted-quad string (inverse of compact_ip)
+    return '.'.join([str(ord(i)) for i in cip])
+
+
+class Tracker:
+    def __init__(self, config, rawserver):  # build tracker state from config dict + RawServer event loop
+        self.config = config
+        self.response_size = config['tracker_response_size']
+        self.dfile = config['tracker_dfile']
+        self.natcheck = config['tracker_nat_check']
+        favicon = config['tracker_favicon']
+        self.parse_dir_interval = config['tracker_parse_dir_interval']
+        self.favicon = None
+        if favicon:
+            try:
+                h = open(favicon,'rb')
+                self.favicon = h.read()
+                h.close()
+            except:
+                print "**warning** specified favicon file -- %s -- does not exist." % favicon
+        self.rawserver = rawserver
+        self.cached = {}    # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
+        self.cached_t = {}  # format: infohash: [time, cache]
+        self.times = {}
+        self.state = {}
+        self.seedcount = {}
+
+        self.allowed_IPs = None
+        self.banned_IPs = None
+        if config['tracker_allowed_ips'] or config['tracker_banned_ips']:
+            self.allowed_ip_mtime = 0
+            self.banned_ip_mtime = 0
+            self.read_ip_lists()
+                
+        self.only_local_override_ip = config['tracker_only_local_override_ip']
+        if self.only_local_override_ip == 2:  # 2 acts as an 'auto' sentinel: derive from nat_check setting
+            self.only_local_override_ip = not config['tracker_nat_check']
+
+        if exists(self.dfile):  # restore persisted tracker state (bencoded or pickled)
+            try:
+                h = open(self.dfile, 'rb')
+                if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE:
+                    ds = h.read()
+                    tempstate = bdecode(ds)
+                else:
+                    tempstate = pickle.load(h)
+                h.close()
+                if not tempstate.has_key('peers'):  # legacy format: whole file was the peers dict
+                    tempstate = {'peers': tempstate}
+                statefiletemplate(tempstate)
+                self.state = tempstate
+            except:
+                print '**warning** statefile '+self.dfile+' corrupt; resetting'
+        self.downloads = self.state.setdefault('peers', {})
+        self.completed = self.state.setdefault('completed', {})
+
+        self.becache = {}   # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
+        for infohash, ds in self.downloads.items():  # rebuild seed counts and caches from restored peers
+            self.seedcount[infohash] = 0
+            for x,y in ds.items():
+                ip = y['ip']
+                if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
+                     or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
+                    del ds[x]  # drop peers no longer permitted by the IP lists
+                    continue
+                if not y['left']:
+                    self.seedcount[infohash] += 1
+                if y.get('nat',-1):  # nonzero/-1 means natcheck pending or failed -- hedge: confirm semantics in natcheckOK
+                    continue
+                gip = y.get('given_ip')
+                if is_valid_ip(gip) and (
+                    not self.only_local_override_ip or local_IPs.includes(ip) ):
+                    ip = gip
+                self.natcheckOK(infohash,x,ip,y['port'],y['left'])
+            
+        for x in self.downloads.keys():
+            self.times[x] = {}
+            for y in self.downloads[x].keys():
+                self.times[x][y] = 0
+
+        self.trackerid = createPeerID('-T-')
+        seed(self.trackerid)  # seed the RNG from the tracker id for shuffles
+                
+        self.reannounce_interval = config['tracker_reannounce_interval']
+        self.save_dfile_interval = config['tracker_save_dfile_interval']
+        self.show_names = config['tracker_show_names']
+        rawserver.add_task(self.save_state, self.save_dfile_interval)  # periodic state persistence
+        self.prevtime = clock()
+        self.timeout_downloaders_interval = config['tracker_timeout_downloaders_interval']
+        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
+        self.logfile = None
+        self.log = None
+        if (config['tracker_logfile']) and (config['tracker_logfile'] != '-'):
+            try:
+                self.logfile = config['tracker_logfile']
+                self.log = open(self.logfile,'a')
+                sys.stdout = self.log  # redirect all prints into the log file
+                print "# Log Started: ", isotime()
+            except:
+                print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0]
+
+        if config['tracker_hupmonitor']:  # reopen the log on SIGHUP (log rotation support)
+            def huphandler(signum, frame, self = self):
+                try:
+                    self.log.close()
+                    self.log = open(self.logfile,'a')
+                    sys.stdout = self.log
+                    print "# Log reopened: ", isotime()
+                except:
+                    print "**warning** could not reopen logfile"
+             
+            signal.signal(signal.SIGHUP, huphandler)            
+                
+        self.allow_get = config['tracker_allow_get']
+        
+        self.t2tlist = T2TList(config['tracker_multitracker_enabled'], self.trackerid,
+                               config['tracker_multitracker_reannounce_interval'],
+                               config['tracker_multitracker_maxpeers'], config['tracker_multitracker_http_timeout'],
+                               self.rawserver)
+
+        if config['tracker_allowed_list']:  # explicit torrent whitelist file (mutually exclusive with allowed_dir)
+            if config['tracker_allowed_dir']:
+                print '**warning** allowed_dir and allowed_list options cannot be used together'
+                print '**warning** disregarding allowed_dir'
+                config['tracker_allowed_dir'] = ''
+            self.allowed = self.state.setdefault('allowed_list',{})
+            self.allowed_list_mtime = 0
+            self.parse_allowed()
+            self.remove_from_state('allowed','allowed_dir_files')
+            if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
+                config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
+            config['tracker_allowed_controls'] = 0
+
+        elif config['tracker_allowed_dir']:  # whitelist built by scanning a directory of .torrent files
+            self.allowed = self.state.setdefault('allowed',{})
+            self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
+            self.allowed_dir_blocked = {}
+            self.parse_allowed()
+            self.remove_from_state('allowed_list')
+
+        else:  # open tracker: no whitelist at all
+            self.allowed = None
+            self.remove_from_state('allowed','allowed_dir_files', 'allowed_list')
+            if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
+                config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
+            config['tracker_allowed_controls'] = 0
+                
+        self.uq_broken = unquote('+') != ' '  # detect urllib unquote variants that don't map '+' to space
+        self.keep_dead = config['tracker_keep_dead']
+        self.Filter = Filter(rawserver.add_task)
+        
+        aggregator = config['tracker_aggregator']
+        if aggregator == 0:
+            self.is_aggregator = False
+            self.aggregator_key = None
+        else:
+            self.is_aggregator = True
+            if aggregator == 1:  # 1 means 'aggregate, no password'; any other value doubles as the key
+                self.aggregator_key = None
+            else:
+                self.aggregator_key = aggregator
+            self.natcheck = False
+                
+        send = config['tracker_aggregate_forward']
+        if not send:
+            self.aggregate_forward = None
+        else:
+            try:
+                self.aggregate_forward, self.aggregate_password = send  # (url, password) pair
+            except:
+                self.aggregate_forward = send  # bare url, no password
+                self.aggregate_password = None
+
+        self.cachetime = 0
+        self.track_cachetimeupdate()
+
+    def track_cachetimeupdate(self):  # self-rescheduling 1s tick: cheap monotonic counter for cache expiry
+        self.cachetime += 1     # raw clock, but more efficient for cache
+        self.rawserver.add_task(self.track_cachetimeupdate,1)
+
+    def aggregate_senddata(self, query):  # forward an announce query string to the upstream aggregator, fire-and-forget
+        url = self.aggregate_forward+'?'+query
+        if self.aggregate_password is not None:
+            url += '&password='+self.aggregate_password
+        rq = Thread(target = self._aggregate_senddata, args = [url])  # own thread so the blocking HTTP call never stalls the event loop
+        rq.setName( "AggregateSendData"+rq.getName() )
+        rq.setDaemon(True)
+        rq.start()
+
+    def _aggregate_senddata(self, url):     # just send, don't attempt to error check,
+        try:                                # discard any returned data
+            h = urlopen(url)
+            h.read()
+            h.close()
+        except:  # deliberate best-effort: any network failure is silently dropped
+            return
+
+
+    def get_infopage(self):
+        # Render the HTML statistics page served at "/" and "/index.html".
+        # Returns an HTTP response 4-tuple:
+        # (status code, status text, header dict, body).
+        try:
+            if not self.config['tracker_show_infopage']:
+                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+            # Optionally redirect the browser instead of rendering locally.
+            red = self.config['tracker_infopage_redirect']
+            if red:
+                return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
+                        '<A HREF="'+red+'">Click Here</A>')
+            
+            s = StringIO()
+            s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
+                '<html><head><title>Tribler Tracker Statistics</title>\n')
+            if self.favicon is not None:
+                s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
+            s.write('</head>\n<body>\n' \
+                '<h3>Tribler Tracker Statistics</h3>\n')
+            # Collect (name, hash) pairs: torrent names are only known when
+            # serving from an allowed dir, otherwise list bare info hashes.
+            if self.config['tracker_allowed_dir']:
+                if self.show_names:
+                    names = [ (self.allowed[hash]['name'],hash)
+                              for hash in self.allowed.keys() ]
+                else:
+                    names = [ (None,hash)
+                              for hash in self.allowed.keys() ]
+            else:
+                names = [ (None,hash) for hash in self.downloads.keys() ]
+            if not names:
+                s.write('<p>Not tracking any files yet...</p>\n')
+            else:
+                names.sort()
+                tn = 0  # Total completed downloads
+                tc = 0  # Total seeds (complete)
+                td = 0  # Total leechers (downloading)
+                tt = 0  # Total transferred
+                ts = 0  # Total size
+                nf = 0  # Number of files displayed
+                if self.config['tracker_allowed_dir'] and self.show_names:
+                    s.write('<table summary="files" border="1">\n' \
+                        '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
+                else:
+                    s.write('<table summary="files">\n' \
+                        '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
+                # One table row per tracked torrent.
+                for name,hash in names:
+                    l = self.downloads[hash]
+                    n = self.completed.get(hash, 0)
+                    tn = tn + n
+                    c = self.seedcount[hash]
+                    tc = tc + c
+                    d = len(l) - c  # leechers = connected peers minus seeds
+                    td = td + d
+                    if self.config['tracker_allowed_dir'] and self.show_names:
+                        if self.allowed.has_key(hash):
+                            nf = nf + 1
+                            sz = self.allowed[hash]['length']  # size
+                            ts = ts + sz
+                            szt = sz * n   # Transferred for this torrent
+                            tt = tt + szt
+                            if self.allow_get == 1:
+                                # P2PURL
+                                url = self.allowed[hash].get('url')
+                                if url:
+                                    linkname = '<a href="' + url + '">' + name + '</a>'
+                                else:
+                                    #linkname = '<a href="/file?info_hash=' + quote(hash) + '">' + name + '</a>'
+                                    linkname = '<a href="/file?name=' + quote(name) + '">' + name + '</a>'
+                            else:
+                                linkname = name
+                            s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
+                                % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt)))
+                    else:
+                        s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
+                            % (b2a_hex(hash), c, d, n))
+                # ttn: all-time completed count (tn only covers current table state).
+                ttn = 0
+                for i in self.completed.values():
+                    ttn = ttn + i
+                if self.config['tracker_allowed_dir'] and self.show_names:
+                    s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td><td align="right">%s</td></tr>\n'
+                            % (nf, size_format(ts), tc, td, tn, ttn, size_format(tt)))
+                else:
+                    # NOTE(review): nf is only incremented in the allowed-dir
+                    # branch above, so this row always reports "0 files" --
+                    # looks inherited from upstream BitTornado; confirm.
+                    s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td></tr>\n'
+                            % (nf, tc, td, tn, ttn))
+                s.write('</table>\n' \
+                    '<ul>\n' \
+                    '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
+                    '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
+                    '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
+                    '<li><em>downloaded:</em> reported complete downloads (total: current/all)</li>\n' \
+                    '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
+                    '</ul>\n' \
+                    '<hr>\n'
+                    '<address>%s (%s)</address>\n' % (version_short, isotime()))
+
+
+            s.write('</body>\n' \
+                '</html>\n')
+            return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
+        except:
+            # Any rendering failure becomes a plain 500 (with traceback to stderr).
+            print_exc()
+            return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
+
+
+    def scrapedata(self, hash, return_name = True):
+        # Build the scrape dict for one info hash: seed count ('complete'),
+        # leecher count ('incomplete') and reported completed downloads
+        # ('downloaded'); optionally the torrent name when known.
+        l = self.downloads[hash]
+        n = self.completed.get(hash, 0)
+        c = self.seedcount[hash]
+        d = len(l) - c  # leechers = connected peers minus seeds
+        f = {'complete': c, 'incomplete': d, 'downloaded': n}
+        if return_name and self.show_names and self.config['tracker_allowed_dir']:
+            f['name'] = self.allowed[hash]['name']
+        return (f)
+
+    def get_scrape(self, paramslist):
+        # Handle /scrape: returns bencoded {'files': {infohash: stats}}.
+        # Requests naming specific info_hash values need SPECIFIC or FULL
+        # permission; a request without info_hash is a full scrape and
+        # needs FULL permission.
+        fs = {}
+        if paramslist.has_key('info_hash'):
+            if self.config['tracker_scrape_allowed'] not in [ITRACKSCRAPE_ALLOW_SPECIFIC,ITRACKSCRAPE_ALLOW_FULL]:
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'specific scrape function is not available with this tracker.'}))
+            for hash in paramslist['info_hash']:
+                # Unknown hashes are silently omitted from the response.
+                if self.allowed is not None:
+                    if self.allowed.has_key(hash):
+                        fs[hash] = self.scrapedata(hash)
+                else:
+                    if self.downloads.has_key(hash):
+                        fs[hash] = self.scrapedata(hash)
+        else:
+            if self.config['tracker_scrape_allowed'] != ITRACKSCRAPE_ALLOW_FULL:
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'full scrape function is not available with this tracker.'}))
+            # Full scrape: every allowed (or, failing that, tracked) torrent.
+            if self.allowed is not None:
+                keys = self.allowed.keys()
+            else:
+                keys = self.downloads.keys()
+            for hash in keys:
+                fs[hash] = self.scrapedata(hash)
+
+        return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
+
+
+    def get_file_by_name(self,name):
+        # Linear scan of the allowed table for a torrent with this display
+        # name, then delegate to get_file for the actual download.
+        # Assumption: name is in UTF-8, as is the names in self.allowed
+        for hash,rec in self.allowed.iteritems():
+            if 'name' in rec and rec['name'] == name:
+                return self.get_file(hash)
+        return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+
+    def get_file(self, hash):
+        # Serve the .torrent file for 'hash' as an attachment download;
+        # only available when allow_get is enabled and the hash is allowed.
+        if not self.allow_get:
+            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                'get function is not available with this tracker.')
+        if not self.allowed.has_key(hash):
+            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+        fname = self.allowed[hash]['file']
+        fpath = self.allowed[hash]['path']
+        # Whole file is read into memory for the response body.
+        return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
+            'Content-Disposition': 'attachment; filename=' + fname},
+            open(fpath, 'rb').read())
+
+
+    def get_tstream_from_httpseed(self, httpseedurl):
+        # /tlookup handler: find the allowed torrent whose 'url-hash-list'
+        # contains the SHA1 digest of the given HTTP-seed URL, and serve
+        # its .torrent file.
+        if not self.allow_get:
+            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                'get function is not available with this tracker.')
+            
+        # TODO: normalize?
+        wanturlhash = sha(httpseedurl).digest()
+        # TODO: reverse table?
+        # Linear search over all allowed torrents' url-hash lists.
+        found = False
+        for infohash,a in self.allowed.iteritems():
+            for goturlhash in a['url-hash-list']:
+                if goturlhash == wanturlhash:
+                    found = True
+                    break
+            if found:
+                break
+                 
+        if not found or not self.allowed.has_key(infohash):
+            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+            
+        fname = self.allowed[infohash]['file']
+        fpath = self.allowed[infohash]['path']
+        print >>sys.stderr,"tracker: get_stream: Sending",fname
+        return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
+            'Content-Disposition': 'attachment; filename=' + fname},
+            open(fpath, 'rb').read())
+
+
+    def check_allowed(self, infohash, paramslist):
+        # Authorization gate for an announce.  Returns None when allowed,
+        # otherwise a complete HTTP response 4-tuple.  Refusals use HTTP
+        # 200 with a bencoded 'failure reason', per tracker convention.
+        if ( self.aggregator_key is not None
+                and not ( paramslist.has_key('password')
+                        and paramslist['password'][0] == self.aggregator_key ) ):
+            return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                bencode({'failure reason':
+                'Requested download is not authorized for use with this tracker.'}))
+
+        if self.allowed is not None:
+            # Closed tracker: only torrents present in the allowed table.
+            if not self.allowed.has_key(infohash):
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'Requested download is not authorized for use with this tracker.'}))
+            if self.config['tracker_allowed_controls']:
+                if self.allowed[infohash].has_key('failure reason'):
+                    return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                        bencode({'failure reason': self.allowed[infohash]['failure reason']}))
+
+        if paramslist.has_key('tracker'):
+            # Announce relayed by another tracker (multitracker support).
+            # NOTE(review): assumes 'peer_id' is present; a missing key
+            # would raise KeyError here -- confirm caller guarantees it.
+            if ( self.config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_NONE or       # turned off
+                          paramslist['peer_id'][0] == self.trackerid ): # oops! contacted myself
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason': 'disallowed'}))
+            
+            if ( self.config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT
+                        and not self.allowed[infohash].has_key('announce-list') ):
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'Requested download is not authorized for multitracker use.'}))
+
+        return None
+
+
+    def add_data(self, infohash, event, ip, paramslist):
+        # Record/refresh one peer from an announce.  Validates the announce
+        # parameters (raising ValueError on garbage, which the caller turns
+        # into a 400), updates the per-torrent peer/seed/completed
+        # bookkeeping and NAT-check state, and returns the number of peers
+        # the response should contain.
+        peers = self.downloads.setdefault(infohash, {})
+        ts = self.times.setdefault(infohash, {})
+        self.completed.setdefault(infohash, 0)
+        self.seedcount.setdefault(infohash, 0)
+
+        def params(key, default = None, l = paramslist):
+            # First value for 'key' in the parsed query, or 'default'.
+            if l.has_key(key):
+                return l[key][0]
+            return default
+        
+        # --- parameter validation -------------------------------------
+        myid = params('peer_id','')
+        if len(myid) != 20:
+            raise ValueError, 'id not of length 20'
+        if event not in ['started', 'completed', 'stopped', 'snooped', None]:
+            raise ValueError, 'invalid event'
+        port = long(params('port',''))
+        if port < 0 or port > 65535:
+            raise ValueError, 'invalid port'
+        left = long(params('left',''))
+        if left < 0:
+            raise ValueError, 'invalid amount left'
+        uploaded = long(params('uploaded',''))
+        downloaded = long(params('downloaded',''))
+
+        peer = peers.get(myid)
+        islocal = local_IPs.includes(ip)
+        mykey = params('key')
+        if peer:
+            # A returning peer authenticates via its 'key' or, failing
+            # that, by announcing from the same IP as before.
+            auth = peer.get('key',-1) == mykey or peer.get('ip') == ip
+
+        # Honour a client-supplied IP only when valid and permitted.
+        gip = params('ip')
+        if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
+            ip1 = gip
+        else:
+            ip1 = ip
+
+        # Number of peers to return: client's numwant, capped by config.
+        if params('numwant') is not None:
+            rsize = min(int(params('numwant')),self.response_size)
+        else:
+            rsize = self.response_size
+
+        if event == 'stopped':
+            # Peer is leaving; drop it (only if it authenticated).
+            if peer:
+                if auth:
+                    self.delete_peer(infohash,myid)
+        
+        elif not peer:
+            # First announce from this peer id: create the record and
+            # kick off a NAT reachability check unless disabled/local.
+            ts[myid] = clock()
+            peer = {'ip': ip, 'port': port, 'left': left}
+            if mykey:
+                peer['key'] = mykey
+            if gip:
+                peer['given ip'] = gip
+            if port:
+                if not self.natcheck or islocal:
+                    peer['nat'] = 0
+                    self.natcheckOK(infohash,myid,ip1,port,left)
+                else:
+                    NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
+            else:
+                peer['nat'] = 2**30
+            if event == 'completed':
+                self.completed[infohash] += 1
+            if not left:
+                self.seedcount[infohash] += 1
+
+            peers[myid] = peer
+
+        else:
+            if not auth:
+                return rsize    # return w/o changing stats
+
+            ts[myid] = clock()
+            # Transition leecher -> seed: bump counters and move the peer
+            # from the leecher bucket to the seed bucket of the becache.
+            if not left and peer['left']:
+                self.completed[infohash] += 1
+                self.seedcount[infohash] += 1
+                if not peer.get('nat', -1):
+                    for bc in self.becache[infohash]:
+                        bc[1][myid] = bc[0][myid]
+                        del bc[0][myid]
+            if peer['left']:
+                peer['left'] = left
+
+            if port:
+                # Detect address changes; they invalidate cached entries
+                # and may require redoing the NAT check.
+                recheck = False
+                if ip != peer['ip']:
+                    peer['ip'] = ip
+                    recheck = True
+                if gip != peer.get('given ip'):
+                    if gip:
+                        peer['given ip'] = gip
+                    elif peer.has_key('given ip'):
+                        del peer['given ip']
+                    recheck = True
+
+                natted = peer.get('nat', -1)
+                if recheck:
+                    if natted == 0:
+                        l = self.becache[infohash]
+                        y = not peer['left']
+                        for x in l:
+                            del x[y][myid]
+                        if not self.natcheck or islocal:
+                            del peer['nat'] # restart NAT testing
+                if natted and natted < self.natcheck:
+                    # Previous checks failed fewer than 'natcheck' times: retry.
+                    recheck = True
+
+                if recheck:
+                    if not self.natcheck or islocal:
+                        peer['nat'] = 0
+                        self.natcheckOK(infohash,myid,ip1,port,left)
+                    else:
+                        NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
+
+        return rsize
+
+
+    def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize):
+        # Assemble the announce-response dict: seed/leech counts,
+        # reannounce interval and up to 'rsize' peers, drawn from a
+        # shuffled cache.  return_type selects the peer encoding:
+        # 0 = full dicts, 1 = dicts without peer id, 2 = compact strings.
+        data = {}    # return data
+        seeds = self.seedcount[infohash]
+        data['complete'] = seeds
+        data['incomplete'] = len(self.downloads[infohash]) - seeds
+        
+        # NOTE(review): assumes self.allowed is a dict whenever
+        # tracker_allowed_controls is set -- confirm config invariant.
+        if ( self.config['tracker_allowed_controls']
+                and self.allowed[infohash].has_key('warning message') ):
+            data['warning message'] = self.allowed[infohash]['warning message']
+
+        if tracker:
+            # Request from another tracker (multitracker): serve from the
+            # dedicated cached_t cache, refreshed when stale or depleted.
+            data['interval'] = self.config['tracker_multitracker_reannounce_interval']
+            if not rsize:
+                return data
+            cache = self.cached_t.setdefault(infohash, None)
+            if ( not cache or len(cache[1]) < rsize
+                 or cache[0] + self.config['tracker_min_time_between_cache_refreshes'] < clock() ):
+                bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
+                cache = [ clock(), bc[0][0].values() + bc[0][1].values() ]
+                self.cached_t[infohash] = cache
+                shuffle(cache[1])
+                cache = cache[1]
+
+            data['peers'] = cache[-rsize:]
+            del cache[-rsize:]
+            return data
+
+        data['interval'] = self.reannounce_interval
+        if stopped or not rsize:     # save some bandwidth
+            data['peers'] = []
+            return data
+
+        # becache layout: one [leechers, seeds] pair per return_type.
+        bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
+        len_l = len(bc[0][0])
+        len_s = len(bc[0][1])
+        if not (len_l+len_s):   # caches are empty!
+            data['peers'] = []
+            return data
+        # Number of leechers to include, proportional to their share.
+        l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
+        cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
+        # Invalidate the cache when stale or too small for this request.
+        if cache and ( not cache[1]
+                       or (is_seed and len(cache[1]) < rsize)
+                       or len(cache[1]) < l_get_size
+                       or cache[0]+self.config['tracker_min_time_between_cache_refreshes'] < self.cachetime ):
+            cache = None
+        if not cache:
+            # Rebuild: becache entries plus tracker-to-tracker peers (vv),
+            # shuffled so responses hand out random subsets.
+            peers = self.downloads[infohash]
+            vv = [[],[],[]]
+            for key, ip, port in self.t2tlist.harvest(infohash):   # empty if disabled
+                if not peers.has_key(key):
+                    vv[0].append({'ip': ip, 'port': port, 'peer id': key})
+                    vv[1].append({'ip': ip, 'port': port})
+                    vv[2].append(compact_peer_info(ip, port))
+            cache = [ self.cachetime,
+                      bc[return_type][0].values()+vv[return_type],
+                      bc[return_type][1].values() ]
+            shuffle(cache[1])
+            shuffle(cache[2])
+            self.cached[infohash][return_type] = cache
+            # Share the harvested t2t peers with the other encodings' caches.
+            for rr in xrange(len(self.cached[infohash])):
+                if rr != return_type:
+                    try:
+                        self.cached[infohash][rr][1].extend(vv[rr])
+                    except:
+                        pass
+        # Draw peers: consume from the end of the shuffled caches so each
+        # response hands out different peers until the cache refreshes.
+        if len(cache[1]) < l_get_size:
+            peerdata = cache[1]
+            if not is_seed:
+                peerdata.extend(cache[2])
+            cache[1] = []
+            cache[2] = []
+        else:
+            if not is_seed:
+                peerdata = cache[2][l_get_size-rsize:]
+                del cache[2][l_get_size-rsize:]
+                rsize -= len(peerdata)
+            else:
+                peerdata = []
+            if rsize:
+                peerdata.extend(cache[1][-rsize:])
+                del cache[1][-rsize:]
+        if return_type == 2:
+            # Compact format: concatenate the 6-byte ip/port strings.
+            peerdata = ''.join(peerdata)
+        data['peers'] = peerdata
+        return data
+
+
+    def get(self, connection, path, headers):
+        # Main HTTP entry point (called by HTTPHandler): parse the request
+        # URL, dispatch to the info page / file download / scrape handlers,
+        # or process an announce and return the bencoded response.
+        # Returns an HTTP response 4-tuple.
+        real_ip = connection.get_ip()
+        ip = real_ip
+        if is_ipv4(ip):
+            ipv4 = True
+        else:
+            # Try to map an IPv6-mapped address back to IPv4; compact peer
+            # responses are only offered when ipv4 is True.
+            try:
+                ip = ipv6_to_ipv4(ip)
+                ipv4 = True
+            except ValueError:
+                ipv4 = False
+
+        # Arno: log received GET
+        if self.config['tracker_logfile']:
+            self.getlog(ip, path, headers)
+
+        if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
+             or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
+            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                bencode({'failure reason':
+                'your IP is not allowed on this tracker'}))
+
+        # Trust proxy-forwarded IP headers unless configured otherwise.
+        nip = get_forwarded_ip(headers)
+        if nip and not self.only_local_override_ip:
+            ip = nip
+            try:
+                ip = to_ipv4(ip)
+                ipv4 = True
+            except ValueError:
+                ipv4 = False
+
+        paramslist = {}
+        def params(key, default = None, l = paramslist):
+            # First value for 'key' in the parsed query, or 'default'.
+            if l.has_key(key):
+                return l[key][0]
+            return default
+
+        try:
+            # Split the URL and decode the query string into paramslist
+            # (each key maps to a list of values).
+            (scheme, netloc, path, pars, query, fragment) = urlparse(path)
+            if self.uq_broken == 1:
+                path = path.replace('+',' ')
+                query = query.replace('+',' ')
+            path = unquote(path)[1:]
+            for s in query.split('&'):
+                if s:
+                    i = s.find('=')
+                    if i == -1:
+                        break
+                    kw = unquote(s[:i])
+                    paramslist.setdefault(kw, [])
+                    paramslist[kw] += [unquote(s[i+1:])]
+                    
+            if DEBUG:
+                print >>sys.stderr,"tracker: Got request /"+path+'?'+query
+                    
+            # --- non-announce endpoints -------------------------------
+            if path == '' or path == 'index.html':
+                return self.get_infopage()
+            if (path == 'file'):
+                # Arno: 2010-02-26: name based retrieval
+                if paramslist.has_key('name'):
+                    return self.get_file_by_name(params('name'))
+                else:
+                    return self.get_file(params('info_hash'))
+            
+            if path == 'tlookup':
+                return self.get_tstream_from_httpseed(unquote(query))
+            if path == 'favicon.ico' and self.favicon is not None:
+                return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)
+
+            # automated access from here on
+
+            if path == 'scrape':
+                return self.get_scrape(paramslist)
+            
+            if path != 'announce':
+                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+
+            # main tracker function
+
+            filtered = self.Filter.check(real_ip, paramslist, headers)
+            if filtered:
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason': filtered}))
+            
+            infohash = params('info_hash')
+            if not infohash:
+                raise ValueError, 'no info hash'
+
+            notallowed = self.check_allowed(infohash, paramslist)
+            if notallowed:
+                return notallowed
+
+            event = params('event')
+
+            # Update peer bookkeeping; raises ValueError on bad params.
+            rsize = self.add_data(infohash, event, ip, paramslist)
+
+        except ValueError, e:
+            print_exc()
+            return (400, 'Bad Request', {'Content-Type': 'text/plain'}, 
+                'you sent me garbage - ' + str(e))
+
+        # Optionally forward this announce to an aggregating tracker.
+        if self.aggregate_forward and not paramslist.has_key('tracker'):
+            self.aggregate_senddata(query)
+
+        if self.is_aggregator:      # don't return peer data here
+            return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'response': 'OK'}))
+
+        # Pick the peer encoding the client asked for (compact only works
+        # for IPv4 clients).
+        if params('compact') and ipv4:
+            return_type = 2
+        elif params('no_peer_id'):
+            return_type = 1
+        else:
+            return_type = 0
+            
+        data = self.peerlist(infohash, event=='stopped',
+                             params('tracker'), not params('left'),
+                             return_type, rsize)
+
+        # Piggy-backed scrape data, if requested.
+        if paramslist.has_key('scrape'):
+            data['scrape'] = self.scrapedata(infohash, False)
+            
+        return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
+
+
+    def natcheckOK(self, infohash, peerid, ip, port, not_seed):
+        # Publish a reachable peer into the becache in all three response
+        # encodings (with peer id / without / compact).  Bucket index is
+        # 'not not_seed': 1 when the peer has nothing left (seed), else 0.
+        if DEBUG:
+            print >>sys.stderr,"tracker: natcheck: Recorded succes"
+        bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
+        bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
+                                              'peer id': peerid}))
+        bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
+        bc[2][not not_seed][peerid] = compact_peer_info(ip, port)
+
+
+    def natchecklog(self, peerid, ip, port, result):
+        # Print one NAT-check outcome in Common Log Format style to stdout.
+        year, month, day, hour, minute, second, a, b, c = localtime(time())
+        print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
+            ip, quote(peerid), day, months[month], year, hour, minute, second,
+            ip, port, result)
+
+    def getlog(self, ip, path, headers):
+        # Print one received GET request in Common Log Format style to stdout.
+        year, month, day, hour, minute, second, a, b, c = localtime(time())
+        print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "GET %s HTTP/1.1" 100 0 - -' % (
+            ip, ip, day, months[month], year, hour, minute, second, path)
+
+    def connectback_result(self, result, downloadid, peerid, ip, port):
+        # Callback from NatCheck: 'result' is truthy when the tracker
+        # managed to connect back to the peer.  Updates the peer's 'nat'
+        # failure counter and publishes reachable peers via natcheckOK.
+        record = self.downloads.get(downloadid, {}).get(peerid)
+        if ( record is None 
+                 or (record['ip'] != ip and record.get('given ip') != ip)
+                 or record['port'] != port ):
+            # Peer vanished or re-announced with different coordinates.
+            if self.config['tracker_log_nat_checks']:
+                self.natchecklog(peerid, ip, port, 404)
+            if DEBUG:
+                print >>sys.stderr,"tracker: natcheck: No record found for tested peer"
+            return
+        if self.config['tracker_log_nat_checks']:
+            if result:
+                x = 200
+            else:
+                x = 503
+            self.natchecklog(peerid, ip, port, x)
+        if not record.has_key('nat'):
+            # First verdict for this peer: nat == 0 means reachable.
+            record['nat'] = int(not result)
+            if result:
+                self.natcheckOK(downloadid,peerid,ip,port,record['left'])
+        elif result and record['nat']:
+            # Previously failing peer is now reachable: reset and publish.
+            record['nat'] = 0
+            self.natcheckOK(downloadid,peerid,ip,port,record['left'])
+        elif not result:
+            record['nat'] += 1
+            if DEBUG:
+                print >>sys.stderr,"tracker: natcheck: Recorded failed attempt"
+
+
+    def remove_from_state(self, *l):
+        # Drop the named keys from the persisted state dict, silently
+        # ignoring keys that are absent.
+        for s in l:
+            try:
+                del self.state[s]
+            except:
+                pass
+
+    def save_state(self):
+        # Periodically persist self.state to the dfile; reschedules itself
+        # first so saving continues even if this run raises.
+        self.rawserver.add_task(self.save_state, self.save_dfile_interval)
+        h = open(self.dfile, 'wb')
+        # Format chosen by config: bencoded, otherwise binary pickle.
+        if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE:
+            h.write(bencode(self.state))
+        else:
+            pickle.dump(self.state,h,-1)
+        h.close()
+
+
+    def parse_allowed(self,source=None):
+        # Refresh the allowed-torrents table, either from the configured
+        # torrent directory or from a flat allowed-list file.  When called
+        # without 'source' (the periodic case) it reschedules itself.
+        if DEBUG:
+            print >>sys.stderr,"tracker: parse_allowed: Source is",source,"alloweddir",self.config['tracker_allowed_dir']
+        
+        if source is None:
+            self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)
+
+        if self.config['tracker_allowed_dir']:
+            # Rescan the directory for .torrent / Tribler torrent files.
+            r = parsedir( self.config['tracker_allowed_dir'], self.allowed,
+                          self.allowed_dir_files, self.allowed_dir_blocked,
+                          [".torrent",TRIBLER_TORRENT_EXT] )
+            ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
+                added, garbage2 ) = r
+            
+            if DEBUG:
+                print >>sys.stderr,"tracker: parse_allowed: Found new",`added`
+            
+            self.state['allowed'] = self.allowed
+            self.state['allowed_dir_files'] = self.allowed_dir_files
+
+            self.t2tlist.parse(self.allowed)
+            
+        else:
+            # Flat file mode: only re-read when the mtime changed.
+            f = self.config['tracker_allowed_list']
+            if self.allowed_list_mtime == os.path.getmtime(f):
+                return
+            try:
+                r = parsetorrentlist(f, self.allowed)
+                (self.allowed, added, garbage2) = r
+                self.state['allowed_list'] = self.allowed
+            except (IOError, OSError):
+                print '**warning** unable to read allowed torrent list'
+                return
+            self.allowed_list_mtime = os.path.getmtime(f)
+
+        # Initialise bookkeeping for any newly discovered torrents.
+        for infohash in added.keys():
+            self.downloads.setdefault(infohash, {})
+            self.completed.setdefault(infohash, 0)
+            self.seedcount.setdefault(infohash, 0)
+
+
+    def read_ip_lists(self):
+        # Periodically reload the allowed/banned IP lists from disk,
+        # re-reading each file only when its mtime changed.  Reschedules
+        # itself first so reloading survives a failed run.
+        self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval)
+            
+        f = self.config['tracker_allowed_ips']
+        if f and self.allowed_ip_mtime != os.path.getmtime(f):
+            self.allowed_IPs = IP_List()
+            try:
+                self.allowed_IPs.read_fieldlist(f)
+                self.allowed_ip_mtime = os.path.getmtime(f)
+            except (IOError, OSError):
+                print '**warning** unable to read allowed_IP list'
+                
+        f = self.config['tracker_banned_ips']
+        if f and self.banned_ip_mtime != os.path.getmtime(f):
+            self.banned_IPs = IP_Range_List()
+            try:
+                self.banned_IPs.read_rangelist(f)
+                self.banned_ip_mtime = os.path.getmtime(f)
+            except (IOError, OSError):
+                print '**warning** unable to read banned_IP list'
+                
+                
+
+    def delete_peer(self, infohash, peerid):
+        # Remove one peer and all of its bookkeeping: seed count, becache
+        # entries (only present when its NAT check succeeded), announce
+        # timestamps and the downloads record itself.
+        dls = self.downloads[infohash]
+        peer = dls[peerid]
+        if not peer['left']:
+            self.seedcount[infohash] -= 1
+        if not peer.get('nat',-1):
+            l = self.becache[infohash]
+            y = not peer['left']  # becache bucket: 1 = seeds, 0 = leechers
+            for x in l:
+                del x[y][peerid]
+        del self.times[infohash][peerid]
+        del dls[peerid]
+
+    def expire_downloaders(self):
+        # Periodic reaper: drop peers that have not announced since the
+        # previous sweep, then (unless keep_dead) purge torrents with no
+        # peers that are not in the allowed table.  Reschedules itself.
+        for x in self.times.keys():
+            for myid, t in self.times[x].items():
+                if t < self.prevtime:
+                    self.delete_peer(x,myid)
+        self.prevtime = clock()
+        if (self.keep_dead != 1):
+            for key, value in self.downloads.items():
+                if len(value) == 0 and (
+                        self.allowed is None or not self.allowed.has_key(key) ):
+                    del self.times[key]
+                    del self.downloads[key]
+                    del self.seedcount[key]
+        self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
+
+
+def track(args):
+    # Command-line entry point: parse tracker options, run a RawServer
+    # event loop around a Tracker instance until shutdown, then persist
+    # state one last time.  With no args, print option help and exit.
+    if not args:
+        print formatDefinitions(defaults, 80)
+        return
+    try:
+        config, files = parseargs(args, defaults, 0, 0)
+    except ValueError, e:
+        print 'error: ' + str(e)
+        print 'run with no arguments for parameter explanations'
+        return
+    r = RawServer(Event(), config['tracker_timeout_check_interval'],
+                  config['tracker_socket_timeout'], ipv6_enable = config['ipv6_enabled'])
+    t = Tracker(config, r)
+    r.bind(config['minport'], config['bind'],
+           reuse = True, ipv6_socket_style = config['ipv6_binds_v4'])
+    # Blocks here serving requests until the server is shut down.
+    r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes']))
+    t.save_state()
+    print '# Shutting down: ' + isotime()
+
+def size_format(s):
+    # Human-readable byte count: integer B/KiB/MiB, then GiB/TiB
+    # truncated to two decimal places.
+    if (s < 1024):
+        r = str(s) + 'B'
+    elif (s < 1048576):
+        r = str(int(s/1024)) + 'KiB'
+    elif (s < 1073741824L):
+        r = str(int(s/1048576)) + 'MiB'
+    elif (s < 1099511627776L):
+        r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
+    else:
+        r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
+    return(r)
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/CurrentRateMeasure.py b/instrumentation/next-share/BaseLib/Core/BitTornado/CurrentRateMeasure.py
new file mode 100644 (file)
index 0000000..43f8ce2
--- /dev/null
@@ -0,0 +1,38 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from clock import clock
+
+class Measure:
+    # Transfer-rate meter: keeps a running average over a sliding window
+    # of at most max_rate_period seconds.
+    def __init__(self, max_rate_period, fudge = 1):
+        # max_rate_period: longest timespan (seconds) the average covers.
+        # fudge: pretend measurement started 'fudge' seconds ago so the
+        # first update never divides by a zero-length interval.
+        self.max_rate_period = max_rate_period
+        self.ratesince = clock() - fudge
+        self.last = self.ratesince
+        self.rate = 0.0
+        self.total = 0L
+
+    def update_rate(self, amount):
+        # Fold 'amount' new bytes into the running average.
+        self.total += amount
+        t = clock()
+        # Weighted blend of the old rate over (last - ratesince) with the
+        # new bytes; the +0.0001 guards against division by zero.
+        self.rate = (self.rate * (self.last - self.ratesince) + 
+            amount) / (t - self.ratesince + 0.0001)
+        self.last = t
+        # Slide the window forward so old traffic ages out.
+        if self.ratesince < t - self.max_rate_period:
+            self.ratesince = t - self.max_rate_period
+
+    def get_rate(self):
+        # Current rate in bytes/second (refreshes the window first).
+        self.update_rate(0)
+        #print 'Rate: %f (%d bytes)' % (self.rate, self.total)
+        return self.rate
+
+    def get_rate_noupdate(self):
+        # Last computed rate, without touching the clock.
+        return self.rate
+
+    def time_until_rate(self, newrate):
+        # Seconds of silence needed for the average to decay to 'newrate';
+        # 0 if already at or below it.
+        if self.rate <= newrate:
+            return 0
+        t = clock() - self.ratesince
+        return ((self.rate * t) / newrate) - t
+
+    def get_total(self):
+        # Total bytes accounted for since construction.
+        return self.total
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/HTTPHandler.py b/instrumentation/next-share/BaseLib/Core/BitTornado/HTTPHandler.py
new file mode 100644 (file)
index 0000000..06c3211
--- /dev/null
@@ -0,0 +1,194 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+import sys
+import time
+from clock import clock
+from gzip import GzipFile
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+class HTTPConnection:
+    """Parses a single HTTP request (GET/HEAD only) arriving on a raw
+    connection, then writes the response produced by handler.getfunc.
+
+    Parsing is an incremental state machine: next_func consumes one
+    line of input at a time and returns the next state (or None to
+    reject the request).
+    """
+    def __init__(self, handler, connection):
+        self.handler = handler
+        self.connection = connection
+        self.buf = ''
+        self.closed = False
+        self.done = False
+        self.donereading = False
+        # initial state: expect the request line ("GET /path HTTP/1.x")
+        self.next_func = self.read_type
+
+    def get_ip(self):
+        return self.connection.get_ip()
+
+    def data_came_in(self, data):
+        """Feed raw bytes in; returns False when the connection should
+        be shut down, True to keep reading."""
+        if self.donereading or self.next_func is None:
+            return True
+        self.buf += data
+        while 1:
+            try:
+                i = self.buf.index('\n')
+            except ValueError:
+                # no complete line buffered yet
+                return True
+            val = self.buf[:i]
+            self.buf = self.buf[i+1:]
+            # advance the state machine by one line
+            self.next_func = self.next_func(val)
+            if self.donereading:
+                return True
+            if self.next_func is None or self.closed:
+                return False
+
+    def read_type(self, data):
+        """State 1: parse the request line; only HEAD and GET pass."""
+        self.header = data.strip()
+        words = data.split()
+        if len(words) == 3:
+            self.command, self.path, garbage = words
+            self.pre1 = False
+        elif len(words) == 2:
+            # two-word form = HTTP/0.9 style request: no headers in the
+            # reply (pre1 flag), and only GET is legal
+            self.command, self.path = words
+            self.pre1 = True
+            if self.command != 'GET':
+                return None
+        else:
+            return None
+        if self.command not in ('HEAD', 'GET'):
+            return None
+        self.headers = {}
+        return self.read_header
+
+    def read_header(self, data):
+        """State 2: collect headers until the blank line, then dispatch
+        the request to handler.getfunc and answer immediately if it
+        returned a response tuple."""
+        data = data.strip()
+        if data == '':
+            self.donereading = True
+            # negotiate gzip only if the client advertised it
+            if self.headers.get('accept-encoding', '').find('gzip') > -1:
+                self.encoding = 'gzip'
+            else:
+                self.encoding = 'identity'
+            r = self.handler.getfunc(self, self.path, self.headers)
+            if r is not None:
+                self.answer(r)
+            return None
+        try:
+            i = data.index(':')
+        except ValueError:
+            return None
+        # header names are stored lower-cased
+        self.headers[data[:i].strip().lower()] = data[i+1:].strip()
+        if DEBUG:
+            print data[:i].strip() + ": " + data[i+1:].strip()
+        return self.read_header
+
+    def answer(self, (responsecode, responsestring, headers, data)):
+        """Serialize and send an HTTP/1.0 response, gzip-compressing the
+        body when negotiated and when compression actually shrinks it."""
+        if self.closed:
+            return
+        if self.encoding == 'gzip':
+            compressed = StringIO()
+            gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9)
+            gz.write(data)
+            gz.close()
+            cdata = compressed.getvalue()
+            if len(cdata) >= len(data):
+                # compression did not help; fall back to identity
+                self.encoding = 'identity'
+            else:
+                if DEBUG:
+                    print "Compressed: %i  Uncompressed: %i\n" % (len(cdata), len(data))
+                data = cdata
+                headers['Content-Encoding'] = 'gzip'
+
+        # i'm abusing the identd field here, but this should be ok
+        if self.encoding == 'identity':
+            ident = '-'
+        else:
+            ident = self.encoding
+        self.handler.log( self.connection.get_ip(), ident, '-',
+                          self.header, responsecode, len(data),
+                          self.headers.get('referer','-'),
+                          self.headers.get('user-agent','-') )
+        self.done = True
+        r = StringIO()
+        r.write('HTTP/1.0 ' + str(responsecode) + ' ' + 
+            responsestring + '\r\n')
+        if not self.pre1:
+            # full HTTP: emit status headers; 0.9-style requests get body only
+            headers['Content-Length'] = len(data)
+            for key, value in headers.items():
+                r.write(key + ': ' + str(value) + '\r\n')
+            r.write('\r\n')
+        if self.command != 'HEAD':
+            r.write(data)
+        self.connection.write(r.getvalue())
+        # if everything was written synchronously, close our side now;
+        # otherwise HTTPHandler.connection_flushed will close it later
+        if self.connection.is_flushed():
+            self.connection.shutdown(1)
+
+class HTTPHandler:
+    """Raw-server handler that multiplexes incoming sockets into
+    HTTPConnection parsers and funnels requests to getfunc.
+
+    getfunc(connection, path, headers) -> response tuple or None.
+    minflush: minimum seconds between stdout log flushes.
+    """
+    def __init__(self, getfunc, minflush):
+        self.connections = {}
+        self.getfunc = getfunc
+        self.minflush = minflush
+        self.lastflush = clock()
+
+    def external_connection_made(self, connection):
+        # wrap each new socket in its own request parser
+        self.connections[connection] = HTTPConnection(self, connection)
+
+    def connection_flushed(self, connection):
+        # response fully written -> close the sending side
+        if self.connections[connection].done:
+            connection.shutdown(1)
+
+    def connection_lost(self, connection):
+        ec = self.connections[connection]
+        ec.closed = True
+        # break reference cycles so the parser can be collected
+        del ec.connection
+        del ec.next_func
+        del self.connections[connection]
+
+    def data_came_in(self, connection, data):
+        c = self.connections[connection]
+        # parser returning False means the request was rejected
+        if not c.data_came_in(data) and not c.closed:
+            c.connection.shutdown(1)
+
+    def log(self, ip, ident, username, header,
+            responsecode, length, referrer, useragent):
+        """Emit one Common-Log-Format-style line (DEBUG only) and flush
+        stdout at most once per minflush seconds."""
+        year, month, day, hour, minute, second, a, b, c = time.localtime(time.time())
+        if DEBUG:
+            print >>sys.stderr,'HTTPHandler: %s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % (
+                ip, ident, username, day, months[month], year, hour,
+                minute, second, header, responsecode, length, referrer, useragent)
+        t = clock()
+        if t - self.lastflush > self.minflush:
+            self.lastflush = t
+            sys.stdout.flush()
+
+
+class DummyHTTPHandler:
+    """Stand-in handler used when the internal tracker is disabled:
+    answers every connection with a fixed 404 and closes it."""
+    def __init__(self):
+        pass
+
+    def external_connection_made(self, connection):
+        print >> sys.stderr,"DummyHTTPHandler: ext_conn_made"
+        reply = 'HTTP/1.1 404 Not Found\r\nContent-Type: text/plain\r\n\r\nTribler Internal Tracker not activated.\r\n'
+        connection.write(reply)
+        connection.close()
+
+    def connection_flushed(self, connection):
+        pass
+
+    def connection_lost(self, connection):
+        pass
+
+    def data_came_in(self, connection, data):
+        # any data after the canned reply is ignored
+        print >> sys.stderr,"DummyHTTPHandler: data_came_in",len(data)
+        pass
+
+    def log(self, ip, ident, username, header,
+            responsecode, length, referrer, useragent):
+        year, month, day, hour, minute, second, a, b, c = time.localtime(time.time())
+        pass
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/PSYCO.py b/instrumentation/next-share/BaseLib/Core/BitTornado/PSYCO.py
new file mode 100644 (file)
index 0000000..58fd571
--- /dev/null
@@ -0,0 +1,8 @@
+# Written by BitTornado authors
+# see LICENSE.txt for license information
+
+# edit this file to enable/disable Psyco
+# psyco = 1 -- enabled
+# psyco = 0 -- disabled
+
+psyco = 0
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/RateLimiter.py b/instrumentation/next-share/BaseLib/Core/BitTornado/RateLimiter.py
new file mode 100644 (file)
index 0000000..f6a7d73
--- /dev/null
@@ -0,0 +1,178 @@
+# Written by Bram Cohen and Pawel Garbacki
+# see LICENSE.txt for license information
+
+from clock import clock
+from CurrentRateMeasure import Measure
+from math import sqrt
+import sys
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+try:
+    sum([1])
+except:
+    sum = lambda a: reduce(lambda x, y: x+y, a, 0)
+
+DEBUG = False
+
+MAX_RATE_PERIOD = 20.0
+MAX_RATE = 10e10
+PING_BOUNDARY = 1.2
+PING_SAMPLES = 7
+PING_DISCARDS = 1
+PING_THRESHHOLD = 5
+PING_DELAY = 5  # cycles 'til first upward adjustment
+PING_DELAY_NEXT = 2  # 'til next
+ADJUST_UP = 1.05
+ADJUST_DOWN = 0.95
+UP_DELAY_FIRST = 5
+UP_DELAY_NEXT = 2
+SLOTS_STARTING = 6
+SLOTS_FACTOR = 1.66/1000
+
+class RateLimiter:
+    """Token-bucket style upload rate limiter over a circular list of
+    connections.
+
+    Connections waiting to upload are linked through their next_upload
+    attribute into a ring; self.last points at the tail.  try_send walks
+    the ring sending unitsize chunks until the rate budget is spent,
+    then reschedules itself via sched.  ping() implements optional
+    automatic rate adjustment driven by observed latency.
+    """
+    def __init__(self, sched, unitsize, slotsfunc = lambda x: None):
+        self.sched = sched
+        self.last = None
+        self.unitsize = unitsize
+        self.slotsfunc = slotsfunc
+        self.measure = Measure(MAX_RATE_PERIOD)
+        self.autoadjust = False
+        self.upload_rate = MAX_RATE * 1000
+        self.slots = SLOTS_STARTING    # garbage if not automatic
+
+    def set_upload_rate(self, rate):
+        """Set the cap in KB/s; rate < 0 enables automatic adjustment,
+        rate == 0 means unlimited."""
+        if DEBUG: 
+            print >>sys.stderr, "RateLimiter: set_upload_rate", rate
+            
+        # rate = -1 # test automatic
+        if rate < 0:
+            if self.autoadjust:
+                return
+            self.autoadjust = True
+            self.autoadjustup = 0
+            self.pings = []
+            rate = MAX_RATE
+            self.slots = SLOTS_STARTING
+            self.slotsfunc(self.slots)
+        else:
+            self.autoadjust = False
+        if not rate:
+            rate = MAX_RATE
+        # internal budget is tracked in bytes/s
+        self.upload_rate = rate * 1000
+        self.lasttime = clock()
+        self.bytes_sent = 0
+
+    def queue(self, conn):
+        """Insert conn into the upload ring; starts sending at once if
+        the ring was empty."""
+        if DEBUG: print >>sys.stderr, "RateLimiter: queue", conn
+        assert conn.next_upload is None
+        if self.last is None:
+            self.last = conn
+            conn.next_upload = conn
+            self.try_send(True)
+        else:
+            conn.next_upload = self.last.next_upload
+            self.last.next_upload = conn
+# 2fastbt_
+            # coordinator connections are inserted at the head (served
+            # next) instead of becoming the new tail
+            if not conn.connection.is_coordinator_con():
+                self.last = conn
+# _2fastbt
+
+    def try_send(self, check_time = False):
+        """Spend the accumulated rate budget sending chunks around the
+        ring; reschedules itself when the budget runs out."""
+        if DEBUG: print >>sys.stderr, "RateLimiter: try_send"
+        t = clock()
+        # budget refills proportionally to elapsed time
+        self.bytes_sent -= (t - self.lasttime) * self.upload_rate
+        #print 'try_send: bytes_sent: %s' % self.bytes_sent
+        self.lasttime = t
+        if check_time:
+            self.bytes_sent = max(self.bytes_sent, 0)
+        cur = self.last.next_upload
+        while self.bytes_sent <= 0:
+            bytes = cur.send_partial(self.unitsize)
+            self.bytes_sent += bytes
+            self.measure.update_rate(bytes)
+            if bytes == 0 or cur.backlogged():
+                # nothing (more) to send on this conn: drop it from the ring
+                if self.last is cur:
+                    self.last = None
+                    cur.next_upload = None
+                    break
+                else:
+                    self.last.next_upload = cur.next_upload
+                    cur.next_upload = None
+                    cur = self.last.next_upload
+            else:
+# 2fastbt_
+                if not cur.connection.is_coordinator_con() or not cur.upload.buffer:
+# _2fastbt
+                    self.last = cur
+                    cur = cur.next_upload
+# 2fastbt_
+                else:
+                    # keep draining a coordinator connection's buffer
+                    pass
+# _2fastbt
+        else:
+            # 01/04/10 Boudewijn: because we use a -very- small value
+            # to indicate a 0bps rate, we will schedule the call to be
+            # made in a very long time.  This results in no upload for
+            # a very long time.
+            #
+            # the try_send method has protection again calling to
+            # soon, so we can simply schedule the call to be made
+            # sooner.
+            delay = min(5.0, self.bytes_sent / self.upload_rate)
+            self.sched(self.try_send, delay)
+
+    def adjust_sent(self, bytes):
+        """Account for bytes sent outside try_send; debt is capped at
+        three seconds' worth of budget."""
+        # if DEBUG: print >>sys.stderr, "RateLimiter: adjust_sent", bytes
+        self.bytes_sent = min(self.bytes_sent+bytes, self.upload_rate*3)
+        self.measure.update_rate(bytes)
+
+
+    def ping(self, delay):
+        """Automatic-mode feedback: collect PING_SAMPLES latency samples
+        (after PING_DISCARDS warm-ups) and adjust the rate down when
+        enough exceed PING_BOUNDARY, up otherwise."""
+        ##raise Exception('Is this called?')
+        if DEBUG:
+            print >>sys.stderr, delay
+        if not self.autoadjust:
+            return
+        self.pings.append(delay > PING_BOUNDARY)
+        if len(self.pings) < PING_SAMPLES+PING_DISCARDS:
+            return
+        if DEBUG:
+            print >>sys.stderr, 'RateLimiter: cycle'
+        pings = sum(self.pings[PING_DISCARDS:])
+        del self.pings[:]
+        if pings >= PING_THRESHHOLD:   # assume flooded
+            if self.upload_rate == MAX_RATE:
+                self.upload_rate = self.measure.get_rate()*ADJUST_DOWN
+            else:
+                self.upload_rate = min(self.upload_rate, 
+                                       self.measure.get_rate()*1.1)
+            self.upload_rate = max(int(self.upload_rate*ADJUST_DOWN), 2)
+            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
+            self.slotsfunc(self.slots)
+            if DEBUG:
+                print >>sys.stderr, 'RateLimiter: adjust down to '+str(self.upload_rate)
+            self.lasttime = clock()
+            self.bytes_sent = 0
+            # wait longer before the next upward adjustment
+            self.autoadjustup = UP_DELAY_FIRST
+        else:   # not flooded
+            if self.upload_rate == MAX_RATE:
+                return
+            self.autoadjustup -= 1
+            if self.autoadjustup:
+                return
+            self.upload_rate = int(self.upload_rate*ADJUST_UP)
+            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
+            self.slotsfunc(self.slots)
+            if DEBUG:
+                print >>sys.stderr, 'RateLimiter: adjust up to '+str(self.upload_rate)
+            self.lasttime = clock()
+            self.bytes_sent = 0
+            self.autoadjustup = UP_DELAY_NEXT
+
+
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/RateMeasure.py b/instrumentation/next-share/BaseLib/Core/BitTornado/RateMeasure.py
new file mode 100644 (file)
index 0000000..1c42a2f
--- /dev/null
@@ -0,0 +1,70 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from clock import clock
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+FACTOR = 0.999
+
+class RateMeasure:
+    """Estimates transfer rate and time remaining.
+
+    Accumulated time and bytes both decay by FACTOR once per elapsed
+    wall-clock second, so recent traffic dominates the estimate.
+    """
+    def __init__(self):
+        self.last = None
+        self.time = 1.0
+        self.got = 0.0
+        self.remaining = None
+        self.broke = False
+        self.got_anything = False
+        self.last_checked = None
+        self.rate = 0
+
+    def data_came_in(self, amount):
+        # first byte only starts the clock; subsequent calls update the rate
+        if not self.got_anything:
+            self.got_anything = True
+            self.last = clock()
+            return
+        self.update(amount)
+
+    def data_rejected(self, amount):
+        pass
+
+    def get_time_left(self, left):
+        """Estimate seconds to download `left` bytes, or None if no data
+        has arrived yet / the rate is zero.  The stored estimate is only
+        replaced when it drifts by more than ~5%; otherwise it simply
+        counts down, which keeps the displayed ETA stable."""
+        t = clock()
+        if not self.got_anything:
+            return None
+        # refresh a stale rate if nothing arrived for 15s
+        if t - self.last > 15:
+            self.update(0)
+        try:
+            remaining = left/self.rate
+            delta = max(remaining/20, 2)
+            if self.remaining is None:
+                self.remaining = remaining
+            elif abs(self.remaining-remaining) > delta:
+                self.remaining = remaining
+            else:
+                self.remaining -= t - self.last_checked
+        except ZeroDivisionError:
+            self.remaining = None
+        if self.remaining is not None and self.remaining < 0.1:
+            self.remaining = 0.1
+        self.last_checked = t
+        return self.remaining
+
+    def update(self, amount):
+        """Decay the accumulators once per elapsed second, then fold in
+        `amount` bytes and recompute rate = got/time."""
+        t = clock()
+        t1 = int(t)
+        l1 = int(self.last)
+        for i in xrange(l1, t1):
+            self.time *= FACTOR
+            self.got *= FACTOR
+        self.got += amount
+        # gaps longer than 20s are not counted as transfer time
+        if t - self.last < 20:
+            self.time += t - self.last
+        self.last = t
+        try:
+            self.rate = self.got / self.time
+        except ZeroDivisionError:
+            pass
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/RawServer.py b/instrumentation/next-share/BaseLib/Core/BitTornado/RawServer.py
new file mode 100644 (file)
index 0000000..0ebaf35
--- /dev/null
@@ -0,0 +1,274 @@
+# Written by Bram Cohen and Pawel Garbacki
+# see LICENSE.txt for license information
+
+from bisect import insort
+from SocketHandler import SocketHandler
+import socket
+from cStringIO import StringIO
+from traceback import print_exc
+from select import error
+from threading import Event, RLock
+from thread import get_ident
+from clock import clock
+import sys
+import time
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+def autodetect_ipv6():
+    # Return 1 if this Python build and OS can create an AF_INET6
+    # stream socket, 0 otherwise.
+    try:
+        assert sys.version_info >= (2, 3)
+        assert socket.has_ipv6
+        socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+    except:
+        return 0
+    return 1
+
+def autodetect_socket_style():
+    # Decide how IPv6 sockets must be bound: 1 = a single dual-stack
+    # socket serves both families, 0 = separate v4/v6 sockets needed.
+    # On Linux this is read from /proc (bindv6only); elsewhere assume
+    # dual-stack.
+    if sys.platform.find('linux') < 0:
+        return 1
+    else:
+        try:
+            f = open('/proc/sys/net/ipv6/bindv6only', 'r')
+            dual_socket_style = int(f.read())
+            f.close()
+            return int(not dual_socket_style)
+        except:
+            # proc entry missing/unreadable: assume separate sockets
+            return 0
+
+
+READSIZE = 100000
+
+class RawServer:
+    """Single-threaded event loop: polls sockets via SocketHandler and
+    runs scheduled callbacks from a sorted task queue.
+
+    Tasks are added thread-safely through add_task (which interrupts a
+    blocking poll when called from another thread) and are drained into
+    the internal queue by the loop thread itself.
+    """
+    def __init__(self, doneflag, timeout_check_interval, timeout, noisy = True,
+                 ipv6_enable = True, failfunc = lambda x: None, errorfunc = None,
+                 sockethandler = None, excflag = Event()):
+        self.timeout_check_interval = timeout_check_interval
+        self.timeout = timeout
+        self.servers = {}
+        self.single_sockets = {}
+        self.dead_from_write = []
+        self.doneflag = doneflag
+        self.noisy = noisy
+        self.failfunc = failfunc
+        self.errorfunc = errorfunc
+        self.exccount = 0
+        # funcs: sorted list of (when, callback, id) run by listen_forever
+        self.funcs = []
+        # externally_added: tasks queued from other threads, drained
+        # into funcs by pop_external under self.lock
+        self.externally_added = []
+        self.finished = Event()
+        self.tasks_to_kill = []
+        self.excflag = excflag
+        self.lock = RLock()        
+
+        if sockethandler is None:
+            sockethandler = SocketHandler(timeout, ipv6_enable, READSIZE)
+        self.sockethandler = sockethandler
+
+        self.thread_ident = None
+        self.interrupt_socket = sockethandler.get_interrupt_socket()
+        
+        self.add_task(self.scan_for_timeouts, timeout_check_interval)
+
+    def get_exception_flag(self):
+        return self.excflag
+
+    def _add_task(self, func, delay, id = None):
+        # internal, loop-thread-only variant: inserts directly into the
+        # sorted task queue
+        if delay < 0:
+            delay = 0
+        insort(self.funcs, (clock() + delay, func, id))
+
+    def add_task(self, func, delay = 0, id = None):
+        """Schedule func() after `delay` seconds; safe to call from any
+        thread.  `id` groups tasks for kill_tasks."""
+        #if DEBUG:
+        #    print >>sys.stderr,"rawserver: add_task(",func,delay,")"
+        if delay < 0:
+            delay = 0
+        self.lock.acquire()
+        self.externally_added.append((func, delay, id))
+        # wake the poll loop if it is another thread that is scheduling
+        if self.thread_ident != get_ident():
+            self.interrupt_socket.interrupt()
+        self.lock.release()
+
+    def scan_for_timeouts(self):
+        # self-rescheduling periodic task
+        self.add_task(self.scan_for_timeouts, self.timeout_check_interval)
+        self.sockethandler.scan_for_timeouts()
+
+    def bind(self, port, bind = '', reuse = False,
+                        ipv6_socket_style = 1):
+        self.sockethandler.bind(port, bind, reuse, ipv6_socket_style)
+
+    def find_and_bind(self, first_try, minport, maxport, bind = '', reuse = False, 
+                      ipv6_socket_style = 1, randomizer = False):
+# 2fastbt_
+        result = self.sockethandler.find_and_bind(first_try, minport, maxport, bind, reuse, 
+                                 ipv6_socket_style, randomizer)
+# _2fastbt
+        return result
+
+    def start_connection_raw(self, dns, socktype, handler = None):
+        return self.sockethandler.start_connection_raw(dns, socktype, handler)
+
+    def start_connection(self, dns, handler = None, randomize = False):
+        return self.sockethandler.start_connection(dns, handler, randomize)
+
+    def get_stats(self):
+        return self.sockethandler.get_stats()
+
+    def pop_external(self):
+        # move cross-thread submissions into the loop's own task queue
+        self.lock.acquire()
+        while self.externally_added:
+            (a, b, c) = self.externally_added.pop(0)
+            self._add_task(a, b, c)
+        self.lock.release()
+
+    def listen_forever(self, handler):
+        """Run the event loop until doneflag is set.
+
+        Each iteration: drain external tasks, purge killed tasks, poll
+        sockets until the next task is due, run due tasks, then hand
+        socket events to the sockethandler.  Sets self.finished on exit.
+        """
+        if DEBUG:
+            print >>sys.stderr,"rawserver: listen forever()"
+        # handler=btlanuchmany: MultiHandler, btdownloadheadless: Encoder
+        self.thread_ident = get_ident()
+        self.sockethandler.set_handler(handler)
+        try:
+            while not self.doneflag.isSet():
+                try:
+                    self.pop_external()
+                    self._kill_tasks()
+                    # sleep in poll until the earliest task is due
+                    if self.funcs:
+                        period = self.funcs[0][0] + 0.001 - clock()
+                    else:
+                        period = 2 ** 30
+                    if period < 0:
+                        period = 0
+                        
+                    #if DEBUG:
+                    #    print >>sys.stderr,"rawserver: do_poll",period
+                    events = self.sockethandler.do_poll(period)
+
+                    if self.doneflag.isSet():
+                        if DEBUG:
+                            print >> sys.stderr,"rawserver: stopping because done flag set"
+                        return
+                    
+                    #print >>sys.stderr,"RawServer: funcs is",`self.funcs`
+                    
+                    
+                    # run every task whose deadline has passed
+                    while self.funcs and self.funcs[0][0] <= clock():
+                        garbage1, func, id = self.funcs.pop(0)
+                        if id in self.tasks_to_kill:
+                            pass
+                        try:
+#                            print func.func_name
+                            if DEBUG:
+                                if func.func_name != "_bgalloc":
+                                    print >> sys.stderr,"RawServer:f",func.func_name
+                            #st = time.time()
+                            func()
+                            #et = time.time()
+                            #diff = et - st
+                            #print >>sys.stderr,func,"took %.5f" % (diff)
+                            
+                        except (SystemError, MemoryError), e:
+                            # fatal: report and stop the loop
+                            self.failfunc(e)
+                            return
+                        except KeyboardInterrupt,e:
+#                            self.exception(e)
+                            return
+                        except error:
+                            # select.error from a task: log and continue
+                            if DEBUG:
+                                print >> sys.stderr,"rawserver: func: ERROR exception"
+                                print_exc()
+                            pass
+                        except Exception,e:
+                            if DEBUG:
+                                print >> sys.stderr,"rawserver: func: any exception"
+                                print_exc()
+                            if self.noisy:
+                                self.exception(e)
+                    self.sockethandler.close_dead()
+                    self.sockethandler.handle_events(events)
+                    if self.doneflag.isSet():
+                        if DEBUG:
+                            print >> sys.stderr,"rawserver: stopping because done flag set2"
+                        return
+                    self.sockethandler.close_dead()
+                except (SystemError, MemoryError), e:
+                    if DEBUG:
+                        print >> sys.stderr,"rawserver: SYS/MEM exception",e
+                    self.failfunc(e)
+                    return
+                except error:
+                    if DEBUG:
+                        print >> sys.stderr,"rawserver: ERROR exception"
+                        print_exc()
+                    if self.doneflag.isSet():
+                        return
+                except KeyboardInterrupt,e:
+                    self.failfunc(e)
+                    return
+                except Exception,e:
+                    if DEBUG:
+                        print >> sys.stderr,"rawserver: other exception"
+                    print_exc()
+                    self.exception(e)
+                ## Arno: Don't stop till we drop
+                ##if self.exccount > 10:
+                ##    print >> sys.stderr,"rawserver: stopping because exccount > 10"
+                ##    return
+        finally:
+#            self.sockethandler.shutdown()
+            self.finished.set()
+
+    def is_finished(self):
+        return self.finished.isSet()
+
+    def wait_until_finished(self):
+        self.finished.wait()
+
+    def _kill_tasks(self):
+        # remove any queued task whose id was passed to kill_tasks
+        if self.tasks_to_kill:
+            new_funcs = []
+            for (t, func, id) in self.funcs:
+                if id not in self.tasks_to_kill:
+                    new_funcs.append((t, func, id))
+            self.funcs = new_funcs
+            self.tasks_to_kill = []
+
+    def kill_tasks(self, id):
+        """Mark all tasks scheduled under `id` for removal (takes effect
+        on the next loop iteration)."""
+        self.tasks_to_kill.append(id)
+
+    def exception(self,e,kbint=False):
+        # report a task exception; keyboard interrupts are neither
+        # flagged nor forwarded to errorfunc
+        if not kbint:
+            self.excflag.set()
+        self.exccount += 1
+        if self.errorfunc is None:
+            print_exc()
+        else:
+            if not kbint:   # don't report here if it's a keyboard interrupt
+                self.errorfunc(e)
+
+    def shutdown(self):
+        self.sockethandler.shutdown()
+
+
+    #
+    # Interface for Khashmir 
+    #
+    def create_udpsocket(self,port,host):
+        if DEBUG:
+            print >>sys.stderr,"rawudp: create_udp_socket",host,port
+        return self.sockethandler.create_udpsocket(port,host)
+        
+    def start_listening_udp(self,serversocket,handler):
+        if DEBUG:
+            print >>sys.stderr,"rawudp: start_listen:",serversocket,handler
+        self.sockethandler.start_listening_udp(serversocket,handler)
+    
+    def stop_listening_udp(self,serversocket):
+        if DEBUG:
+            print >>sys.stderr,"rawudp: stop_listen:",serversocket
+        self.sockethandler.stop_listening_udp(serversocket)
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/ServerPortHandler.py b/instrumentation/next-share/BaseLib/Core/BitTornado/ServerPortHandler.py
new file mode 100644 (file)
index 0000000..b3eceeb
--- /dev/null
@@ -0,0 +1,238 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+import sys
+from cStringIO import StringIO
+from binascii import b2a_hex
+#from RawServer import RawServer
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+from BT1.Encrypter import protocol_name
+
+
+def toint(s):
+    # Interpret the byte string s as a big-endian unsigned integer.
+    return long(b2a_hex(s), 16)
+
+default_task_id = []
+
+DEBUG = False
+
+def show(s):
+    # Debug helper: print the ordinal value of every byte in s on one line.
+    for i in xrange(len(s)): 
+        print ord(s[i]),
+    print
+
+class SingleRawServer:
+    """Per-torrent facade over the shared RawServer owned by a
+    MultiHandler.
+
+    Tasks are tagged with this torrent's info_hash so they can be
+    killed as a group when the torrent shuts down.
+    """
+    def __init__(self, info_hash, multihandler, doneflag, protocol):
+        self.info_hash = info_hash
+        self.doneflag = doneflag
+        self.protocol = protocol
+        self.multihandler = multihandler
+        self.rawserver = multihandler.rawserver
+        self.finished = False
+        self.running = False
+        self.handler = None
+        self.taskqueue = []
+
+    def shutdown(self):
+        # public entry point: delegates to the MultiHandler, which calls
+        # back into _shutdown and unregisters this swarm
+        if not self.finished:
+            self.multihandler.shutdown_torrent(self.info_hash)
+
+    def _shutdown(self):
+        if DEBUG:
+            print >>sys.stderr,"SingleRawServer: _shutdown"
+        if not self.finished:
+            self.finished = True
+            self.running = False
+            # drop all tasks scheduled under this torrent's id
+            self.rawserver.kill_tasks(self.info_hash)
+            if self.handler:
+                self.handler.close_all()
+
+    def _external_connection_made(self, c, options, msg_remainder):
+        # called by MultiHandler when an incoming handshake matched our
+        # info_hash; hand the connection to our Encoder
+        if DEBUG:
+            print >> sys.stderr,"SingleRawServer: _external_conn_made, running?",self.running
+        if self.running:
+            c.set_handler(self.handler)
+            self.handler.externally_handshaked_connection_made(
+                c, options, msg_remainder)
+
+    ### RawServer functions ###
+
+    def add_task(self, func, delay=0, id = default_task_id):
+        # default id sentinel -> tag the task with our info_hash so
+        # _shutdown can cancel it
+        if id is default_task_id:
+            id = self.info_hash
+        if not self.finished:
+            self.rawserver.add_task(func, delay, id)
+
+#    def bind(self, port, bind = '', reuse = False):
+#        pass    # not handled here
+        
+    def start_connection(self, dns, handler = None):
+        if not handler:
+            handler = self.handler
+        c = self.rawserver.start_connection(dns, handler)
+        return c
+
+#    def listen_forever(self, handler):
+#        pass    # don't call with this
+    
+    def start_listening(self, handler):
+        """Register the per-torrent handler (Encoder) and mark the
+        torrent as running; returns shutdown for symmetry with the
+        RawServer API."""
+        self.handler = handler    # Encoder
+        self.running = True
+        return self.shutdown    # obviously, doesn't listen forever
+
+    def is_finished(self):
+        return self.finished
+
+    def get_exception_flag(self):
+        return self.rawserver.get_exception_flag()
+
+class NewSocketHandler:     # hand a new socket off where it belongs
+    """Reads just enough of an incoming connection to route it: either
+    a BitTorrent handshake (dispatched to the SingleRawServer whose
+    info_hash matches) or an HTTP request (dispatched to the tracker's
+    httphandler).  Unrecognized connections are closed, and any
+    connection that has not completed routing within 15s is dropped.
+
+    Each read_* state returns (next_len, next_func), True when routing
+    can complete, or None to reject the connection.
+    """
+    def __init__(self, multihandler, connection):    # connection: SingleSocket
+        self.multihandler = multihandler
+        self.connection = connection
+        connection.set_handler(self)
+        self.closed = False
+        self.buffer = StringIO()
+        self.complete = False
+        # start by reading one byte: the protocol-name length
+        self.next_len, self.next_func = 1, self.read_header_len
+        self.multihandler.rawserver.add_task(self._auto_close, 15)
+
+    def _auto_close(self):
+        # safety timeout: kill connections that never finished routing
+        if not self.complete:
+            self.close()
+        
+    def close(self):
+        if not self.closed:
+            self.connection.close()
+            self.closed = True
+
+#   header format:
+#        connection.write(chr(len(protocol_name)) + protocol_name + 
+#            (chr(0) * 8) + self.encrypter.download_id + self.encrypter.my_id)
+
+    # copied from Encrypter and modified
+    
+    def read_header_len(self, s):
+        # 'G' (start of "GET ...") means this is an HTTP request, not a
+        # BitTorrent handshake
+        if s == 'G':
+            self.protocol = 'HTTP'
+            self.firstbyte = s
+            if DEBUG:
+                print >>sys.stderr,"NewSocketHandler: Got HTTP connection"
+            return True
+        else:
+            l = ord(s)
+            return l, self.read_header
+
+    def read_header(self, s):
+        # s is the protocol name; next come the 8 reserved option bytes
+        self.protocol = s
+        return 8, self.read_reserved
+
+    def read_reserved(self, s):
+        self.options = s
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):
+        # s is the 20-byte info_hash: accept only if a registered swarm
+        # matches both the hash and the protocol name
+        if DEBUG:
+            print >>sys.stderr,"NewSocketHandler: Swarm id is",`s`,self.connection.socket.getpeername()
+        if self.multihandler.singlerawservers.has_key(s):
+            if self.multihandler.singlerawservers[s].protocol == self.protocol:
+                if DEBUG:
+                    print >>sys.stderr,"NewSocketHandler: Found rawserver for swarm id"
+                return True
+        if DEBUG:
+            print >>sys.stderr,"NewSocketHandler: No rawserver found for swarm id",`s`
+        return None
+
+    def read_dead(self, s):
+        # terminal state entered after a parsing exception
+        return None
+
+    def data_came_in(self, garbage, s):
+        """Buffer incoming bytes until next_len are available, then run
+        the current state; on completion hand the connection (plus any
+        surplus bytes) to the matching handler."""
+#        if DEBUG:
+#            print "NewSocketHandler data came in", sha(s).hexdigest()
+        while 1:
+            if self.closed:
+                return
+            i = self.next_len - self.buffer.tell()
+            if i > len(s):
+                self.buffer.write(s)
+                return
+            self.buffer.write(s[:i])
+            s = s[i:]
+            m = self.buffer.getvalue()
+            self.buffer.reset()
+            self.buffer.truncate()
+            try:
+                x = self.next_func(m)
+            except:
+                self.next_len, self.next_func = 1, self.read_dead
+                raise
+            if x is None:
+                if DEBUG:
+                    print >> sys.stderr,"NewSocketHandler:",self.next_func,"returned None"
+                self.close()
+                return
+            if x == True:       # ready to process
+                if self.protocol == 'HTTP':
+                    if DEBUG:
+                        print >> sys.stderr,"NewSocketHandler: Reporting HTTP connection"
+                    # replay the swallowed first byte, then the rest
+                    self.multihandler.httphandler.external_connection_made(self.connection)
+                    self.multihandler.httphandler.data_came_in(self.connection,self.firstbyte)
+                    self.multihandler.httphandler.data_came_in(self.connection,s)
+                else:
+                    if DEBUG:
+                        print >> sys.stderr,"NewSocketHandler: Reporting connection via",self.multihandler.singlerawservers[m]._external_connection_made
+                    # m is the info_hash read in the final state
+                    self.multihandler.singlerawservers[m]._external_connection_made(self.connection, self.options, s)
+                self.complete = True
+                return
+            self.next_len, self.next_func = x
+
+    def connection_flushed(self, ss):
+        pass
+
+    def connection_lost(self, ss):
+        self.closed = True
+
+class MultiHandler:
+    """Shares one RawServer among many torrents: incoming connections
+    are wrapped in NewSocketHandler, which routes each one to the
+    SingleRawServer registered for its handshake info_hash (or to the
+    optional HTTP handler)."""
+    def __init__(self, rawserver, doneflag):
+        self.rawserver = rawserver
+        self.masterdoneflag = doneflag
+        self.singlerawservers = {}
+        self.connections = {}
+        self.taskqueues = {}
+        self.httphandler = None
+
+    def newRawServer(self, info_hash, doneflag, protocol=protocol_name):
+        """Register and return a per-torrent SingleRawServer keyed by
+        info_hash."""
+        new = SingleRawServer(info_hash, self, doneflag, protocol)
+        self.singlerawservers[info_hash] = new
+        return new
+
+    def shutdown_torrent(self, info_hash):
+        if DEBUG:
+            print >>sys.stderr,"MultiHandler: shutdown_torrent",`info_hash`
+        self.singlerawservers[info_hash]._shutdown()
+        del self.singlerawservers[info_hash]
+
+    def listen_forever(self):
+        """Run the shared event loop; when it returns, mark every
+        registered torrent finished and set its done flag."""
+        if DEBUG:
+            print >>sys.stderr,"MultiHandler: listen_forever()"
+        self.rawserver.listen_forever(self)
+        for srs in self.singlerawservers.values():
+            srs.finished = True
+            srs.running = False
+            srs.doneflag.set()
+
+    def set_httphandler(self,httphandler):
+        self.httphandler = httphandler
+        
+    ### RawServer handler functions ###
+    # be wary of name collisions
+
+    def external_connection_made(self, ss):
+        # ss: SingleSocket
+        # NewSocketHandler registers itself as the socket's handler
+        NewSocketHandler(self, ss)
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/SocketHandler.py b/instrumentation/next-share/BaseLib/Core/BitTornado/SocketHandler.py
new file mode 100644 (file)
index 0000000..6c1b360
--- /dev/null
@@ -0,0 +1,629 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+import socket
+import errno
+try:
+    from select import poll, POLLIN, POLLOUT, POLLERR, POLLHUP
+    timemult = 1000
+except ImportError:
+    from selectpoll import poll, POLLIN, POLLOUT, POLLERR, POLLHUP
+    timemult = 1
+from time import sleep
+from clock import clock
+import sys
+from random import shuffle, randrange
+from traceback import print_exc
+
+# Pre-Python-2.3 compatibility: define True/False when the builtins are
+# missing (the NameError is swallowed by the bare except).
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+# Poll event mask for sockets that have queued outgoing data.
+# NOTE(review): shadows the builtin all() at module scope.
+all = POLLIN | POLLOUT
+
+# Error code meaning "operation would block" on a non-blocking socket.
+if sys.platform == 'win32':
+    SOCKET_BLOCK_ERRORCODE=10035    # WSAEWOULDBLOCK
+else:
+    SOCKET_BLOCK_ERRORCODE=errno.EWOULDBLOCK
+
+class InterruptSocketHandler:
+    # Handler for InterruptSocket datagrams: their only purpose is to
+    # wake the poll loop, so the payload is discarded.
+    @staticmethod
+    def data_came_in(interrupt_socket, data):
+        pass
+
+class InterruptSocket:
+    """
+    When we need the poll to return before the timeout expires, we
+    will send some data to the InterruptSocket and discard the data.
+    """
+    def __init__(self, socket_handler, ):
+        self.socket_handler = socket_handler
+        self.handler = InterruptSocketHandler
+
+        # self.socket receives the wake-up datagrams and is registered
+        # with the poll; self.interrupt_socket is the sending side.
+        self.ip = "127.0.0.1"
+        self.port = None
+        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        self.interrupt_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+        # we assume that one port in the range below is free
+        # NOTE(review): if no port in 10000..12344 binds, the loop falls
+        # through silently and self.socket stays unbound.
+        for self.port in xrange(10000, 12345):
+            try:
+                if DEBUG: print >>sys.stderr, "InterruptSocket: Trying to start InterruptSocket on port", self.port
+                self.socket.bind((self.ip, self.port))
+                break
+            except:
+                pass
+
+        # start listening to the InterruptSocket
+        self.socket_handler.single_sockets[self.socket.fileno()] = self
+        self.socket_handler.poll.register(self.socket, POLLIN)
+
+    def interrupt(self):
+        # May be called to wake the poll before its timeout expires.
+        self.interrupt_socket.sendto("+", (self.ip, self.port))
+
+    def get_ip(self):
+        return self.ip
+
+    def get_port(self):
+        return self.port
+        
+class UdpSocket:
+    """ Class to hold socket and handler for a UDP socket. """
+    # Both attributes are read by SocketHandler.handle_events, which
+    # calls handler.data_came_in(addr, data) for received datagrams.
+    def __init__(self, socket, handler):
+        self.socket = socket
+        self.handler = handler
+
+class SingleSocket:
+    """ 
+    There are two places to create SingleSocket:
+    incoming connection -- SocketHandler.handle_events
+    outgoing connection -- SocketHandler.start_connection_raw
+    """
+    
+    def __init__(self, socket_handler, sock, handler, ip = None):
+        # Wrap an already-created TCP socket.  Endpoint addresses are
+        # sampled once here; for an outgoing socket getpeername() fails
+        # until the connect completes, so we fall back to the caller's
+        # ip argument (or 'unknown').
+        self.socket_handler = socket_handler
+        self.socket = sock
+        self.handler = handler
+        self.buffer = []            # list of outgoing byte strings, FIFO
+        self.last_hit = clock()     # last activity, used for timeouts
+        self.fileno = sock.fileno()
+        self.connected = False
+        self.skipped = 0            # consecutive zero-progress sends
+#        self.check = StreamCheck()
+        self.myip = None
+        self.myport = -1
+        self.ip = None
+        self.port = -1
+        try:
+            myname = self.socket.getsockname()
+            self.myip = myname[0]
+            self.myport = myname[1]
+            peername = self.socket.getpeername() 
+            self.ip = peername[0]
+            self.port = peername[1]
+        except:
+            #print_exc()
+            if ip is None:
+                self.ip = 'unknown'
+            else:
+                self.ip = ip
+        # RePEX: Measurement TODO: Remove when measurement test has been done
+        self.data_sent = 0
+        self.data_received = 0
+        
+    def get_ip(self, real=False):
+        # Peer's IP; real=True re-queries the socket (and refreshes
+        # self.port too), silently keeping the cached value on failure.
+        if real:
+            try:
+                peername = self.socket.getpeername() 
+                self.ip = peername[0]
+                self.port = peername[1]
+            except:
+                ## print_exc()
+                pass
+        return self.ip
+    
+    def get_port(self, real=False):
+        # Peer's port; real=True refreshes via get_ip(True).
+        if real:
+            self.get_ip(True)
+        return self.port
+
+    def get_myip(self, real=False):
+        # Local IP; real=True re-queries getsockname().
+        if real:
+            try:
+                myname = self.socket.getsockname()
+                self.myip = myname[0]
+                self.myport = myname[1]
+            except:
+                print_exc()
+                pass
+        return self.myip
+    
+    def get_myport(self, real=False):
+        # Local port; real=True refreshes via get_myip(True).
+        if real:
+            self.get_myip(True)
+        return self.myport
+        
+    def close(self):
+        '''
+        for x in xrange(5,0,-1):
+            try:
+                f = inspect.currentframe(x).f_code
+                print (f.co_filename,f.co_firstlineno,f.co_name)
+                del f
+            except:
+                pass
+        print ''
+        '''
+        # Tear down: drop the buffer, deregister from the handler's
+        # bookkeeping and poll set, then close the fd.  Asserts the
+        # socket was not already closed (self.socket becomes None here).
+        assert self.socket
+        self.connected = False
+        sock = self.socket
+        self.socket = None
+        self.buffer = []
+        del self.socket_handler.single_sockets[self.fileno]
+        self.socket_handler.poll.unregister(sock)
+        sock.close()
+
+    def shutdown(self, val):
+        # Pass-through to socket.shutdown (SHUT_RD/SHUT_WR/SHUT_RDWR).
+        self.socket.shutdown(val)
+
+    def is_flushed(self):
+        # True when no outgoing data is queued.
+        return not self.buffer
+
+    def write(self, s):
+#        self.check.write(s)
+        # Queue s; kick off a send only when the buffer was previously
+        # empty (otherwise a send is already pending via POLLOUT).
+        # Arno: fishy concurrency problem, sometimes self.socket is None
+        if self.socket is None:
+            return
+        #assert self.socket is not None
+        self.buffer.append(s)
+        if len(self.buffer) == 1:
+            self.try_write()
+
+    def try_write(self):
+        # Send as much buffered data as the socket accepts.  Five
+        # consecutive zero-progress sends, or a non-EWOULDBLOCK socket
+        # error, mark the connection dead (reaped by close_dead()).
+        # Finally re-register the poll mask: POLLIN|POLLOUT while data
+        # remains queued, plain POLLIN once flushed.
+        if self.connected:
+            dead = False
+            try:
+                while self.buffer:
+                    buf = self.buffer[0]
+                    amount = self.socket.send(buf)
+                    self.data_sent += amount # RePEX: Measurement TODO: Remove when measurement test has been done
+                    if amount == 0:
+                        self.skipped += 1
+                        break
+                    self.skipped = 0
+                    if amount != len(buf):
+                        self.buffer[0] = buf[amount:]
+                        break
+                    del self.buffer[0]
+            except socket.error, e:
+                #if DEBUG:
+                #    print_exc(file=sys.stderr)
+                blocked=False
+                try:
+                    blocked = (e[0] == SOCKET_BLOCK_ERRORCODE) 
+                    dead = not blocked
+                except:
+                    dead = True
+                if not blocked:
+                    self.skipped += 1
+            if self.skipped >= 5:
+                dead = True
+            if dead:
+                self.socket_handler.dead_from_write.append(self)
+                return
+        if self.buffer:
+            self.socket_handler.poll.register(self.socket, all)
+        else:
+            self.socket_handler.poll.register(self.socket, POLLIN)
+        
+    def set_handler(self, handler):    # can be: NewSocketHandler, Encoder, En_Connection
+        self.handler = handler
+
+
+class SocketHandler:
+    # Central socket registry for the RawServer poll loop: listening
+    # TCP servers, per-connection SingleSockets, UDP sockets and the
+    # InterruptSocket, all keyed by file descriptor.
+    def __init__(self, timeout, ipv6_enable, readsize = 100000):
+        self.timeout = timeout
+        self.ipv6_enable = ipv6_enable
+        self.readsize = readsize
+        self.poll = poll()
+        # fileno -> SingleSocket (the InterruptSocket registers here too)
+        self.single_sockets = {}
+        self.dead_from_write = []
+        self.max_connects = 1000
+        self.servers = {}
+        self.btengine_said_reachable = False
+        self.interrupt_socket = None
+        self.udp_sockets = {}
+
+    def scan_for_timeouts(self):
+        # Close every connection that has been idle longer than
+        # self.timeout seconds.
+        t = clock() - self.timeout
+        tokill = []
+        for s in self.single_sockets.values():
+            # Only SingleSockets can be closed because of timeouts
+            if type(s) is SingleSocket and s.last_hit < t:
+                tokill.append(s)
+        for k in tokill:
+            if k.socket is not None:
+                if DEBUG:
+                    print >> sys.stderr,"SocketHandler: scan_timeout closing connection",k.get_ip()
+                self._close_socket(k)
+
+    def bind(self, port, bind = [], reuse = False, ipv6_socket_style = 1):
+        # Open listening server socket(s) on port and register them for
+        # POLLIN.  On any bind failure every server opened so far is
+        # closed and socket.error is raised.
+        port = int(port)
+        addrinfos = []
+        self.servers = {}
+        self.interfaces = []
+        # if bind != [] bind to all specified addresses (can be IPs or hostnames)
+        # else bind to default ipv6 and ipv4 address
+        if bind:
+            if self.ipv6_enable:
+                socktype = socket.AF_UNSPEC
+            else:
+                socktype = socket.AF_INET
+            for addr in bind:
+                if sys.version_info < (2, 2):
+                    addrinfos.append((socket.AF_INET, None, None, None, (addr, port)))
+                else:
+                    addrinfos.extend(socket.getaddrinfo(addr, port,
+                                               socktype, socket.SOCK_STREAM))
+        else:
+            if self.ipv6_enable:
+                addrinfos.append([socket.AF_INET6, None, None, None, ('', port)])
+            if not addrinfos or ipv6_socket_style != 0:
+                addrinfos.append([socket.AF_INET, None, None, None, ('', port)])
+        for addrinfo in addrinfos:
+            try:
+                server = socket.socket(addrinfo[0], socket.SOCK_STREAM)
+                if reuse:
+                    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                server.setblocking(0)
+                if DEBUG:
+                    print >> sys.stderr,"SocketHandler: Try to bind socket on", addrinfo[4], "..."
+                server.bind(addrinfo[4])
+                self.servers[server.fileno()] = server
+                if bind:
+                    self.interfaces.append(server.getsockname()[0])
+                if DEBUG:
+                    print >> sys.stderr,"SocketHandler: OK"
+                server.listen(64)
+                self.poll.register(server, POLLIN)
+            except socket.error, e:
+                for server in self.servers.values():
+                    try:
+                        server.close()
+                    except:
+                        pass
+                if self.ipv6_enable and ipv6_socket_style == 0 and self.servers:
+                    raise socket.error('blocked port (may require ipv6_binds_v4 to be set)')
+                raise socket.error(str(e))
+        if not self.servers:
+            raise socket.error('unable to open server port')
+        self.port = port
+
+    def find_and_bind(self, first_try, minport, maxport, bind = '', reuse = False,
+                      ipv6_socket_style = 1, randomizer = False):
+        # Try binding first_try (when non-zero), then ports from
+        # [minport, maxport]; returns the port that was bound.
+        # NOTE(review): the port loop below re-raises on the first
+        # failing port, so at most one port from portrange is tried.
+        e = 'maxport less than minport - no ports to check'
+        if maxport-minport < 50 or not randomizer:
+            portrange = range(minport, maxport+1)
+            if randomizer:
+                shuffle(portrange)
+                portrange = portrange[:20]  # check a maximum of 20 ports
+        else:
+            portrange = []
+            while len(portrange) < 20:
+                listen_port = randrange(minport, maxport+1)
+                if not listen_port in portrange:
+                    portrange.append(listen_port)
+        if first_try != 0:    # try 22 first, because TU only opens port 22 for SSH...
+            try:
+                self.bind(first_try, bind, reuse = reuse, 
+                               ipv6_socket_style = ipv6_socket_style)
+                return first_try
+            except socket.error, e:
+                pass
+        for listen_port in portrange:
+            try:
+                #print >> sys.stderr, listen_port, bind, reuse
+                self.bind(listen_port, bind, reuse = reuse,
+                               ipv6_socket_style = ipv6_socket_style)
+                return listen_port
+            except socket.error, e:
+                raise
+        raise socket.error(str(e))
+
+
+    def set_handler(self, handler):
+        # Default handler assigned to newly accepted connections
+        # (e.g. the MultiHandler).
+        self.handler = handler
+
+
+    def start_connection_raw(self, dns, socktype = socket.AF_INET, handler = None):
+        # Open a non-blocking outgoing TCP connection to dns=(host, port)
+        # and return the wrapping SingleSocket.  Raises socket.error on
+        # immediate failure (EINPROGRESS/WSAEWOULDBLOCK are expected and
+        # tolerated for a non-blocking connect).
+        # handler = Encoder, self.handler = Multihandler
+        if handler is None:
+            handler = self.handler
+        sock = socket.socket(socktype, socket.SOCK_STREAM)
+        sock.setblocking(0)
+        try:
+            if DEBUG:
+                print >>sys.stderr,"SocketHandler: Initiate connection to",dns,"with socket #",sock.fileno()
+            # Arno,2007-01-23: http://docs.python.org/lib/socket-objects.html 
+            # says that connect_ex returns an error code (and can still throw 
+            # exceptions). The original code never checked the return code.
+            #
+            err = sock.connect_ex(dns)
+            if DEBUG:
+                if err == 0:
+                    msg = 'No error'
+                else:
+                    msg = errno.errorcode[err]
+                print >>sys.stderr,"SocketHandler: connect_ex on socket #",sock.fileno(),"returned",err,msg
+            if err != 0:
+                if sys.platform == 'win32' and err == 10035:
+                    # Arno, 2007-02-23: win32 always returns WSAEWOULDBLOCK, whether 
+                    # the connect is to a live peer or not. Win32's version 
+                    # of EINPROGRESS
+                    pass
+                elif err == errno.EINPROGRESS: # or err == errno.EALREADY or err == errno.EWOULDBLOCK:
+                    # [Stevens98] says that UNICES return EINPROGRESS when the connect
+                    # does not immediately succeed, which is almost always the case. 
+                    pass
+                else:
+                    raise socket.error((err,errno.errorcode[err]))
+        except socket.error, e:
+            if DEBUG:
+                print >> sys.stderr,"SocketHandler: SocketError in connect_ex",str(e)
+            raise
+        except Exception, e:
+            if DEBUG:
+                print >> sys.stderr,"SocketHandler: Exception in connect_ex",str(e)      
+            raise socket.error(str(e))
+        self.poll.register(sock, POLLIN)
+        s = SingleSocket(self, sock, handler, dns[0])    # create socket to connect the peers obtained from tracker
+        self.single_sockets[sock.fileno()] = s
+        #if DEBUG:
+        #    print >> sys.stderr,"SocketHandler: Created Socket"
+        return s
+
+
+    def start_connection(self, dns, handler = None, randomize = False):
+        # Resolve dns if it is not already an IP address and attempt a
+        # connection to each resulting address until one succeeds.
+        if handler is None:
+            handler = self.handler
+        if sys.version_info < (2, 2):
+            s = self.start_connection_raw(dns, socket.AF_INET, handler)
+        else:
+#            if self.ipv6_enable:
+#                socktype = socket.AF_UNSPEC
+#            else:
+#                socktype = socket.AF_INET
+            try:
+                try:
+                    """
+                    Arno: When opening a new connection, the network thread calls the
+                    getaddrinfo() function (=DNS resolve), as apparently the input
+                    sometimes is a hostname. At the same time the tracker thread uses 
+                    this same function to resolve the tracker name to an IP address. 
+                    However, on Python for Windows this method has concurrency control
+                    protection that allows only 1 request at a time. 
+
+                    In some cases resolving the tracker name takes a very long time,
+                    meanwhile blocking the network thread!!!! And that only wanted to
+                    resolve some IP address to some IP address, i.e., do nothing!!! 
+                    
+                    Sol: don't call getaddrinfo() is the input is an IP address, and
+                    submit a bug to python that it shouldn't lock when the op is
+                    a null op
+                    """
+                    socket.inet_aton(dns[0]) # IPVSIX: change to inet_pton()
+                    #print >>sys.stderr,"SockHand: start_conn: after inet_aton",dns[0],"<",dns,">"
+                    addrinfos=[(socket.AF_INET, None, None, None, (dns[0], dns[1]))]
+                except:
+                    #print_exc()
+                    try:
+                        # Jie: we attempt to use this socktype to connect ipv6 addresses.
+                        socktype = socket.AF_UNSPEC
+                        addrinfos = socket.getaddrinfo(dns[0], int(dns[1]),
+                                                       socktype, socket.SOCK_STREAM)
+                    except:
+                        socktype = socket.AF_INET
+                        addrinfos = socket.getaddrinfo(dns[0], int(dns[1]),
+                                                       socktype, socket.SOCK_STREAM)
+            except socket.error, e:
+                raise
+            except Exception, e:
+                raise socket.error(str(e))
+            if randomize:
+                shuffle(addrinfos)
+            for addrinfo in addrinfos:
+                try:
+                    s = self.start_connection_raw(addrinfo[4], addrinfo[0], handler)
+                    break
+                except Exception,e:
+                    print_exc()
+                    pass # FIXME Arno: ???? raise e
+            else:
+                raise socket.error('unable to connect')
+        return s
+
+
+    def _sleep(self):
+        # Back off briefly after an accept() error (e.g. fd exhaustion).
+        sleep(1)
+        
+    def handle_events(self, events):
+        # Dispatch poll events.  Each fd is looked up, in order, in the
+        # server-socket, UDP-socket and connection-socket maps.
+        for sock, event in events:
+            #print >>sys.stderr,"SocketHandler: event on sock#",sock
+            s = self.servers.get(sock)    # socket.socket
+            if s:
+                if event & (POLLHUP | POLLERR) != 0:
+                    if DEBUG:
+                        print >> sys.stderr,"SocketHandler: Got event, close server socket"
+                    self.poll.unregister(s)
+                    del self.servers[sock]
+                else:
+                    try:
+                        newsock, addr = s.accept()
+                        if DEBUG:
+                            print >> sys.stderr,"SocketHandler: Got connection from",newsock.getpeername()
+                        if not self.btengine_said_reachable:
+                            try:
+                                from BaseLib.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
+                                dmh = DialbackMsgHandler.getInstance()
+                                dmh.network_btengine_reachable_callback()
+                            except ImportError:
+                                if DEBUG:
+                                    print_exc()
+                                pass
+                            self.btengine_said_reachable = True
+
+                        # Only use the new socket if we can spare the
+                        # connections. Otherwise we will silently drop
+                        # the connection.
+                        if len(self.single_sockets) < self.max_connects:
+                            newsock.setblocking(0)
+                            nss = SingleSocket(self, newsock, self.handler)    # create socket for incoming peers and tracker
+                            self.single_sockets[newsock.fileno()] = nss
+                            self.poll.register(newsock, POLLIN)
+                            self.handler.external_connection_made(nss)
+                        else:
+                            print >> sys.stderr,"SocketHandler: too many connects"
+                            newsock.close()
+                        
+                    except socket.error,e:
+                        if DEBUG:
+                            print >> sys.stderr,"SocketHandler: SocketError while accepting new connection",str(e)
+                        self._sleep()
+                continue
+
+            s = self.udp_sockets.get(sock)
+            if s:
+                try:
+                    (data, addr) = s.socket.recvfrom(65535)
+                    if not data:
+                        if DEBUG:
+                            print >> sys.stderr, "SocketHandler: UDP no-data", addr
+                    else:
+                        if DEBUG:
+                            print >> sys.stderr,"SocketHandler: Got UDP data",addr,"len",len(data)
+                        s.handler.data_came_in(addr, data)
+
+                except socket.error, e:
+                    if DEBUG:
+                        print >> sys.stderr,"SocketHandler: UDP Socket error",str(e)
+                    continue
+
+            # NOTE(review): after a successful UDP read, control falls
+            # through to this lookup; the fd is not in single_sockets so
+            # the lookup simply misses.
+            s = self.single_sockets.get(sock)
+            if s:
+                if (event & (POLLHUP | POLLERR)):
+                    if DEBUG:
+                        print >> sys.stderr,"SocketHandler: Got event, connect socket got error"
+                        print >> sys.stderr,"SocketHandler: Got event, connect socket got error",s.ip,s.port
+                    self._close_socket(s)
+                    continue
+                if (event & POLLIN):
+                    try:
+                        s.last_hit = clock()
+                        # NOTE(review): hard-coded 100000 here ignores
+                        # the configurable self.readsize.
+                        data = s.socket.recv(100000)
+                        if not data:
+                            if DEBUG:
+                                print >> sys.stderr,"SocketHandler: no-data closing connection",s.get_ip(),s.get_port()
+                            self._close_socket(s)
+                        else:
+                            #if DEBUG:
+                            #    print >> sys.stderr,"SocketHandler: Got data",s.get_ip(),s.get_port(),"len",len(data)
+
+                            # btlaunchmany: NewSocketHandler, btdownloadheadless: Encrypter.Connection
+                            if hasattr(s, 'data_received'): s.data_received += len(data) # RePEX: Measurement TODO: Remove when measurement test has been done
+                            s.handler.data_came_in(s, data)
+                    except socket.error, e:
+                        if DEBUG:
+                            print >> sys.stderr,"SocketHandler: Socket error",str(e)
+                        code, msg = e
+                        if code != SOCKET_BLOCK_ERRORCODE:
+                            if DEBUG:
+                                print >> sys.stderr,"SocketHandler: closing connection because not WOULDBLOCK",s.get_ip(),"error",code
+                            self._close_socket(s)
+                            continue
+                if (event & POLLOUT) and s.socket and not s.is_flushed():
+                    s.connected = True
+                    s.try_write()
+                    if s.is_flushed():
+                        s.handler.connection_flushed(s)
+
+    def close_dead(self):
+        # Reap connections that try_write() marked dead.  Looped because
+        # connection_lost callbacks may mark further sockets dead.
+        while self.dead_from_write:
+            old = self.dead_from_write
+            self.dead_from_write = []
+            for s in old:
+                if s.socket:
+                    if DEBUG:
+                        print >> sys.stderr,"SocketHandler: close_dead closing connection",s.get_ip()
+                    self._close_socket(s)
+
+    def _close_socket(self, s):
+        # Close the SingleSocket and notify its handler.
+        if DEBUG:
+            print >> sys.stderr,"SocketHandler: closing connection to ",s.get_ip()
+        s.close()
+        s.handler.connection_lost(s)
+
+    def do_poll(self, t):
+        # Poll for up to t seconds (timemult converts to the unit the
+        # poll implementation expects) and return the event list.
+        r = self.poll.poll(t*timemult)
+        if r is None:
+            # presumably signals resource exhaustion in the poll
+            # implementation -- shed load by closing 5% of connections.
+            # TODO(review): confirm against the selectpoll fallback.
+            connects = len(self.single_sockets)
+            to_close = int(connects*0.05)+1 # close 5% of sockets
+            self.max_connects = connects-to_close
+            closelist = self.single_sockets.values()
+            shuffle(closelist)
+            closelist = closelist[:to_close]
+            for sock in closelist:
+                if DEBUG:
+                    print >> sys.stderr,"SocketHandler: do_poll closing connection",sock.get_ip()
+                self._close_socket(sock)
+            return []
+        return r     
+
+    def get_stats(self):
+        # Interfaces/port are set by bind(); raises AttributeError if
+        # bind() was never called.
+        return { 'interfaces': self.interfaces, 
+                 'port': self.port }
+
+
+    def shutdown(self):
+        # Best-effort close of every connection and server socket.
+        for ss in self.single_sockets.values():
+            try:
+                ss.close()
+            except:
+                pass
+        for server in self.servers.values():
+            try:
+                server.close()
+            except:
+                pass
+
+    #
+    # Interface for Khasmir, called from RawServer
+    #
+    #
+    def create_udpsocket(self,port,host):
+        # Create and return a bound, non-blocking UDP socket.
+        server = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
+        server.bind((host,port))
+        server.setblocking(0)
+        return server
+        
+    def start_listening_udp(self,serversocket,handler):
+        # Register a UDP socket; handler.data_came_in(addr, data) will
+        # be called from handle_events().
+        self.udp_sockets[serversocket.fileno()] = UdpSocket(serversocket, handler)
+        self.poll.register(serversocket, POLLIN)
+    
+    def stop_listening_udp(self,serversocket):
+        # Deregister a UDP socket from the poll and bookkeeping.
+        self.poll.unregister(serversocket)
+        del self.udp_sockets[serversocket.fileno()]
+
+    def get_interrupt_socket(self):
+        """
+        Create a socket to interrupt the poll when the thread needs to
+        continue without waiting for the timeout
+        """
+        # Lazily created singleton per SocketHandler.
+        if not self.interrupt_socket:
+            self.interrupt_socket = InterruptSocket(self)
+        return self.interrupt_socket
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/__init__.py b/instrumentation/next-share/BaseLib/Core/BitTornado/__init__.py
new file mode 100644 (file)
index 0000000..c16bf91
--- /dev/null
@@ -0,0 +1,109 @@
+# Written by BitTornado authors and Arno Bakker
+# see LICENSE.txt for license information
+
+## Arno: FIXME _idprefix is also defined in BitTornado.__init__ and that's the one
+## actually used in connections, so make sure they are defined in one place
+## (here) and correct.
+##
+
+from BaseLib.__init__ import LIBRARYNAME
+
+# Branding/version constants: which product this library ships as
+# decides the version string and the peer-id letter used on the wire.
+if LIBRARYNAME == "Tribler":
+    version_id = '5.2.1'
+    product_name = 'Tribler'
+    version_short = 'Tribler-' + version_id
+    report_email = 'tribler@tribler.org'
+    # Arno: looking at Azureus BTPeerIDByteDecoder this letter is free
+    # 'T' is BitTornado, 'A' is ABC, 'TR' is Transmission
+    TRIBLER_PEERID_LETTER='R'
+else:
+    version_id = '3.2.0' # aka M32
+    product_name = 'NextShare'
+    version_short = 'NextShare-' + version_id
+    report_email = 'support@p2p-next.org'
+    # Arno: looking at Azureus BTPeerIDByteDecoder this letter is free
+    # 'T' is BitTornado, 'A' is ABC, 'TR' is Transmission
+    TRIBLER_PEERID_LETTER='N'
+    
+
+version = version_short + ' (' + product_name + ')'
+_idprefix = TRIBLER_PEERID_LETTER
+
+
+from types import StringType
+from time import time, clock
+from string import strip
+import socket
+import random
+try:
+    from os import getpid
+except ImportError:
+    def getpid():
+        return 1
+from base64 import decodestring 
+import sys
+from traceback import print_exc
+    
+mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
+
+# Build the fixed peer-id prefix: the client letter plus one mapbase64
+# character per numeric version component, '-'-padded to 6 characters.
+#for subver in version_short[2:].split('.'):
+for subver in version_short.split('-')[1].split('.'):
+    try:
+        subver = int(subver)
+    except:
+        subver = 0
+    _idprefix += mapbase64[subver]
+_idprefix += ('-' * (6-len(_idprefix)))
+# One-slot mutable cell holding the random suffix; refreshed by
+# resetPeerIDs().
+_idrandom = [None]
+
+
+
+
+def resetPeerIDs():
+    # Regenerate the 11-character random peer-id suffix in _idrandom[0].
+    # Entropy comes from /dev/urandom when available, otherwise from the
+    # random module; each byte is masked to 6 bits and base64-mapped.
+    try:
+        f = open('/dev/urandom', 'rb')
+        x = f.read(20)
+        f.close()
+    except:
+        # Arno: make sure there is some randomization when on win32
+        random.seed()
+        x = ''
+        while len(x) < 20:
+            #r = random.randint(0,sys.maxint)
+            r = random.randint(0,255)
+            x += chr(r)
+        x = x[:20]
+
+    s = ''
+    for i in x:
+        s += mapbase64[ord(i) & 0x3F]
+    _idrandom[0] = s[:11] # peer id = iprefix (6) + ins (3) + random
+        
+def createPeerID(ins = '---'):
+    # Build a 20-character peer id: 6-char version prefix + 3-char ins
+    # instance marker + 11 random chars (re-randomized on every call).
+    assert type(ins) is StringType
+    assert len(ins) == 3
+    resetPeerIDs()
+    return _idprefix + ins + _idrandom[0]
+
+def decodePeerID(id):
+    # Best-effort decode of a peer id into [client, version].  Supports
+    # Azureus style ('-XXyyyy-') and Shadow style (letter + 3 version
+    # chars).  On a decode error the traceback is printed and whatever
+    # was extracted so far (possibly None) is returned.
+    client = None
+    version = None
+    try:
+        if id[0] == '-':
+            # Azureus type ID: 
+            client = id[1:3]
+            encversion = id[3:7]
+        else:
+            # Shadow type ID:
+            client = id[0]
+            encversion = id[1:4] 
+        version = ''
+        # Map each version char back to its mapbase64 index, joining the
+        # components with dots.
+        for i in range(len(encversion)):
+            for j in range(len(mapbase64)):
+                if mapbase64[j] == encversion[i]:
+                    if len(version) > 0:
+                        version += '.'
+                    version += str(j)
+    except:
+        print_exc(file=sys.stderr)
+    return [client,version]
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/bencode.py b/instrumentation/next-share/BaseLib/Core/BitTornado/bencode.py
new file mode 100644 (file)
index 0000000..c8ff912
--- /dev/null
@@ -0,0 +1,351 @@
+# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman
+# see LICENSE.txt for license information
+
+from types import IntType, LongType, StringType, ListType, TupleType, DictType
+try:
+    from types import BooleanType
+except ImportError:
+    BooleanType = None
+try:
+    from types import UnicodeType
+except ImportError:
+    UnicodeType = None
+
+from traceback import print_exc,print_stack
+import sys
+
+DEBUG = False
+
def decode_int(x, f):
    """Decode a bencoded integer 'i<digits>e' starting at offset f.

    Returns (value, offset just past the trailing 'e'); raises
    ValueError for '-0' or a superfluous leading zero.
    """
    f += 1
    end = x.index('e', f)
    try:
        value = int(x[f:end])
    except:
        value = long(x[f:end])
    # Reject the two illegal spellings the spec forbids.
    if x[f] == '-':
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and end != f + 1:
        raise ValueError
    return (value, end + 1)
+  
def decode_string(x, f):
    """Decode a bencoded string '<len>:<bytes>' starting at offset f.

    Returns (string, offset just past it); raises ValueError on a
    zero-padded length.
    """
    sep = x.index(':', f)
    try:
        n = int(x[f:sep])
    except (OverflowError, ValueError):
        n = long(x[f:sep])
    # A leading zero is only legal for the bare length '0'.
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + n], start + n)
+
def decode_unicode(x, f):
    # 'u'-prefixed entry: skip the prefix, decode the raw byte string,
    # then interpret it as UTF-8.
    raw, f = decode_string(x, f + 1)
    return (raw.decode('UTF-8'), f)
+
def decode_list(x, f):
    """Decode a bencoded list 'l...e' starting at offset f."""
    result = []
    f += 1
    # Each element's first character selects its decoder via decode_func.
    while x[f] != 'e':
        item, f = decode_func[x[f]](x, f)
        result.append(item)
    return (result, f + 1)
+
def decode_dict(x, f):
    """Decode a bencoded dict 'd...e' starting at offset f."""
    result = {}
    f += 1
    lastkey = None
    while x[f] != 'e':
        key, f = decode_string(x, f)
        # Arno, 2008-09-12: uTorrent 1.8 violates the bencoding spec --
        # the keys in its EXTEND handshake are not sorted, so the
        # historical "lastkey >= key" ordering check stays disabled.
        # Be liberal in what we receive.
        lastkey = key
        result[key], f = decode_func[x[f]](x, f)
    return (result, f + 1)
+
# Dispatch table: the first character of an encoded value selects its
# decoder ('l' list, 'd' dict, 'i' int, any digit a string length).
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
for _digit in '0123456789':
    decode_func[_digit] = decode_string
del _digit
#decode_func['u'] = decode_unicode
+  
def bdecode(x, sloppy = 0):
    """Decode a complete bencoded value, raising ValueError on bad data.

    With 'sloppy' set, trailing garbage after the value is tolerated.
    """
    try:
        value, consumed = decode_func[x[0]](x, 0)
#    except (IndexError, KeyError):
    except (IndexError, KeyError, ValueError):
        if DEBUG:
            print_exc()
        raise ValueError("bad bencoded data")
    if not sloppy and consumed != len(x):
        raise ValueError("bad bencoded data")
    return value
+
def test_bdecode():
    """Self-test: malformed inputs raise ValueError, good ones decode.

    NOTE(review): the 'd1:b0:1:a0:e' / 'd1:a0:1:a0:e' cases still expect
    ValueError, but decode_dict's key-ordering check is disabled, so this
    function fails there -- exactly as the original did; confirm intent.
    """
    bad = [
        '0:0:', 'ie', 'i341foo382e', 'i-0e', 'i123', '', 'i6easd',
        '35208734823ljdahflajhdf', '2:abfdjslhfld', '02:xy', 'l',
        'leanfdldjfh', 'relwjhrlewjh', 'd', 'defoobar', 'd3:fooe',
        'di1e0:e', 'd1:b0:1:a0:e', 'd1:a0:1:a0:e', 'i03e', 'l01:ae',
        '9999:x', 'l0:', 'd0:0:', 'd0:',
    ]
    for data in bad:
        try:
            bdecode(data)
            assert 0
        except ValueError:
            pass
    good = [
        ('i4e', 4), ('i0e', 0), ('i123456789e', 123456789),
        ('i-10e', -10), ('0:', ''), ('3:abc', 'abc'),
        ('10:1234567890', '1234567890'), ('le', []),
        ('l0:0:0:e', ['', '', '']), ('li1ei2ei3ee', [1, 2, 3]),
        ('l3:asd2:xye', ['asd', 'xy']),
        ('ll5:Alice3:Bobeli2ei3eee', [['Alice', 'Bob'], [2, 3]]),
        ('de', {}),
        ('d3:agei25e4:eyes4:bluee', {'age': 25, 'eyes': 'blue'}),
        ('d8:spam.mp3d6:author5:Alice6:lengthi100000eee',
         {'spam.mp3': {'author': 'Alice', 'length': 100000}}),
    ]
    for data, expected in good:
        assert bdecode(data) == expected
+
# Shared sentinel object; encode_bencached checks it to make sure it
# really received a Bencached instance.
bencached_marker = []

class Bencached:
    # Wrapper for a string that is already bencoded, so encoders can
    # splice it into the output verbatim instead of re-encoding.
    def __init__(self, s):
        self.marker = bencached_marker
        self.bencoded = s

BencachedType = type(Bencached('')) # insufficient, but good as a filter
+
def encode_bencached(x, r):
    # Pre-encoded data: verify the marker, then splice in verbatim.
    assert x.marker == bencached_marker
    r.append(x.bencoded)

def encode_int(x, r):
    # Integers encode as 'i<decimal>e'.
    r.extend(('i', str(x), 'e'))

def encode_bool(x, r):
    # Booleans are encoded as the integers 0/1 (bencoding has no bool).
    encode_int(int(x), r)

def encode_string(x, r):
    # Strings encode as '<len>:<bytes>'.
    r.extend((str(len(x)), ':', x))

def encode_unicode(x, r):
    #r.append('u')
    # Unicode values are emitted as their UTF-8 byte string.
    encode_string(x.encode('UTF-8'), r)
+
def encode_list(x, r):
    # 'l' <elements> 'e'; each element dispatched on its concrete type.
    r.append('l')
    for item in x:
        encode_func[type(item)](item, r)
    r.append('e')
+
def encode_dict(x, r):
    # 'd' <key><value>... 'e'; keys are emitted in sorted order per spec.
    r.append('d')
    ilist = x.items()
    ilist.sort()
    for k, v in ilist:
        
        if DEBUG:
            print >>sys.stderr,"bencode: Encoding",`k`,`v`
        
        # Keys must be plain strings; a non-string key blows up on
        # len()/str() here, which bencode() below converts to an assert.
        try:
            r.extend((str(len(k)), ':', k))
        except:
            print >> sys.stderr, "k: %s" % k
            raise
            
        encode_func[type(v)](v, r)
    r.append('e')
+
# Dispatch table mapping concrete Python types to their encoder.
encode_func = {}
encode_func[BencachedType] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
# BooleanType/UnicodeType are None on interpreters that lack them.
if BooleanType:
    encode_func[BooleanType] = encode_bool
# Arno, 2010-01-27: No more implicit Unicode support.
# We should disable this now and then to see if the higher layers properly
# UTF-8 encode their fields before calling bencode    
if UnicodeType:
    encode_func[UnicodeType] = encode_unicode
+    
def bencode(x):
    # Encode any supported Python value to its bencoded string form.
    # Unsupported types are logged and turned into an AssertionError.
    r = []
    try:
        encode_func[type(x)](x, r)
    except:
        print >>sys.stderr,"bencode: *** error *** could not encode type %s (value: %s)" % (type(x), x)
        print_stack()
        
        print_exc()
        assert 0
    try:
        return ''.join(r)
    except:
        # join can only fail if an encoder appended a non-string piece;
        # dump the pieces when debugging, otherwise return ''.
        if DEBUG:
            print >>sys.stderr,"bencode: join error",x
            for elem in r:
                print >>sys.stderr,"elem",elem,"has type",type(elem)
            print_exc()
        return ''
+
def test_bencode():
    """Self-test: encoding produces the canonical bencoded strings."""
    cases = [
        (4, 'i4e'), (0, 'i0e'), (-10, 'i-10e'),
        (12345678901234567890, 'i12345678901234567890e'),
        ('', '0:'), ('abc', '3:abc'), ('1234567890', '10:1234567890'),
        ([], 'le'), ([1, 2, 3], 'li1ei2ei3ee'),
        ([['Alice', 'Bob'], [2, 3]], 'll5:Alice3:Bobeli2ei3eee'),
        ({}, 'de'),
        ({'age': 25, 'eyes': 'blue'}, 'd3:agei25e4:eyes4:bluee'),
        ({'spam.mp3': {'author': 'Alice', 'length': 100000}},
         'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'),
    ]
    for value, expected in cases:
        assert bencode(value) == expected
    # Non-string dict keys are rejected (assert inside bencode).
    try:
        bencode({1: 'foo'})
        assert 0
    except AssertionError:
        pass
+
+  
# Optional: JIT-accelerate the hot encode/decode paths when psyco is
# available (CPython 2.x only); silently skip otherwise. The try block
# is narrowed to the import alone so an ImportError raised by
# psyco.bind itself is no longer swallowed.
try:
    import psyco
except ImportError:
    pass
else:
    psyco.bind(bdecode)
    psyco.bind(bencode)
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/bitfield.py b/instrumentation/next-share/BaseLib/Core/BitTornado/bitfield.py
new file mode 100644 (file)
index 0000000..e54d5de
--- /dev/null
@@ -0,0 +1,226 @@
+# Written by Bram Cohen, Uoti Urpala, and John Hoffman
+# see LICENSE.txt for license information
+
+import sys
+
# Pre-Python-2.3 compatibility: define True/False/bool when the builtins
# are missing (a no-op on any modern interpreter).
try:
    True
except:
    True = 1
    False = 0
    bool = lambda x: not not x

# negsum(a) == number of false entries in a. Uses the builtin sum() when
# available; otherwise falls back to a reduce()-based count.
try:
    sum([1])
    negsum = lambda a: len(a) - sum(a)
except:
    negsum = lambda a: reduce(lambda x, y: x + (not y), a, 0)
+    
+def _int_to_booleans(x):
+    r = []
+    for i in range(8):
+        r.append(bool(x & 0x80))
+        x <<= 1
+    return tuple(r)
+
# Precomputed conversions between byte values and bit tuples:
# lookup_table[byte] -> 8-tuple of booleans (MSB first),
# reverse_lookup_table[8-tuple] -> the corresponding character.
lookup_table = []
reverse_lookup_table = {}
for _byte in xrange(256):
    _bits = _int_to_booleans(_byte)
    lookup_table.append(_bits)
    reverse_lookup_table[_bits] = chr(_byte)
+
+
class Bitfield:
    # Piece-availability bitmap: 'array' holds one boolean per piece,
    # 'numfalse' counts the pieces still missing, and 'activeranges'
    # (STBSPEED) optionally records which (start, end) piece ranges were
    # non-zero while parsing a wire-format bitstring.
    def __init__(self, length = None, bitstring = None, copyfrom = None, fromarray = None, calcactiveranges=False):
        """
        Create a piece bitmap (STBSPEED).

        @param calcactiveranges  If True (with 'bitstring'), record which
        parts of the piece-space are non-zero; used as an optimization
        for hooking in whilst live streaming. Only works in combination
        with the bitstring parameter.
        """
        
        self.activeranges = []
        
        if copyfrom is not None:
            # Copy-constructor path: duplicate another Bitfield's state.
            self.length = copyfrom.length
            self.array = copyfrom.array[:]
            self.numfalse = copyfrom.numfalse
            return
        if length is None:
            raise ValueError, "length must be provided unless copying from another array"
        self.length = length
        if bitstring is not None:
            # Wire format: 8 pieces per byte, MSB first; 'extra' is the
            # number of padding bits in the final byte.
            extra = len(bitstring) * 8 - length
            if extra < 0 or extra >= 8:
                raise ValueError
            t = lookup_table
            r = []
            
            chr0 = chr(0)
            inrange = False
            startpiece = 0
            countpiece = 0
            for c in bitstring:
                r.extend(t[ord(c)])
        
                # STBSPEED: track runs of non-zero bytes at byte (8-piece)
                # granularity as we go.
                if calcactiveranges:
                    if c != chr0:
                        # Non-zero value, either start or continuation of range
                        if inrange:
                            # Stay in activerange
                            pass
                        else:
                            # Start activerange
                            startpiece = countpiece
                            inrange = True
                    else:
                        # Zero, either end or continuation of zeroness
                        if inrange:
                            # End of activerange
                            self.activeranges.append((startpiece,countpiece))
                            inrange = False
                        else:
                            # Stay in zero
                            pass
                    countpiece += 8

            if calcactiveranges:
                if inrange:
                    # activerange ended at end of piece space 
                    self.activeranges.append((startpiece,min(countpiece,self.length-1)))
                       
            if extra > 0:
                # The spec requires spare bits of the last byte to be zero.
                if r[-extra:] != [0] * extra:
                    raise ValueError
                del r[-extra:]
            self.array = r
            self.numfalse = negsum(r)
            
        elif fromarray is not None:
            # Adopt an existing boolean list (not copied).
            self.array = fromarray
            self.numfalse = negsum(self.array)
        else:
            # Default: empty bitfield, every piece missing.
            self.array = [False] * length
            self.numfalse = length

    def __setitem__(self, index, val):
        # Keep numfalse consistent: old-minus-new is +1 when clearing a
        # set bit, -1 when setting a cleared one, 0 otherwise.
        val = bool(val)
        self.numfalse += self.array[index]-val
        self.array[index] = val

    def __getitem__(self, index):
        return self.array[index]

    def __len__(self):
        return self.length

    def tostring(self):
        # Serialize back to the wire format: 8 booleans per byte, MSB
        # first, zero-padding the trailing partial byte.
        booleans = self.array
        t = reverse_lookup_table
        s = len(booleans) % 8
        r = [ t[tuple(booleans[x:x+8])] for x in xrange(0, len(booleans)-s, 8) ]
        if s:
            # NOTE(review): 'r += <1-char string>' extends the list with
            # that single character (list += iterable), so this works;
            # r.append(...) was likely intended.
            r += t[tuple(booleans[-s:] + ([0] * (8-s)))]
        return ''.join(r)

    def complete(self):
        # True once no piece is missing.
        return not self.numfalse

    def copy(self):
        # NOTE(review): returns a copy of the boolean list, not a new
        # Bitfield instance.
        return self.array[:self.length]

    def toboollist(self):
        # Element-by-element copy of the piece flags as a plain list.
        bools = [False] * self.length
        for piece in range(0,self.length):
            bools[piece] = self.array[piece]
        return bools

    def get_active_ranges(self):
        # STBSPEED: ranges computed in __init__ (empty unless
        # calcactiveranges was requested).
        return self.activeranges

    def get_numtrue(self):
        # Number of pieces we do have.
        return self.length - self.numfalse
+
+
def test_bitfield():
    """Self-test for Bitfield construction, mutation and serialization."""
    # (length, bitstring) pairs that must be rejected (bad sizes or
    # non-zero padding bits). The (7, 'ab') case is listed twice, as in
    # the original.
    bad = [
        (7, 'ab'), (7, 'ab'), (9, 'abc'), (0, 'a'), (1, ''), (7, ''),
        (8, ''), (9, 'a'), (7, chr(1)), (9, chr(0) + chr(0x40)),
    ]
    for length, bits in bad:
        try:
            Bitfield(length, bits)
            assert False
        except ValueError:
            pass
    # Valid bitstrings round-trip through tostring().
    roundtrip = [
        (0, ''), (1, chr(0x80)), (7, chr(0x02)), (8, chr(0xFF)),
        (9, chr(0) + chr(0x80)),
    ]
    for length, bits in roundtrip:
        assert Bitfield(length, bits).tostring() == bits
    x = Bitfield(1)
    assert x.numfalse == 1
    x[0] = 1
    assert x.numfalse == 0
    x[0] = 1
    assert x.numfalse == 0  # setting an already-set bit is a no-op
    assert x.tostring() == chr(0x80)
    x = Bitfield(7)
    assert len(x) == 7
    x[6] = 1
    assert x.numfalse == 6
    assert x.tostring() == chr(0x02)
    x = Bitfield(8)
    x[7] = 1
    assert x.tostring() == chr(1)
    x = Bitfield(9)
    x[8] = 1
    assert x.numfalse == 8
    assert x.tostring() == chr(0) + chr(0x80)
    x = Bitfield(8, chr(0xC4))
    assert len(x) == 8
    assert x.numfalse == 5
    assert x.tostring() == chr(0xC4)
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/clock.py b/instrumentation/next-share/BaseLib/Core/BitTornado/clock.py
new file mode 100644 (file)
index 0000000..459e1ea
--- /dev/null
@@ -0,0 +1,30 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+import sys
+
+from time import time
+
+_MAXFORWARD = 100
+_FUDGE = 1
+
+class RelativeTime:
+    def __init__(self):
+        self.time = time()
+        self.offset = 0
+
+    def get_time(self):        
+        t = time() + self.offset
+        if t < self.time or t > self.time + _MAXFORWARD:
+            self.time += _FUDGE
+            self.offset += self.time - t
+            return self.time
+        self.time = t
+        return t
+
# On POSIX, use the smoothed RelativeTime wall clock above; on win32
# fall back to time.clock() (high resolution on that platform).
if sys.platform != 'win32':
    _RTIME = RelativeTime()
    def clock():
        return _RTIME.get_time()
else:
    from time import clock
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/download_bt1.py b/instrumentation/next-share/BaseLib/Core/BitTornado/download_bt1.py
new file mode 100644 (file)
index 0000000..82a71c0
--- /dev/null
@@ -0,0 +1,837 @@
+# Written by Bram Cohen and Pawel Garbacki, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+import os
+import time
+from zurllib import urlopen
+from urlparse import urlparse
+from BT1.btformats import check_message
+from BT1.Choker import Choker
+from BT1.Storage import Storage
+from BT1.StorageWrapper import StorageWrapper
+from BT1.FileSelector import FileSelector
+from BT1.Uploader import Upload
+from BT1.Downloader import Downloader
+from BT1.GetRightHTTPDownloader import GetRightHTTPDownloader
+from BT1.HoffmanHTTPDownloader import HoffmanHTTPDownloader
+from BT1.Connecter import Connecter
+from RateLimiter import RateLimiter
+from BT1.Encrypter import Encoder
+from RawServer import RawServer, autodetect_socket_style
+from BT1.Rerequester import Rerequester
+from BT1.DownloaderFeedback import DownloaderFeedback
+from RateMeasure import RateMeasure
+from CurrentRateMeasure import Measure
+from BT1.PiecePicker import PiecePicker
+from BT1.Statistics import Statistics
+from bencode import bencode, bdecode
+from BaseLib.Core.Utilities.Crypto import sha
+from os import path, makedirs, listdir
+from parseargs import parseargs, formatDefinitions, defaultargs
+from socket import error as socketerror
+from random import seed
+from threading import Event
+from clock import clock
+import re
+from traceback import print_exc,print_stack
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.Merkle.merkle import create_fake_hashes
+from BaseLib.Core.Utilities.unicode import bin2unicode, dunno2unicode
+from BaseLib.Core.Video.PiecePickerStreaming import PiecePickerVOD
+# Ric: added svc
+from BaseLib.Core.Video.PiecePickerSVC import PiecePickerSVC
+from BaseLib.Core.Video.SVCTransporter import SVCTransporter
+from BaseLib.Core.Video.VideoOnDemand import MovieOnDemandTransporter
+from BaseLib.Core.APIImplementation.maketorrent import torrentfilerec2savefilename,savefilenames2finaldest
+
+#ProxyService_
+#
+from BaseLib.Core.ProxyService.Coordinator import Coordinator
+from BaseLib.Core.ProxyService.Helper import Helper
+from BaseLib.Core.ProxyService.RatePredictor import ExpSmoothRatePredictor
+import sys
+from traceback import print_exc,print_stack
+#
+#_ProxyService
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+class BT1Download:    
    def __init__(self, statusfunc, finfunc, errorfunc, excfunc, logerrorfunc, doneflag, 
                 config, response, infohash, id, rawserver, get_extip_func, port,
                 videoanalyserpath):
        # Session callbacks and core handles, stored verbatim:
        # statusfunc/finfunc/errorfunc/excfunc/logerrorfunc report
        # progress, completion and errors; doneflag signals shutdown;
        # response is the decoded torrent metainfo dict.
        self.statusfunc = statusfunc
        self.finfunc = finfunc
        self.errorfunc = errorfunc
        self.excfunc = excfunc
        self.logerrorfunc = logerrorfunc
        self.doneflag = doneflag
        self.config = config
        self.response = response
        self.infohash = infohash
        self.myid = id
        self.rawserver = rawserver
        self.get_extip_func = get_extip_func
        self.port = port
        self.info = self.response['info']
        
        # Merkle: Create list of fake hashes. This will be filled if we're an
        # initial seeder
        if self.info.has_key('root hash') or self.info.has_key('live'):
            self.pieces = create_fake_hashes(self.info)
        else:
            # Classic torrent: 'pieces' is a flat string of 20-byte SHA1s.
            self.pieces = [self.info['pieces'][x:x+20]
                           for x in xrange(0, len(self.info['pieces']), 20)]
        self.len_pieces = len(self.pieces)
        self.piecesize = self.info['piece length']
        # unpauseflag set == running; cleared while paused.
        self.unpauseflag = Event()
        self.unpauseflag.set()
        self.downloader = None
        self.storagewrapper = None
        self.fileselector = None
        self.super_seeding_active = False
        self.filedatflag = Event()
        self.spewflag = Event()
        self.superseedflag = Event()
        self.whenpaused = None
        self.finflag = Event()
        self.rerequest = None
        self.tcp_ack_fudge = config['tcp_ack_fudge']
        # Ric added SVC case
        self.svc_video = (config['mode'] == DLMODE_SVC)
        self.play_video = (config['mode'] == DLMODE_VOD)
        self.am_video_source = bool(config['video_source'])
        # i.e. if VOD then G2G, if live then BT 
        self.use_g2g = self.play_video and not ('live' in response['info'])
        self.videoinfo = None
        self.videoanalyserpath = videoanalyserpath
        self.voddownload = None
        

        self.selector_enabled = config['selector_enabled']

        self.excflag = self.rawserver.get_exception_flag()
        self.failed = False
        self.checking = False
        self.started = False

        # ProxyService_
        #
        self.helper = None
        self.coordinator = None
        self.rate_predictor = None
        #
        # _ProxyService

        # 2fastbt_
        # Set up cooperative-download roles and pick the piece picker
        # variant matching the download mode (source / VOD / SVC / plain).
        try:
            
            if self.config['download_help']:
                if DEBUG:
                    print >>sys.stderr,"BT1Download: coopdl_role is",self.config['coopdl_role'],`self.config['coopdl_coordinator_permid']`
                
                if self.config['coopdl_role'] == COOPDL_ROLE_COORDINATOR:
                    from BaseLib.Core.ProxyService.Coordinator import Coordinator
                    
                    self.coordinator = Coordinator(self.infohash, self.len_pieces)
                #if self.config['coopdl_role'] == COOPDL_ROLE_COORDINATOR or self.config['coopdl_role'] == COOPDL_ROLE_HELPER:
                # Arno, 2008-05-20: removed Helper when coordinator, shouldn't need it.
                # Reason to remove it is because it messes up PiecePicking: when a 
                # helper, it calls _next() again after it returned None, probably
                # to provoke a RESERVE_PIECE request to the coordinator.
                # This change passes test_dlhelp.py
                #
                if self.config['coopdl_role'] == COOPDL_ROLE_HELPER:
                    from BaseLib.Core.ProxyService.Helper import Helper
                    
                    self.helper = Helper(self.infohash, self.len_pieces, self.config['coopdl_coordinator_permid'], coordinator = self.coordinator)
                    self.config['coopdl_role'] = ''
                    self.config['coopdl_coordinator_permid'] = ''


            if self.am_video_source:
                from BaseLib.Core.Video.VideoSource import PiecePickerSource

                self.picker = PiecePickerSource(self.len_pieces, config['rarest_first_cutoff'], 
                             config['rarest_first_priority_cutoff'], helper = self.helper, coordinator = self.coordinator)
            elif self.play_video:
                # Jan-David: Start video-on-demand service
                self.picker = PiecePickerVOD(self.len_pieces, config['rarest_first_cutoff'], 
                             config['rarest_first_priority_cutoff'], helper = self.helper, coordinator = self.coordinator, piecesize=self.piecesize)
            elif self.svc_video:
                # Ric: Start SVC VoD service TODO
                self.picker = PiecePickerSVC(self.len_pieces, config['rarest_first_cutoff'], 
                             config['rarest_first_priority_cutoff'], helper = self.helper, coordinator = self.coordinator, piecesize=self.piecesize)
            else:
                self.picker = PiecePicker(self.len_pieces, config['rarest_first_cutoff'], 
                             config['rarest_first_priority_cutoff'], helper = self.helper, coordinator = self.coordinator)
        except:
            print_exc()
            print >> sys.stderr,"BT1Download: EXCEPTION in __init__ :'" + str(sys.exc_info()) + "' '"
# _2fastbt

        self.choker = Choker(config, rawserver.add_task, 
                             self.picker, self.finflag.isSet)
+
+        #print >>sys.stderr,"download_bt1.BT1Download: play_video is",self.play_video
+
    def set_videoinfo(self,videoinfo,videostatus):
        # Attach VOD metadata; the VOD/SVC piece pickers need videostatus
        # to steer piece selection around the playback position.
        self.videoinfo = videoinfo
        self.videostatus = videostatus

        # Ric: added svc case
        if self.play_video or self.svc_video:
            self.picker.set_videostatus( self.videostatus )
+
+    def checkSaveLocation(self, loc):
+        if self.info.has_key('length'):
+            return path.exists(loc)
+        for x in self.info['files']:
+            if path.exists(path.join(loc, x['path'][0])):
+                return True
+        return False
+                
+
+    def saveAs(self, filefunc, pathfunc = None):
+        """ Now throws Exceptions """
+        def make(f, forcedir = False):
+            if not forcedir:
+                f = path.split(f)[0]
+            if f != '' and not path.exists(f):
+                makedirs(f)
+
+        if self.info.has_key('length'):
+            file_length = self.info['length']
+            file = filefunc(self.info['name'], file_length, 
+                            self.config['saveas'], False)
+            # filefunc throws exc if filename gives IOError
+
+            make(file)
+            files = [(file, file_length)]
+        else:
+            file_length = 0L
+            for x in self.info['files']:
+                file_length += x['length']
+            file = filefunc(self.info['name'], file_length, 
+                            self.config['saveas'], True)
+            # filefunc throws exc if filename gives IOError
+
+            # if this path exists, and no files from the info dict exist, we assume it's a new download and 
+            # the user wants to create a new directory with the default name
+            existing = 0
+            if path.exists(file):
+                if not path.isdir(file):
+                    raise IOError(file + 'is not a dir')
+                if listdir(file):  # if it's not empty
+                    for x in self.info['files']:
+                        savepath1 = torrentfilerec2savefilename(x,1)
+                        if path.exists(path.join(file, savepath1)):
+                            existing = 1
+                    if not existing:
+                        try:
+                            file = path.join(file, self.info['name'])
+                        except UnicodeDecodeError:
+                            file = path.join(file, dunno2unicode(self.info['name']))
+                        if path.exists(file) and not path.isdir(file):
+                            if file.endswith('.torrent') or file.endswith(TRIBLER_TORRENT_EXT):
+                                (prefix,ext) = os.path.splitext(file)
+                                file = prefix
+                            if path.exists(file) and not path.isdir(file):
+                                raise IOError("Can't create dir - " + self.info['name'])
+            make(file, True)
+
+            # alert the UI to any possible change in path
+            if pathfunc != None:
+                pathfunc(file)
+
+            files = []
+            for x in self.info['files']:
+                savepath = torrentfilerec2savefilename(x)
+                full = savefilenames2finaldest(file,savepath)
+                # Arno: TODO: this sometimes gives too long filenames for 
+                # Windows. When fixing this take into account that 
+                # Download.get_dest_files() should still produce the same
+                # filenames as your modifications here.
+                files.append((full, x['length']))
+                make(full)
+
+        self.filename = file
+        self.files = files
+        self.datalength = file_length
+        
+        if DEBUG:
+            print >>sys.stderr,"BT1Download: saveas returning ",`file`,"self.files is",`self.files`
+                
+        return file
+
    def getFilename(self):
        # Top-level save path chosen by saveAs().
        return self.filename

    def get_dest(self,index):
        # Absolute path of file number 'index' (from saveAs()'s files list).
        return self.files[index][0]

    def get_datalength(self):
        # Total payload size in bytes, computed by saveAs().
        return self.datalength 
+
    def _finished(self):
        # Download complete: flag it, make storage read-only, start
        # super-seeding if requested, relax the choker's rotation period
        # (scaled by piece size), tell the rerequester (presumably the
        # 'completed' tracker announce -- confirm) and invoke the
        # caller's finish callback.
        self.finflag.set()
        try:
            self.storage.set_readonly()
        except (IOError, OSError), e:
            self.errorfunc('trouble setting readonly at end - ' + str(e))
        if self.superseedflag.isSet():
            self._set_super_seed()
        self.choker.set_round_robin_period(
            max( self.config['round_robin_period'],
                 self.config['round_robin_period'] *
                                     self.info['piece length'] / 200000 ) )
        self.rerequest_complete()
        self.finfunc()
+
    def _data_flunked(self, amount, index):
        # A piece failed its hash check: account the rejected bytes and
        # log it (unless we are already shutting down).
        self.ratemeasure_datarejected(amount)
        if not self.doneflag.isSet():
            self.logerrorfunc('piece %d failed hash check, re-downloading it' % index)
+
+    def _piece_from_live_source(self,index,data):
+        if self.videostatus.live_streaming and self.voddownload is not None:
+            return self.voddownload.piece_from_live_source(index,data)
+        else:
+            return True
+
    def _failed(self, reason):
        # Fatal error: remember the failure, signal shutdown via doneflag,
        # and report the reason (when one was given).
        self.failed = True
        self.doneflag.set()
        if reason is not None:
            self.errorfunc(reason)
+        
+
+    def initFiles(self, old_style = False, statusfunc = None, resumedata = None):
+        """ Now throws exceptions """
+        if self.doneflag.isSet():
+            return None
+        if not statusfunc:
+            statusfunc = self.statusfunc
+
+        disabled_files = None
+        if self.selector_enabled:
+            self.priority = self.config['priority']
+            if self.priority:
+                try:
+                    self.priority = self.priority.split(',')
+                    assert len(self.priority) == len(self.files)
+                    self.priority = [int(p) for p in self.priority]
+                    for p in self.priority:
+                        assert p >= -1
+                        assert p <= 2
+                except:
+                    raise ValueError('bad priority list given, ignored')
+                    self.priority = None
+            try:
+                disabled_files = [x == -1 for x in self.priority]
+            except:
+                pass
+
+        self.storage = Storage(self.files, self.info['piece length'], 
+                               self.doneflag, self.config, disabled_files)
+
+        # Merkle: Are we dealing with a Merkle torrent y/n?
+        if self.info.has_key('root hash'):
+            root_hash = self.info['root hash']
+        else:
+            root_hash = None
+        self.storagewrapper = StorageWrapper(self.videoinfo, self.storage, self.config['download_slice_size'],
+            self.pieces, self.info['piece length'], root_hash, 
+            self._finished, self._failed,
+            statusfunc, self.doneflag, self.config['check_hashes'],
+            self._data_flunked, self._piece_from_live_source, self.rawserver.add_task,
+            self.config, self.unpauseflag)
+            
+        if self.selector_enabled:
+            self.fileselector = FileSelector(self.files, self.info['piece length'], 
+                                             None, 
+                                             self.storage, self.storagewrapper, 
+                                             self.rawserver.add_task, 
+                                             self._failed)
+
+            if resumedata:
+                self.fileselector.unpickle(resumedata)
+                
+        self.checking = True
+        if old_style:
+            return self.storagewrapper.old_style_init()
+        return self.storagewrapper.initialize
+
+
+    def _make_upload(self, connection, ratelimiter, totalup):
+        """ Factory for per-connection Upload objects; handed to the Connecter. """
+        return Upload(connection, ratelimiter, totalup, 
+                      self.choker, self.storagewrapper, self.picker, 
+                      self.config)
+
+    def _kick_peer(self, connection):
+        def k(connection = connection):
+            connection.close()
+        self.rawserver.add_task(k, 0)
+
+    def _ban_peer(self, ip):
+        # Delegate to the Encoder's ban list (encoder_ban is bound in startEngine).
+        self.encoder_ban(ip)
+
+    def _received_raw_data(self, x):
+        # Account a fraction of raw received bytes as sent traffic so the
+        # upload rate limiter leaves headroom for TCP ACKs; a tcp_ack_fudge
+        # of 0 disables this entirely.
+        if self.tcp_ack_fudge:
+            x = int(x*self.tcp_ack_fudge)
+            self.ratelimiter.adjust_sent(x)
+#            self.upmeasure.update_rate(x)
+
+    def _received_data(self, x):
+        # Feed downloaded byte counts to the rate meter and the ETA estimator.
+        self.downmeasure.update_rate(x)
+        self.ratemeasure.data_came_in(x)
+
+    def _received_http_data(self, x):
+        # Like _received_data, but additionally credits the bytes to the
+        # Downloader's external (HTTP-seed) bookkeeping.
+        self.downmeasure.update_rate(x)
+        self.ratemeasure.data_came_in(x)
+        self.downloader.external_data_received(x)
+
+    def _cancelfunc(self, pieces):
+        # Cancel outstanding requests for these pieces on every download path:
+        # peers, GetRight-style and Hoffman-style HTTP seeds.
+        self.downloader.cancel_piece_download(pieces)
+        self.ghttpdownloader.cancel_piece_download(pieces)
+        self.hhttpdownloader.cancel_piece_download(pieces)
+    def _reqmorefunc(self, pieces):
+        # Put pieces back in the request queue (e.g. after a priority change).
+        self.downloader.requeue_piece_download(pieces)
+
+    def startEngine(self, ratelimiter = None, vodeventfunc = None):
+        """ Wire up the transfer machinery -- piece picker, rate measures and
+        limiter, Downloader, Connecter, Encoder, HTTP seeds -- then enter
+        SVC/VOD or standard mode; sets self.started unless shutdown was
+        already requested.  Called after the hash check completes.
+
+        @param ratelimiter: shared RateLimiter; when None a private one is
+            created with max_upload_rate applied.
+        @param vodeventfunc: callback for video-on-demand playback events.
+        """
+        
+        if DEBUG:
+            print >>sys.stderr,"BT1Download: startEngine",`self.info['name']`
+        
+        if self.doneflag.isSet():
+            return
+        
+        self.checking = False
+
+        # Arno, 2010-08-11: STBSPEED: if at all, loop only over pieces I have, 
+        # not piece range.
+        completeondisk = (self.storagewrapper.get_amount_left() == 0)
+        if DEBUG:
+            print >>sys.stderr,"BT1Download: startEngine: complete on disk?",completeondisk,"found",len(self.storagewrapper.get_pieces_on_disk_at_startup())
+        self.picker.fast_initialize(completeondisk)
+        if not completeondisk:
+            for i in self.storagewrapper.get_pieces_on_disk_at_startup(): # empty when completeondisk
+                self.picker.complete(i)
+            
+        self.upmeasure = Measure(self.config['max_rate_period'], 
+                            self.config['upload_rate_fudge'])
+        self.downmeasure = Measure(self.config['max_rate_period'])
+
+        if ratelimiter:
+            self.ratelimiter = ratelimiter
+        else:
+            self.ratelimiter = RateLimiter(self.rawserver.add_task, 
+                                           self.config['upload_unit_size'], 
+                                           self.setConns)
+            self.ratelimiter.set_upload_rate(self.config['max_upload_rate'])
+        
+        self.ratemeasure = RateMeasure()
+        self.ratemeasure_datarejected = self.ratemeasure.data_rejected
+
+        self.downloader = Downloader(self.infohash, self.storagewrapper, self.picker, 
+            self.config['request_backlog'], self.config['max_rate_period'], 
+            self.len_pieces, self.config['download_slice_size'], 
+            self._received_data, self.config['snub_time'], self.config['auto_kick'], 
+            self._kick_peer, self._ban_peer, scheduler = self.rawserver.add_task)
+        self.downloader.set_download_rate(self.config['max_download_rate'])
+
+        self.picker.set_downloader(self.downloader)
+# 2fastbt_
+        if self.coordinator is not None:
+            self.coordinator.set_downloader(self.downloader)
+
+        self.connecter = Connecter(self.response, self._make_upload, self.downloader, self.choker, 
+                            self.len_pieces, self.piecesize, self.upmeasure, self.config, 
+                            self.ratelimiter, self.info.has_key('root hash'),
+                            self.rawserver.add_task, self.coordinator, self.helper, self.get_extip_func, self.port, self.use_g2g,self.infohash,self.response.get('announce',None),self.info.has_key('live'))
+# _2fastbt
+        self.encoder = Encoder(self.connecter, self.rawserver, 
+            self.myid, self.config['max_message_length'], self.rawserver.add_task, 
+            self.config['keepalive_interval'], self.infohash, 
+            self._received_raw_data, self.config)
+        self.encoder_ban = self.encoder.ban
+        if "initial peers" in self.response:
+            if DEBUG:
+                print >> sys.stderr, "BT1Download: startEngine: Using initial peers", self.response["initial peers"]
+            self.encoder.start_connections([(address, 0) for address in self.response["initial peers"]])
+#--- 2fastbt_
+        if DEBUG:
+            print str(self.config['exclude_ips'])
+        for ip in self.config['exclude_ips']:
+            if DEBUG:
+                print >>sys.stderr,"BT1Download: startEngine: Banning ip: " + str(ip)
+            self.encoder_ban(ip)
+
+        if self.helper is not None:
+            from BaseLib.Core.ProxyService.RatePredictor import ExpSmoothRatePredictor
+
+            self.helper.set_encoder(self.encoder)
+            self.rate_predictor = ExpSmoothRatePredictor(self.rawserver, 
+                self.downmeasure, self.config['max_download_rate'])
+            self.picker.set_rate_predictor(self.rate_predictor)
+            self.rate_predictor.update()
+# _2fastbt
+
+        # HTTP seeding: 'url-list' (GetRight style) and 'httpseeds' (Hoffman
+        # style) entries are only tapped while the download is incomplete.
+        self.ghttpdownloader = GetRightHTTPDownloader(self.storagewrapper, self.picker, 
+            self.rawserver, self.finflag, self.logerrorfunc, self.downloader, 
+            self.config['max_rate_period'], self.infohash, self._received_http_data, 
+            self.connecter.got_piece)
+        if self.response.has_key('url-list') and not self.finflag.isSet():
+            for u in self.response['url-list']:
+                self.ghttpdownloader.make_download(u)
+
+        self.hhttpdownloader = HoffmanHTTPDownloader(self.storagewrapper, self.picker, 
+            self.rawserver, self.finflag, self.logerrorfunc, self.downloader, 
+            self.config['max_rate_period'], self.infohash, self._received_http_data, 
+            self.connecter.got_piece)
+        if self.response.has_key('httpseeds') and not self.finflag.isSet():
+            for u in self.response['httpseeds']:
+                self.hhttpdownloader.make_download(u)
+
+        if self.selector_enabled:
+            self.fileselector.tie_in(self.picker, self._cancelfunc, self._reqmorefunc)
+            if self.priority:
+                self.fileselector.set_priorities_now(self.priority)
+                                # erase old data once you've started modifying it
+
+        # Ric: added svc case TODO check with play_video
+        if self.svc_video:
+            if self.picker.am_I_complete():
+                # TODO do something
+                pass
+            self.voddownload = SVCTransporter(self,self.videostatus,self.videoinfo,self.videoanalyserpath,vodeventfunc)
+        
+        elif self.play_video:
+            if self.picker.am_I_complete():
+                if DEBUG:
+                    print >>sys.stderr,"BT1Download: startEngine: VOD requested, but file complete on disk",self.videoinfo
+                # Added bitrate parameter for html5 playback
+                vodeventfunc( self.videoinfo, VODEVENT_START, {
+                    "complete":  True,
+                    "filename":  self.videoinfo["outpath"],
+                    "mimetype":  self.videoinfo["mimetype"],
+                    "stream":    None,
+                    "length":    self.videostatus.selected_movie["size"],
+                    "bitrate":   self.videoinfo["bitrate"]
+                } )
+            else:
+                if DEBUG:
+                    print >>sys.stderr,"BT1Download: startEngine: Going into VOD mode",self.videoinfo
+
+                self.voddownload = MovieOnDemandTransporter(self,self.videostatus,self.videoinfo,self.videoanalyserpath,vodeventfunc,self.ghttpdownloader)
+        elif DEBUG:
+            print >>sys.stderr,"BT1Download: startEngine: Going into standard mode"
+
+        if self.am_video_source:
+            from BaseLib.Core.Video.VideoSource import VideoSourceTransporter,RateLimitedVideoSourceTransporter
+
+            if DEBUG:
+                print >>sys.stderr,"BT1Download: startEngine: Acting as VideoSource"
+            if self.config['video_ratelimit']:
+                self.videosourcetransporter = RateLimitedVideoSourceTransporter(self.config['video_ratelimit'],self.config['video_source'],self,self.config['video_source_authconfig'],self.config['video_source_restartstatefilename'])
+            else:
+                self.videosourcetransporter = VideoSourceTransporter(self.config['video_source'],self,self.config['video_source_authconfig'],self.config['video_source_restartstatefilename'])
+            self.videosourcetransporter.start()
+        elif DEBUG:
+            print >>sys.stderr,"BT1Download: startEngine: Not a VideoSource"
+            
+        if not self.doneflag.isSet():
+            self.started = True
+
+    def rerequest_complete(self):
+        if self.rerequest:
+            self.rerequest.announce(1)
+
+    def rerequest_stopped(self):
+        if self.rerequest:
+            self.rerequest.announce(2)
+
+    def rerequest_lastfailed(self):
+        if self.rerequest:
+            return self.rerequest.last_failed
+        return False
+    
+    def startRerequester(self, paused=False):
+        """ Lazily create the tracker Rerequester and, unless paused, start
+        its announce loop. """
+        # RePEX:
+        # Moved the creation of the Rerequester to a separate method,
+        # allowing us to only create the Rerequester without starting
+        # it from SingleDownload.
+        if self.rerequest is None:
+            self.rerequest = self.createRerequester()
+            self.encoder.set_rerequester(self.rerequest)
+        
+        if not paused:
+            self.rerequest.start()
+            
+        
+    def createRerequester(self, callback=None):
+        """ Build (but do not start) a Rerequester from the torrent's
+        announce / announce-list (multitracker, BEP 12) metainfo.
+
+        @param callback: consumer for peer lists returned by the tracker;
+            defaults to encoder.start_connections.
+        """
+        if self.response.has_key ('announce-list'):
+            trackerlist = self.response['announce-list']
+            for tier in range(len(trackerlist)):
+                for t in range(len(trackerlist[tier])):
+                    trackerlist[tier][t] = bin2unicode(trackerlist[tier][t])
+        else:
+            # single-tracker torrent: wrap it in a one-tier list
+            tracker = bin2unicode(self.response.get('announce', ''))
+            if tracker:
+                trackerlist = [[tracker]]
+            else:
+                trackerlist = [[]]
+
+        if callback is None:
+            callback = self.encoder.start_connections
+
+        rerequest = Rerequester(trackerlist, self.config['rerequest_interval'], 
+            self.rawserver.add_task,self.connecter.how_many_connections, 
+            self.config['min_peers'], callback, 
+            self.rawserver.add_task, self.storagewrapper.get_amount_left, 
+            self.upmeasure.get_total, self.downmeasure.get_total, self.port, self.config['ip'], 
+            self.myid, self.infohash, self.config['http_timeout'], 
+            self.logerrorfunc, self.excfunc, self.config['max_initiate'], 
+            self.doneflag, self.upmeasure.get_rate, self.downmeasure.get_rate, 
+            self.unpauseflag,self.config)
+
+        if self.play_video and self.voddownload is not None:
+            # tell the VOD transport how many peers the tracker reported
+            rerequest.add_notifier( lambda x: self.voddownload.peers_from_tracker_report( len( x ) ) )
+
+        return rerequest
+
+
+    def _init_stats(self):
+        # Build the Statistics aggregator; per-directory stats only make
+        # sense for multi-file ('files') torrents.
+        self.statistics = Statistics(self.upmeasure, self.downmeasure, 
+                    self.connecter, self.ghttpdownloader, self.hhttpdownloader, self.ratelimiter, 
+                    self.rerequest_lastfailed, self.filedatflag)
+        if self.info.has_key('files'):
+            self.statistics.set_dirstats(self.files, self.info['piece length'])
+
+    def autoStats(self, displayfunc = None):
+        """ Start periodic statistics reporting (every display_interval) via
+        displayfunc, defaulting to self.statusfunc. """
+        if not displayfunc:
+            displayfunc = self.statusfunc
+
+        self._init_stats()
+        DownloaderFeedback(self.choker, self.ghttpdownloader, self.hhttpdownloader, self.rawserver.add_task, 
+            self.upmeasure.get_rate, self.downmeasure.get_rate, 
+            self.ratemeasure, self.storagewrapper.get_stats, 
+            self.datalength, self.finflag, self.spewflag, self.statistics, 
+            displayfunc, self.config['display_interval'], 
+            infohash = self.infohash,voddownload=self.voddownload)
+
+    def startStats(self):
+        """ Set up on-demand statistics collection; returns the feedback
+        object's gather method for the caller to poll. """
+        self._init_stats()
+        self.spewflag.set()    # start collecting peer cache
+        d = DownloaderFeedback(self.choker, self.ghttpdownloader, self.hhttpdownloader, self.rawserver.add_task, 
+            self.upmeasure.get_rate, self.downmeasure.get_rate, 
+            self.ratemeasure, self.storagewrapper.get_stats, 
+            self.datalength, self.finflag, self.spewflag, self.statistics, 
+            infohash = self.infohash,voddownload=self.voddownload)
+        return d.gather
+
+
+    def getPortHandler(self):
+        # The Encoder handles incoming connections on our listen port.
+        return self.encoder
+
+
+    def checkpoint(self): # Added by Arno
+        """ Called by network thread """
+        if self.fileselector and self.started:
+            # self.fileselector.finish() does nothing at the moment, so as
+            # long as the network thread calls this, it should be OK.
+            return self.fileselector.pickle()
+        else:
+            return None
+
+    def shutdown(self):
+        """ Flush and close storage, send the stop announce and halt the VOD
+        transport; returns pickled fileselector resume data, or None. """
+        if self.checking or self.started:
+            self.storagewrapper.sync()
+            self.storage.close()
+            self.rerequest_stopped()
+        resumedata = None
+        if self.fileselector and self.started:
+            # do not save resume state from a failed download
+            if not self.failed:
+                self.fileselector.finish()
+                resumedata = self.fileselector.pickle()
+        if self.voddownload is not None:
+            self.voddownload.stop()
+        return resumedata
+
+
+    def setUploadRate(self, rate, networkcalling=False):
+        """ Set the max upload rate; applied on the network thread unless
+        networkcalling says we are already on it.  Silently a no-op before
+        startEngine (AttributeError on missing ratelimiter). """
+        try:
+            def s(self = self, rate = rate):
+                if DEBUG:
+                    print >>sys.stderr,"BT1Download: set max upload to",rate
+                self.config['max_upload_rate'] = rate
+                self.ratelimiter.set_upload_rate(rate)
+            if networkcalling:
+                s()
+            else:
+                self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+    def setConns(self, conns, conns2 = None,networkcalling=False):
+        """ Set min (conns) and max (conns2, defaults to conns) upload slots;
+        applied on the network thread unless networkcalling is True. """
+        if not conns2:
+            conns2 = conns
+        try:
+            def s(self = self, conns = conns, conns2 = conns2):
+                self.config['min_uploads'] = conns
+                self.config['max_uploads'] = conns2
+                # with many upload slots, also allow more outgoing connections
+                if (conns > 30):
+                    self.config['max_initiate'] = conns + 10
+            if networkcalling:
+                s()
+            else:
+                self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+        
+    def setDownloadRate(self, rate,networkcalling=False):
+        """ Set the max download rate; applied on the network thread unless
+        networkcalling is True.  No-op before startEngine. """
+        try:
+            def s(self = self, rate = rate):
+                self.config['max_download_rate'] = rate
+                self.downloader.set_download_rate(rate)
+            if networkcalling:
+                s()
+            else:
+                self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+    def startConnection(self, ip, port, id):
+        # Initiate an outgoing connection to a known peer (id = peer id).
+        self.encoder._start_connection((ip, port), id)
+      
+    def _startConnection(self, ipandport, id):
+        # Variant of startConnection taking an (ip, port) tuple directly.
+        self.encoder._start_connection(ipandport, id)
+        
+    def setInitiate(self, initiate,networkcalling=False):
+        """ Set the max number of connections we initiate ourselves. """
+        try:
+            def s(self = self, initiate = initiate):
+                self.config['max_initiate'] = initiate
+            if networkcalling:
+                s()
+            else:
+                self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+    def setMaxConns(self,nconns,networkcalling=False):
+        """ Set the overall connection limit. """
+        try:
+            def s(self = self, nconns = nconns):
+                self.config['max_connections'] = nconns
+            if networkcalling:
+                s()
+            else:
+                self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+
+    def getConfig(self):
+        # Live config dict -- callers see later set* updates.
+        return self.config
+
+    def reannounce(self, special = None):
+        """ Force a tracker announce on the network thread, optionally to a
+        specific tracker URL (special).  No-op before the Rerequester exists. """
+        try:
+            def r(self = self, special = special):
+                if special is None:
+                    self.rerequest.announce()
+                else:
+                    self.rerequest.announce(specialurl = special)
+            self.rawserver.add_task(r)
+        except AttributeError:
+            pass
+
+    def getResponse(self):
+        try:
+            return self.response
+        except:
+            return None
+
+#    def Pause(self):
+#        try:
+#            if self.storagewrapper:
+#                self.rawserver.add_task(self._pausemaker, 0)
+#        except:
+#            return False
+#        self.unpauseflag.clear()
+#        return True
+#
+#    def _pausemaker(self):
+#        self.whenpaused = clock()
+#        self.unpauseflag.wait()   # sticks a monkey wrench in the main thread
+#
+#    def Unpause(self):
+#        self.unpauseflag.set()
+#        if self.whenpaused and clock()-self.whenpaused > 60:
+#            def r(self = self):
+#                self.rerequest.announce(3)      # rerequest automatically if paused for >60 seconds
+#            self.rawserver.add_task(r)
+
+    def Pause(self):
+        """ Pause the download; returns False when storage is not set up yet. """
+        if not self.storagewrapper:
+            return False
+        self.unpauseflag.clear()
+        # actual pausing happens on the network thread
+        self.rawserver.add_task(self.onPause)
+        return True
+
+    def onPause(self):
+        # Network-thread half of Pause(): remember when we paused (used by
+        # onUnpause to decide on a reannounce) and pause the transfer parts.
+        self.whenpaused = clock()
+        if not self.downloader:
+            return
+        self.downloader.pause(True)
+        self.encoder.pause(True)
+        self.choker.pause(True)
+
+    def Unpause(self):
+        """ Resume a paused download (work done on the network thread). """
+        self.unpauseflag.set()
+        self.rawserver.add_task(self.onUnpause)
+
+    def onUnpause(self):
+        # Network-thread half of Unpause().
+        if not self.downloader:
+            return
+        self.downloader.pause(False)
+        self.encoder.pause(False)
+        self.choker.pause(False)
+        if self.rerequest and self.whenpaused and clock()-self.whenpaused > 60:
+            self.rerequest.announce(3)      # rerequest automatically if paused for >60 seconds
+
+    def set_super_seed(self,networkcalling=False):
+        """ Request super-seeding mode; takes effect on the network thread
+        (see _set_super_seed) unless networkcalling is True. """
+        self.superseedflag.set()
+        if networkcalling:
+            self._set_super_seed()
+        else:
+            self.rawserver.add_task(self._set_super_seed)
+
+    def _set_super_seed(self):
+        # Network-thread half of set_super_seed(); only activates once we are
+        # complete (finflag set) and not already super-seeding.
+        if not self.super_seeding_active and self.finflag.isSet():
+            self.super_seeding_active = True
+            self.logerrorfunc('        ** SUPER-SEED OPERATION ACTIVE **\n' +
+                           '  please set Max uploads so each peer gets 6-8 kB/s')
+            def s(self = self):
+                self.downloader.set_super_seed()
+                self.choker.set_super_seed()
+            self.rawserver.add_task(s)
+            # NOTE(review): finflag was already required above, so this inner
+            # test is always true here -- redundant, kept as-is.
+            if self.finflag.isSet():        # mode started when already finished
+                def r(self = self):
+                    self.rerequest.announce(3)  # so after kicking everyone off, reannounce
+                self.rawserver.add_task(r)
+
+    def am_I_finished(self):
+        # True once all pieces are downloaded and verified.
+        return self.finflag.isSet()
+
+    def get_transfer_stats(self):
+        # (total uploaded bytes, total downloaded bytes)
+        return self.upmeasure.get_total(), self.downmeasure.get_total()
+
+    def get_moviestreamtransport(self):
+        # VOD/SVC transporter created by startEngine, or None in standard mode.
+        return self.voddownload
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/inifile.py b/instrumentation/next-share/BaseLib/Core/BitTornado/inifile.py
new file mode 100644 (file)
index 0000000..4802f0e
--- /dev/null
@@ -0,0 +1,169 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+__fool_epydoc = 481
+'''
+reads/writes a Windows-style INI file
+format:
+
+  aa = "bb"
+  cc = 11
+
+  [eee]
+  ff = "gg"
+
+decodes to:
+d = { '': {'aa':'bb','cc':'11'}, 'eee': {'ff':'gg'} }
+
+the encoder can also take this as input:
+
+d = { 'aa': 'bb', 'cc': 11, 'eee': {'ff':'gg'} }
+
+though it will only decode in the above format.  Keywords must be strings.
+Values that are strings are written surrounded by quotes, and the decoding
+routine automatically strips any.
+Booleans are written as integers.  Anything else aside from string/int/float
+may have unpredictable results.
+'''
+
+from traceback import print_exc
+from types import DictType, StringType
+try:
+    from types import BooleanType
+except ImportError:
+    BooleanType = None
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+def ini_write(f, d, comment=''):
+    try:
+        a = {'':{}}
+        for k, v in d.items():
+            assert type(k) == StringType
+            k = k.lower()
+            if type(v) == DictType:
+                if DEBUG:
+                    print 'new section:' +k
+                if k:
+                    assert not a.has_key(k)
+                    a[k] = {}
+                aa = a[k]
+                for kk, vv in v:
+                    assert type(kk) == StringType
+                    kk = kk.lower()
+                    assert not aa.has_key(kk)
+                    if type(vv) == BooleanType:
+                        vv = int(vv)
+                    if type(vv) == StringType:
+                        vv = '"'+vv+'"'
+                    aa[kk] = str(vv)
+                    if DEBUG:
+                        print 'a['+k+']['+kk+'] = '+str(vv)
+            else:
+                aa = a['']
+                assert not aa.has_key(k)
+                if type(v) == BooleanType:
+                    v = int(v)
+                if type(v) == StringType:
+                    v = '"'+v+'"'
+                aa[k] = str(v)
+                if DEBUG:
+                    print 'a[\'\']['+k+'] = '+str(v)
+        r = open(f, 'w')
+        if comment:
+            for c in comment.split('\n'):
+                r.write('# '+c+'\n')
+            r.write('\n')
+        l = a.keys()
+        l.sort()
+        for k in l:
+            if k:
+                r.write('\n['+k+']\n')
+            aa = a[k]
+            ll = aa.keys()
+            ll.sort()
+            for kk in ll:
+                r.write(kk+' = '+aa[kk]+'\n')
+        success = True
+    except:
+        if DEBUG:
+            print_exc()
+        success = False
+    try:
+        r.close()
+    except:
+        pass
+    return success
+
+
+# Default line-level error reporter for ini_read: print diagnostics when
+# DEBUG is on, otherwise swallow parse errors silently.
+if DEBUG:
+    def errfunc(lineno, line, err):
+        print '('+str(lineno)+') '+err+': '+line
+else:
+    errfunc = lambda lineno, line, err: None
+
+def ini_read(f, errfunc = errfunc):
+    """ Parse INI file f into {section_name: {key: value}}; keys and section
+    names are lowercased, values are strings with surrounding quotes
+    stripped.  Returns None on I/O failure; per-line parse problems are
+    reported via errfunc(lineno, line, message) and the line is skipped. """
+    try:
+        r = open(f, 'r')
+        ll = r.readlines()
+        d = {}
+        dd = {'':d}
+        for i in xrange(len(ll)):
+            l = ll[i]
+            l = l.strip()
+            if not l:
+                continue
+            if l[0] == '#':
+                continue
+            # '[section]' header starts a new key dict
+            if l[0] == '[':
+                if l[-1] != ']':
+                    errfunc(i, l, 'syntax error')
+                    continue
+                l1 = l[1:-1].strip().lower()
+                if not l1:
+                    errfunc(i, l, 'syntax error')
+                    continue
+                if dd.has_key(l1):
+                    # duplicate section: report, then merge into the old one
+                    errfunc(i, l, 'duplicate section')
+                    d = dd[l1]
+                    continue
+                d = {}
+                dd[l1] = d
+                continue
+            # 'key = value' preferred, 'key: value' accepted as fallback
+            try:
+                k, v = l.split('=', 1)
+            except:
+                try:
+                    k, v = l.split(':', 1)
+                except:
+                    errfunc(i, l, 'syntax error')
+                    continue
+            k = k.strip().lower()
+            v = v.strip()
+            if len(v) > 1 and ( (v[0] == '"' and v[-1] == '"') or
+                                (v[0] == "'" and v[-1] == "'") ):
+                v = v[1:-1]
+            if not k:
+                errfunc(i, l, 'syntax error')
+                continue
+            if d.has_key(k):
+                errfunc(i, l, 'duplicate entry')
+                continue
+            d[k] = v
+        if DEBUG:
+            print dd
+    except:
+        if DEBUG:
+            print_exc()
+        dd = None
+    try:
+        r.close()
+    except:
+        pass
+    return dd
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/iprangeparse.py b/instrumentation/next-share/BaseLib/Core/BitTornado/iprangeparse.py
new file mode 100644 (file)
index 0000000..9304c39
--- /dev/null
@@ -0,0 +1,194 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from bisect import bisect, insort
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+    bool = lambda x: not not x
+
+
+def to_long_ipv4(ip):
+    ip = ip.split('.')
+    if len(ip) != 4:
+        raise ValueError, "bad address"
+    b = 0L
+    for n in ip:
+        b *= 256
+        b += int(n)
+    return b
+
+
+def to_long_ipv6(ip):
+    """ Convert an IPv6 string (including '::' compression and trailing
+    embedded IPv4 notation) to a long integer; raises ValueError on
+    malformed input. """
+    if not ip:
+        raise ValueError, "bad address"
+    if ip == '::':      # boundary handling
+        ip = ''
+    elif ip[:2] == '::':
+        ip = ip[1:]
+    elif ip[0] == ':':
+        raise ValueError, "bad address"
+    elif ip[-2:] == '::':
+        ip = ip[:-1]
+    elif ip[-1] == ':':
+        raise ValueError, "bad address"
+
+    # b collects one byte per entry; a single None marks the position of the
+    # '::' gap, expanded to the missing zero bytes below.
+    b = []
+    doublecolon = False
+    for n in ip.split(':'):
+        if n == '':     # double-colon
+            if doublecolon:
+                raise ValueError, "bad address"
+            doublecolon = True
+            b.append(None)
+            continue
+        if n.find('.') >= 0: # IPv4
+            n = n.split('.')
+            if len(n) != 4:
+                raise ValueError, "bad address"
+            for i in n:
+                b.append(int(i))
+            continue
+        # 16-bit group: left-pad to 4 hex digits, store as two bytes
+        n = ('0'*(4-len(n))) + n
+        b.append(int(n[:2], 16))
+        b.append(int(n[2:], 16))
+    bb = 0L
+    for n in b:
+        if n is None:
+            # expand '::' to enough zero bytes to reach 16 bytes total
+            for i in xrange(17-len(b)):
+                bb *= 256
+            continue
+        bb *= 256
+        bb += n
+    return bb
+
+# 0xFFFF shifted into bits 32-47: the marker word of an IPv4-mapped IPv6
+# address (::ffff:a.b.c.d) -- see the comparisons in IP_List.
+ipv4addrmask = 65535L*256*256*256*256
+
+class IP_List:
+    """ Set of IPv4/IPv6 address ranges kept as sorted range-start lists plus
+    start->end dicts; insertion merges overlapping/adjacent ranges and
+    membership tests use bisection. """
+    def __init__(self):
+        self.ipv4list = []  # starts of ranges
+        self.ipv4dict = {}  # start: end of ranges
+        self.ipv6list = []  # "
+        self.ipv6dict = {}  # "
+
+    def __nonzero__(self):
+        # truth value: does the list contain any range at all
+        return bool(self.ipv4list or self.ipv6list)
+
+
+    def append(self, ip_beg, ip_end = None):
+        """ Add the inclusive range ip_beg..ip_end (single address when
+        ip_end is None), merging with existing overlapping ranges. """
+        if ip_end is None:
+            ip_end = ip_beg
+        else:
+            assert ip_beg <= ip_end
+        if ip_beg.find(':') < 0:        # IPv4
+            ip_beg = to_long_ipv4(ip_beg)
+            ip_end = to_long_ipv4(ip_end)
+            l = self.ipv4list
+            d = self.ipv4dict
+        else:
+            ip_beg = to_long_ipv6(ip_beg)
+            ip_end = to_long_ipv6(ip_end)
+            bb = ip_beg % (256*256*256*256)
+            # NOTE(review): bb is the low 32 bits of the address, but
+            # ipv4addrmask has bits 32-47 set, so this comparison can never
+            # be true and IPv4-mapped addresses always land in the IPv6
+            # tables -- looks like a latent upstream bug; confirm intent.
+            if bb == ipv4addrmask:
+                ip_beg -= bb
+                ip_end -= bb
+                l = self.ipv4list
+                d = self.ipv4dict
+            else:
+                l = self.ipv6list
+                d = self.ipv6dict
+
+        # find the closest existing range starting at or before ip_beg, then
+        # absorb every range that overlaps/abuts the new one
+        pos = bisect(l, ip_beg)-1
+        done = pos < 0
+        while not done:
+            p = pos
+            while p < len(l):
+                range_beg = l[p]
+                if range_beg > ip_end+1:
+                    done = True
+                    break
+                range_end = d[range_beg]
+                if range_end < ip_beg-1:
+                    p += 1
+                    if p == len(l):
+                        done = True
+                        break
+                    continue
+                # if neither of the above conditions is true, the ranges overlap
+                ip_beg = min(ip_beg, range_beg)
+                ip_end = max(ip_end, range_end)
+                del l[p]
+                del d[range_beg]
+                break
+
+        insort(l, ip_beg)
+        d[ip_beg] = ip_end
+
+
+    def includes(self, ip):
+        """ Return True when ip (string) falls inside any stored range. """
+        if not (self.ipv4list or self.ipv6list):
+            return False
+        if ip.find(':') < 0:        # IPv4
+            ip = to_long_ipv4(ip)
+            l = self.ipv4list
+            d = self.ipv4dict
+        else:
+            ip = to_long_ipv6(ip)
+            bb = ip % (256*256*256*256)
+            # see NOTE(review) in append about this comparison
+            if bb == ipv4addrmask:
+                ip -= bb
+                l = self.ipv4list
+                d = self.ipv4dict
+            else:
+                l = self.ipv6list
+                d = self.ipv6dict
+        for ip_beg in l[bisect(l, ip)-1:]:
+            if ip == ip_beg:
+                return True
+            ip_end = d[ip_beg]
+            if ip > ip_beg and ip <= ip_end:
+                return True
+        return False
+
+
+    # reads a list from a file in the format 'whatever:whatever:ip-ip'
+    # (not IPv6 compatible at all)
+    def read_rangelist(self, file):
+        f = open(file, 'r')
+        while 1:
+            line = f.readline()
+            if not line:
+                break
+            line = line.strip()
+            if not line or line[0] == '#':
+                continue
+            line = line.split(':')[-1]
+            try:
+                ip1, ip2 = line.split('-')
+            except:
+                # no '-': treat the line as a single address
+                ip1 = line
+                ip2 = line
+            try:
+                self.append(ip1.strip(), ip2.strip())
+            except:
+                # unparsable entries are skipped with a warning on stdout
+                print '*** WARNING *** could not parse IP range: '+line
+        f.close()
+
+def is_ipv4(ip):
+    return ip.find(':') < 0
+
+def is_valid_ip(ip):
+    try:
+        if is_ipv4(ip):
+            a = ip.split('.')
+            assert len(a) == 4
+            for i in a:
+                chr(int(i))
+            return True
+        to_long_ipv6(ip)
+        return True
+    except:
+        return False
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/natpunch.py b/instrumentation/next-share/BaseLib/Core/BitTornado/natpunch.py
new file mode 100644 (file)
index 0000000..4cebfb6
--- /dev/null
@@ -0,0 +1,381 @@
+# Written by John Hoffman, Arno Bakker
+# derived from NATPortMapping.py by Yejun Yang
+# and from example code by Myers Carpenter
+# see LICENSE.txt for license information
+
+import sys
+import socket
+from traceback import print_exc
+from subnetparse import IP_List
+from clock import clock
+from __init__ import createPeerID
+
+from BaseLib.Core.NATFirewall.upnp import UPnPPlatformIndependent,UPnPError
+from BaseLib.Core.NATFirewall.guessip import get_my_wan_ip
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+EXPIRE_CACHE = 30 # seconds
+ID = "BT-"+createPeerID()[-4:]
+
+try:
+    import pythoncom, win32com.client
+    win32_imported = 1
+except ImportError:
+    if DEBUG and (sys.platform == 'win32'):
+        print >>sys.stderr,"natpunch: ERROR: pywin32 package not installed, UPnP mode 2 won't work now" 
+    win32_imported = 0
+
+UPnPError = UPnPError
+
+class _UPnP1:   # derived from Myers Carpenter's code
+                # seems to use the machine's local UPnP
+                # system for its operation.  Runs fairly fast
+
+    def __init__(self):
+        # Cached COM port-mapping collection; -10e10 forces a (re)fetch on
+        # first use (cache lifetime is EXPIRE_CACHE seconds).
+        self.map = None
+        self.last_got_map = -10e10
+
+    def _get_map(self):
+        # Return the StaticPortMappingCollection from the "HNetCfg.NATUPnP"
+        # COM object, re-querying at most once per EXPIRE_CACHE seconds.
+        # Returns None when the COM call fails (e.g. no UPnP gateway).
+        if self.last_got_map + EXPIRE_CACHE < clock():
+            try:
+                dispatcher = win32com.client.Dispatch("HNetCfg.NATUPnP")
+                self.map = dispatcher.StaticPortMappingCollection
+                self.last_got_map = clock()
+            except:
+                if DEBUG:
+                    print_exc()
+                self.map = None
+        return self.map
+
+    def test(self):
+        # True iff a non-empty port-mapping collection could be obtained.
+        try:
+            assert self._get_map()     # make sure a map was found
+            success = True
+        except:
+            if DEBUG:
+                print_exc()
+            success = False
+        return success
+
+
+    def open(self, ip, p, iproto='TCP'):
+        # Map external port p to internal ip:p for protocol iproto, tagged
+        # with our "BT-..." ID so clean() can find it later.  True on success.
+        map = self._get_map()
+        try:
+            map.Add(p, iproto, p, ip, True, ID)
+            if DEBUG:
+                print >>sys.stderr,'upnp1: succesfully opened port: '+ip+':'+str(p)
+            success = True
+        except:
+            if DEBUG:
+                print >>sys.stderr,"upnp1: COULDN'T OPEN "+str(p)
+                print_exc()
+            success = False
+        return success
+
+
+    def close(self, p, iproto='TCP'):
+        # Remove the mapping for external port p.  True on success.
+        map = self._get_map()
+        try:
+            map.Remove(p, iproto)
+            success = True
+            if DEBUG:
+                print >>sys.stderr,'upnp1: succesfully closed port: '+str(p)
+        except:
+            if DEBUG:
+                print >>sys.stderr,"upnp1: COULDN'T CLOSE "+str(p)
+                print_exc()
+            success = False
+        return success
+
+
+    def clean(self, retry = False, iproto='TCP'):
+        # Best-effort removal of stale mappings whose description starts
+        # with 'bt-' (i.e. ones created by this module); retries once if
+        # any removal failed.  All errors are swallowed.
+        if not win32_imported:
+            return
+        try:
+            map = self._get_map()
+            ports_in_use = []
+            for i in xrange(len(map)):
+                try:
+                    mapping = map[i]
+                    port = mapping.ExternalPort
+                    prot = str(mapping.Protocol).lower()
+                    desc = str(mapping.Description).lower()
+                except:
+                    # NOTE(review): if this fires on the first entry, 'prot'/
+                    # 'desc' below are unbound; the outer except masks it.
+                    port = None
+                if port and prot == iproto.lower() and desc[:3] == 'bt-':
+                    ports_in_use.append(port)
+            success = True
+            for port in ports_in_use:
+                try:
+                    map.Remove(port, iproto)
+                except:
+                    success = False
+            if not success and not retry:
+                self.clean(retry = True)
+        except:
+            pass
+
+    def get_ext_ip(self):
+        # This backend cannot report the gateway's external IP address.
+        return None
+
+
+class _UPnP2:   # derived from Yejun Yang's code
+                # apparently does a direct search for UPnP hardware
+                # may work in some cases where _UPnP1 won't, but is slow
+                # still need to implement "clean" method
+
+    def __init__(self):
+        # Discovered WAN connection services, cached; -10e10 forces a
+        # discovery pass on first use (cache lifetime is EXPIRE_CACHE).
+        self.services = None
+        self.last_got_services = -10e10
+                           
+    def _get_services(self):
+        # Discover UPnP WANIPConnection/WANPPPConnection services via the
+        # UPnPDeviceFinder COM object, at most once per EXPIRE_CACHE
+        # seconds (discovery is slow).  All COM errors are swallowed.
+        if not self.services or self.last_got_services + EXPIRE_CACHE < clock():
+            self.services = []
+            try:
+                f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder")
+                for t in ( "urn:schemas-upnp-org:service:WANIPConnection:1",
+                           "urn:schemas-upnp-org:service:WANPPPConnection:1" ):
+                    try:
+                        conns = f.FindByType(t, 0)
+                        for c in xrange(len(conns)):
+                            try:
+                                svcs = conns[c].Services
+                                for s in xrange(len(svcs)):
+                                    try:
+                                        self.services.append(svcs[s])
+                                    except:
+                                        if DEBUG:
+                                            print_exc()
+                            except:
+                                if DEBUG:
+                                    print_exc()
+                    except:
+                        if DEBUG:
+                            print_exc()
+            except:
+                if DEBUG:
+                    print_exc()
+            self.last_got_services = clock()
+        return self.services
+
+    def test(self):
+        # True iff at least one WAN connection service was discovered.
+        try:
+            assert self._get_services()    # make sure some services can be found
+            success = True
+        except:
+            success = False
+        return success
+
+
+    def open(self, ip, p, iproto='TCP'):
+        # Invoke AddPortMapping (p -> ip:p) on every discovered service;
+        # True if any of them accepted the mapping.
+        svcs = self._get_services()
+        success = False
+        for s in svcs:
+            try:
+                s.InvokeAction('AddPortMapping', ['', p, iproto, p, ip, True, ID, 0], '')
+                success = True
+            except:
+                if DEBUG:
+                    print_exc()
+        if DEBUG and not success:
+            print >>sys.stderr,"upnp2: COULDN'T OPEN "+str(p)
+            print_exc()
+        return success
+
+
+    def close(self, p, iproto='TCP'):
+        # Invoke DeletePortMapping for port p on every discovered service;
+        # True if any of them removed the mapping.
+        svcs = self._get_services()
+        success = False
+        for s in svcs:
+            try:
+                s.InvokeAction('DeletePortMapping', ['', p, iproto], '')
+                success = True
+            except:
+                if DEBUG:
+                    print_exc()
+        if DEBUG and not success:
+            print >>sys.stderr,"upnp2: COULDN'T CLOSE "+str(p)
+            print_exc()
+        return success
+
+
+    def get_ext_ip(self):
+        # Ask each service for the gateway's external IP; returns the last
+        # non-empty answer as a str, or None if none was obtained.
+        svcs = self._get_services()
+        success = None
+        for s in svcs:
+            try:
+                ret = s.InvokeAction('GetExternalIPAddress',[],'')
+                # With MS Internet Connection Sharing:
+                # - Good reply is: (None, (u'130.37.168.199',))
+                # - When router disconnected from Internet:  (None, (u'',))
+                if DEBUG:
+                    print >>sys.stderr,"upnp2: GetExternapIPAddress returned",ret
+                dns = ret[1]
+                if str(dns[0]) != '':
+                    success = str(dns[0])
+                elif DEBUG:
+                    print >>sys.stderr,"upnp2: RETURNED IP ADDRESS EMPTY"
+            except:
+                if DEBUG:
+                    print_exc()
+        if DEBUG and not success:
+            print >>sys.stderr,"upnp2: COULDN'T GET EXT IP ADDR"
+        return success
+
+class _UPnP3:
+    # Adapter around the platform-independent UPnP implementation from
+    # BaseLib.Core.NATFirewall.upnp (works without pywin32).
+    def __init__(self):
+        self.u = UPnPPlatformIndependent()
+
+    def test(self):
+        # True iff discovery found the wanted WAN services; False on error.
+        try:
+            self.u.discover()
+            return self.u.found_wanted_services()
+        except:
+            if DEBUG:
+                print_exc()
+            return False
+
+    def open(self,ip,p,iproto='TCP'):
+        """ Return False in case of network failure, 
+            Raises UPnPError in case of a properly reported error from the server
+        """
+        try:
+            self.u.add_port_map(ip,p,iproto=iproto)
+            return True
+        except UPnPError,e:
+            # Server-reported errors are propagated to the caller.
+            if DEBUG:
+                print_exc()
+            raise e
+        except:
+            if DEBUG:
+                print_exc()
+            return False
+
+    def close(self,p,iproto='TCP'):
+        """ Return False in case of network failure, 
+            Raises UPnPError in case of a properly reported error from the server
+        """
+        try:
+            self.u.del_port_map(p,iproto=iproto)
+            return True
+        except UPnPError,e:
+            if DEBUG:
+                print_exc()
+            raise e
+        except:
+            if DEBUG:
+                print_exc()
+            return False
+
+    def get_ext_ip(self):
+        """ Return False in case of network failure, 
+            Raises UPnPError in case of a properly reported error from the server
+        """
+        try:
+            return self.u.get_ext_ip()
+        except UPnPError,e:
+            if DEBUG:
+                print_exc()
+            raise e
+        except:
+            if DEBUG:
+                print_exc()
+            return None
+
+class UPnPWrapper:    # master holding class
+    
+    __single = None
+    
+    def __init__(self):
+        # Singleton facade over the three UPnP access methods; the index
+        # into self.upnplist is the "UPnP type" (1, 2 or 3) passed to test().
+        if UPnPWrapper.__single:
+            raise RuntimeError, "UPnPWrapper is singleton"
+        UPnPWrapper.__single = self
+
+        self.upnp1 = _UPnP1()
+        self.upnp2 = _UPnP2()
+        self.upnp3 = _UPnP3()
+        self.upnplist = (None, self.upnp1, self.upnp2, self.upnp3)
+        self.upnp = None          # backend selected by test()
+        self.local_ip = None      # LAN IP, guessed or registered
+        self.last_got_ip = -10e10
+
+    def getInstance(*args, **kw):
+        # Singleton accessor; constructs the instance on first call.
+        if UPnPWrapper.__single is None:
+            UPnPWrapper(*args, **kw)
+        return UPnPWrapper.__single
+    getInstance = staticmethod(getInstance)
+
+    def register(self,guessed_localip):
+        # Pre-seed the local IP so get_ip() skips its own discovery.
+        self.local_ip = guessed_localip
+
+    def get_ip(self):
+        # Return the cached local (intranet) IP, re-discovering it via
+        # getaddrinfo() at most once per EXPIRE_CACHE seconds; None when
+        # no intranet address could be found.
+        if self.last_got_ip + EXPIRE_CACHE < clock():
+            if self.local_ip is None:
+                local_ips = IP_List()
+                local_ips.set_intranet_addresses()
+                try:
+                    for info in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET):
+                                # exception if socket library isn't recent
+                        self.local_ip = info[4][0]
+                        if local_ips.includes(self.local_ip):
+                            self.last_got_ip = clock()
+                            if DEBUG:
+                                print >>sys.stderr,'upnpX: Local IP found: '+self.local_ip
+                            break
+                    else:
+                        raise ValueError('upnpX: couldn\'t find intranet IP')
+                except:
+                    self.local_ip = None
+                    if DEBUG:
+                        print >>sys.stderr,'upnpX: Error finding local IP'
+                        print_exc()
+        return self.local_ip
+
+    def test(self, upnp_type):
+        # Probe the requested backend; returns upnp_type when usable, else 0.
+        # Types 1 and 2 need pywin32 (and initialize COM as a side effect).
+        if DEBUG:
+            print >>sys.stderr,'upnpX: testing UPnP type '+str(upnp_type)
+        if not upnp_type or self.get_ip() is None or (upnp_type <= 2 and not win32_imported):
+            if DEBUG:
+                print >>sys.stderr,'upnpX: UPnP not supported'
+            return 0
+        if upnp_type != 3:
+            pythoncom.CoInitialize()                # leave initialized
+        self.upnp = self.upnplist[upnp_type]    # cache this
+        if self.upnp.test():
+            if DEBUG:
+                print >>sys.stderr,'upnpX: ok'
+            return upnp_type
+        if DEBUG:
+            print >>sys.stderr,'upnpX: tested bad'
+        return 0
+
+    def open(self, p, iproto='TCP'):
+        # Open port p on the selected backend (requires a prior test()).
+        assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first"
+        return self.upnp.open(self.get_ip(), p, iproto=iproto)
+
+    def close(self, p, iproto='TCP'):
+        # Close port p on the selected backend (requires a prior test()).
+        assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first"
+        return self.upnp.close(p,iproto=iproto)
+
+    def clean(self,iproto='TCP'):
+        # Stale-mapping cleanup only exists on the _UPnP1 backend.
+        return self.upnp1.clean(iproto=iproto)
+
+    def get_ext_ip(self):
+        # External IP from the selected backend (requires a prior test()).
+        assert self.upnp, "upnpX: must run UPnP_test() with the desired UPnP access type first"
+        return self.upnp.get_ext_ip()
+
+if __name__ == '__main__':
+    # Manual smoke test: guess the WAN IP, then exercise the type-3
+    # (platform-independent) backend: test, get_ext_ip, open and close 6881.
+    ip = get_my_wan_ip()
+    print >>sys.stderr,"guessed ip",ip
+    u = UPnPWrapper()
+    u.register(ip)
+    print >>sys.stderr,"TEST RETURNED",u.test(3)
+    print >>sys.stderr,"IGD says my external IP is",u.get_ext_ip()
+    print >>sys.stderr,"IGD open returned",u.open(6881)
+    print >>sys.stderr,"IGD close returned",u.close(6881)
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/parseargs.py b/instrumentation/next-share/BaseLib/Core/BitTornado/parseargs.py
new file mode 100644 (file)
index 0000000..7d05819
--- /dev/null
@@ -0,0 +1,142 @@
+# Written by Bill Bumgarner and Bram Cohen
+# see LICENSE.txt for license information
+
+from types import *
+from cStringIO import StringIO
+
+
+def splitLine(line, COLS=80, indent=10):
+    # Word-wrap 'line' to at most COLS columns, prefixing every output line
+    # with 'indent' spaces; falls back to a single-space indent when the
+    # requested indent would leave fewer than 15 columns for text.
+    indent = " " * indent
+    width = COLS - (len(indent) + 1)
+    if indent and width < 15:
+        width = COLS - 2
+        indent = " "
+    s = StringIO()
+    i = 0   # length of the current output line
+    for word in line.split():
+        if i == 0:
+            s.write(indent+word)
+            i = len(word)
+            continue
+        if i + len(word) >= width:
+            s.write('\n'+indent+word)
+            i = len(word)
+            continue
+        s.write(' '+word)
+        i += len(word) + 1
+    return s.getvalue()
+
+def formatDefinitions(options, COLS, presets = {}):
+    # Render a help text for 'options' (sequence of (longname, default, doc)
+    # triples) wrapped to COLS columns; values in 'presets' override the
+    # built-in defaults shown in the "(defaults to ...)" suffix.
+    s = StringIO()
+    for (longname, default, doc) in options:
+        s.write('--' + longname + ' <arg>\n')
+        default = presets.get(longname, default)
+        if type(default) in (IntType, LongType):
+            try:
+                default = int(default)
+            except:
+                pass
+        if default is not None:
+            doc += ' (defaults to ' + repr(default) + ')'
+        s.write(splitLine(doc, COLS, 10))
+        s.write('\n\n')
+    return s.getvalue()
+
+
+def usage(string):
+    # Report a command-line error by raising; callers catch ValueError.
+    raise ValueError(string)
+
+
+def defaultargs(options):
+    # Build a {longname: default} config dict from the option triples,
+    # omitting options whose default is None (i.e. required options).
+    l = {}
+    for (longname, default, doc) in options:
+        if default is not None:
+            l[longname] = default
+    return l
+        
+
+def parseargs(argv, options, minargs = None, maxargs = None, presets = {}):
+    # Parse argv into (config, args).  'options' is a sequence of
+    # (longname, default, doc); every '--key value' pair is coerced to the
+    # type of the current default.  Raises ValueError (via usage()) on
+    # unknown keys, missing values, bad formats, missing required options
+    # (default None), or wrong positional-argument counts.
+    config = {}
+    longkeyed = {}
+    for option in options:
+        longname, default, doc = option
+        longkeyed[longname] = option
+        config[longname] = default
+    for longname in presets.keys():        # presets after defaults but before arguments
+        config[longname] = presets[longname]
+    options = []
+    args = []
+    pos = 0
+    while pos < len(argv):
+        if argv[pos][:2] != '--':
+            # Not an option: collect as a positional argument.
+            args.append(argv[pos])
+            pos += 1
+        else:
+            if pos == len(argv) - 1:
+                usage('parameter passed in at end with no value')
+            key, value = argv[pos][2:], argv[pos+1]
+            pos += 2
+            if not longkeyed.has_key(key):
+                usage('unknown key --' + key)
+            longname, default, doc = longkeyed[key]
+            try:
+                # Coerce the string value to the type of the current default.
+                t = type(config[longname])
+                if t is NoneType or t is StringType:
+                    config[longname] = value
+                elif t is IntType:
+                    config[longname] = int(value)
+                elif t is LongType:
+                    config[longname] = long(value)
+                elif t is FloatType:
+                    config[longname] = float(value)
+                elif t is BooleanType:
+                    config[longname] = bool(value)
+                else:
+                    print 'parseargs: unknown type is',t
+                    assert 0
+            except ValueError, e:
+                usage('wrong format of --%s - %s' % (key, str(e)))
+    # Options left at None were never supplied and have no default: required.
+    for key, value in config.items():
+        if value is None:
+            usage("Option --%s is required." % key)
+    if minargs is not None and len(args) < minargs:
+        usage("Must supply at least %d args." % minargs)
+    if maxargs is not None and len(args) > maxargs:
+        usage("Too many args - %d max." % maxargs)
+    return (config, args)
+
+def test_parseargs():
+    # Self-test for parseargs(): coercion, defaults, last-wins repetition,
+    # and the various usage() error paths.
+    assert parseargs(('d', '--a', 'pq', 'e', '--b', '3', '--c', '4.5', 'f'), (('a', 'x', ''), ('b', 1, ''), ('c', 2.3, ''))) == ({'a': 'pq', 'b': 3, 'c': 4.5}, ['d', 'e', 'f'])
+    assert parseargs([], [('a', 'x', '')]) == ({'a': 'x'}, [])
+    assert parseargs(['--a', 'x', '--a', 'y'], [('a', '', '')]) == ({'a': 'y'}, [])
+    # NOTE(review): this try-block repeats the successful call above, so no
+    # ValueError is ever raised; presumably the default was meant to be None
+    # (a required option) -- confirm against upstream BitTornado.
+    try:
+        parseargs([], [('a', 'x', '')])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', 'x'], [])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a'], [('a', 'x', '')])
+    except ValueError:
+        pass
+    try:
+        parseargs([], [], 1, 2)
+    except ValueError:
+        pass
+    assert parseargs(['x'], [], 1, 2) == ({}, ['x'])
+    assert parseargs(['x', 'y'], [], 1, 2) == ({}, ['x', 'y'])
+    try:
+        parseargs(['x', 'y', 'z'], [], 1, 2)
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', '2.0'], [('a', 3, '')])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', 'z'], [('a', 2.1, '')])
+    except ValueError:
+        pass
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/parsedir.py b/instrumentation/next-share/BaseLib/Core/BitTornado/parsedir.py
new file mode 100644 (file)
index 0000000..051123c
--- /dev/null
@@ -0,0 +1,162 @@
+# Written by John Hoffman and Uoti Urpala
+# see LICENSE.txt for license information
+import os
+import sys
+from traceback import print_exc
+
+from bencode import bencode, bdecode
+from BT1.btformats import check_info
+from BaseLib.Core.simpledefs import TRIBLER_TORRENT_EXT
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.Utilities.Crypto import sha
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+def _errfunc(x):
+    # Default error reporter: prefix the message and log it to stderr.
+    print >>sys.stderr,"tracker: parsedir: "+x
+
+def parsedir(directory, parsed, files, blocked,
+             exts = ['.torrent', TRIBLER_TORRENT_EXT], return_metainfo = False, errfunc = _errfunc):
+    # Incrementally (re)scan 'directory' for torrent files.
+    #
+    #   parsed  : {infohash: info-dict} from the previous scan
+    #   files   : {path: [(mtime, size), infohash-or-0]} from the previous scan
+    #   blocked : {path: 1} for paths that were unparseable or duplicates
+    #
+    # Returns (new_parsed, new_files, new_blocked, added, removed): the
+    # refreshed versions of the three caches plus dicts of the torrents
+    # added and removed by this scan (keyed by infohash).
+    if DEBUG:
+        errfunc('checking dir')
+    dirs_to_check = [directory]
+    new_files = {}
+    new_blocked = {}
+    torrent_type = {}
+    while dirs_to_check:    # first, recurse directories and gather torrents
+        directory = dirs_to_check.pop()
+        newtorrents = False
+        for f in os.listdir(directory):
+            newtorrent = None
+            for ext in exts:
+                if f.endswith(ext):
+                    newtorrent = ext[1:]
+                    break
+            if newtorrent:
+                newtorrents = True
+                p = os.path.join(directory, f)
+                new_files[p] = [(int(os.path.getmtime(p)), os.path.getsize(p)), 0]
+                torrent_type[p] = newtorrent
+        # Only descend into subdirectories when this level had no torrents.
+        if not newtorrents:
+            for f in os.listdir(directory):
+                p = os.path.join(directory, f)
+                if os.path.isdir(p):
+                    dirs_to_check.append(p)
+
+    new_parsed = {}
+    to_add = []
+    added = {}
+    removed = {}
+    # files[path] = [(modification_time, size), hash], hash is 0 if the file
+    # has not been successfully parsed
+    for p, v in new_files.items():   # re-add old items and check for changes
+        oldval = files.get(p)
+        if not oldval:          # new file
+            to_add.append(p)
+            continue
+        h = oldval[1]
+        if oldval[0] == v[0]:   # file is unchanged from last parse
+            if h:
+                if blocked.has_key(p):  # parseable + blocked means duplicate
+                    to_add.append(p)    # other duplicate may have gone away
+                else:
+                    new_parsed[h] = parsed[h]
+                new_files[p] = oldval
+            else:
+                new_blocked[p] = 1  # same broken unparseable file
+            continue
+        # mtime/size changed: drop the old entry and re-parse below.
+        if parsed.has_key(h) and not blocked.has_key(p):
+            if DEBUG:
+                errfunc('removing '+p+' (will re-add)')
+            removed[h] = parsed[h]
+        to_add.append(p)
+
+    to_add.sort()
+    for p in to_add:                # then, parse new and changed torrents
+        new_file = new_files[p]
+        v, h = new_file
+        if new_parsed.has_key(h): # duplicate
+            if not blocked.has_key(p) or files[p][0] != v:
+                errfunc('**warning** '+
+                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
+            new_blocked[p] = 1
+            continue
+                
+        if DEBUG:
+            errfunc('adding '+p)
+        try:
+            # Arno: P2PURL
+            tdef = TorrentDef.load(p)
+            h = tdef.get_infohash()
+            d = tdef.get_metainfo()
+            
+            new_file[1] = h
+            if new_parsed.has_key(h):
+                errfunc('**warning** '+
+                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
+                new_blocked[p] = 1
+                continue
+
+            # Build the summary record stored under this torrent's infohash.
+            a = {}
+            a['path'] = p
+            f = os.path.basename(p)
+            a['file'] = f
+            a['type'] = torrent_type[p]
+            if tdef.get_url_compat():
+                a['url'] = tdef.get_url()
+            i = d['info']
+            # Total length and file count, for single- and multi-file modes.
+            l = 0
+            nf = 0
+            if i.has_key('length'):
+                l = i.get('length', 0)
+                nf = 1
+            elif i.has_key('files'):
+                for li in i['files']:
+                    nf += 1
+                    if li.has_key('length'):
+                        l += li['length']
+            a['numfiles'] = nf
+            a['length'] = l
+            a['name'] = i.get('name', f)
+            def setkey(k, d = d, a = a):
+                # Copy optional metainfo key k into the record if present.
+                if d.has_key(k):
+                    a[k] = d[k]
+            setkey('failure reason')
+            setkey('warning message')
+            setkey('announce-list')
+            # Arno, LOOKUP SERVICE
+            if tdef.get_urllist() is not None:
+                httpseedhashes = []
+                for url in tdef.get_urllist():
+                    # TODO: normalize?
+                    urlhash = sha(url).digest()
+                    httpseedhashes.append(urlhash)
+                a['url-hash-list'] = httpseedhashes
+            if return_metainfo:
+                a['metainfo'] = d
+        except:
+            # Unparseable torrent: report it and block the path.
+            print_exc()
+            errfunc('**warning** '+p+' has errors')
+            new_blocked[p] = 1
+            continue
+        if DEBUG:
+            errfunc('... successful')
+        new_parsed[h] = a
+        added[h] = a
+
+    for p, v in files.items():       # and finally, mark removed torrents
+        if not new_files.has_key(p) and not blocked.has_key(p):
+            if DEBUG:
+                errfunc('removing '+p)
+            removed[v[1]] = parsed[v[1]]
+
+    if DEBUG:
+        errfunc('done checking')
+    return (new_parsed, new_files, new_blocked, added, removed)
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/piecebuffer.py b/instrumentation/next-share/BaseLib/Core/BitTornado/piecebuffer.py
new file mode 100644 (file)
index 0000000..75e3e07
--- /dev/null
@@ -0,0 +1,86 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from array import array
+from threading import Lock
+# import inspect
+try:
+    True
+except:
+    True = 1
+    False = 0
+    
+DEBUG = False
+
+class SingleBuffer:
+    # A reusable, growable byte buffer backed by a char array; instances
+    # are recycled through the owning BufferPool rather than freed.
+    def __init__(self, pool):
+        self.pool = pool
+        self.buf = array('c')
+
+    def init(self):
+        # Reset the logical length for reuse; the underlying array keeps
+        # its capacity so recycled buffers avoid reallocation.
+        if DEBUG:
+            print self.pool.count
+            '''
+            for x in xrange(6,1,-1):
+                try:
+                    f = inspect.currentframe(x).f_code
+                    print (f.co_filename,f.co_firstlineno,f.co_name)
+                    del f
+                except:
+                    pass
+            print ''
+            '''
+        self.length = 0
+
+    def append(self, s):
+        # Append string s, overwriting any stale bytes past self.length.
+        l = self.length+len(s)
+        self.buf[self.length:l] = array('c', s)
+        self.length = l
+
+    def __len__(self):
+        return self.length
+
+    def __getslice__(self, a, b):
+        # Slice within the logical length only (the array may be longer).
+        if b > self.length:
+            b = self.length
+        if b < 0:
+            b += self.length
+        if a == 0 and b == self.length and len(self.buf) == b:
+            return self.buf  # optimization
+        return self.buf[a:b]
+
+    def getarray(self):
+        # A copy of the logical contents as a char array.
+        return self.buf[:self.length]
+
+    def release(self):
+        # Return this buffer to the pool for reuse.
+        if DEBUG:
+            print -self.pool.count
+        self.pool.release(self)
+
+
+class BufferPool:
+    # Free-list of SingleBuffer objects; new() hands out a recycled buffer
+    # when available, otherwise allocates a fresh one.
+    def __init__(self):
+        self.pool = []
+        self.lock = Lock()
+        if DEBUG:
+            self.count = 0  # total buffers ever allocated (debug only)
+
+    def new(self):
+        # Pop a recycled buffer (or allocate one) and re-init it; the lock
+        # serializes concurrent new() calls.
+        self.lock.acquire()
+        if self.pool:
+            x = self.pool.pop()
+        else:
+            x = SingleBuffer(self)
+            if DEBUG:
+                self.count += 1
+                x.count = self.count
+        x.init()
+        self.lock.release()
+        return x
+
+    def release(self, x):
+        # NOTE(review): appends without taking self.lock -- presumably
+        # relying on list.append being atomic under the GIL; confirm.
+        self.pool.append(x)
+
+
+# Module-level singleton pool; PieceBuffer() hands out a (re)initialized
+# SingleBuffer and callers return it with .release().
+_pool = BufferPool()
+PieceBuffer = _pool.new
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/selectpoll.py b/instrumentation/next-share/BaseLib/Core/BitTornado/selectpoll.py
new file mode 100644 (file)
index 0000000..7be1a55
--- /dev/null
@@ -0,0 +1,130 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+# Arno,2007-02-23: this poll class is used on win32
+
+import sys
+from select import select
+from time import sleep
+from types import IntType
+from bisect import bisect
+from sets import Set
+POLLIN = 1
+POLLOUT = 2
+POLLERR = 8
+POLLHUP = 16
+
+DEBUG = False
+
+class poll:
+    # select()-based emulation of the poll object API, used on win32 where
+    # the real poll() is unavailable.  Registered fds are kept in sorted
+    # lists so register/unregister are binary searches.
+    def __init__(self):
+        self.rlist = []
+        self.wlist = []
+        
+    def register(self, f, t):
+        # Register fd (or object with fileno()) for events in bitmask t;
+        # re-registering with a different mask drops the unselected event.
+        if type(f) != IntType:
+            f = f.fileno()
+        if (t & POLLIN):
+            insert(self.rlist, f)
+        else:
+            remove(self.rlist, f)
+        if (t & POLLOUT):
+            insert(self.wlist, f)
+        else:
+            remove(self.wlist, f)
+
+    def unregister(self, f):
+        # Remove fd from both event lists (no-op if absent).
+        if type(f) != IntType:
+            f = f.fileno()
+        remove(self.rlist, f)
+        remove(self.wlist, f)
+
+    def poll(self, timeout = None):
+        # Wait up to 'timeout' seconds; returns a list of (fd, event) pairs,
+        # None when select() rejects a parameter, or [] on timeout.
+        # NOTE(review): with no registered fds and timeout=None this calls
+        # sleep(None), which raises -- callers presumably always pass a
+        # timeout; confirm.
+        if self.rlist or self.wlist:
+            try:
+                # Arno, 2007-02-23: The original code never checked for errors
+                # on any file descriptors. 
+                elist = Set(self.rlist)
+                elist = elist.union(self.wlist)
+                elist = list(elist)    # in Python2.3, elist must be a list type
+                if DEBUG:
+                    print >>sys.stderr,"selectpoll: elist = ",elist
+                    
+                #print >>sys.stderr,"selectpoll: rlist",self.rlist,"wlist",self.wlist,"elist",elist
+                    
+                r, w, e = select(self.rlist, self.wlist, elist, timeout)
+                if DEBUG:
+                    print >>sys.stderr,"selectpoll: e = ",e
+            except ValueError:
+                if DEBUG:
+                    print >>sys.stderr,"selectpoll: select: bad param"
+                return None
+        else:
+            sleep(timeout)
+            return []
+        result = []
+        for s in r:
+            result.append((s, POLLIN))
+        for s in w:
+            result.append((s, POLLOUT))
+        for s in e:
+            result.append((s, POLLERR))
+        return result
+
+def remove(list, item):
+    # Delete 'item' from the sorted list, if present (binary search).
+    i = bisect(list, item)
+    if i > 0 and list[i-1] == item:
+        del list[i-1]
+
+def insert(list, item):
+    # Insert 'item' into the sorted list, keeping order and uniqueness.
+    i = bisect(list, item)
+    if i == 0 or list[i-1] != item:
+        list.insert(i, item)
+
+def test_remove():
+    # Self-test: remove() deletes present items and ignores absent ones,
+    # including on an empty list.
+    x = [2, 4, 6]
+    remove(x, 2)
+    assert x == [4, 6]
+    x = [2, 4, 6]
+    remove(x, 4)
+    assert x == [2, 6]
+    x = [2, 4, 6]
+    remove(x, 6)
+    assert x == [2, 4]
+    x = [2, 4, 6]
+    remove(x, 5)
+    assert x == [2, 4, 6]
+    x = [2, 4, 6]
+    remove(x, 1)
+    assert x == [2, 4, 6]
+    x = [2, 4, 6]
+    remove(x, 7)
+    assert x == [2, 4, 6]
+    x = [2, 4, 6]
+    remove(x, 5)
+    assert x == [2, 4, 6]
+    x = []
+    remove(x, 3)
+    assert x == []
+
+def test_insert():
+    # Self-test: insert() keeps the list sorted and never adds duplicates.
+    x = [2, 4]
+    insert(x, 1)
+    assert x == [1, 2, 4]
+    x = [2, 4]
+    insert(x, 3)
+    assert x == [2, 3, 4]
+    x = [2, 4]
+    insert(x, 5)
+    assert x == [2, 4, 5]
+    x = [2, 4]
+    insert(x, 2)
+    assert x == [2, 4]
+    x = [2, 4]
+    insert(x, 4)
+    assert x == [2, 4]
+    x = [2, 3, 4]
+    insert(x, 3)
+    assert x == [2, 3, 4]
+    x = []
+    insert(x, 3)
+    assert x == [3]
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/subnetparse.py b/instrumentation/next-share/BaseLib/Core/BitTornado/subnetparse.py
new file mode 100644 (file)
index 0000000..18e4187
--- /dev/null
@@ -0,0 +1,218 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from bisect import bisect, insort
+
+# Compatibility shim for ancient Python versions that predate the
+# built-in bool type/constants: fall back to integers and a truthiness
+# lambda when the name True is not defined.
+try:
+    True
+except:
+    True = 1
+    False = 0
+    bool = lambda x: not not x
+
+# hexbinmap: lowercase hex digit -> 4-char '0'/'1' string.  The extra
+# 'x' entry maps to '0000'; it is used by to_bitfield_ipv6() as left
+# padding for IPv6 groups shorter than four digits.
+hexbinmap = {
+    '0': '0000',
+    '1': '0001',
+    '2': '0010',
+    '3': '0011',
+    '4': '0100',
+    '5': '0101',
+    '6': '0110',
+    '7': '0111',
+    '8': '1000',
+    '9': '1001',
+    'a': '1010',
+    'b': '1011',
+    'c': '1100',
+    'd': '1101',
+    'e': '1110',
+    'f': '1111',
+    'x': '0000',
+}
+
+# chrbinmap: byte value 0..255 -> 8-char '0'/'1' string, most
+# significant bit first.  Precomputed once at import time.
+chrbinmap = {}
+for n in xrange(256):
+    b = []
+    nn = n
+    for i in xrange(8):
+        if nn & 0x80:
+            b.append('1')
+        else:
+            b.append('0')
+        nn <<= 1
+    chrbinmap[n] = ''.join(b)
+
+
+def to_bitfield_ipv4(ip):
+    # Convert a dotted-quad IPv4 string to a 32-char '0'/'1' bit string.
+    # Raises ValueError for a malformed address (wrong number of parts,
+    # non-numeric octet, or - via the chrbinmap lookup - octet > 255).
+    ip = ip.split('.')
+    if len(ip) != 4:
+        raise ValueError, "bad address"
+    b = []
+    for i in ip:
+        b.append(chrbinmap[int(i)])
+    return ''.join(b)
+
+def to_bitfield_ipv6(ip):
+    # Convert an IPv6 string to a 128-char '0'/'1' bit string.  Handles
+    # '::' zero-compression and an embedded dotted-quad IPv4 tail.
+    # Raises ValueError on malformed input.
+    b = ''
+    doublecolon = False
+
+    if not ip:
+        raise ValueError, "bad address"
+    if ip == '::':      # boundary handling
+        ip = ''
+    elif ip[:2] == '::':
+        # keep a single leading ':' so split(':') yields an empty group
+        # marking the compression point
+        ip = ip[1:]
+    elif ip[0] == ':':
+        raise ValueError, "bad address"
+    elif ip[-2:] == '::':
+        ip = ip[:-1]
+    elif ip[-1] == ':':
+        raise ValueError, "bad address"
+    for n in ip.split(':'):
+        if n == '':     # double-colon
+            if doublecolon:
+                # only one '::' is allowed per address
+                raise ValueError, "bad address"
+            doublecolon = True
+            # placeholder char; replaced by the zero-fill below
+            b += ':'
+            continue
+        if n.find('.') >= 0: # IPv4
+            n = to_bitfield_ipv4(n)
+            b += n + '0'*(32-len(n))
+            continue
+        # left-pad short groups with 'x', which hexbinmap maps to '0000'
+        n = ('x'*(4-len(n))) + n
+        for i in n:
+            b += hexbinmap[i]
+    if doublecolon:
+        # expand the ':' placeholder with enough zeros to reach 128 bits
+        # (129 because the placeholder itself is replaced)
+        pos = b.find(':')
+        b = b[:pos]+('0'*(129-len(b)))+b[pos+1:]
+    if len(b) != 128:   # always check size
+        raise ValueError, "bad address"
+    return b
+
+# 96-bit prefix of the IPv4-mapped IPv6 range (::ffff:0:0/96); used to
+# detect IPv6 addresses that actually encode an IPv4 address.
+ipv4addrmask = to_bitfield_ipv6('::ffff:0:0')[:96]
+
+class IP_List:
+    # Set of IPv4/IPv6 address prefixes, stored as sorted lists of
+    # '0'/'1' bit-string prefixes, supporting prefix-membership queries.
+    def __init__(self):
+        self.ipv4list = []
+        self.ipv6list = []
+
+    def __nonzero__(self):
+        # Truthy iff at least one range has been added.
+        return bool(self.ipv4list or self.ipv6list)
+
+
+    def append(self, ip, depth = 256):
+        # Add `ip` with prefix length `depth` (bits).  IPv4-mapped IPv6
+        # addresses are stored in the IPv4 list with the 96-bit mapping
+        # prefix stripped (callers are expected to pass depth >= 96).
+        if ip.find(':') < 0:        # IPv4
+            insort(self.ipv4list, to_bitfield_ipv4(ip)[:depth])
+        else:
+            b = to_bitfield_ipv6(ip)
+            if b.startswith(ipv4addrmask):
+                insort(self.ipv4list, b[96:][:depth-96])
+            else:
+                insort(self.ipv6list, b[:depth])
+
+
+    def includes(self, ip):
+        # True iff `ip` falls inside any stored prefix.  Only the entry
+        # immediately at/before the bisect position and its successors
+        # need checking because the lists are sorted.
+        if not (self.ipv4list or self.ipv6list):
+            return False
+        if ip.find(':') < 0:        # IPv4
+            b = to_bitfield_ipv4(ip)
+        else:
+            b = to_bitfield_ipv6(ip)
+            if b.startswith(ipv4addrmask):
+                b = b[96:]
+        if len(b) > 32:
+            l = self.ipv6list
+        else:
+            l = self.ipv4list
+        for map in l[bisect(l, b)-1:]:
+            if b.startswith(map):
+                return True
+            if map > b:
+                return False
+        return False
+
+
+    def read_fieldlist(self, file):   # reads a list from a file in the format 'ip/len <whatever>'
+        # Blank lines and '#' comments (whole-line or trailing) are
+        # ignored; unparseable entries produce a warning on stdout.
+        f = open(file, 'r')
+        while 1:
+            line = f.readline()
+            if not line:
+                break
+            line = line.strip().expandtabs()
+            if not line or line[0] == '#':
+                continue
+            try:
+                line, garbage = line.split(' ', 1)
+            except:
+                pass
+            try:
+                line, garbage = line.split('#', 1)
+            except:
+                pass
+            try:
+                ip, depth = line.split('/')
+            except:
+                ip = line
+                depth = None
+            try:
+                if depth is not None:                
+                    depth = int(depth)
+                # depth None -> append's default (full-length prefix)
+                self.append(ip, depth)
+            except:
+                print '*** WARNING *** could not parse IP range: '+line
+        f.close()
+
+
+    def set_intranet_addresses(self):
+        # Standard loopback, RFC 1918 private, link-local and
+        # site-local ranges.
+        self.append('127.0.0.1', 8)
+        self.append('10.0.0.0', 8)
+        self.append('172.16.0.0', 12)
+        self.append('192.168.0.0', 16)
+        self.append('169.254.0.0', 16)
+        self.append('::1')
+        self.append('fe80::', 16)
+        self.append('fec0::', 16)
+
+    def set_ipv4_addresses(self):
+        # Match every IPv4-mapped IPv6 address (::ffff:0:0/96).
+        self.append('::ffff:0:0', 96)
+
+def ipv6_to_ipv4(ip):
+    # Convert an IPv4-mapped IPv6 address string to dotted-quad form.
+    # Raises ValueError if `ip` is not in the ::ffff:0:0/96 range.
+    ip = to_bitfield_ipv6(ip)
+    if not ip.startswith(ipv4addrmask):
+        raise ValueError, "not convertible to IPv4"
+    # last 32 bits carry the IPv4 address, one octet per 8 bits
+    ip = ip[-32:]
+    x = ''
+    for i in range(4):
+        x += str(int(ip[:8], 2))
+        if i < 3:
+            x += '.'
+        ip = ip[8:]
+    return x
+
+def to_ipv4(ip):
+    # Normalize `ip` to dotted-quad IPv4.  Plain IPv4 input is validated
+    # and returned unchanged; IPv6 input must be IPv4-mapped.  Raises
+    # ValueError otherwise.
+    if is_ipv4(ip):
+        _valid_ipv4(ip)
+        return ip
+    return ipv6_to_ipv4(ip)
+
+def is_ipv4(ip):
+    # Cheap syntactic test: no colon means the string is treated as IPv4.
+    return ip.find(':') < 0
+
+def _valid_ipv4(ip):
+    # Validate a dotted-quad string; raises ValueError if malformed.
+    # chr(int(i)) relies on Python 2's chr() rejecting values outside
+    # 0..255, so out-of-range octets raise as well.
+    ip = ip.split('.')
+    if len(ip) != 4:
+        raise ValueError
+    for i in ip:
+        chr(int(i))
+
+def is_valid_ip(ip):
+    # True iff `ip` parses as a valid IPv4 or IPv6 address; never raises
+    # (empty/None and any parse failure simply return False).
+    try:
+        if not ip:
+            return False
+        if is_ipv4(ip):
+            _valid_ipv4(ip)
+            return True
+        to_bitfield_ipv6(ip)
+        return True
+    except:
+        return False
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/torrentlistparse.py b/instrumentation/next-share/BaseLib/Core/BitTornado/torrentlistparse.py
new file mode 100644 (file)
index 0000000..668c245
--- /dev/null
@@ -0,0 +1,38 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from binascii import unhexlify
+
+# Compatibility shim: define True/False as ints on ancient Python
+# versions where the built-in names are missing.
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+
+# parses a list of torrent hashes, in the format of one hash per line in hex format
+
+def parsetorrentlist(filename, parsed):
+    new_parsed = {}
+    added = {}
+    removed = parsed
+    f = open(filename, 'r')
+    while 1:
+        l = f.readline()
+        if not l:
+            break
+        l = l.strip()
+        try:
+            if len(l) != 40:
+                raise ValueError, 'bad line'
+            h = unhexlify(l)
+        except:
+            print '*** WARNING *** could not parse line in torrent list: '+l
+        if parsed.has_key(h):
+            del removed[h]
+        else:
+            added[h] = True
+        new_parsed[h] = True
+    f.close()
+    return (new_parsed, added, removed)
+
diff --git a/instrumentation/next-share/BaseLib/Core/BitTornado/zurllib.py b/instrumentation/next-share/BaseLib/Core/BitTornado/zurllib.py
new file mode 100644 (file)
index 0000000..7efdd90
--- /dev/null
@@ -0,0 +1,143 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+import sys
+from httplib import HTTPConnection, HTTPSConnection, HTTPException
+from urlparse import urlparse
+from bencode import bdecode
+from gzip import GzipFile
+from StringIO import StringIO
+from __init__ import product_name, version_short
+from traceback import print_exc,print_stack
+
+from BaseLib.Core.Utilities.timeouturlopen import find_proxy
+
+# User-Agent string sent with tracker HTTP requests.
+VERSION = product_name+'/'+version_short
+# Cap on recursive redirect following in urlopen._open().
+MAX_REDIRECTS = 10
+
+
+class btHTTPcon(HTTPConnection): # attempt to add automatic connection timeout
+    def connect(self):
+        # Connect as usual, then best-effort set a 30s socket timeout;
+        # failures (e.g. sockets without settimeout) are ignored.
+        HTTPConnection.connect(self)
+        try:
+            self.sock.settimeout(30)
+        except:
+            pass
+
+class btHTTPScon(HTTPSConnection): # attempt to add automatic connection timeout
+    def connect(self):
+        # Same 30s best-effort socket timeout as btHTTPcon, for HTTPS.
+        HTTPSConnection.connect(self)
+        try:
+            self.sock.settimeout(30)
+        except:
+            pass 
+
+class urlopen:
+    def __init__(self, url):
+        self.tries = 0
+        self._open(url.strip())
+        self.error_return = None
+
+    def _open(self, url):
+        try:
+            self.tries += 1
+            if self.tries > MAX_REDIRECTS:
+                raise IOError, ('http error', 500,
+                                "Internal Server Error: Redirect Recursion")
+            (scheme, netloc, path, pars, query, fragment) = urlparse(url)
+            if scheme != 'http' and scheme != 'https':
+                raise IOError, ('url error', 'unknown url type', scheme, url)
+            wanturl = path
+            if pars:
+                wanturl += ';'+pars
+            if query:
+                wanturl += '?'+query
+    #        if fragment:
+    
+            proxyhost = find_proxy(url)
+            if proxyhost is None:
+                desthost = netloc
+                desturl = wanturl
+            else:
+                desthost = proxyhost
+                desturl = scheme+'://'+netloc+wanturl
+            try:
+                self.response = None
+                if scheme == 'http':
+                    self.connection = btHTTPcon(desthost)
+                else:
+                    self.connection = btHTTPScon(desthost)
+                self.connection.request('GET', desturl, None,
+                                    { 'Host': netloc, 'User-Agent': VERSION,
+                                      'Accept-Encoding': 'gzip' } )
+                self.response = self.connection.getresponse()
+            except HTTPException, e:
+                print_exc()
+                raise IOError, ('http error', str(e))
+            status = self.response.status
+            if status in (301, 302):
+                try:
+                    self.connection.close()
+                except:
+                    pass
+                self._open(self.response.getheader('Location'))
+                return
+            if status != 200:
+                try:
+                    data = self._read()
+                    d = bdecode(data)
+                    if d.has_key('failure reason'):
+                        self.error_return = data
+                        return
+                except:
+                    pass
+                raise IOError, ('http error', status, self.response.reason)
+        except:
+            print_exc()
+
+
+    def read(self):
+        if self.error_return:
+            return self.error_return
+        return self._read()
+
+    def _read(self):
+        data = self.response.read()
+        if self.response.getheader('Content-Encoding', '').find('gzip') >= 0:
+            try:
+                compressed = StringIO(data)
+                f = GzipFile(fileobj = compressed)
+                data = f.read()
+            except:
+                raise IOError, ('http error', 'got corrupt response')
+        return data
+
+    def close(self):
+        self.connection.close()
+
+try:
+    import pycurl
+    
+    class curlopen:
+            def __init__(self,url):
+                
+                    print >>sys.stderr,"CURL",url
+                
+                    self.contents = ''
+                    self.c = pycurl.Curl()
+                    self.c.setopt(c.URL, url)
+                    self.c.setopt(c.WRITEFUNCTION, t.body_callback)
+                    self.c.perform()
+                    self.c.close()
+    
+            def body_callback(self, buf):
+                    self.contents = self.contents + buf
+    
+            def read(self):
+                return self.contents
+            
+            def close(self):
+                pass
+except:
+    pass
+    
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/TorrentCollecting.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/TorrentCollecting.py
new file mode 100644 (file)
index 0000000..a9288bd
--- /dev/null
@@ -0,0 +1,26 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+
+DEBUG = False
+    
+class SimpleTorrentCollecting:
+    """
+        Simplest torrent collecting policy: randomly collect a torrent when received
+        a buddycast message
+    """
+    
+    def __init__(self, metadata_handler, data_handler):
+        # Keep references to the handlers and their databases; cache_pool
+        # is created but not used by this simple policy.
+        self.metadata_handler = metadata_handler
+        self.data_handler = data_handler
+        self.torrent_db = data_handler.torrent_db
+        self.pref_db = data_handler.pref_db
+        self.cache_pool = {}
+        
+        
+    def trigger(self, permid, selversion, collect_candidate=None):
+        # Pick a torrent to collect from `permid` (optionally biased by
+        # collect_candidate) and request its metadata, if any was chosen.
+        infohash = self.torrent_db.selectTorrentToCollect(permid, collect_candidate)
+        #print >> sys.stderr, '*****-----------***** trigger torrent collecting', `infohash`
+        if infohash and self.metadata_handler:
+            self.metadata_handler.send_metadata_request(permid, infohash, selversion)
+
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/__init__.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/bartercast.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/bartercast.py
new file mode 100644 (file)
index 0000000..dea4dab
--- /dev/null
@@ -0,0 +1,343 @@
+# Written by Michel Meulpolder
+# see LICENSE.txt for license information
+import sys, os
+
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.Statistics.Logger import OverlayLogger
+from BaseLib.Core.BitTornado.BT1.MessageID import BARTERCAST #, KEEP_ALIVE
+from BaseLib.Core.CacheDB.CacheDBHandler import BarterCastDBHandler
+from BaseLib.Core.Utilities.utilities import *
+from traceback import print_exc
+from types import StringType, ListType, DictType
+from time import time, gmtime, strftime, ctime
+
+from BaseLib.Core.Overlay.permid import permid_for_user
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FIFTH
+
+
+# Reject incoming BarterCast messages larger than this (bytes).
+MAX_BARTERCAST_LENGTH = 10 * 1024 * 1024 # TODO: give this length a reasonable value
+# Number of top peers included in each outgoing message.
+NO_PEERS_IN_MSG = 10
+# Seconds between refreshes of the cached top-N peer list.
+REFRESH_TOPN_INTERVAL = 30 * 60
+
+DEBUG = False
+LOG = False
+
+def now():
+    # Current wall-clock time as an integer number of seconds.
+    return int(time())
+    
+class BarterCastCore:
+    # Core of the BarterCast protocol: periodically exchanges per-peer
+    # upload/download statistics with other peers over the overlay and
+    # records them in the BarterCast database.  Peers are temporarily
+    # blocked after each exchange to rate-limit the gossip.
+
+    ################################
+    def __init__(self, data_handler, overlay_bridge, log = '', dnsindb = None):
+    
+        if DEBUG:
+            print >> sys.stderr, "=================Initializing bartercast core"
+    
+        self.data_handler = data_handler
+        self.dnsindb = dnsindb
+        self.log = log
+        self.overlay_bridge = overlay_bridge
+        self.bartercastdb = BarterCastDBHandler.getInstance()
+        
+        self.network_delay = 30
+        self.send_block_list = {}
+        self.recv_block_list = {}
+        self.block_interval = 1*60*60   # block interval for a peer to barter   cast
+
+        # cached top-N peers by local traffic; refreshed periodically
+        self.topn = self.bartercastdb.getTopNPeers(NO_PEERS_IN_MSG, local_only = True)['top']
+        self.overlay_bridge.add_task(self.refreshTopN, REFRESH_TOPN_INTERVAL)
+        
+        if self.log:
+            self.overlay_log = OverlayLogger.getInstance(self.log)
+            
+        if LOG:
+            # NOTE(review): hard-coded developer path; only reachable
+            # when the module-level LOG flag is manually enabled.
+            self.logfile = '/Users/michel/packages/bartercast_dataset/bartercast42.log'
+            if not os.path.exists(self.logfile):
+                log = open(self.logfile, 'w')
+                log.close()
+
+
+    ################################
+    def refreshTopN(self):
+        # Recompute the cached top-N peer list and reschedule itself.
+
+        self.topn = self.bartercastdb.getTopNPeers(NO_PEERS_IN_MSG, local_only = True)['top']
+        self.overlay_bridge.add_task(self.refreshTopN, REFRESH_TOPN_INTERVAL)
+
+
+
+    ################################
+    def createAndSendBarterCastMessage(self, target_permid, selversion, active = False):
+        # Build, bencode and send a BarterCast message to target_permid,
+        # then block that peer from being sent to again for a while.
+        # `active` is currently unused.
+
+
+        # for older versions of Tribler (non-BarterCast): do nothing
+        if selversion <= OLPROTO_VER_FIFTH:
+            return
+
+        if DEBUG:
+            print >> sys.stderr, "===========bartercast: Sending BarterCast msg to ", self.bartercastdb.getName(target_permid)
+
+        # create a new bartercast message
+        bartercast_data = self.createBarterCastMessage(target_permid)
+        
+        if LOG:
+            self.logMsg(bartercast_data, target_permid, 'out', logfile = self.logfile)
+        
+        try:
+            bartercast_msg = bencode(bartercast_data)
+        except:
+            print_exc()
+            print >> sys.stderr, "error bartercast_data:", bartercast_data
+            return
+            
+        # send the message    
+        self.overlay_bridge.send(target_permid, BARTERCAST+bartercast_msg, self.bartercastSendCallback)
+
+        self.blockPeer(target_permid, self.send_block_list, self.block_interval)
+        
+            
+
+    ################################
+    def createBarterCastMessage(self, target_permid):
+        """ Create a bartercast message """
+        # Message layout: {'data': {permid: {'u': up, 'd': down}},
+        #                  'totals': (total_up, total_down)} covering the
+        # cached top-N peers for which we have a local record.
+
+        my_permid = self.bartercastdb.my_permid
+        local_top = self.topn
+        top_peers = map(lambda (permid, up, down): permid, local_top)
+        data = {}
+        totals = self.bartercastdb.getTotals()  # (total_up, total_down)
+        
+        for permid in top_peers:
+            
+            item = self.bartercastdb.getItem((my_permid, permid))
+            
+            if item is not None:
+                # retrieve what i have uploaded to permid
+                data_to = item['uploaded']
+                # retrieve what i have downloaded from permid
+                data_from = item['downloaded']
+
+                data[permid] = {'u': data_to, 'd': data_from}
+        
+        bartercast_data = {'data': data, 'totals': totals}
+        
+        return bartercast_data
+
+
+    ################################
+    def bartercastSendCallback(self, exc, target_permid, other=0):
+        # Completion callback for overlay_bridge.send(); logging only.
+        if exc is None:
+            if DEBUG:
+                print "bartercast: %s *** msg was sent successfully to peer %s" % (ctime(now()), self.bartercastdb.getName(target_permid))
+        else:
+            if DEBUG:
+                print "bartercast: %s *** warning - error in sending msg to %s" % (ctime(now()), self.bartercastdb.getName(target_permid))
+
+
+    ################################
+    def gotBarterCastMessage(self, recv_msg, sender_permid, selversion):
+        """ Received a bartercast message and handle it. Reply if needed """
+        # Returns True when the message was valid and processed, False
+        # on any validation failure.
+        
+        if DEBUG:
+            print >>sys.stderr,'bartercast: %s Received a BarterCast msg from %s'% (ctime(now()), self.bartercastdb.getName(sender_permid))
+            
+        if not sender_permid or sender_permid == self.bartercastdb.my_permid:
+            print >> sys.stderr, "bartercast: error - got BarterCastMsg from a None peer", \
+                        sender_permid, recv_msg
+            return False
+        
+        if MAX_BARTERCAST_LENGTH > 0 and len(recv_msg) > MAX_BARTERCAST_LENGTH:
+            print >> sys.stderr, "bartercast: warning - got large BarterCastMsg", len(recv_msg)
+            return False
+
+        bartercast_data = {}
+
+        try:
+            bartercast_data = bdecode(recv_msg)
+        except:
+            print >> sys.stderr, "bartercast: warning, invalid bencoded data"
+            return False
+            
+        try:    # check bartercast message
+            self.validBarterCastMsg(bartercast_data)
+        except RuntimeError, msg:
+            print >> sys.stderr, msg
+            return False
+            
+        if LOG:
+            self.logMsg(bartercast_data, sender_permid, 'in', logfile = self.logfile)
+       
+        data = bartercast_data['data']
+
+        if 'totals' in bartercast_data:
+            totals = bartercast_data['totals']
+        else:
+            totals = None
+
+        if DEBUG:
+            # NOTE(review): this timing branch calls handleBarterCastMsg
+            # WITHOUT `totals`, so debug builds silently drop the totals
+            # record — likely unintended; confirm before relying on it.
+            st = time()
+            self.handleBarterCastMsg(sender_permid, data)
+            et = time()
+            diff = et - st
+            print >>sys.stderr,"bartercast: HANDLE took %.4f" % diff
+        else:
+            self.handleBarterCastMsg(sender_permid, data, totals)
+       
+        if not self.isBlocked(sender_permid, self.send_block_list):
+            self.replyBarterCast(sender_permid, selversion)    
+        
+        return True
+
+
+
+    ################################
+    def validBarterCastMsg(self, bartercast_data):
+        # Validate message structure; raises RuntimeError with a
+        # description on failure, returns True on success.
+        # NOTE(review): the `return False` statements after each raise
+        # are unreachable dead code.
+
+        if not type(bartercast_data) == DictType:
+            raise RuntimeError, "bartercast: received data is not a dictionary"
+            return False
+            
+        if not bartercast_data.has_key('data'):
+            raise RuntimeError, "bartercast: 'data' key doesn't exist"
+            return False
+
+        if not type(bartercast_data['data']) == DictType:
+            raise RuntimeError, "bartercast: 'data' value is not dictionary"
+            return False
+        
+        for permid in bartercast_data['data'].keys():
+                        
+            if not bartercast_data['data'][permid].has_key('u') or \
+               not bartercast_data['data'][permid].has_key('d'):
+                raise RuntimeError, "bartercast: datafield doesn't contain 'u' or 'd' keys"
+                return False
+        
+        return True
+       
+    ################################
+    def handleBarterCastMsg(self, sender_permid, data, totals = None):
+        """ process bartercast data in database """
+        # Stores all received records and the sender's totals in the
+        # BarterCast DB, committing once at the end.
+        if DEBUG:
+            print >> sys.stderr, "bartercast: Processing bartercast msg from: ", self.bartercastdb.getName(sender_permid)
+            print >> sys.stderr, "totals: ", totals
+        
+        
+        permids = data.keys()
+        changed = False
+        
+        # 1. Add any unknown peers to the database in a single transaction
+        self.bartercastdb.addPeersBatch(permids)
+        
+
+        # 2. Add totals to database (without committing)
+        if totals != None and len(totals) == 2:
+            up = int(totals[0])
+            down = int(totals[1])
+            # totals are stored under the (sender, sender) key pair
+            self.bartercastdb.updateULDL((sender_permid, sender_permid), up, down, commit = False)
+            changed = True
+
+        # 3. Add all the received records to the database in a single transaction
+        datalen = len(permids)
+        for i in range(0,datalen):
+            permid = permids[i]
+            
+            data_to = data[permid]['u']
+            data_from = data[permid]['d']
+            
+            if DEBUG:
+                print >> sys.stderr, "bartercast: data: (%s, %s) up = %d down = %d" % (self.bartercastdb.getName(sender_permid), self.bartercastdb.getName(permid),\
+                                                                        data_to, data_from)
+
+            # update database sender->permid and permid->sender
+            #commit = (i == datalen-1)
+            self.bartercastdb.updateULDL((sender_permid, permid), data_to, data_from, commit = False)
+            changed = True
+            
+        if changed:
+            self.bartercastdb.commit()
+            
+            
+        # ARNODB: 
+        # get rid of index on DB? See where used
+
+
+    ################################
+    def replyBarterCast(self, target_permid, selversion):
+        """ Reply a bartercast message """
+        # Same call in both branches; DEBUG only adds timing output.
+
+        if DEBUG:
+            st = time()
+            self.createAndSendBarterCastMessage(target_permid, selversion)
+            et = time()
+            diff = et - st
+            print >>sys.stderr,"bartercast: CREATE took %.4f" % diff
+        else:
+            self.createAndSendBarterCastMessage(target_permid, selversion)
+
+
+    # Blocking functions (similar to BuddyCast):
+    
+    ################################
+    def isBlocked(self, peer_permid, block_list):
+        # True while the peer's block has not yet expired; expired
+        # entries are removed as a side effect.
+        if peer_permid not in block_list:
+            return False
+        unblock_time = block_list[peer_permid]
+        if now() >= unblock_time - self.network_delay:    # 30 seconds for network delay
+            block_list.pop(peer_permid)
+            return False
+        return True
+
+
+
+    ################################
+    def blockPeer(self, peer_permid, block_list, block_interval=None):
+        """ Add a peer to a block list """
+        # Records the absolute unblock time; default interval is
+        # self.block_interval (1 hour).
+
+        if block_interval is None:
+            block_interval = self.block_interval
+        unblock_time = now() + block_interval
+        block_list[peer_permid] = unblock_time
+        
+        if DEBUG:
+            print >>sys.stderr,'bartercast: %s Blocked peer %s'% (ctime(now()), self.bartercastdb.getName(peer_permid))
+
+
+    ################################
+    def logMsg(self, msg_data, msg_permid, in_or_out, logfile):
+        # Append a human-readable dump of the message (per-peer records
+        # and totals) to `logfile`, mirroring each line to stderr.
+        # in_or_out must be 'in' or 'out'; anything else is ignored.
+        
+        if in_or_out == 'in':
+            permid_from = permid_for_user(msg_permid) 
+        
+        elif in_or_out == 'out':
+            permid_from = 'LOCAL'
+            
+        else:
+            return
+            
+        timestamp = now()
+            
+        log = open(logfile, 'a')
+        string = '%.1f %s %s' % (timestamp, in_or_out, permid_for_user(msg_permid))
+        log.write(string + '\n')
+        print >> sys.stderr, string
+        
+        data = msg_data.get('data', [])
+        
+        for permid in data:
+            u = data[permid]['u']
+            d = data[permid]['d']
+            
+            string = '%.1f %s %s %d %d' % (timestamp, permid_from, permid_for_user(permid), u, d)
+            log.write(string + '\n')
+            print >> sys.stderr, string
+            
+        totals = msg_data.get('totals', None)
+
+        if totals != None:
+            (u, d) = totals
+            
+            string = '%.1f TOT %s %d %d' % (timestamp, permid_from, u, d)
+            log.write(string + '\n')
+            print >> sys.stderr, string
+            
+            
+        log.close()
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/buddycast.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/buddycast.py
new file mode 100644 (file)
index 0000000..f0f4082
--- /dev/null
@@ -0,0 +1,2706 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+#
+
+__fool_epydoc = 481
+"""
+    BuddyCast2 epidemic protocol for p2p recommendation and semantic clustering
+    
+Algorithm in LaTeX format:
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%  algorithm of the active peer   %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{figure*}[ht]
+\begin{center}
+\begin{algorithmic}[1]
+
+\LOOP
+\STATE wait($\Delta T$ time units) \COMMENT{15 seconds in current implementation}
+\STATE remove any peer from $B_S$ and $B_R$ if its block time was expired.
+\STATE keep connection with all peers in $C_T$, $C_R$ and $C_U$
+\IF{$idle\_loops > 0$}
+    \STATE $idle\_loops \leftarrow idle\_loops - 1$ \COMMENT{skip this loop for rate control}
+\ELSE
+    \IF{$C_C$ is empty}
+        \STATE $C_C \leftarrow$ select 5 peers recently seen from Mega Cache
+    \ENDIF
+    \STATE $Q \leftarrow$ select a most similar taste buddy or a random online peer from $C_C$
+    \STATE connectPeer($Q$)
+    \STATE block($Q$, $B_S$, 4hours)
+    \STATE remove $Q$ from $C_C$
+    \IF{$Q$ is connected successfully}
+        \STATE buddycast\_msg\_send $\leftarrow$ \textbf{createBuddycastMsg}()
+        \STATE send buddycast\_msg\_send to $Q$
+        \STATE receive buddycast\_msg\_recv from $Q$
+        \STATE $C_C \leftarrow$ fillPeers(buddycast\_msg\_recv)
+        \STATE \textbf{addConnectedPeer}($Q$) \COMMENT{add $Q$ into $C_T$, $C_R$ or $C_U$ according to its similarity}
+        \STATE blockPeer($Q$, $B_R$, 4hours)
+    \ENDIF
+
+\ENDIF
+\ENDLOOP
+
+\end{algorithmic}
+\caption{The protocol of an active peer.}
+\label{Fig:buddycast_algorithm}
+\end{center}
+\end{figure*}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%  algorithm of the passive peer  %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{figure*}[ht]
+\begin{center}
+\begin{algorithmic}[1]
+
+\LOOP
+    \STATE receive buddycast\_msg\_recv from $Q$
+    \STATE $C_C \leftarrow$ fillPeers(buddycast\_msg\_recv)
+    \STATE \textbf{addConnectedPeer}($Q$)
+    \STATE blockPeer($Q$, $B_R$, 4hours)
+    \STATE buddycast\_msg\_send $\leftarrow$ \textbf{createBuddycastMsg}()
+    \STATE send buddycast\_msg\_send to $Q$
+    \STATE blockPeer($Q$, $B_S$, 4hours)
+    \STATE remove $Q$ from $C_C$
+    \STATE $idle\_loops \leftarrow idle\_loops + 1$ \COMMENT{idle for a loop for
+    rate control}
+\ENDLOOP
+
+\end{algorithmic}
+\caption{The protocol of an passive peer.}
+\label{Fig:buddycast_algorithm}
+\end{center}
+\end{figure*}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%  algorithm of creating a buddycast message  %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{figure*}[ht]
+\begin{center}
+function \textbf{createBuddycastMsg}()
+\begin{algorithmic}
+    \STATE $My\_Preferences \leftarrow$ the most recently 50 preferences of the active peer
+    \STATE $Taste\_Buddies \leftarrow$ all peers from $C_T$
+    \STATE $Random\_Peers \leftarrow$ all peers from $C_R$
+    \STATE $buddycast\_msg\_send \leftarrow$ create an empty message
+    \STATE $buddycast\_msg\_send$ attaches the active peer's address and $My\_Preferences$
+    \STATE $buddycast\_msg\_send$ attaches addresses of $Taste\_Buddies$
+    \STATE $buddycast\_msg\_send$ attaches at most 10 preferences of each peer in $Taste\_Buddies$
+    \STATE $buddycast\_msg\_send$ attaches addresses of $Random\_Peers$
+\end{algorithmic}
+\caption{The function of creating a buddycast message}
+\label{Fig:buddycast_createBuddycastMsg}
+\end{center}
+\end{figure*}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%  algorithm of adding a peer into C_T or C_R or C_U  %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{figure*}[ht]
+\begin{center}
+function \textbf{addConnectedPeer}($Q$)
+\begin{algorithmic}
+    \IF{$Q$ is connectable}
+        \STATE $Sim_Q \leftarrow$ getSimilarity($Q$) \COMMENT{similarity between $Q$ and the active peer}
+        \STATE $Min_{Sim} \leftarrow$ similarity of the least similar peer in $C_T$
+        \IF{$Sim_Q \geq Min_{Sim}$ \textbf{or} ($C_T$ is not full \textbf{and} $Sim_Q>0$)}
+            \STATE $C_T \leftarrow C_T + Q$
+            \STATE move the least similar peer to $C_R$ if $C_T$ overloads
+        \ELSE
+            \STATE $C_R \leftarrow C_R + Q$
+            \STATE remove the oldest peer to $C_R$ if $C_R$ overloads
+        \ENDIF
+    \ELSE
+        \STATE $C_U \leftarrow C_U + Q$
+    \ENDIF
+
+\end{algorithmic}
+\caption{The function of adding a peer into $C_T$ or $C_R$}
+\label{Fig:buddycast_addConnectedPeer}
+\end{center}
+\end{figure*}
+
+"""
+"""
+
+BuddyCast 3:
+    No preferences for taste buddies; 
+    don't accept preferences of taste buddies from incoming message either
+    50 recent my prefs + 50 recent collected torrents + 50 ratings
+    
+Torrent info 
+    preferences: Recently downloaded torrents by the user {'seeders','leechers','check time'}
+    collected torrents: Recently collected torrents (include Subscribed torrents) 
+    #ratings: Recently rated torrents and their ratings (negative rating means this torrent was deleted) 
+Taste Buddies 
+    permid 
+    ip 
+    port 
+    similarity 
+Random Peers 
+    permid 
+    ip 
+    port 
+    similarity 
+
+"""
+
+import sys
+from random import sample, randint, shuffle
+from time import time, gmtime, strftime
+from traceback import print_exc,print_stack
+from sets import Set
+from array import array
+from bisect import insort
+from copy import deepcopy
+import gc
+import socket
+
+from BaseLib.Core.simpledefs import BCCOLPOLICY_SIMPLE
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import BUDDYCAST, BARTERCAST, KEEP_ALIVE, VOTECAST, CHANNELCAST
+from BaseLib.Core.Utilities.utilities import show_permid_short, show_permid,validPermid,validIP,validPort,validInfohash,readableBuddyCastMsg, hostname_or_ip2ip
+from BaseLib.Core.Utilities.unicode import dunno2unicode
+from BaseLib.Core.simpledefs import NTFY_ACT_MEET, NTFY_ACT_RECOMMEND, NTFY_MYPREFERENCES, NTFY_INSERT, NTFY_DELETE
+from BaseLib.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FIRST, OLPROTO_VER_SECOND, OLPROTO_VER_THIRD, OLPROTO_VER_FOURTH, OLPROTO_VER_FIFTH, OLPROTO_VER_SIXTH, OLPROTO_VER_SEVENTH, OLPROTO_VER_EIGHTH, OLPROTO_VER_ELEVENTH , OLPROTO_VER_CURRENT, OLPROTO_VER_LOWEST
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str, str2bin
+from similarity import P2PSim_Single, P2PSim_Full, P2PSimColdStart
+from TorrentCollecting import SimpleTorrentCollecting   #, TiT4TaTTorrentCollecting
+from BaseLib.Core.Statistics.Logger import OverlayLogger
+from BaseLib.Core.Statistics.Crawler import Crawler
+
+from threading import currentThread
+
+from bartercast import BarterCastCore
+from votecast import VoteCastCore
+from channelcast import ChannelCastCore
+
+DEBUG = False   # for errors
+debug = False # for status
+debugnic = False # for my temporary outputs
+unblock = 0
+
+# Nicolas: 10 KByte -- I set this to 1024 KByte.     
+# The term_id->term dictionary can become almost arbitrarily long
+# would be strange if buddycast stopped working once a user has done a lot of searches... 
+#
+# Arno, 2009-03-06: Too big: we don't want every peer to send out 1 MB messages 
+# every 15 secs. Set to 100K
+#
+# Nicolas, 2009-03-06: Ok this was really old. 10k in fact is enough with the new constraints on clicklog data
+MAX_BUDDYCAST_LENGTH = 10*1024    
+
+REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD = 100    # speedup finding >=4.1 peers in this version
+
+# used for datahandler.peers
+PEER_SIM_POS = 0
+PEER_LASTSEEN_POS = 1
+#PEER_PREF_POS = 2 #not needed since new similarity function
+
+def now():
+    return int(time())
+
+def ctime(t):
+    return strftime("%Y-%m-%d.%H:%M:%S", gmtime(t))
+
+def validBuddyCastData(prefxchg, nmyprefs=50, nbuddies=10, npeers=10, nbuddyprefs=10, selversion=0):
+    
+    #
+    #
+    # Arno: TODO: make check version dependent
+    #
+    #
+    
+    def validPeer(peer):
+        validPermid(peer['permid'])
+        validIP(peer['ip'])
+        validPort(peer['port'])
+
+    def validHisPeer(peer):
+        validIP(peer['ip'])
+        validPort(peer['port'])
+
+    
+    def validPref(pref, num):
+        if not (isinstance(prefxchg, list) or isinstance(prefxchg, dict)):
+            raise RuntimeError, "bc: invalid pref type " + str(type(prefxchg))
+        if num > 0 and len(pref) > num:
+            raise RuntimeError, "bc: length of pref exceeds " + str((len(pref), num))
+        for p in pref:
+            validInfohash(p)
+            
+    validHisPeer(prefxchg)
+    if not (isinstance(prefxchg['name'], str)):
+        raise RuntimeError, "bc: invalid name type " + str(type(prefxchg['name']))
+    
+    # Nicolas: create a validity check that doesn't have to know about the version
+    # just found out this function is not called anymore. well if it gets called one day, it should handle both
+    prefs = prefxchg['preferences']
+    if prefs:
+        # >= OLPROTO_VER_EIGHT
+        if type(prefs[0])==list:
+            # list of lists: this is the new wire protocol. entry 0 of each list contains infohash
+            validPref([pref[0] for pref in prefs], nmyprefs)
+        else:
+            # old style
+            validPref(prefs, nmyprefs)
+    
+    if len(prefxchg['taste buddies']) > nbuddies:
+        raise RuntimeError, "bc: length of prefxchg['taste buddies'] exceeds " + \
+                str(len(prefxchg['taste buddies']))
+    for b in prefxchg['taste buddies']:
+        validPeer(b)
+        #validPref(b['preferences'], nbuddyprefs)    # not used from version 4 
+        
+    if len(prefxchg['random peers']) > npeers:
+        raise RuntimeError, "bc: length of random peers " + \
+                str(len(prefxchg['random peers']))
+    for b in prefxchg['random peers']:
+        validPeer(b)
+        
+    if 'collected torrents' in prefxchg:
+        # 'collected torrents' must contain a list with 20 byte infohashes
+        if not isinstance(prefxchg['collected torrents'], list):
+            raise RuntimeError, "bc: invalid 'collected torrents' type " + str(type(prefxchg['collected torrents']))
+        for value in prefxchg['collected torrents']:
+            if selversion >= OLPROTO_VER_ELEVENTH:
+                if not isinstance(value, list):
+                    raise RuntimeError, "bc: invalid 'collected torrents' type of list elem should be list, not " + str(type(value))
+                # infohash
+                # number of seeders
+                # number of leechers
+                # age of checking
+                # number of sources seen
+                if len(value) != 5:
+                    raise RuntimeError, "bc: invalid 'collected torrents' length of list elem should be 5"
+                infohash = value[0]
+                seeders = value[1]
+                leechers = value[2]
+                age = value[3]
+                sources = value[4]
+                if not len(infohash) == 20:
+                    raise RuntimeError, "bc: invalid infohash length " + str(len(infohash))
+            else: 
+                infohash = value
+                if not isinstance(infohash, str):
+                    raise RuntimeError, "bc: invalid infohash type " + str(type(infohash))
+                if not len(infohash) == 20:
+                    raise RuntimeError, "bc: invalid infohash length " + str(len(infohash))
+        
+    return True
+
+
+class BuddyCastFactory:
+    __single = None
+    
+    def __init__(self, superpeer=False, log=''):
+        if BuddyCastFactory.__single:
+            raise RuntimeError, "BuddyCastFactory is singleton"
+        BuddyCastFactory.__single = self 
+        self.registered = False
+        self.buddycast_core = None
+        self.buddycast_interval = 15    # MOST IMPORTANT PARAMETER
+        self.superpeer = superpeer
+        self.log = log
+        self.running = False
+        self.data_handler = None
+        self.started = False    # did call do_buddycast() at least once 
+        self.max_peers = 2500   # was 2500
+        self.ranonce = False # Nicolas: had the impression that BuddyCast can be tested more reliably if I wait until it has gone through buddycast_core.work() successfully once
+        if self.superpeer:
+            print >>sys.stderr,"bc: Starting in SuperPeer mode"
+        
+    def getInstance(*args, **kw):
+        if BuddyCastFactory.__single is None:
+            BuddyCastFactory(*args, **kw)
+        return BuddyCastFactory.__single
+    getInstance = staticmethod(getInstance)
+    
+    def register(self, overlay_bridge, launchmany, errorfunc, 
+                 metadata_handler, torrent_collecting_solution, running,
+                 max_peers=2500,amcrawler=False):
+        if self.registered:
+            return
+        self.overlay_bridge = overlay_bridge
+        self.launchmany = launchmany
+        self.metadata_handler = metadata_handler
+        self.torrent_collecting_solution = torrent_collecting_solution
+        self.errorfunc = errorfunc
+        
+        # BuddyCast is always started, but only active when this var is set.
+        self.running = bool(running)
+        self.max_peers = max_peers
+        self.amcrawler = amcrawler
+        
+        self.registered = True
+
+    def register2(self):
+        # Arno: only start using overlay thread when normal init is finished to
+        # prevent concurrencty on singletons
+        if self.registered:
+            if debug:
+                print >> sys.stderr, "bc: Register BuddyCast", currentThread().getName()
+            self.overlay_bridge.add_task(self.olthread_register, 0)
+
+    def olthread_register(self, start=True):
+        if debug:
+            print >> sys.stderr, "bc: OlThread Register", currentThread().getName()
+            
+        self.data_handler = DataHandler(self.launchmany, self.overlay_bridge, max_num_peers=self.max_peers) 
+        
+        # ARNOCOMMENT: get rid of this dnsindb / get_dns_from_peerdb abuse off SecureOverlay
+        self.bartercast_core = BarterCastCore(self.data_handler, self.overlay_bridge, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb)
+        
+        self.votecast_core = VoteCastCore(self.data_handler, self.overlay_bridge, self.launchmany.session, self.getCurrrentInterval, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb)
+        self.channelcast_core = ChannelCastCore(self.data_handler, self.overlay_bridge, self.launchmany.session, self.getCurrrentInterval, self.log, self.launchmany.secure_overlay.get_dns_from_peerdb)
+            
+        self.buddycast_core = BuddyCastCore(self.overlay_bridge, self.launchmany, 
+               self.data_handler, self.buddycast_interval, self.superpeer,
+               self.metadata_handler, self.torrent_collecting_solution, self.bartercast_core, self.votecast_core, self.channelcast_core, self.log, self.amcrawler)
+        
+        self.data_handler.register_buddycast_core(self.buddycast_core)
+        
+        if start:
+            self.start_time = now()
+            # Arno, 2007-02-28: BC is now started self.buddycast_interval after client
+            # startup. This is assumed to give enough time for UPnP to open the firewall
+            # if any. So when you change this time, make sure it allows for UPnP to
+            # do its thing, or add explicit coordination between UPnP and BC.
+            # See BitTornado/launchmany.py
+            self.overlay_bridge.add_task(self.data_handler.postInit, 0)
+            self.overlay_bridge.add_task(self.doBuddyCast, 0.1)
+            # Arno: HYPOTHESIS: if set to small, we'll only ask superpeers at clean start.
+            if self.data_handler.torrent_db.size() > 0:
+                waitt = 1.0
+            else:
+                waitt = 3.0
+            self.overlay_bridge.add_task(self.data_handler.initRemoteSearchPeers,waitt)
+            
+            #Nitin: While booting up, we try to update the channels that we are subscribed to
+            #       after 6 seconds initially and later, at every 2 hour interval
+            self.overlay_bridge.add_task(self.channelcast_core.updateMySubscribedChannels, 6)
+            
+            print >> sys.stderr, "BuddyCast starts up",waitt
+        
+    def doBuddyCast(self):
+        if not self.running:
+            return
+        
+        if debug:
+            print >>sys.stderr,"bc: doBuddyCast!", currentThread().getName()
+        
+        # Reschedule ourselves for next round
+        buddycast_interval = self.getCurrrentInterval()
+        self.overlay_bridge.add_task(self.doBuddyCast, buddycast_interval)
+        if not self.started:
+            self.started = True
+        # Do our thang.
+        self.buddycast_core.work()
+        self.ranonce = True # Nicolas: now we can start testing and stuff works better
+        
+    def pauseBuddyCast(self):
+        self.running = False
+        
+    def restartBuddyCast(self):
+        if self.registered and not self.running:
+            self.running = True
+            self.doBuddyCast()
+        
+    def getCurrrentInterval(self):
+        """
+        install [#(peers - superpeers)==0] & start < 2min: interval = 1
+        start < 30min: interval = 5
+        start > 24hour: interval = 60
+        other: interval = 15
+        """
+        
+        #return 3    ### DEBUG, remove it before release!!
+        
+        past = now() - self.start_time
+        if past < 2*60:
+            if len(self.buddycast_core.connected_connectable_peers)<10:
+                interval = 0.2                
+            elif self.data_handler.get_npeers() < 20:
+                interval = 2
+            else:
+                interval = 5
+        elif past < 30*60:
+            if len(self.buddycast_core.connected_connectable_peers)<10:
+                interval = 2
+            else:                        
+                interval = 5
+        elif past > 24*60*60:
+            interval = 60
+        else:
+            interval = 15
+        return interval
+        
+        
+    def handleMessage(self, permid, selversion, message):
+        
+        if not self.registered or not self.running:
+            if DEBUG:
+                print >> sys.stderr, "bc: handleMessage got message, but we're not enabled or running"
+            return False
+        
+        t = message[0]
+        
+        if t == BUDDYCAST:
+            return self.gotBuddyCastMessage(message[1:], permid, selversion)
+        elif t == KEEP_ALIVE:
+            if message[1:] == '':
+                return self.gotKeepAliveMessage(permid)
+            else:
+                return False
+            
+        elif t == VOTECAST:
+            if DEBUG:
+                print >> sys.stderr, "bc: Received votecast message"
+            if self.votecast_core != None:
+                return self.votecast_core.gotVoteCastMessage(message[1:], permid, selversion)
+        elif t == CHANNELCAST:
+            if DEBUG:
+                print >> sys.stderr, "bc: Received channelcast message"
+            if self.channelcast_core != None:
+                return self.channelcast_core.gotChannelCastMessage(message[1:], permid, selversion)           
+                
+        elif t == BARTERCAST:
+            if DEBUG:
+                print >> sys.stderr, "bc: Received bartercast message"
+            if self.bartercast_core != None:
+                return self.bartercast_core.gotBarterCastMessage(message[1:], permid, selversion)
+            
+        else:
+            if DEBUG:
+                print >> sys.stderr, "bc: wrong message to buddycast", ord(t), "Round", self.buddycast_core.round
+            return False
+        
+    def gotBuddyCastMessage(self, msg, permid, selversion):
+        if self.registered and self.running:
+            return self.buddycast_core.gotBuddyCastMessage(msg, permid, selversion)
+        else:
+            return False
+    
+    def gotKeepAliveMessage(self, permid):
+        if self.registered and self.running:
+            return self.buddycast_core.gotKeepAliveMessage(permid)
+        else:
+            return False
+    
+    def handleConnection(self,exc,permid,selversion,locally_initiated):
+        
+        if DEBUG:
+            print >> sys.stderr, "bc: handleConnection",exc,show_permid_short(permid),selversion,locally_initiated,currentThread().getName()
+
+        if not self.registered:
+            return
+            
+        if DEBUG:
+            nconn = 0
+            conns = self.buddycast_core.connections
+            print >> sys.stderr, "\nbc: conn in buddycast", len(conns)
+            for peer_permid in conns:
+                _permid = show_permid_short(peer_permid)
+                nconn += 1
+                print >> sys.stderr, "bc: ", nconn, _permid, conns[peer_permid]
+                
+        if self.running or exc is not None:    # if not running, only close connection
+            self.buddycast_core.handleConnection(exc,permid,selversion,locally_initiated)
+            
+    def addMyPref(self, torrent):
+        """ Called by OverlayThread (as should be everything) """
+        if self.registered:
+            self.data_handler.addMyPref(torrent)
+        
+    def delMyPref(self, torrent):
+        if self.registered:
+            self.data_handler.delMyPref(torrent)
+
+        
+    
+class BuddyCastCore:
+     
+    TESTASSERVER = False # for unit testing
+    
+    def __init__(self, overlay_bridge, launchmany, data_handler, 
+                 buddycast_interval, superpeer, 
+                 metadata_handler, torrent_collecting_solution, bartercast_core, votecast_core, channelcast_core, log=None, amcrawler=False):
+        self.overlay_bridge = overlay_bridge
+        self.launchmany = launchmany
+        self.data_handler = data_handler
+        self.buddycast_interval = buddycast_interval
+        self.superpeer = superpeer
+        #print_stack()
+        #print >> sys.stderr, 'debug buddycast'
+        #superpeer    # change it for superpeers
+        #self.superpeer_set = Set(self.data_handler.getSuperPeers())
+        self.log = log
+        self.dialback = DialbackMsgHandler.getInstance()
+
+        self.ip = self.data_handler.getMyIp()
+        self.port = self.data_handler.getMyPort()
+        self.permid = self.data_handler.getMyPermid()
+        self.nameutf8 = self.data_handler.getMyName().encode("UTF-8")
+        
+        # --- parameters ---
+        #self.timeout = 5*60
+        self.block_interval = 4*60*60   # block interval for a peer to buddycast
+        self.short_block_interval = 4*60*60    # block interval if failed to connect the peer
+        self.num_myprefs = 50       # num of my preferences in buddycast msg 
+        self.max_collected_torrents = 50    # num of recently collected torrents (from BuddyCast 3)
+        self.num_tbs = 10           # num of taste buddies in buddycast msg 
+        self.num_tb_prefs = 10      # num of taset buddy's preferences in buddycast msg 
+        self.num_rps = 10           # num of random peers in buddycast msg  
+        # time to check connection and send keep alive message
+        #self.check_connection_round = max(1, 120/self.buddycast_interval)    
+        self.max_conn_cand = 100 # max number of connection candidates
+        self.max_conn_tb = 10    # max number of connectable taste buddies
+        self.max_conn_rp = 10    # max number of connectable random peers
+        self.max_conn_up = 10    # max number of unconnectable peers
+        self.bootstrap_num = 10   # max number of peers to fill when bootstrapping
+        self.bootstrap_interval = 5*60    # 5 min
+        self.network_delay = self.buddycast_interval*2    # 30 seconds
+        self.check_period = 120    # how many seconds to send keep alive message and check updates
+        self.num_search_cand = 10 # max number of remote search peer candidates
+        self.num_remote_peers_in_msg = 2 # number of remote search peers in msg
+        
+        # --- memory ---
+        self.send_block_list = {}           # permid:unlock_time 
+        self.recv_block_list = {}
+        self.connections = {}               # permid: overlay_version
+        self.connected_taste_buddies = []   # [permid]
+        self.connected_random_peers = []    # [permid]
+        self.connected_connectable_peers = {}    # permid: {'connect_time', 'ip', 'port', 'similarity', 'oversion', 'num_torrents'} 
+        self.connected_unconnectable_peers = {}    # permid: connect_time
+        self.connection_candidates = {}     # permid: last_seen
+        self.remote_search_peer_candidates = []    # [last_seen,permid,selversion], sorted, the first one in the list is the oldest one
+        
+        # --- stats ---
+        self.target_type = 0
+        self.next_initiate = 0
+        self.round = 0     # every call to work() is a round
+        self.bootstrapped = False    # bootstrap once every 1 hours
+        self.bootstrap_time = 0  # number of times to bootstrap
+        self.total_bootstrapped_time = 0
+        self.last_bootstrapped = now()    # bootstrap time of the last time
+        self.start_time = now()
+        self.last_check_time = 0
+        
+        # --- dependent modules ---
+        self.metadata_handler = metadata_handler
+        self.torrent_collecting = None
+        if torrent_collecting_solution == BCCOLPOLICY_SIMPLE:
+            self.torrent_collecting = SimpleTorrentCollecting(metadata_handler, data_handler)
+
+        # -- misc ---
+        self.dnsindb = launchmany.secure_overlay.get_dns_from_peerdb
+        if self.log:
+            self.overlay_log = OverlayLogger.getInstance(self.log)
+            
+        # Bartercast
+        self.bartercast_core = bartercast_core
+        #self.bartercast_core.buddycast_core = self    
+
+        self.votecast_core = votecast_core
+        self.channelcast_core = channelcast_core
+
+        # Crawler
+        self.amcrawler = amcrawler
+        
+                            
+    def get_peer_info(self, target_permid, include_permid=True):
+        
+        if not target_permid:
+            return ' None '
+        dns = self.dnsindb(target_permid)
+        if not dns:
+            return ' None '
+        try:
+            ip = dns[0]
+            port = dns[1]
+            sim = self.data_handler.getPeerSim(target_permid)
+            if include_permid:
+                s_pid = show_permid_short(target_permid)
+                return ' %s %s:%s %.3f ' % (s_pid, ip, port, sim)
+            else:
+                return ' %s:%s %.3f' % (ip, port, sim)
+        except:
+            return ' ' + repr(dns) + ' '
+        
+    def work(self):
+        """
+            The worker of buddycast epidemic protocol.
+            In every round, it selects a target and initates a buddycast exchange,
+            or idles due to replying messages in the last rounds.
+        """
+        
+        try:
+            self.round += 1
+            if DEBUG:
+                print >> sys.stderr, 'bc: Initiate exchange'
+            self.print_debug_info('Active', 2)
+            if self.log:
+                nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu = self.get_stats()
+                self.overlay_log('BUCA_STA', self.round, (nPeer,nPref,nCc), (nBs,nBr), (nSO,nCo), (nCt,nCr,nCu))
+        
+            self.print_debug_info('Active', 3)
+            #print >> sys.stderr, 'bc: ************ working buddycast 2'
+            self.updateSendBlockList()
+            
+            _now = now()
+            if _now - self.last_check_time >= self.check_period:
+                self.print_debug_info('Active', 4)
+                self.keepConnections()
+                #self.data_handler.checkUpdate()
+                gc.collect()
+                self.last_check_time = _now
+            
+            if self.next_initiate > 0:
+                # It replied some meesages in the last rounds, so it doesn't initiate Buddycast
+                self.print_debug_info('Active', 6)
+                self.next_initiate -= 1
+            else:
+                if len(self.connection_candidates) == 0:
+                    self.booted = self._bootstrap(self.bootstrap_num)
+                    self.print_debug_info('Active', 9)
+        
+                # It didn't reply any message in the last rounds, so it can initiate BuddyCast
+                if len(self.connection_candidates) > 0:
+                    r, target_permid = self.selectTarget()
+                    self.print_debug_info('Active', 11, target_permid, r=r)
+                    self.startBuddyCast(target_permid)
+                
+            if debug:
+                print
+        except:
+            print_exc()
+        
+     # -------------- bootstrap -------------- #
+    def _bootstrap(self, number):
+        """ Select a number of peers from recent online peers which are not
+            in send_block_list to fill connection_candidates.
+            When to call this function is an issue to study.
+        """
+        
+        _now = now()
+        # bootstrapped recently, so wait for a while
+        if self.bootstrapped and _now - self.last_bootstrapped < self.bootstrap_interval:
+            self.bootstrap_time = 0    # let it read the most recent peers next time
+            return -1
+        
+        #ARNODB: self.data_handler.peers is a map from peer_id to something, i.e., not
+        # permid. send_block_list is a list of permids
+        send_block_list_ids = []
+        for permid in self.send_block_list:
+            peer_id = self.data_handler.getPeerID(permid)
+            send_block_list_ids.append(peer_id)
+        
+        target_cands_ids = Set(self.data_handler.peers) - Set(send_block_list_ids)
+        recent_peers_ids = self.selectRecentPeers(target_cands_ids, number, 
+                                              startfrom=self.bootstrap_time*number)
+        
+        for peer_id in recent_peers_ids:
+            last_seen = self.data_handler.getPeerIDLastSeen(peer_id)
+            self.addConnCandidate(self.data_handler.getPeerPermid(peer_id), last_seen)
+        self.limitConnCandidate()
+        
+        self.bootstrap_time += 1
+        self.total_bootstrapped_time += 1
+        self.last_bootstrapped = _now
+        if len(self.connection_candidates) < self.bootstrap_num:
+            self.bootstrapped = True    # don't reboot until self.bootstrap_interval later
+        else:  
+            self.bootstrapped = False    # reset it to allow read more peers if needed
+        return 1
+
+    def selectRecentPeers(self, cand_ids, number, startfrom=0):
+        """ select a number of most recently online peers
+        @return a list of peer_ids
+        """
+        
+        if not cand_ids:
+            return []
+        peerids = []
+        last_seens = []
+        for peer_id in cand_ids:
+            peerids.append(peer_id)
+            last_seens.append(self.data_handler.getPeerIDLastSeen(peer_id))
+        npeers = len(peerids)
+        if npeers == 0:
+            return []
+        aux = zip(last_seens, peerids)
+        aux.sort()
+        aux.reverse()
+        peers = []
+        i = 0
+        
+        # roll back when startfrom is bigger than npeers
+        startfrom = startfrom % npeers    
+        endat = startfrom + number
+        for _, peerid in aux[startfrom:endat]:
+            peers.append(peerid)
+        return peers
+            
+    def addConnCandidate(self, peer_permid, last_seen):
+        """ add a peer to connection_candidates, and only keep a number of
+            the most fresh peers inside.
+            Peers currently in the send block list, and ourselves, are
+            silently ignored.  The actual pruning to max_conn_cand is done
+            separately by limitConnCandidate().
+        """
+        
+        if self.isBlocked(peer_permid, self.send_block_list) or peer_permid == self.permid:
+            return
+        # connection_candidates maps permid -> last_seen timestamp
+        self.connection_candidates[peer_permid] = last_seen
+        
+    def limitConnCandidate(self):
+        """ Shrink connection_candidates down to max_conn_cand entries by
+            evicting the peers with the smallest (oldest) last_seen first.
+        """
+        if len(self.connection_candidates) > self.max_conn_cand:
+            # build (last_seen, permid) pairs; sorting ascending puts the
+            # stalest candidates at the front of the list
+            tmp_list = zip(self.connection_candidates.values(),self.connection_candidates.keys())
+            tmp_list.sort()
+            while len(self.connection_candidates) > self.max_conn_cand:
+                ls,peer_permid = tmp_list.pop(0)
+                self.removeConnCandidate(peer_permid)
+        
+    def removeConnCandidate(self, peer_permid):
+        """ Remove a peer from connection_candidates; no-op if absent. """
+        if peer_permid in self.connection_candidates:
+            self.connection_candidates.pop(peer_permid)
+        
+    # -------------- routines in each round -------------- #
+    def updateSendBlockList(self):
+        """ Remove expired peers in send block list """
+        
+        _now = now()
+        # Python 2: .keys() returns a fresh list, so popping from the dict
+        # while iterating that list is safe here
+        for p in self.send_block_list.keys():    # don't call isBlocked() for performance reason
+            # same expiry test as isBlocked(): allow network_delay slack
+            if _now >= self.send_block_list[p] - self.network_delay:
+                if debug:
+                    print >>sys.stderr,"bc: *** unblock peer in send block list" + self.get_peer_info(p) + \
+                        "expiration:", ctime(self.send_block_list[p])
+                self.send_block_list.pop(p)
+                    
+    def keepConnections(self):
+        """ Close expired connections, and extend the expiration of 
+            peers in connection lists
+        """
+
+        timeout_list = []
+        for peer_permid in self.connections:
+            # we don't close connection here, because if no incoming msg,
+            # sockethandler will close connection in 5-6 min.
+            
+            # only refresh peers we actually track in a connection list
+            if (peer_permid in self.connected_connectable_peers or \
+                 peer_permid in self.connected_unconnectable_peers):   
+                timeout_list.append(peer_permid)
+
+        # 04/08/10 boudewijn: a crawler can no longer disconnect.
+        # Staying connected means that the crawler is returned in
+        # buddycast messages, otherwise not.
+        for peer_permid in timeout_list:
+            self.sendKeepAliveMsg(peer_permid)
+                
+    def sendKeepAliveMsg(self, peer_permid):
+        """ Send keep alive message to a peer, and extend its expiration """
+        
+        if self.isConnected(peer_permid):
+            # connections maps permid -> negotiated overlay protocol version
+            overlay_protocol_version = self.connections[peer_permid]
+            if overlay_protocol_version >= OLPROTO_VER_THIRD:
+                # From this version, support KEEP_ALIVE message in secure overlay
+                keepalive_msg = ''
+                self.overlay_bridge.send(peer_permid, KEEP_ALIVE+keepalive_msg, 
+                                     self.keepaliveSendCallback)
+            # NOTE(review): this debug print runs even when the version gate
+            # above skipped the send — the message may not have been sent
+            if debug:
+                print >>sys.stderr,"bc: *** Send keep alive to peer", self.get_peer_info(peer_permid),  \
+                    "overlay version", overlay_protocol_version
+        
+    def isConnected(self, peer_permid):
+        """ Return True iff the peer is in the overlay connection table. """
+        return peer_permid in self.connections
+    
+    def keepaliveSendCallback(self, exc, peer_permid, other=0):        
+        """ Overlay-bridge callback for KEEP_ALIVE sends: on failure,
+            close the connection to the peer (exc is None on success).
+        """
+        if exc is None:
+            pass
+        else:
+            if debug:
+                print >> sys.stderr, "bc: error - send keep alive msg", exc, \
+                self.get_peer_info(peer_permid), "Round", self.round
+            self.closeConnection(peer_permid, 'keepalive:'+str(exc))
+        
+    def gotKeepAliveMessage(self, peer_permid):
+        """ Handle an incoming KEEP_ALIVE; return True to keep the
+            connection, False if the sender is not a known connection.
+        """
+        if self.isConnected(peer_permid):
+            if debug:
+                print >> sys.stderr, "bc: Got keep alive from", self.get_peer_info(peer_permid)
+            # 04/08/10 boudewijn: a crawler can no longer disconnect.
+            # Staying connected means that the crawler is returned in
+            # buddycast messages, otherwise not.
+            return True
+        else:
+            # NOTE(review): uses DEBUG here but lowercase 'debug' above —
+            # two distinct flags appear to be in use; verify intent
+            if DEBUG:
+                print >> sys.stderr, "bc: error - got keep alive from a not connected peer. Round", \
+                    self.round
+            return False
+        
+    # -------------- initiate buddycast, active thread -------------- #
+    # ------ select buddycast target ------ #
+    def selectTarget(self):
+        """ select a most similar taste buddy or a most likely online random peer 
+            from connection candidates list by 50/50 chance to initate buddycast exchange.
+            Alternates strictly between the two modes via self.target_type
+            (0 = taste buddy, 1 = random peer).
+        @return (target_type, target_permid); target_permid may be None
+            when no suitable candidate exists
+        """
+        
+        def selectTBTarget():
+            # Select the most similar taste buddy 
+            # among candidates that are known in the peer db (getPeerID truthy)
+            max_sim = (-1, None)
+            for permid in self.connection_candidates:
+                peer_id = self.data_handler.getPeerID(permid)
+                if peer_id:
+                    sim = self.data_handler.getPeerSim(permid)
+                    max_sim = max(max_sim, (sim, permid))
+            selected_permid = max_sim[1]
+            if selected_permid is None:
+                return None
+            else:
+                return selected_permid
+            
+        def selectRPTarget():
+            # Randomly select a random peer 
+            # candidates unknown to the peer db are purged as we go
+            # NOTE(review): sample() is applied to a dict here; confirm the
+            # random.sample variant in use accepts a mapping population
+            selected_permid = None
+            while len(self.connection_candidates) > 0:
+                selected_permid = sample(self.connection_candidates, 1)[0]
+                selected_peer_id = self.data_handler.getPeerID(selected_permid)
+                if selected_peer_id is None:
+                    self.removeConnCandidate(selected_permid)
+                    selected_permid = None
+                elif selected_peer_id:
+                    break
+                
+            return selected_permid
+    
+        # flip 0 <-> 1 each call so TB and RP targets strictly alternate
+        self.target_type = 1 - self.target_type
+        if self.target_type == 0:  # select a taste buddy
+            target_permid = selectTBTarget()
+        else:       # select a random peer
+            target_permid = selectRPTarget()
+
+        return self.target_type, target_permid
+    
+    # ------ start buddycast exchange ------ #
+    def startBuddyCast(self, target_permid):
+        """ Connect to a peer, create a buddycast message and send it.
+            No-op for an empty target, ourselves, or a send-blocked peer.
+            The actual message is built in buddycastConnectCallback once
+            the connection attempt completes.
+        """
+        
+        if not target_permid or target_permid == self.permid:
+            return
+        
+        if not self.isBlocked(target_permid, self.send_block_list):
+            if debug:
+                print >> sys.stderr, 'bc: connect a peer', show_permid_short(target_permid), currentThread().getName()
+            self.overlay_bridge.connect(target_permid, self.buddycastConnectCallback)
+                        
+            self.print_debug_info('Active', 12, target_permid)
+            if self.log:
+                dns = self.dnsindb(target_permid)
+                if dns:
+                    ip,port = dns
+                    self.overlay_log('CONN_TRY', ip, port, show_permid(target_permid))
+            
+            # always block the target for a while not matter succeeded or not
+            #self.blockPeer(target_permid, self.send_block_list, self.short_block_interval)
+            self.print_debug_info('Active', 13, target_permid)
+
+            # remove it from candidates no matter if it has been connected
+            self.removeConnCandidate(target_permid)
+            self.print_debug_info('Active', 14, target_permid)
+
+        else:
+            if DEBUG:
+                print >> sys.stderr, 'buddycast: peer', self.get_peer_info(target_permid), \
+                    'is blocked while starting buddycast to it.', "Round", self.round
+        
+    def buddycastConnectCallback(self, exc, dns, target_permid, selversion):
+        """ Overlay-bridge connect callback: on success (exc is None)
+            register the connection and send the active buddycast message
+            using the negotiated protocol version 'selversion'.
+        """
+        if exc is None:
+            self.addConnection(target_permid, selversion, True)
+
+            ## Create message depending on selected protocol version
+            try:
+                # 04/08/10 boudewijn: the self.isConnected check fails
+                # in certain threading conditions, namely when the
+                # callback to self.buddycastConnectCallback is made
+                # before the callback to self.handleConnection where
+                # the peer is put in the connection list.  However,
+                # since self.buddycastConnectCallback already
+                # indicates a successfull connection, this check is
+                # not needed.
+                # if not self.isConnected(target_permid):
+                #     if debug:
+                #         raise RuntimeError, 'buddycast: not connected while calling connect_callback'
+                #     return
+                
+                self.print_debug_info('Active', 15, target_permid, selversion)
+                        
+                self.createAndSendBuddyCastMessage(target_permid, selversion, active=True)
+
+            except:
+                # log and swallow: a failed send must not kill the overlay thread
+                print_exc()
+                print >> sys.stderr, "bc: error in reply buddycast msg",\
+                    exc, dns, show_permid_short(target_permid), selversion, "Round", self.round, 
+
+        else:
+            if debug:
+                print >> sys.stderr, "bc: warning - connecting to",\
+                    show_permid_short(target_permid),exc,dns, ctime(now())
+                    
+    def createAndSendBuddyCastMessage(self, target_permid, selversion, active):
+        """ Build, bencode and send a BUDDYCAST message to target_permid,
+            then block the target for short_block_interval and piggy-back
+            BarterCast / VoteCast / ChannelCast messages where enabled.
+        @param active: True when we initiated the exchange, False when replying
+        @return the (un-bencoded) buddycast_data dict, for testing
+        """
+        
+        #print >>sys.stderr,"bc: SENDING BC to",show_permid_short(target_permid)
+        #target_permid ="""MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGbSaE3xVUvdMYGkj+x/mE24f/f4ZId7kNPVkALbAa2bQNjCKRDSPt+oE1nzr7It/CfxvCTK+sjOYAjr""" 
+        
+        #selversion = 12 # for test
+        
+        buddycast_data = self.createBuddyCastMessage(target_permid, selversion)
+        if debug:
+            print >> sys.stderr, "bc: createAndSendBuddyCastMessage", len(buddycast_data), currentThread().getName()
+        try:
+            # 'permid' is only added temporarily (validation expects it);
+            # it is popped again before bencoding the wire message
+            buddycast_data['permid'] = self.permid
+            #validBuddyCastData(buddycast_data, self.num_myprefs, 
+            #                       self.num_tbs, self.num_rps, self.num_tb_prefs)
+            buddycast_data.pop('permid')
+            buddycast_msg = bencode(buddycast_data)
+        except:
+            print_exc()
+            print >> sys.stderr, "error buddycast_data:", buddycast_data
+            return
+            
+        if active:
+            self.print_debug_info('Active', 16, target_permid)
+        else:
+            self.print_debug_info('Passive', 6, target_permid)
+            
+        self.overlay_bridge.send(target_permid, BUDDYCAST+buddycast_msg, self.buddycastSendCallback)
+        # block further sends to this target and drop it from candidates
+        self.blockPeer(target_permid, self.send_block_list, self.short_block_interval)
+        self.removeConnCandidate(target_permid)        
+        
+        if debug:
+            print >> sys.stderr, '****************--------------'*2
+            print >> sys.stderr, 'sent buddycast message to', show_permid_short(target_permid), len(buddycast_msg)
+        
+        if active:
+            self.print_debug_info('Active', 17, target_permid)
+        else:
+            self.print_debug_info('Passive', 7, target_permid)
+        
+        # Bartercast
+        if self.bartercast_core != None and active:
+            try:
+                self.bartercast_core.createAndSendBarterCastMessage(target_permid, selversion, active)
+            except:
+                print_exc()
+            
+        # As of March 5, 2009, VoteCast Messages are sent in lock-step with BuddyCast.
+        # (only if there are any votes to send.)
+        # Update (July 24, 2009): ChannelCast is used in place of ModerationCast
+       
+        if self.votecast_core != None:
+            try:
+                self.votecast_core.createAndSendVoteCastMessage(target_permid, selversion)
+            except:
+                print_exc()
+                
+
+        if self.channelcast_core != None:
+            try:
+                self.channelcast_core.createAndSendChannelCastMessage(target_permid, selversion)
+            except:
+                print_exc()
+            
+        if self.log:
+            dns = self.dnsindb(target_permid)
+            if dns:
+                ip,port = dns
+                if active:
+                    MSG_ID = 'ACTIVE_BC'
+                else:
+                    MSG_ID = 'PASSIVE_BC'
+                msg = repr(readableBuddyCastMsg(buddycast_data,selversion))    # from utilities
+                self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg)
+                
+        #print >>sys.stderr,"bc: Created BC",`buddycast_data`
+                
+        return buddycast_data # Nicolas: for testing
+                
+    def createBuddyCastMessage(self, target_permid, selversion, target_ip=None, target_port=None):
+        """ Create a buddycast message for a target peer on selected protocol version.
+            Returns {} when the target's ip/port cannot be resolved.
+            Fields added depend on selversion: 'connectable' from v3,
+            'collected torrents' from v4, npeers/nfiles/ndls from v6.
+        """
+        # Nicolas: added manual target_ip, target_port parameters for testing
+        ## Test 
+        # NOTE(review): dnsindb overwrites the manual target_ip/target_port
+        # parameters whenever it succeeds; the manual values only survive
+        # under TESTASSERVER when dnsindb raises — confirm this is intended
+        try:
+            target_ip,target_port = self.dnsindb(target_permid)    
+        except:
+            if not self.TESTASSERVER:
+                raise # allow manual ips during unit-testing if dnsindb fails
+        if not target_ip or not target_port:
+            return {}
+        
+        my_pref = self.data_handler.getMyLivePreferences(selversion, self.num_myprefs)       #[pref]
+        
+        if debug:
+            print >> sys.stderr, " bc:Amended preference list is:", str(my_pref)
+            
+        taste_buddies = self.getTasteBuddies(self.num_tbs, self.num_tb_prefs, target_permid, target_ip, target_port, selversion)
+        random_peers = self.getRandomPeers(self.num_rps, target_permid, target_ip, target_port, selversion)    #{peer:last_seen}
+        buddycast_data = {'ip':self.ip,
+                         'port':self.port,
+                         'name':self.nameutf8,
+                         'preferences':my_pref,
+                         'taste buddies':taste_buddies, 
+                         'random peers':random_peers}
+        
+        if selversion >= OLPROTO_VER_THIRD:
+            # From this version, add 'connectable' entry in buddycast message
+            connectable = self.isConnectable()
+            buddycast_data['connectable'] = connectable
+        
+        if selversion >= OLPROTO_VER_FOURTH:
+            recent_collect = self.metadata_handler.getRecentlyCollectedTorrents(self.max_collected_torrents, selversion)
+                            
+            buddycast_data['collected torrents'] = recent_collect
+        
+        if selversion >= OLPROTO_VER_SIXTH:
+            # advertise local db sizes for remote-search peer selection
+            npeers = self.data_handler.get_npeers()
+            ntorrents = self.data_handler.get_ntorrents()
+            nmyprefs = self.data_handler.get_nmyprefs()
+            buddycast_data['npeers'] = npeers
+            buddycast_data['nfiles'] = ntorrents
+            buddycast_data['ndls'] = nmyprefs
+            
+            
+        return buddycast_data
+
+    def getTasteBuddies(self, ntbs, ntbprefs, target_permid, target_ip, target_port, selversion):
+        """ Randomly select a number of peers from connected_taste_buddies.
+            The target itself and any peer with the target's ip:port are
+            excluded; per-peer fields are then adapted to selversion.
+        """
+        
+        if not self.connected_taste_buddies:
+            return []
+        tb_list = self.connected_taste_buddies[:]
+        if target_permid in tb_list:
+            tb_list.remove(target_permid)
+
+        peers = []
+        for permid in tb_list:    
+            # keys = ('ip', 'port', 'oversion', 'num_torrents')
+            # deepcopy: we mutate the per-peer dict below and must not
+            # touch the cached entry in connected_connectable_peers
+            peer = deepcopy(self.connected_connectable_peers[permid])
+            if peer['ip'] == target_ip and peer['port'] == target_port:
+                continue
+            peer['similarity'] = self.data_handler.getPeerSim(permid)
+            peer['permid'] = permid
+            # Arno, 2010-01-28: St*pid Unicode handling causes IP addresses to be Unicode, fix.
+            peer['ip'] = str(peer['ip'])
+            peers.append(peer)
+        
+#        peers = self.data_handler.getPeers(tb_list, ['permid', 'ip', 'port', 'similarity', 'oversion', 'num_torrents'])
+#        # filter peers with the same ip and port
+#        peers = filter(lambda p:p['ip']!=target_ip or int(p['port'])!=target_port, peers)
+#        
+#        for i in range(len(peers)):
+#            peers[i]['port'] = int(peers[i]['port'])
+            
+        # In overlay version 2, buddycast has 'age' field
+        if selversion <= OLPROTO_VER_SECOND:
+            for i in range(len(peers)):
+                peers[i]['age'] = 0
+            
+        # In overlay version 2 and 3, buddycast doesn't have similarity field, and taste buddy has preferences
+        if selversion <= OLPROTO_VER_THIRD:
+            for i in range(len(peers)):
+                peers[i].pop('similarity')
+                peers[i]['preferences'] = []    # don't support from now on
+        
+        # From overlay version 4, buddycast includes similarity for peers
+        # NOTE(review): unlike getRandomPeers(), a None similarity is not
+        # guarded here — getPeerSim returning None would raise; confirm
+        # it cannot be None for taste buddies
+        if selversion >= OLPROTO_VER_FOURTH:
+            for i in range(len(peers)):
+                peers[i]['similarity'] = int(peers[i]['similarity']+0.5)    # bencode doesn't accept float type
+        
+
+
+        # Every peer >= 6 in message attachs nfiles and oversion for remote search from version 6
+        for i in range(len(peers)):
+            oversion = peers[i].pop('oversion')
+            nfiles = peers[i].pop('num_torrents')
+            if selversion >= OLPROTO_VER_SIXTH and oversion >= OLPROTO_VER_SIXTH and nfiles >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD:
+                peers[i]['oversion'] = oversion
+                # ascribe it to the inconsistent name of the same concept in msg and db
+                peers[i]['nfiles'] = nfiles
+        
+        return peers
+    
+    def getRandomPeers(self, nrps, target_permid, target_ip, target_port, selversion):
+        """ Randomly select a number of peers from connected_random_peers.
+            From protocol v6 some remote-search peers are mixed in; the
+            target itself and peers sharing its ip:port are excluded, and
+            per-peer fields are adapted to selversion.
+        """
+        
+        if not self.connected_random_peers:
+            return []
+        rp_list = self.connected_random_peers[:]
+        
+        # From version 6, two (might be offline) remote-search-peers must be included in msg
+        if selversion >= OLPROTO_VER_SIXTH:
+            remote_search_peers = self.getRemoteSearchPeers(self.num_remote_peers_in_msg)
+            rp_list += remote_search_peers
+            if len(rp_list) > nrps:
+                rp_list = sample(rp_list, nrps)
+            
+        if target_permid in rp_list:
+            rp_list.remove(target_permid)
+        
+        peers = []
+        if DEBUG:
+            print >> sys.stderr, 'bc: ******** rplist nconn', len(rp_list), len(self.connected_connectable_peers)
+        #print >> sys.stderr, rp_list, self.connected_connectable_peers
+        for permid in rp_list:    
+            # keys = ('ip', 'port', 'oversion', 'num_torrents')
+            #print >> sys.stderr, '**************', `self.connected_connectable_peers`, `rp_list`
+            # TODO: Fix this bug: not consisitent
+            # (rp_list entries are not guaranteed to be in
+            # connected_connectable_peers, hence the skip below)
+            if permid not in self.connected_connectable_peers:
+                continue
+            # deepcopy: the per-peer dict is mutated below; don't touch the cache
+            peer = deepcopy(self.connected_connectable_peers[permid])
+            if peer['ip'] == target_ip and peer['port'] == target_port:
+                continue
+            peer['similarity'] = self.data_handler.getPeerSim(permid)
+            peer['permid'] = permid
+            # Arno, 2010-01-28: St*pid Unicode handling causes IP addresses to be Unicode, fix.
+            peer['ip'] = str(peer['ip'])
+            peers.append(peer)
+            
+#        peers = self.data_handler.getPeers(rp_list, ['permid', 'ip', 'port', 'similarity', 'oversion', 'num_torrents'])
+#        peers = filter(lambda p:p['ip']!=target_ip or int(p['port'])!=target_port, peers)
+#        
+#        for i in range(len(peers)):
+#            peers[i]['port'] = int(peers[i]['port'])
+            
+        # In overlay version 2, buddycast has 'age' field
+        if selversion <= OLPROTO_VER_SECOND:    
+            for i in range(len(peers)):
+                peers[i]['age'] = 0
+                
+        # random peer also attachs similarity from 4
+        if selversion <= OLPROTO_VER_THIRD:
+            for i in range(len(peers)):
+                peers[i].pop('similarity')
+
+        if selversion >= OLPROTO_VER_FOURTH:
+            for i in range(len(peers)):
+                # similarity may be None for random peers; treat as 0.0
+                old_sim = peers[i]['similarity']
+                if old_sim is None:
+                    old_sim = 0.0
+                peers[i]['similarity'] = int(old_sim+0.5)
+        
+        # Every peer >= 6 in message attachs nfiles and oversion for remote search from version 6
+        for i in range(len(peers)):
+            oversion = peers[i].pop('oversion')
+            nfiles = peers[i].pop('num_torrents')
+            # only include remote-search-peers
+            if selversion >= OLPROTO_VER_SIXTH and oversion >= OLPROTO_VER_SIXTH and nfiles >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD:
+                peers[i]['oversion'] = oversion
+                # ascribe it to the inconsistent name of the same concept in msg and db
+                peers[i]['nfiles'] = nfiles
+  
+        return peers       
+    
+    def isConnectable(self):
+        """ Return True iff the dialback module reports us as connectable. """
+        return bool(self.dialback.isConnectable())
+
+    def buddycastSendCallback(self, exc, target_permid, other=0):
+        """ Overlay-bridge callback for BUDDYCAST sends: on failure,
+            close the connection to the target (exc is None on success).
+        """
+        if exc is None:
+            if debug:
+                print >>sys.stderr,"bc: *** msg was sent successfully to peer", \
+                    self.get_peer_info(target_permid)
+        else:
+            if debug:
+                print >>sys.stderr,"bc: *** warning - error in sending msg to",\
+                        self.get_peer_info(target_permid), exc
+            self.closeConnection(target_permid, 'buddycast:'+str(exc))
+            
+    def blockPeer(self, peer_permid, block_list, block_interval=None):
+        """ Add a peer to a block list, mapping it to its unblock time
+            (now + block_interval; defaults to self.block_interval).
+        """
+        
+        peer_id = peer_permid # ARNODB: confusing!
+        if block_interval is None:
+            block_interval = self.block_interval
+        unblock_time = now() + block_interval
+        block_list[peer_id] = unblock_time
+        
+    
+        
+    def isBlocked(self, peer_permid, block_list):
+        """ Return True iff the peer is still blocked in block_list.
+            Expired entries are removed as a side effect.  Always False
+            under TESTASSERVER.
+        """
+        if self.TESTASSERVER:
+            return False # we do not want to be blocked when sending various messages
+
+        peer_id = peer_permid
+        if peer_id not in block_list:
+            return False
+             
+        unblock_time = block_list[peer_id]
+        if now() >= unblock_time - self.network_delay:    # 30 seconds for network delay
+            # expired: drop the entry so the list does not grow unbounded
+            block_list.pop(peer_id)
+            return False
+        return True
+    
+        
+            
+    # ------ receive a buddycast message, for both active and passive thread ------ #
+    def gotBuddyCastMessage(self, recv_msg, sender_permid, selversion):
+        """ Received a buddycast message and handle it. Reply if needed.
+        @param recv_msg: raw bencoded message body (without msg-id byte)
+        @param sender_permid: permid of the sending peer
+        @param selversion: negotiated overlay protocol version
+        @return True to keep the connection open, False to have it closed
+        """
+        
+        if debug:
+            print >> sys.stderr, "bc: got and handle buddycast msg", currentThread().getName()
+        
+        if not sender_permid or sender_permid == self.permid:
+            print >> sys.stderr, "bc: error - got BuddyCastMsg from a None peer", \
+                        sender_permid, recv_msg, "Round", self.round
+            return False
+        
+        blocked = self.isBlocked(sender_permid, self.recv_block_list)
+
+        if blocked:
+            if DEBUG:
+                print >> sys.stderr, "bc: warning - got BuddyCastMsg from a recv blocked peer", \
+                        show_permid(sender_permid), "Round", self.round
+            return True     # allow the connection to be kept. That peer may have restarted in 4 hours
+        
+        # Jie: Because buddycast message is implemented as a dictionary, anybody can 
+        # insert any content in the message. It isn't secure if someone puts 
+        # some fake contents inside and make the message very large. The same 
+        # secure issue could happen in other protocols over the secure overlay layer. 
+        # Therefore, I'd like to set a limitation of the length of buddycast message. 
+        # The receiver should close the connection if the length of the message 
+        # exceeds the limitation. According to my experience, the biggest 
+        # buddycast message should be around 6~7KBytes. So the reasonable 
+        # length limitation might be 10KB for buddycast message. 
+        if MAX_BUDDYCAST_LENGTH > 0 and len(recv_msg) > MAX_BUDDYCAST_LENGTH:
+            print >> sys.stderr, "bc: warning - got large BuddyCastMsg", len(recv_msg), "Round", self.round
+            return False
+
+        # if the sender is in our send block list we initiated this
+        # exchange, i.e. this message is the peer's reply to us
+        active = self.isBlocked(sender_permid, self.send_block_list)
+        
+        if active:
+            self.print_debug_info('Active', 18, sender_permid)
+        else:
+            self.print_debug_info('Passive', 2, sender_permid)
+        
+        buddycast_data = {}
+        try:    
+            try:
+                buddycast_data = bdecode(recv_msg) 
+            except ValueError, msg:
+                try:
+                    errmsg = str(msg)
+                except:
+                    errmsg = repr(msg)
+                if DEBUG:
+                    print >> sys.stderr, "bc: warning, got invalid BuddyCastMsg:", errmsg, \
+                    "Round", self.round   # ipv6
+                return False            
+            buddycast_data.update({'permid':sender_permid})
+
+            try:    # check buddycast message
+                validBuddyCastData(buddycast_data, 0, 
+                                   self.num_tbs, self.num_rps, self.num_tb_prefs, selversion)    # RCP 2            
+            except RuntimeError, msg:
+                try:
+                    errmsg = str(msg)
+                except:
+                    errmsg = repr(msg)
+                if DEBUG:
+                    dns = self.dnsindb(sender_permid)
+                    print >> sys.stderr, "bc: warning, got invalid BuddyCastMsg:", errmsg, "From", dns, "Round", self.round   # ipv6
+
+                return False
+           
+            # update sender's ip and port in buddycast
+            dns = self.dnsindb(sender_permid)
+            if dns != None:
+                sender_ip = dns[0]
+                sender_port = dns[1]
+                buddycast_data.update({'ip':sender_ip})
+                buddycast_data.update({'port':sender_port})
+            
+            # NOTE(review): if dns is None, sender_ip/sender_port are unbound
+            # here and the overlay_log call below would raise NameError —
+            # verify dnsindb cannot return None when self.log is set
+            if self.log:
+                if active:
+                    MSG_ID = 'ACTIVE_BC'
+                else:
+                    MSG_ID = 'PASSIVE_BC'
+                msg = repr(readableBuddyCastMsg(buddycast_data,selversion))    # from utilities
+                self.overlay_log('RECV_MSG', sender_ip, sender_port, show_permid(sender_permid), selversion, MSG_ID, msg)
+            
+            # store discovered peers/preferences/torrents to cache and db
+            conn = buddycast_data.get('connectable', 0)    # 0 - unknown
+            
+            self.handleBuddyCastMessage(sender_permid, buddycast_data, selversion)
+            # the sender reached us directly, so it is connectable
+            if active:
+                conn = 1
+            
+            if active:
+                self.print_debug_info('Active', 19, sender_permid)
+            else:
+                self.print_debug_info('Passive', 3, sender_permid)
+            
+            # update sender and other peers in connection list
+            addto = self.addPeerToConnList(sender_permid, conn)
+            
+            if active:
+                self.print_debug_info('Active', 20, sender_permid)
+            else:
+                self.print_debug_info('Passive', 4, sender_permid)
+            
+        except Exception, msg:
+            print_exc()
+            # NOTE(review): this re-raise makes the 'return True' below
+            # unreachable and contradicts its comment — confirm whether the
+            # raise is leftover debugging
+            raise Exception, msg
+            return True    # don't close connection, maybe my problem in handleBuddyCastMessage
+        
+        self.blockPeer(sender_permid, self.recv_block_list)
+        
+        # update torrent collecting module
+        #self.data_handler.checkUpdate()
+        collectedtorrents = buddycast_data.get('collected torrents', [])
+        # from v11 collected torrents are dicts with swarm-size info;
+        # before that they are bare infohashes
+        if selversion >= OLPROTO_VER_ELEVENTH:
+            collected_infohashes = [] 
+            for value in collectedtorrents:
+                infohash = value['infohash']
+                collected_infohashes.append(infohash)
+        else: 
+            collected_infohashes = collectedtorrents
+            
+        if self.torrent_collecting and not self.superpeer:
+            collected_infohashes += self.getPreferenceHashes(buddycast_data)  
+            self.torrent_collecting.trigger(sender_permid, selversion, collected_infohashes)
+        
+        if active:
+            self.print_debug_info('Active', 21, sender_permid)
+        else:
+            self.print_debug_info('Passive', 5, sender_permid)
+                
+        # only reply when the peer initiated the exchange (passive side)
+        if not active:
+            self.replyBuddyCast(sender_permid, selversion)    
+
+        # show activity
+        buf = dunno2unicode('"'+buddycast_data['name']+'"')
+        self.launchmany.set_activity(NTFY_ACT_RECOMMEND, buf)
+        
+        if DEBUG:
+            print >> sys.stderr, "bc: Got BUDDYCAST message from",self.get_peer_info(sender_permid),active
+        
+        return True
+
+
+    def createPreferenceDictionaryList(self, buddycast_data):
+        """as of OL 8, preferences are no longer lists of infohashes, but lists of lists containing 
+           infohashes and associated metadata. this method checks which overlay version has been used
+           and replaces either format by a list of dictionaries, such that the rest of the code can remain
+           version-agnostic and additional information like torrent ids can be stored along the way"""
+
+        prefs = buddycast_data.get('preferences',[])
+        # assume at least one entry below here        
+        if len(prefs) == 0:
+            return []
+        d = []
+
+        try:
+
+            # format is detected from the first entry: plain infohashes
+            # (pre-OL8) vs lists with metadata (OL8+)
+            if not type(prefs[0])==list:
+                # pre-OLPROTO_VER_EIGHTH
+                # create dictionary from list of info hashes, extended fields simply aren't set
+
+                d =  [dict({'infohash': pref}) for pref in prefs]
+
+                # we shouldn't receive these lists if the peer says he's OL 8.
+                # let's accept it but complain
+                if buddycast_data['oversion'] >= OLPROTO_VER_EIGHTH:
+                    if DEBUG:
+                        print >> sys.stderr, 'buddycast: received OLPROTO_VER_EIGHTH buddycast data containing old style preferences. only ok if talking to an earlier non-release version'
+                return d
+
+            # if the single prefs entries are lists, we have a more modern wire format
+            # currently, there is only one possibility
+            if buddycast_data['oversion'] >= OLPROTO_VER_ELEVENTH:
+                # Rahim: This part extracts swarm size info from the BC message 
+                # and then returns it in the result list.
+                # create dictionary from list of lists
+                d = [dict({'infohash': pref[0],
+                           'search_terms': pref[1],
+                           'position': pref[2],
+                           'reranking_strategy': pref[3],
+                           'num_seeders':pref[4],
+                           'num_leechers':pref[5],
+                           'calc_age':pref[6],
+                           'num_sources_seen':pref[7]}) 
+                     for pref in prefs]
+                
+            elif buddycast_data['oversion'] >= OLPROTO_VER_EIGHTH:
+                # create dictionary from list of lists
+                d = [dict({'infohash': pref[0],
+                           'search_terms': pref[1],
+                           'position': pref[2],
+                           'reranking_strategy': pref[3]}) 
+                     for pref in prefs]
+            else:
+                raise RuntimeError, 'buddycast: unknown preference protocol, pref entries are lists but oversion= %s:\n%s' % (buddycast_data['oversion'], prefs)
+
+            return d
+                
+        except Exception, msg:
+            print_exc()
+            # NOTE(review): the re-raise makes the 'return d' below
+            # unreachable — confirm whether a fallback return was intended
+            raise Exception, msg
+            return d
+            
+    def getPreferenceHashes(self, buddycast_data):
+        """convenience function returning the infohashes from the preferences. 
+           returns a list of infohashes, i.e. replaces old calls to buddycast_data.get('preferences')"""
+        # Entries without an 'infohash' key contribute an empty string.
+        return [preference.get('infohash',"") for preference in buddycast_data.get('preferences', [])] 
+    
+    def handleBuddyCastMessage(self, sender_permid, buddycast_data, selversion):
+        """ Handle received buddycast message 
+            Add peers, torrents and preferences into database and update last seen
+            Add fresh peers to candidate list
+            All database updates caused by buddycast msg should be handled here 
+        """
+        
+        _now = now()
+        
+        cache_db_data = {'peer':{},'infohash':Set(),'pref':[], 'coll':[]}  # peer, updates / pref, pairs, Rahim: coll for colleected torrents
+        cache_peer_data = {}
+        
+        # 'taste buddies' and 'random peers' are consumed (popped) here; what
+        # remains in buddycast_data describes the sender itself.
+        tbs = buddycast_data.pop('taste buddies')
+        rps = buddycast_data.pop('random peers')
+        buddycast_data['oversion'] = selversion
+
+        # print >> sys.stderr, "bc: \n" * 10 
+        # print >> sys.stderr, "bc: received", len(tbs), "and", len(rps), "tastebudies and randompeers, respectively"
+        # for peer in tbs:
+        #     print >> sys.stderr, "bc: tastebuddy", peer
+        # for peer in rps:
+        #     print >> sys.stderr, "bc: randompeer", peer
+        
+        max_tb_sim = 1
+
+        # include sender itself
+        bc_data = [buddycast_data] + tbs + rps 
+        for peer in bc_data:
+            
+            #print >>sys.stderr,"bc: Learned about peer",peer['ip']
+            
+            peer_permid = peer['permid']
+            if peer_permid == self.permid:
+                continue 
+            age = max(peer.get('age', 0), 0)    # From secure overlay version 3, it doesn't include 'age'
+            last_seen = _now - age
+            old_last_seen = self.data_handler.getPeerLastSeen(peer_permid)
+            # keep the most recent sighting we know of, clamped so it never lies in the future
+            last_seen = min(max(old_last_seen, last_seen), _now)
+            oversion = peer.get('oversion', 0)
+            nfiles = peer.get('nfiles', 0)
+            self.addRemoteSearchPeer(peer_permid, oversion, nfiles, last_seen)
+            
+            cache_peer_data[peer_permid] = {}
+            cache_peer_data[peer_permid]['last_seen'] = last_seen
+            #self.data_handler._addPeerToCache(peer_permid, last_seen)
+            #if selversion >= OLPROTO_VER_FOURTH:
+            # largest similarity seen in this message; presumably used to
+            # normalise relative similarity (cf. commented addRelativeSim) —
+            # passed on to handleBCData below.
+            sim = peer.get('similarity', 0)
+            max_tb_sim = max(max_tb_sim, sim)
+            if sim > 0:
+                cache_peer_data[peer_permid]['sim'] = sim
+                #self.data_handler.addRelativeSim(sender_permid, peer_permid, sim, max_tb_sim)
+            
+            if peer_permid != sender_permid:
+                self.addConnCandidate(peer_permid, last_seen)
+
+            new_peer_data = {}
+            #new_peer_data['permid'] = peer['permid']
+            new_peer_data['ip'] = hostname_or_ip2ip(peer['ip']) 
+            new_peer_data['port'] = peer['port']
+            new_peer_data['last_seen'] = last_seen
+            if peer.has_key('name'):
+                new_peer_data['name'] = dunno2unicode(peer['name']) # store in db as unicode
+            cache_db_data['peer'][peer_permid] = new_peer_data
+            #self.data_handler.addPeer(peer_permid, last_seen, new_peer_data, commit=True)    # new peer
+
+        # trim the candidate list; once enough candidates are known we consider
+        # ourselves bootstrapped
+        self.limitConnCandidate()
+        if len(self.connection_candidates) > self.bootstrap_num:
+            self.bootstrapped = True
+        
+        # database stuff
+        if selversion >= OLPROTO_VER_SIXTH:
+            stats = {'num_peers':buddycast_data['npeers'],'num_torrents':buddycast_data['nfiles'],'num_prefs':buddycast_data['ndls']}
+            cache_db_data['peer'][sender_permid].update(stats)
+               
+        cache_db_data['peer'][sender_permid]['last_buddycast'] = _now
+        
+        prefs = self.createPreferenceDictionaryList(buddycast_data)
+        
+        #Rahim: Since overlay version 11 , the collected torrents contain 
+        # swarm size info. The code below handles it and changes list of list 
+        # to a list of dictionary, same as preference.
+        #
+        if selversion >= OLPROTO_VER_ELEVENTH: 
+            collecteds = self.createCollectedDictionaryList(buddycast_data, selversion)
+            buddycast_data['collected torrents'] = collecteds
+            infohashes = Set(self.getCollectedHashes(buddycast_data, selversion))
+        else: 
+            infohashes = Set(buddycast_data.get('collected torrents', []))           
+        
+        # Nicolas: store this back into buddycast_data because it's used later on gotBuddyCastMessage again
+        buddycast_data['preferences'] = prefs  
+        prefhashes = Set(self.getPreferenceHashes(buddycast_data))  # only accept sender's preference, to avoid pollution
+        infohashes = infohashes.union(prefhashes)
+                
+        cache_db_data['infohash'] = infohashes
+        if prefs:
+            cache_db_data['pref'] = prefs 
+        
+
+        if selversion >= OLPROTO_VER_ELEVENTH:
+            if collecteds:
+                cache_db_data['coll'] = collecteds
+
+        
+        # single batched hand-off of all DB updates derived from this message
+        self.data_handler.handleBCData(cache_db_data, cache_peer_data, sender_permid, max_tb_sim, selversion, _now)
+    
+    def getCollectedHashes(self, buddycast_data, selversion):
+        """
+        @author: Rahim
+        @param buddycast_data: A dictionary structure that contains received buddycast message.
+        @param selversion: The selected overlay version between peers (currently unused here).
+        @return: The infohash of the collected torrents is returned as a list.
+        """  
+        # Entries missing 'infohash' map to the empty string.
+        return [collected.get('infohash',"") for collected in buddycast_data.get('collected torrents', [])] 
+        
+        
+    def createCollectedDictionaryList(self, buddycast_data, selversion):
+        """
+        Processes the list of the collected torrents and then returns back a list of dictionaries.
+        @author: Rahim
+        @param buddycast_data: Received BC message.
+        @param selversion: Version of the agreed OL protocol.
+        @return: List of dictionaries. Each item in the dictionary is like :
+        """
+        collecteds = buddycast_data.get('collected torrents',[])
+              
+        if len(collecteds) == 0:
+            return []
+        d = []
+
+        try:
+           d = [dict({'infohash': coll[0],
+                      'num_seeders': coll[1],
+                      'num_leechers': coll[2],
+                      'calc_age': coll[3],
+                      'num_sources_seen':coll[4]}) 
+                     for coll in collecteds]
+                                 
+           return d
+        except Exception, msg:
+            print_exc()
+            raise Exception, msg
+            return d
+        
+    def removeFromConnList(self, peer_permid):
+        """ Drop the peer from all connection bookkeeping.
+            Returns 0 if it was in no list, 1 if it was connectable (Ct, and
+            possibly TB/RP), 2 if it was unconnectable (Cu). """
+        removed = 0
+        if peer_permid in self.connected_connectable_peers:     # Ct
+            self.connected_connectable_peers.pop(peer_permid)
+            # the peer may be in either, neither, but never both of TB/RP
+            try:
+                self.connected_taste_buddies.remove(peer_permid)
+            except ValueError:
+                pass
+            try:
+                self.connected_random_peers.remove(peer_permid)
+            except ValueError:
+                pass
+            removed = 1
+        if peer_permid in self.connected_unconnectable_peers:    # Cu
+            self.connected_unconnectable_peers.pop(peer_permid)
+            removed = 2
+        return removed
+        
+    def addPeerToConnList(self, peer_permid, connectable=0):
+        """ Add the peer to Ct, Cr or Cu.
+            Returns a human-readable label for logging/UI, or None (implicitly)
+            when the peer is not actually connected. """
+        
+        # remove the existing peer from lists so that its status can be updated later
+        self.removeFromConnList(peer_permid)    
+        
+        if not self.isConnected(peer_permid):
+            #print >> sys.stderr, "bc: cannot add a unconnected peer to conn list", "Round", self.round
+            return
+        
+        _now = now()
+        
+        if connectable == 1:
+            self.addPeerToConnCP(peer_permid, _now)
+            addto = '(reachable peer)'
+        else:
+            self.addPeerToConnUP(peer_permid, _now)
+            addto = '(peer deemed unreachable)'
+            
+        return addto
+           
+    def updateTBandRPList(self):
+        """ Select the top 10 most similar (sim>0) peer to TB and others to RP """
+        
+        """ In early September 2009, it has been decided that, out of 10 taste buddies, 3 peers are selected which has an overlay
+            same or better of the current version; another 3 peers are selected each of which has an overlay better than 8. Rest 
+            of the slots are filled with highest similarity (just as before). The process of the selection of random peers is not changed!"""
+            
+        nconnpeers = len(self.connected_connectable_peers)
+        if nconnpeers == 0:
+            self.connected_taste_buddies = []
+            self.connected_random_peers = [] 
+            return
+        
+        # we need at least 3 peers of the same or better versions, among taste buddies
+        better_version_peers = 0 
+        
+        # we also need at least 4 peers of the recent versions (here, OL>=8), among taste buddies
+        recent_version_peers = 0 
+
+        tmplist = []
+        tmpverlist = []
+        tmplist2 = []
+        tbs = []
+        rps = []
+        # partition connected peers: sim>0 are TB candidates, the rest start as RP
+        for permid in self.connected_connectable_peers:
+            sim = self.data_handler.getPeerSim(permid)            
+            version = self.connected_connectable_peers[permid]['oversion']
+            if sim > 0:
+                tmplist.append([version,sim,permid])
+            else:
+                rps.append(permid)
+        
+        #ntb = self.max_conn_tb    # 10 tb & 10 rp
+        ntb = min((nconnpeers+1)/2, self.max_conn_tb)    # half tb and half rp
+        
+        """ tmplist now contains all peers which have sim > 0, 
+            because of new similarity function we add X peers until ntb is reached
+        """
+        if len(tmplist) < ntb:
+            cold_start_peers = P2PSimColdStart(self.connected_connectable_peers, tmplist, ntb - len(tmplist))
+            tmplist.extend(cold_start_peers)
+            
+            #remove cold_start_peers from rps
+            for version, sim, permid in cold_start_peers: 
+                if permid in rps:
+                    rps.remove(permid) 
+        
+        """ sort tmplist, emphasis is on overlay version, then on similarity.
+            thus we try to select top-(self.max_conn_tb) with the highest overlay/similarity
+        """
+        tmplist.sort()
+        tmplist.reverse() 
+        
+        if len(tmplist) > 0:
+            for version,sim,permid in tmplist:
+                # NOTE(review): '<=3' admits up to 4 peers per class, one more
+                # than the "3 peers" the comments above promise — confirm intended.
+                if version >= OLPROTO_VER_CURRENT and better_version_peers<=3: #OLPROTO_VER_ELEVENTH
+                    better_version_peers += 1
+                    tmpverlist.append(permid)
+                elif version >= OLPROTO_VER_EIGHTH and recent_version_peers<=3:
+                    recent_version_peers += 1
+                    tmpverlist.append(permid)
+                else:
+                    tmplist2.append([sim,permid])
+            tmplist2.sort()
+            tmplist2.reverse()
+            tbs = tmpverlist
+            # fill the remaining TB slots by highest similarity
+            for sim, permid in tmplist2[:ntb-better_version_peers-recent_version_peers]:
+                tbs.append(permid)          
+        
+        # everything not promoted to taste buddy falls back into the random-peer pool
+        ntb = len(tbs)
+        if len(tmplist) > ntb:
+            rps = [permid for sim,permid in tmplist2[ntb-better_version_peers-recent_version_peers:]] + rps
+        
+        tmplist = []
+        # remove the oldest peer from both random peer list and connected_connectable_peers
+        if len(rps) > self.max_conn_rp:
+            # then select recently seen peers 
+            tmplist = []
+            for permid in rps:
+                connect_time = self.connected_connectable_peers[permid]['connect_time']
+                tmplist.append([connect_time, permid])
+            tmplist.sort()
+            tmplist.reverse()
+            rps = []
+            for last_seen,permid in tmplist[:self.max_conn_rp]:
+                rps.append(permid)
+            for last_seen,permid in tmplist[self.max_conn_rp:]:
+                self.connected_connectable_peers.pop(permid)
+
+        self.connected_taste_buddies = tbs
+        self.connected_random_peers = rps
+        #print >> sys.stderr, "#tbs:",len(tbs), ";#rps:", len(rps)
+        #for p in self.connected_taste_buddies:
+        #    assert p in self.connected_connectable_peers
+        #for p in self.connected_random_peers:
+        #    assert p in self.connected_connectable_peers
+        #assert len(self.connected_taste_buddies) + len(self.connected_random_peers) <= len(self.connected_connectable_peers)
+        
+            
+    def addPeerToConnCP(self, peer_permid, conn_time):
+        """ Add a connectable peer to Ct and rebalance the TB/RP split. """
+        keys = ('ip', 'port', 'oversion', 'num_torrents')
+        res = self.data_handler.getPeer(peer_permid, keys)
+        peer = dict(zip(keys,res))
+        peer['connect_time'] = conn_time
+        self.connected_connectable_peers[peer_permid] = peer
+        self.updateTBandRPList()
+        
+    def addNewPeerToConnList(self, conn_list, max_num, peer_permid, conn_time):
+        """ Add a peer to a connection list, and pop the oldest peer out.
+            Returns None if there was room, the evicted permid if one was
+            popped, or peer_permid itself if the new peer was the oldest
+            (i.e. it was not added). """
+        
+        if max_num <= 0 or len(conn_list) < max_num:
+            conn_list[peer_permid] = conn_time
+            return None
+        
+        else:
+            oldest_peer = (conn_time+1, None)
+            # Prefix each permid with a random letter so that ties on
+            # conn_time are broken randomly rather than by permid ordering.
+            initial = 'abcdefghijklmnopqrstuvwxyz'
+            separator = ':-)'
+            for p in conn_list:
+                _conn_time = conn_list[p]
+                r = randint(0, self.max_conn_tb)
+                name = initial[r] + separator + p 
+                to_cmp = (_conn_time, name)
+                oldest_peer = min(oldest_peer, to_cmp)
+                
+            if conn_time >= oldest_peer[0]:     # add it
+                # strip the random prefix back off to recover the permid
+                out_peer = oldest_peer[1].split(separator)[1]
+                conn_list.pop(out_peer)            
+                conn_list[peer_permid] = conn_time
+                return out_peer
+            return peer_permid
+
+    def addPeerToConnUP(self, peer_permid, conn_time):
+        """ Add an unconnectable peer to Cu; True if it was actually added. """
+        ups = self.connected_unconnectable_peers
+        if peer_permid not in ups:
+            out_peer = self.addNewPeerToConnList(ups, 
+                                      self.max_conn_up, peer_permid, conn_time)
+            if out_peer != peer_permid:
+                return True
+        return False
+            
+    # -------------- reply buddycast, passive thread -------------- #
+    def replyBuddyCast(self, target_permid, selversion):
+        """ Reply a buddycast message """
+        
+        #print >> sys.stderr, '*************** replay buddycast message', show_permid_short(target_permid), self.isConnected(target_permid)
+        
+        if not self.isConnected(target_permid):
+            #print >> sys.stderr, 'buddycast: lost connection while replying buddycast', \
+            #    "Round", self.round
+            return
+        
+        # active=False marks this as a passive (reply) message
+        self.createAndSendBuddyCastMessage(target_permid, selversion, active=False)
+        
+        self.print_debug_info('Passive', 8, target_permid)
+        self.print_debug_info('Passive', 9, target_permid)
+
+        self.next_initiate += 1        # Be idle in next round
+        self.print_debug_info('Passive', 10)
+        
+        
+    # -------------- handle overlay connections from SecureOverlay ---------- #
+    def handleConnection(self,exc,permid,selversion,locally_initiated):
+        """ Overlay callback: exc is None on a new connection, otherwise the
+            connection was closed with that error. """
+        if exc is None and permid != self.permid:    # add a connection
+            self.addConnection(permid, selversion, locally_initiated)
+        else:
+            self.closeConnection(permid, 'overlayswarm:'+str(exc))
+
+        # NOTE(review): gated on module-level 'debug', not 'DEBUG' — this file
+        # appears to use two separate verbosity flags; confirm which is intended.
+        if debug:
+            print >> sys.stderr, "bc: handle conn from overlay", exc, \
+                self.get_peer_info(permid), "selversion:", selversion, \
+                "local_init:", locally_initiated, ctime(now()), "; #connections:", len(self.connected_connectable_peers), \
+                "; #TB:", len(self.connected_taste_buddies), "; #RP:", len(self.connected_random_peers)
+        
+    def addConnection(self, peer_permid, selversion, locally_initiated):
+        """ Record a new overlay connection and place the peer in a conn list. """
+        # add connection to connection list
+        _now = now()
+        if DEBUG:
+            print >> sys.stderr, "bc: addConnection", self.isConnected(peer_permid)
+        if not self.isConnected(peer_permid):
+            # SecureOverlay has already added the peer to db
+            self.connections[peer_permid] = selversion # add a new connection
+            # NOTE(review): locally_initiated is passed as 'connectable', i.e.
+            # locally initiated connections are treated as reachable — confirm intended.
+            addto = self.addPeerToConnList(peer_permid, locally_initiated)
+            
+            dns = self.get_peer_info(peer_permid, include_permid=False)
+            buf = '%s %s'%(dns, addto)
+            self.launchmany.set_activity(NTFY_ACT_MEET, buf)    # notify user interface
+
+            if self.torrent_collecting and not self.superpeer:
+                try:
+                    # Arno, 2009-10-09: Torrent Collecting errors should not kill conn.
+                    self.torrent_collecting.trigger(peer_permid, selversion)
+                except:
+                    print_exc()
+
+            if debug:
+                print >> sys.stderr, "bc: add connection", \
+                    self.get_peer_info(peer_permid), "to", addto
+            if self.log:
+                dns = self.dnsindb(peer_permid)
+                if dns:
+                    ip,port = dns
+                    self.overlay_log('CONN_ADD', ip, port, show_permid(peer_permid), selversion)
+
+    def closeConnection(self, peer_permid, reason):
+        """ Close connection with a peer, and remove it from connection lists """
+        
+        if debug:
+            print >> sys.stderr, "bc: close connection:", self.get_peer_info(peer_permid)
+        
+        if self.isConnected(peer_permid):
+            self.connections.pop(peer_permid)
+        removed = self.removeFromConnList(peer_permid)
+        # rebalance TB/RP only if the peer was in the connectable list (removed == 1)
+        if removed == 1:
+            self.updateTBandRPList()
+        
+        if self.log:
+            dns = self.dnsindb(peer_permid)
+            if dns:
+                ip,port = dns
+                self.overlay_log('CONN_DEL', ip, port, show_permid(peer_permid), reason)
+
+    # -------------- print debug info ---------- #
+    def get_stats(self):
+        """ Snapshot of list sizes for debug output:
+            (nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu). """
+        nPeer = len(self.data_handler.peers)
+        nPref = nPeer #len(self.data_handler.preferences)
+        nCc = len(self.connection_candidates)
+        nBs = len(self.send_block_list)
+        nBr = len(self.recv_block_list)
+        nSO = -1 # TEMP ARNO len(self.overlay_bridge.debug_get_live_connections())
+        nCo = len(self.connections)
+        nCt = len(self.connected_taste_buddies)
+        nCr = len(self.connected_random_peers)
+        nCu = len(self.connected_unconnectable_peers)
+        return nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu
+    
+    def print_debug_info(self, thread, step, target_permid=None, selversion=0, r=0, addto=''):
+        """ Verbose checkpoint tracing of the buddycast state machine; a no-op
+            unless the module-level 'debug' flag is set. 'thread' is 'Active'
+            or 'Passive'; 'step' selects which checkpoint message is printed
+            to stderr. """
+        if not debug:
+            return
+        if DEBUG:
+            print >>sys.stderr,"bc: *****", thread, str(step), "-",
+        if thread == 'Active':
+            # step 2 dumps the full connection/similarity status
+            if step == 2:
+                print >> sys.stderr, "Working:", now() - self.start_time, \
+                    "seconds since start. Round", self.round, "Time:", ctime(now())
+                nPeer, nPref, nCc, nBs, nBr, nSO, nCo, nCt, nCr, nCu = self.get_stats()
+                print >> sys.stderr, "bc: *** Status: nPeer nPref nCc: %d %d %d  nBs nBr: %d %d  nSO nCo nCt nCr nCu: %d %d %d %d %d" % \
+                                      (nPeer,nPref,nCc,           nBs,nBr,        nSO,nCo, nCt,nCr,nCu)
+                if nSO != nCo:
+                    print >> sys.stderr, "bc: warning - nSo and nCo is inconsistent"
+                if nCc > self.max_conn_cand or nCt > self.max_conn_tb or nCr > self.max_conn_rp or nCu > self.max_conn_up:
+                    print >> sys.stderr, "bc: warning - nCC or nCt or nCr or nCu overloads"
+                _now = now()
+                buf = ""
+                i = 1
+                for p in self.connected_taste_buddies:
+                    buf += "bc: %d taste buddies: "%i + self.get_peer_info(p) + str(_now-self.connected_connectable_peers[p]['connect_time']) + " version: " + str(self.connections[p]) + "\n"
+                    i += 1
+                print >> sys.stderr, buf
+                
+                buf = ""
+                i = 1
+                for p in self.connected_random_peers:
+                    buf += "bc: %d random peers: "%i + self.get_peer_info(p) + str(_now-self.connected_connectable_peers[p]['connect_time']) + " version: " + str(self.connections[p]) + "\n"
+                    i += 1
+                print >> sys.stderr, buf
+                
+                buf = ""
+                i = 1
+                for p in self.connected_unconnectable_peers:
+                    buf += "bc: %d unconnectable peers: "%i + self.get_peer_info(p) + str(_now-self.connected_unconnectable_peers[p]) + " version: " + str(self.connections[p]) + "\n"
+                    i += 1
+                print >> sys.stderr, buf
+                buf = ""
+                totalsim = 0
+                nsimpeers = 0
+                minsim = 1e10
+                maxsim = 0
+                sims = []
+                for p in self.data_handler.peers:
+                    sim = self.data_handler.peers[p][PEER_SIM_POS]
+                    if sim > 0:
+                        sims.append(sim)
+                if sims:
+                    minsim = min(sims)
+                    maxsim = max(sims)
+                    nsimpeers = len(sims)
+                    totalsim = sum(sims)
+                    if nsimpeers > 0:
+                        meansim = totalsim/nsimpeers
+                    else:
+                        meansim = 0
+                    print >> sys.stderr, "bc: * sim peer: %d %.3f %.3f %.3f %.3f\n" % (nsimpeers, totalsim, meansim, minsim, maxsim)
+
+            elif step == 3:
+                print >> sys.stderr, "check blocked peers: Round", self.round
+                
+            elif step == 4:
+                print >> sys.stderr, "keep connections with peers: Round", self.round
+                
+            elif step == 6:
+                print >> sys.stderr, "idle loop:", self.next_initiate
+                
+            elif step == 9: 
+                print >> sys.stderr, "bootstrapping: select", self.bootstrap_num, \
+                    "peers recently seen from Mega Cache"
+                if self.booted < 0:
+                    print >> sys.stderr, "bc: *** bootstrapped recently, so wait for a while"
+                elif self.booted == 0:
+                    print >> sys.stderr, "bc: *** no peers to bootstrap. Try next time"
+                else:
+                    print >> sys.stderr, "bc: *** bootstrapped, got", len(self.connection_candidates), \
+                      "peers in Cc. Times of bootstrapped", self.total_bootstrapped_time
+                    buf = ""
+                    for p in self.connection_candidates:
+                        buf += "bc: * cand:" + `p` + "\n"
+                    buf += "\nbc: Remote Search Peer Candidates:\n"
+                    for p in self.remote_search_peer_candidates:
+                        buf += "bc: * remote: %d "%p[0] + self.get_peer_info(p[1]) + "\n"
+                    print >> sys.stderr, buf
+            
+            elif step == 11:
+                buf = "select "
+                if r == 0:
+                    buf += "a most similar taste buddy"
+                else:
+                    buf += "a most likely online random peer"
+                buf += " from Cc for buddycast out\n"
+                
+                if target_permid:
+                    buf += "bc: *** got target %s sim: %s last_seen: %s" % \
+                    (self.get_peer_info(target_permid),
+                     self.data_handler.getPeerSim(target_permid),
+                     ctime(self.data_handler.getPeerLastSeen(target_permid)))
+                else:
+                    buf += "bc: *** no target to select. Skip this round"
+                print >> sys.stderr, buf
+
+            elif step == 12:
+                print >> sys.stderr, "connect a peer to start buddycast", self.get_peer_info(target_permid)
+                
+            elif step == 13:
+                print >> sys.stderr, "block connected peer in send block list", \
+                    self.get_peer_info(target_permid)#, self.send_block_list[target_permid]
+                    
+            elif step == 14:
+                print >> sys.stderr, "remove connected peer from Cc", \
+                    self.get_peer_info(target_permid)#, "removed?", target_permid not in self.connection_candidates
+
+            elif step == 15:
+                print >> sys.stderr, "peer is connected", \
+                    self.get_peer_info(target_permid), "overlay version", selversion, currentThread().getName()
+                
+            elif step == 16:
+                print >> sys.stderr, "create buddycast to send to", self.get_peer_info(target_permid)
+                
+            elif step == 17:
+                print >> sys.stderr, "send buddycast msg to", self.get_peer_info(target_permid)
+                
+            elif step == 18:
+                print >> sys.stderr, "receive buddycast message from peer %s" % self.get_peer_info(target_permid)
+                
+            elif step == 19:
+                print >> sys.stderr, "store peers from incoming msg to cache and db"
+                
+            elif step == 20:
+                print >> sys.stderr, "add connected peer %s to connection list %s" % (self.get_peer_info(target_permid), addto)
+                
+            elif step == 21:
+                print >> sys.stderr, "block connected peer in recv block list", \
+                    self.get_peer_info(target_permid), self.recv_block_list[target_permid]
+                
+        if thread == 'Passive': 
+            if step == 2:
+                print >> sys.stderr,  "receive buddycast message from peer %s" % self.get_peer_info(target_permid)
+                
+            elif step == 3:
+                print >> sys.stderr, "store peers from incoming msg to cache and db"
+                
+            elif step == 4:
+                print >> sys.stderr, "add connected peer %s to connection list %s" % (self.get_peer_info(target_permid), addto)
+        
+            elif step == 5:
+                print >> sys.stderr, "block connected peer in recv block list", \
+                    self.get_peer_info(target_permid), self.recv_block_list[target_permid]
+            
+            elif step == 6:
+                print >> sys.stderr, "create buddycast to reply to", self.get_peer_info(target_permid)
+            
+            elif step == 7:
+                print >> sys.stderr, "reply buddycast msg to", self.get_peer_info(target_permid)
+                
+            elif step == 8:
+                print >> sys.stderr, "block connected peer in send block list", \
+                    self.get_peer_info(target_permid), self.send_block_list[target_permid]
+        
+            elif step == 9:
+                print >> sys.stderr, "remove connected peer from Cc", \
+                    self.get_peer_info(target_permid)#, "removed?", target_permid not in self.connection_candidates
+
+            elif step == 10:
+                print >> sys.stderr, "add idle loops", self.next_initiate
+        sys.stdout.flush()
+        sys.stderr.flush()
+        if DEBUG:
+            print >> sys.stderr, "bc: *****", thread, str(step), "-",
+
+    def getAllTasteBuddies(self):
+        # returns the live TB list (not a copy) — callers must not mutate it
+        return self.connected_taste_buddies
+        
+    def addRemoteSearchPeer(self, permid, oversion, ntorrents, last_seen):
+        """ Track peers capable of remote search (OL>=6 with enough torrents),
+            keeping only the self.num_search_cand most recently seen. """
+        if oversion >= OLPROTO_VER_SIXTH and ntorrents >= REMOTE_SEARCH_PEER_NTORRENTS_THRESHOLD:
+            insort(self.remote_search_peer_candidates, [last_seen,permid,oversion])
+            if len(self.remote_search_peer_candidates) > self.num_search_cand:
+                self.remote_search_peer_candidates.pop(0)
+                
+    def getRemoteSearchPeers(self, npeers,minoversion=None):
+        """ Return some peers that are remote-search capable """
+        if len(self.remote_search_peer_candidates) > npeers:
+            _peers = sample(self.remote_search_peer_candidates, npeers)    # randomly select
+        else:
+            _peers = self.remote_search_peer_candidates
+        peers = []
+        for p in _peers:
+            (last_seen,permid,selversion) = p
+            if minoversion is None or selversion >= minoversion:
+                peers.append(permid)
+
+        # Also add local peers (they should be cheap)
+        # TODO: How many peers?  Should these be part of the npeers?
+        local_peers = self.data_handler.getLocalPeerList(max_peers=5,minoversion=minoversion)
+        if DEBUG:
+            print >> sys.stderr, "bc: getRemoteSearchPeers: Selected %d local peers" % len(local_peers)
+        
+        # local peers are returned on top of (and may exceed) the npeers budget
+        return local_peers + peers
+        
+        
+class DataHandler:
+    def __init__(self, launchmany, overlay_bridge, max_num_peers=2500):
+        """ In-memory cache layer between BuddyCast and the megacache databases. """
+        self.launchmany = launchmany
+        self.overlay_bridge = overlay_bridge
+        self.config = self.launchmany.session.sessconfig # should be safe at startup
+        # --- database handlers ---
+        self.peer_db = launchmany.peer_db
+        self.superpeer_db = launchmany.superpeer_db
+        self.torrent_db = launchmany.torrent_db
+        self.mypref_db = launchmany.mypref_db
+        self.pref_db = launchmany.pref_db
+        self.simi_db = launchmany.simi_db
+        # self.term_db = launchmany.term_db
+        self.friend_db = launchmany.friend_db
+        self.pops_db = launchmany.pops_db
+        self.myfriends = Set() # FIXME: implement friends
+        self.myprefs = []    # torrent ids
+        self.peers = {}    # peer_id: [similarity, last_seen, prefs(array('l',[torrent_id])] 
+        self.default_peer = [0, 0, None]
+        self.permid = self.getMyPermid()
+        self.ntorrents = 0
+        self.last_check_ntorrents = 0
+        #self.total_pref_changed = 0
+        # how many peers to load into cache from db
+        #self.max_peer_in_db = max_num_peers
+        self.max_num_peers = min(max(max_num_peers, 100), 2500)    # at least 100, at most 2500
+        #self.time_sim_weight = 4*60*60  # every 4 hours equals to a point of similarity
+        # after added some many (user, item) pairs, update sim of item to item
+        #self.update_i2i_threshold = 100
+        #self.npeers = self.peer_db.size() - self.superpeer_db.size()
+        self.old_peer_num = 0
+        # set later via register_buddycast_core (two-phase initialisation)
+        self.buddycast_core = None
+        self.all_peer_list = None
+        self.num_peers_ui = None
+        self.num_torrents_ui = None
+        self.cached_updates = {'peer':{},'torrent':{}}
+
+        # Subscribe BC to updates to MyPreferences, such that we can add/remove
+        # them from our download history that we send to other peers.
+        self.launchmany.session.add_observer(self.sesscb_ntfy_myprefs,NTFY_MYPREFERENCES,[NTFY_INSERT,NTFY_DELETE])
+            
+    def commit(self):
+        # flush pending megacache writes to disk
+        self.peer_db.commit()
+
+    def register_buddycast_core(self, buddycast_core):
+        # back-reference installed after BuddyCastCore construction (two-phase init)
+        self.buddycast_core = buddycast_core
+    
+    def getMyName(self, name=''):
+        # 'name' is the fallback when no nickname is configured
+        return self.config.get('nickname', name)
+
+    def getMyIp(self, ip=''):
+        # NOTE: the 'ip' argument is ignored; kept for signature compatibility.
+        return self.launchmany.get_ext_ip()
+    
+    def getMyPort(self, port=0):
+        # NOTE: the 'port' argument is ignored; kept for signature compatibility.
+        return self.launchmany.listen_port
+    
+    def getMyPermid(self, permid=''):
+        # NOTE: the 'permid' argument is ignored; kept for signature compatibility.
+        return self.launchmany.session.get_permid()
+  
+    def getPeerID(self, permid):
+        if isinstance(permid, int) and permid > 0:
+            return permid
+        else:
+            return self.peer_db.getPeerID(permid)
+    
+    def getTorrentID(self, infohash):
+        if isinstance(infohash, int) and infohash > 0:
+            return infohash
+        else:
+            return self.torrent_db.getTorrentID(infohash)
+    
+    def getPeerPermid(self, peer_id):
+        """ Inverse of getPeerID: map a database peer_id back to its permid. """
+        return self.peer_db.getPermid(peer_id)
+
+    def getLocalPeerList(self, max_peers,minoversion=None):
+        """ Return up to max_peers peers from the db, optionally filtered by
+            a minimum overlay protocol version. """
+        return self.peer_db.getLocalPeerList(max_peers,minoversion=minoversion)
+  
+    def postInit(self, delay=4, batch=50, update_interval=10, npeers=None, updatesim=True):
+        """ Warm the in-memory cache from the db: load my preferences and the
+            npeers most recently connected peers, then (optionally) kick off
+            the batched similarity recomputation. """
+        # build up a cache layer between app and db
+        if npeers is None:
+            npeers = self.max_num_peers
+        self.updateMyPreferences()
+        self.loadAllPeers(npeers)
+        if updatesim:
+            self.updateAllSim(delay, batch, update_interval)
+
+    def updateMyPreferences(self, num_pref=None):
+        """ Refresh self.myprefs (list of torrent ids) from the MyPreference
+            db, newest first; all preferences if num_pref is None. """
+        # get most recent preferences, and sort by torrent id
+        # NOTE(review): the comment above claims sorting by torrent id, but the
+        # query orders by creation_time desc only; addMyPref() keeps the list
+        # sorted via insort -- verify whether sorted order is actually required.
+        res = self.mypref_db.getAll('torrent_id', order_by='creation_time desc', limit=num_pref)
+        self.myprefs = [p[0] for p in res]
+                
+    def loadAllPeers(self, num_peers=None):
+        """ Read peers from db and put them in self.peers.
+            At most num_peers (=self.max_num_peers) recently seen peers can be cached.
+            
+        """
+        # Cache layout per peer_id: [similarity, last_seen, prefs-array];
+        # prefs are left empty since the new similarity function no longer needs them.
+        peer_values = self.peer_db.getAll(['peer_id','similarity','last_seen'], order_by='last_connected desc', limit=num_peers)
+        self.peers = dict(zip([p[0] for p in peer_values], [[p[1],p[2],array('l', [])] for p in peer_values])) 
+
+        """ Not needed due to new similarity function
+        user_item_pairs = self.pref_db.getRecentPeersPrefs('last_connected',num_peers)
+        for pid,tid in user_item_pairs:
+            self.peers[pid][PEER_PREF_POS].append(tid)
+        """
+        #print >> sys.stderr, '**************** loadAllPeers', len(self.peers)
+
+#        for pid in self.peers:
+#            self.peers[pid][PEER_PREF_POS].sort()    # keep in order
+
+    def updateAllSim(self, delay=4, batch=50, update_interval=10):
+        """ Recompute similarity for all cached peers; db writes are batched
+            and spread out over time (see the tuning notes below). """
+        self._updateAllPeerSim(delay, batch, update_interval)    # 0.156 second
+        
+        #Disabled Torrent Relevancy since 5.0
+        #self._updateAllItemRel(delay, batch, update_interval)    # 0.875 second
+        # Tuning batch (without index relevance)
+        
+        # batch = 25:                             0.00 0.22 0.58
+        # batch = 50: min/avg/max execution time: 0.09 0.29 0.63 second 
+        # batch = 100:                            0.16 0.47 0.95
+        # update_interval=10
+        # 50000 updates take: 50000 / 50 * (10+0.3) / 3600 = 3 hours
+        # cpu load: 0.3/10 = 3%
+        
+        # With index relevance:
+        # batch = 50: min/avg/max execution time: 0.08 0.62 1.39 second
+        # batch = 25:                             0.00 0.41 1.67
+        # update_interval=5, batch=25
+        # 50000 updates take: 50000 / 25 * (5+0.4) / 3600 = 3 hours
+        # cpu load: 0.4/5 = 8%
+        
+    def cacheSimUpdates(self, update_table, updates, delay, batch, update_interval):
+        """ Merge 'updates' ({id: sim}) into the pending cache for
+            update_table ('peer' or 'torrent') and schedule a drain pass
+            on the overlay thread after 'delay' seconds. """
+        self.cached_updates[update_table].update(updates)
+        self.overlay_bridge.add_task(lambda:self.checkSimUpdates(batch, update_interval), delay, 'checkSimUpdates')
+        
+    def checkSimUpdates(self, batch, update_interval):
+        """ Drain the cached similarity updates in shuffled batches.
+
+            Schedules one db write per table, 'update_interval' seconds apart,
+            and reschedules itself as long as entries remain.
+        """
+        last_update = 0
+        if self.cached_updates['peer']:
+            updates = []
+            update_peers = self.cached_updates['peer']
+            keys = update_peers.keys()
+            shuffle(keys)   # to avoid always update the same items when cacheSimUpdates is called frequently
+            for key in keys[:batch]:
+                updates.append((update_peers.pop(key), key))
+            self.overlay_bridge.add_task(lambda:self.peer_db.updatePeerSims(updates), last_update + update_interval, 'updatePeerSims')
+            last_update += update_interval 
+            
+        if self.cached_updates['torrent']:
+            updates = []
+            update_peers = self.cached_updates['torrent'] 
+            keys = update_peers.keys()
+            shuffle(keys)   
+            for key in keys[:batch]:
+                updates.append((update_peers.pop(key), key))
+            self.overlay_bridge.add_task(lambda:self.torrent_db.updateTorrentRelevances(updates), last_update + update_interval, 'updateTorrentRelevances')
+            last_update += update_interval
+            
+        if self.cached_updates['peer'] or self.cached_updates['torrent']:
+            self.overlay_bridge.add_task(lambda:self.checkSimUpdates(batch, update_interval), last_update+0.001, 'checkSimUpdates')
+        
+    def _updateAllPeerSim(self, delay, batch, update_interval):
+        # update similarity to all peers to keep consistent
+        #if self.old_peer_num == len(self.peers):    # if no new peers, don't update
+        #    return
+        
+        #call full_update
+        updates = {}
+        if len(self.myprefs) > 0:
+           not_peer_id = self.getPeerID(self.permid)
+           similarities = P2PSim_Full(self.simi_db.getPeersWithOverlap(not_peer_id, self.myprefs), len(self.myprefs))
+            
+           for peer_id in self.peers:
+               if peer_id in similarities:
+                   oldsim = self.peers[peer_id][PEER_SIM_POS]
+                   sim = similarities[peer_id]
+                   updates[peer_id] = sim
+
+        #print >> sys.stderr, '****************** update peer sim', len(updates), len(self.peers)        
+        if updates:
+            self.cacheSimUpdates('peer', updates, delay, batch, update_interval)
+                        
+    def _updateAllItemRel(self, delay, batch, update_interval):
+        # update all item's relevance
+        # Relevance of I = Sum(Sim(Users who have I)) + Poplarity(I)
+        # warning: this function may take 5 seconds to commit to the database
+        # NOTE: the entire body below is disabled (wrapped in a string literal);
+        # calling this method is a no-op.
+        
+        """
+        Disabled, not in use since v5.0
+        
+        if len(self.peers) == 0:
+            return
+        tids = {}
+        nsimpeers = 0
+        for peer_id in self.peers:
+            if self.peers[peer_id][PEER_PREF_POS]:
+                sim = self.peers[peer_id][PEER_SIM_POS]
+                if sim > 0:
+                    nsimpeers += 1
+                    prefs = self.peers[peer_id][PEER_PREF_POS]
+                    for tid in prefs:
+                        if tid not in tids:
+                            tids[tid] = [0,0]
+                        tids[tid][0] += sim
+                        tids[tid][1] += 1
+
+        if len(tids) == 1:
+            return
+        
+        res = self.torrent_db.getTorrentRelevances(tids)
+        if res:
+            old_rels = dict(res)
+        else:
+            old_rels = {}
+        #print >> sys.stderr, '********* update all item rel', len(old_rels), len(tids) #, old_rels[:10]
+        
+        for tid in tids.keys():
+            tids[tid] = tids[tid][0]/tids[tid][1] + tids[tid][1]
+            old_rel = old_rels.get(tid, None)
+            if old_rel != None and abs(old_rel - tids[tid]) <= old_rel*0.05:
+                tids.pop(tid)   # don't update db
+            
+        #print >> sys.stderr, '**************--- update all item rel', len(tids), len(old_rels) #, len(self.peers), nsimpeers, tids.items()[:10]  # 37307 2500
+        if tids:
+            self.cacheSimUpdates('torrent', tids, delay, batch, update_interval)
+        """
+
+    def sesscb_ntfy_myprefs(self,subject,changeType,objectID,*args):
+        """ Called by SessionCallback thread """
+        if DEBUG:
+            print >>sys.stderr,"bc: sesscb_ntfy_myprefs:",subject,changeType,`objectID`
+        if subject == NTFY_MYPREFERENCES:
+            infohash = objectID
+            if changeType == NTFY_INSERT:
+                op_my_pref_lambda = lambda:self.addMyPref(infohash)
+            elif changeType == NTFY_DELETE:
+                op_my_pref_lambda = lambda:self.delMyPref(infohash)
+            # Execute on OverlayThread
+            self.overlay_bridge.add_task(op_my_pref_lambda, 0)
+
+
+    def addMyPref(self, infohash):
+        infohash_str=bin2str(infohash)
+        torrentdata = self.torrent_db.getOne(('secret', 'torrent_id'), infohash=infohash_str)
+        if not torrentdata:
+            return
+        
+        secret = torrentdata[0]
+        torrent_id = torrentdata[1]
+        if secret:
+            if DEBUG:
+                print >> sys.stderr, 'bc: Omitting secret download: %s' % torrentdata.get('info', {}).get('name', 'unknown')
+            return # do not buddycast secret downloads
+        
+        if torrent_id not in self.myprefs:
+            insort(self.myprefs, torrent_id)
+            self.old_peer_num = 0
+            self.updateAllSim() # time-consuming
+            #self.total_pref_changed += self.update_i2i_threshold
+            
+    def delMyPref(self, infohash):
+        torrent_id = self.torrent_db.getTorrentID(infohash)
+        if torrent_id in self.myprefs:
+            self.myprefs.remove(torrent_id)
+            self.old_peer_num = 0
+            self.updateAllSim()
+            #self.total_pref_changed += self.update_i2i_threshold
+
+    def initRemoteSearchPeers(self, num_peers=10):
+        peer_values = self.peer_db.getAll(['permid','oversion','num_torrents','last_seen'], order_by='last_seen desc', limit=num_peers)
+        for p in peer_values:
+            p = list(p)
+            p[0] = str2bin(p[0])
+            self.buddycast_core.addRemoteSearchPeer(*tuple(p))
+        pass
+
+    def getMyLivePreferences(self, selversion, num=0):
+        """ Get a number of my preferences. Get all if num==0 """
+        #Rahim
+        # Pick the richest representation the negotiated overlay protocol
+        # version supports: OL11 adds swarm-size info, OL8 adds clicklog.
+        if selversion >= OLPROTO_VER_ELEVENTH:
+            return self.mypref_db.getRecentLivePrefListOL11(num) # return a list of preferences with clicklog and swarm size info.
+        
+        elif selversion>=OLPROTO_VER_EIGHTH:
+            return self.mypref_db.getRecentLivePrefListWithClicklog(num)
+        
+        else:
+            return self.mypref_db.getRecentLivePrefList(num)
+        
+    def getPeerSim(self, peer_permid, read_db=False, raw=False):
+        """ Return the similarity of a peer, from the db (read_db=True) or the
+            cache. With raw=False the sign is stripped; a negative cached value
+            marks a similarity estimated via other peers (see addRelativeSim).
+        """
+        if read_db:
+            sim = self.peer_db.getPeerSim(peer_permid)
+        else:            
+            peer_id = self.getPeerID(peer_permid)
+            if peer_id is None or peer_id not in self.peers:
+                sim = 0
+            else:
+                sim = self.peers[peer_id][PEER_SIM_POS]
+        if sim is None:
+            sim = 0
+        if not raw:
+            # negative value means it is calculated from other peers, 
+            # not itself. See addRelativeSim()
+            return abs(sim)
+        else:
+            return sim
+        
+    def getPeerLastSeen(self, peer_permid):
+        """ Return the cached last-seen timestamp for a permid (0 if unknown). """
+        peer_id = self.getPeerID(peer_permid)
+        return self.getPeerIDLastSeen(peer_id)
+        
+    def getPeerIDLastSeen(self, peer_id):
+        """ Return the cached last-seen timestamp for a peer_id (0 if not cached). """
+        if not peer_id or peer_id not in self.peers:
+            return 0
+        #print >> sys.stderr, '***** getPeerLastSeen', self.peers[pefer_permid], `peer_permid`
+        return self.peers[peer_id][PEER_LASTSEEN_POS]
+    
+    def getPeerPrefList(self, peer_permid):
+        """ Return the full preference list (torrent ids) of the given peer.
+            (The previous docstring mentioned 'num' and 'live' parameters that
+            this method does not take.)
+        """
+        return self.pref_db.getPrefList(peer_permid)
+    
+#    def addPeer(self, peer_permid, last_seen, peer_data=None, commit=True):  
+#        """ add a peer from buddycast message to both cache and db """
+#        
+#        if peer_permid != self.permid:
+#            if peer_data is not None:
+#                self._addPeerToDB(peer_permid, last_seen, peer_data, commit=commit)
+#            self._addPeerToCache(peer_permid, last_seen)    
+
+    def _addPeerToCache(self, peer_permid, last_seen):
+        """ add a peer to cache """
+        # Secure Overlay should have added this peer to database.
+        if peer_permid == self.permid:
+            return
+        peer_id = self.getPeerID(peer_permid)
+        assert peer_id != None, `peer_permid`
+        if peer_id not in self.peers:
+            sim = self.peer_db.getPeerSim(peer_permid)
+            peerprefs = self.pref_db.getPrefList(peer_permid)    # [torrent_id]
+            self.peers[peer_id] = [last_seen, sim, array('l', peerprefs)]    # last_seen, similarity, pref
+        else:
+            self.peers[peer_id][PEER_LASTSEEN_POS] = last_seen
+                    
+    def _addPeerToDB(self, peer_permid, peer_data, commit=True):
+        
+        if peer_permid == self.permid:
+            return
+        new_peer_data = {}
+        try:
+            new_peer_data['permid'] = peer_data['permid']
+            new_peer_data['ip'] = hostname_or_ip2ip(peer_data['ip'])
+            new_peer_data['port'] = peer_data['port']
+            new_peer_data['last_seen'] = peer_data['last_seen']
+            if peer_data.has_key('name'):
+                new_peer_data['name'] = dunno2unicode(peer_data['name']) # store in db as unicode
+
+            self.peer_db.addPeer(peer_permid, new_peer_data, update_dns=True, commit=commit)
+            
+        except KeyError:
+            print_exc()
+            print >> sys.stderr, "bc: _addPeerToDB has KeyError"
+        except socket.gaierror:
+            print >> sys.stderr, "bc: _addPeerToDB cannot find host by name", peer_data['ip']
+        except:
+            print_exc()
+            
+    def addInfohashes(self, infohash_list, commit=True):
+        for infohash in infohash_list:
+            self.torrent_db.addInfohash(infohash, commit=False)    # it the infohash already exists, it will skip it
+        if commit:
+            self.torrent_db.commit()
+                
+    def addPeerPreferences(self, peer_permid, prefs, selversion, recvTime, commit=True):
+        """ add a peer's preferences to both cache and db """
+        # Never record my own prefs as a remote peer's.
+        if peer_permid == self.permid:
+            return 0
+        
+        cur_prefs = self.getPeerPrefList(peer_permid)
+        if not cur_prefs:
+            cur_prefs = []
+        prefs2add = []
+        #Rahim: It is possible that, a peer receive info about same torrent in
+        # different rounds. New torrents are handled by adding them to prefs2add 
+        # list and adding them. If the peer receive same torrent more than 
+        # once, the current version ignores it. But the swarm size is 
+        # dynamic so the next torrents may have different swarm size info. So 
+        # we should handle them as well.
+        #
+        pops2update = [] # a new list that contains already available torrents.  
+        for pref in prefs:
+            infohash = pref['infohash'] # Nicolas: new dictionary format of OL 8 preferences
+            torrent_id = self.torrent_db.getTorrentID(infohash)
+            if not torrent_id:
+                print >> sys.stderr, "buddycast: DB Warning: infohash", bin2str(infohash), "should have been inserted into db, but was not found"
+                continue
+            pref['torrent_id'] = torrent_id
+            if torrent_id not in cur_prefs:
+                prefs2add.append(pref)
+                cur_prefs.append(torrent_id)
+            elif selversion >= OLPROTO_VER_ELEVENTH:
+                pops2update.append(pref) # already available preference is appended to this list.
+                
+                
+        if len(prefs2add) > 0:
+            # new prefs change the overlap, so recompute this peer's similarity
+            self.pref_db.addPreferences(peer_permid, prefs2add, recvTime, is_torrent_id=True, commit=commit) 
+            peer_id = self.getPeerID(peer_permid)
+            self.updateSimilarity(peer_id, commit=commit)
+            
+        if len(pops2update)>0:
+            self.pops_db.addPopularityRecord(peer_permid, pops2update, selversion, recvTime, is_torrent_id=True, commit=commit)
+    
+    def addCollectedTorrentsPopularity(self, peer_permid, colls, selversion, recvTime, commit=True):
+        """
+        This method adds/updats the popularity of the collected torrents that is received 
+        through BuddyCast message.  
+        @param peer_permid: perm_id of the sender of BC message. 
+        @param param: colls: A dictionary that contains a subset of collected torrents by the sender of BC.
+        @param selversion: The overlay protocol version that both sides agreed on. 
+        @param recvTime: receive time of the message. 
+        @param commit: whether or not to do database commit. 
+        @author: Rahim 11-02-2010
+        """
+        if peer_permid == self.permid:
+            return 0
+    
+        # Popularity info only exists from overlay protocol v11 onward.
+        if selversion < OLPROTO_VER_ELEVENTH:
+            return 0 
+        
+        pops2update = []
+        
+        for coll in colls:
+            infohash = coll['infohash']
+            torrent_id = self.torrent_db.getTorrentID(infohash)
+            if not torrent_id:
+                print >> sys.stderr, "buddycast: DB Warning: infohash", bin2str(infohash), "should have been inserted into db, but was not found"
+                continue
+            coll['torrent_id'] = torrent_id
+            pops2update.append(coll) 
+            
+        if len(pops2update)>0:
+            self.pops_db.addPopularityRecord(peer_permid, pops2update, selversion, recvTime, is_torrent_id=True, commit=commit)
+    
+            
+    def updateSimilarity(self, peer_id, update_db=True, commit=True):
+        """ update a peer's similarity """
+        
+        if len(self.myprefs) == 0:
+            return
+        
+        sim = P2PSim_Single(self.simi_db.getOverlapWithPeer(peer_id, self.myprefs), len(self.myprefs));
+        self.peers[peer_id][PEER_SIM_POS] = sim
+        if update_db and sim>0:
+            self.peer_db.updatePeerSims([(sim,peer_id)], commit=commit)
+    
+#    def increaseBuddyCastTimes(self, peer_permid, commit=True):
+#        self.peer_db.updateTimes(peer_permid, 'buddycast_times', 1, commit=False)
+#        self.peer_db.updatePeer(peer_permid, commit=commit, last_buddycast=now())
+
+    def getPeer(self, permid, keys=None):
+        """ Return a peer record (all fields, or just 'keys') from the db. """
+        return self.peer_db.getPeer(permid, keys)
+
+    def addRelativeSim(self, sender_permid, peer_permid, sim, max_sim):
+        """ Estimate a peer's similarity transitively via the sender and cache
+            it as a negative value (marker for 'inferred, not measured'). """
+        # Given Sim(I, A) and Sim(A, B), predict Sim(I, B)
+        # Sim(I, B) = Sim(I, A)*Sim(A, B)/Max(Sim(A,B)) for all B
+        old_sim = self.getPeerSim(peer_permid, raw=True)
+        if old_sim > 0:    # its similarity has been calculated based on its preferences
+            return
+        old_sim = abs(old_sim)
+        sender_sim = self.getPeerSim(sender_permid)
+        new_sim = sender_sim*sim/max_sim
+        if old_sim == 0:
+            peer_sim = new_sim    
+        else:
+            # average with a previous transitive estimate
+            peer_sim = (new_sim + old_sim)/2
+        peer_sim = -1*peer_sim
+        # using negative value to indicate this sim comes from others
+        peer_id = self.getPeerID(peer_permid)
+        self.peers[peer_id][PEER_SIM_POS] = peer_sim
+        
+    def get_npeers(self):
+        if self.num_peers_ui is None:
+            return len(self.peers)    # changed to this according to Maarten's suggestion
+        else:
+            return self.num_peers_ui
+
+    def get_ntorrents(self):
+        """ Number of collected torrents: the UI-supplied count if set, else a
+            db count cached for 5 minutes. """
+        if self.num_torrents_ui is None:
+            _now = now()
+            if _now - self.last_check_ntorrents > 5*60:
+                self.ntorrents = self.torrent_db.getNumberCollectedTorrents()
+                self.last_check_ntorrents = _now
+            return self.ntorrents
+        else:
+            return self.num_torrents_ui
+        
+    def get_nmyprefs(self):
+        """ Number of my own preferences (downloads). """
+        return len(self.myprefs)
+    
+#    def updatePeerLevelStats(self,permid,npeers,ntorrents,nprefs,commit=True):
+#        d = {'num_peers':npeers,'num_torrents':ntorrents,'num_prefs':nprefs}
+#        self.peer_db.updatePeer(permid, commit=commit, **d)
+        
+#    def getAllPeerList(self):
+#        return self.all_peer_list
+#    
+#    def removeAllPeerList(self):
+#        self.all_peer_list = None
+#        
+#    def setNumPeersFromUI(self, num):
+#        self.num_peers_ui = num
+#        
+#    def setNumTorrentsFromUI(self, num):    # not thread safe
+#        self.num_torrents_ui = num
+    
+    def handleBCData(self, cache_db_data, cache_peer_data, sender_permid, max_tb_sim, selversion, recvTime):
+        #self.data_handler.addPeer(peer_permid, last_seen, new_peer_data, commit=True)    # new peer
+        #self.data_handler.increaseBuddyCastTimes(sender_permid, commit=True)
+        #self.data_handler.addInfohashes(infohashes, commit=True)
+        
+        #self.data_handler._addPeerToCache(peer_permid, last_seen)
+        #self.data_handler.addRelativeSim(sender_permid, peer_permid, sim, max_tb_sim)
+        
+        #self.data_handler.addPeerPreferences(sender_permid, prefs)
+
+        #print >>sys.stderr,"bc: handleBCData:",`cache_db_data`
+
+
+        ADD_PEER = 1
+        UPDATE_PEER = 2
+        ADD_INFOHASH = 3
+        
+        peer_data = cache_db_data['peer']
+        db_writes = []
+        for permid in peer_data:
+            new_peer = peer_data[permid]
+            old_peer = self.peer_db.getPeer(permid)
+            if not old_peer:
+                if permid == sender_permid:
+                    new_peer['buddycast_times'] = 1
+                db_writes.append((ADD_PEER, permid, new_peer))
+            else:
+                #print old_peer
+                old_last_seen = old_peer['last_seen']
+                new_last_seen = new_peer['last_seen']
+                if permid == sender_permid:
+                    if not old_peer['buddycast_times']:
+                        new_peer['buddycast_times'] = 1
+                    else:
+                        new_peer['buddycast_times'] =  + 1
+                if not old_last_seen or new_last_seen > old_last_seen + 4*60*60:
+                    # don't update if it was updated in 4 hours
+                    for k in new_peer.keys():
+                        if old_peer[k] == new_peer[k]:
+                            new_peer.pop(k)
+                if new_peer:
+                    db_writes.append((UPDATE_PEER, permid, new_peer))
+                
+        for infohash in cache_db_data['infohash']:
+            tid = self.torrent_db.getTorrentID(infohash)
+            if tid is None:
+                db_writes.append((ADD_INFOHASH, infohash))
+
+        for item in db_writes:
+            if item[0] == ADD_PEER:
+                permid = item[1]
+                new_peer = item[2]
+                # Arno, 2008-09-17: Don't use IP data from BC message, network info gets precedence
+                updateDNS = (permid != sender_permid)
+                self.peer_db.addPeer(permid, new_peer, update_dns=updateDNS, commit=False)
+            elif item[0] == UPDATE_PEER:
+                permid = item[1]
+                new_peer = item[2]
+                # Arno, 2008-09-17: Don't use IP data from BC message, network info gets precedence
+                updateDNS = (permid != sender_permid)
+                if not updateDNS:
+                    if 'ip' in new_peer:
+                        del new_peer['ip']
+                    if 'port' in new_peer:
+                        del new_peer['port']
+                self.peer_db.updatePeer(permid, commit=False, **new_peer)
+            elif item[0] == ADD_INFOHASH:
+                infohash = item[1]
+                self.torrent_db.addInfohash(infohash, commit=False)
+                
+        #self.torrent_db._db.show_sql(1)
+        self.torrent_db.commit()
+        #self.torrent_db._db.show_sql(0)
+                
+        for item in db_writes:
+            if item[0] == ADD_PEER or item[0] == UPDATE_PEER:
+                permid = item[1]
+                new_peer = item[2]
+                last_seen = new_peer['last_seen']
+                self._addPeerToCache(permid, last_seen)
+        
+        for permid in peer_data:
+            if 'sim' in peer_data[permid]:
+                sim = peer_data[permid]['sim']
+                self.addRelativeSim(sender_permid, permid, sim, max_tb_sim)
+
+        #self.torrent_db._db.show_sql(1)
+        self.torrent_db.commit()
+        #self.torrent_db._db.show_sql(0)
+        
+        # Nicolas: moved this block *before* the call to addPeerPreferences because with the clicklog,
+        # this in fact writes to several different databases, so it's easier to tell it to commit
+        # right away. hope this is ok
+        
+        # Nicolas 2009-03-30: thing is that we need to create terms and their generated ids, forcing at least one commit in-between
+        # have to see later how this might be optimized. right now, there's three commits:
+        # before addPeerPreferences, after bulk_insert, and after storing clicklog data
+                
+        if cache_db_data['pref']:
+            self.addPeerPreferences(sender_permid, 
+                                    cache_db_data['pref'], selversion, recvTime, 
+                                    commit=True)
+            
+        # Arno, 2010-02-04: Since when are collected torrents also a peer pref?
+
+        if cache_db_data['coll']:
+            self.addCollectedTorrentsPopularity(sender_permid, 
+                                    cache_db_data['coll'], selversion, recvTime, 
+                                    commit=True)
+        
+                
+        #print hash(k), peer_data[k]
+        #cache_db_data['infohash']
+        #cache_db_data['pref']
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/channelcast.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/channelcast.py
new file mode 100644 (file)
index 0000000..871ca03
--- /dev/null
@@ -0,0 +1,382 @@
+# Written by Nitin Chiluka\r
+# see LICENSE.txt for license information\r
+\r
+import sys\r
+from time import time, ctime, sleep\r
+from zlib import compress, decompress\r
+from binascii import hexlify\r
+from traceback import print_exc, print_stack\r
+from types import StringType, ListType, DictType\r
+from random import randint, sample, seed, random\r
+from sha import sha\r
+from sets import Set\r
+\r
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode\r
+from BaseLib.Core.Statistics.Logger import OverlayLogger\r
+from BaseLib.Core.BitTornado.BT1.MessageID import CHANNELCAST, BUDDYCAST\r
+from BaseLib.Core.CacheDB.CacheDBHandler import ChannelCastDBHandler, VoteCastDBHandler\r
+from BaseLib.Core.Utilities.unicode import str2unicode\r
+from BaseLib.Core.Utilities.utilities import *\r
+from BaseLib.Core.Overlay.permid import permid_for_user,sign_data,verify_data\r
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin, NULL\r
+from BaseLib.Core.CacheDB.Notifier import Notifier\r
+from BaseLib.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler\r
+from BaseLib.Core.BuddyCast.moderationcast_util import *\r
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_THIRTEENTH,\\r
+    OLPROTO_VER_FOURTEENTH\r
+from BaseLib.Core.simpledefs import NTFY_CHANNELCAST, NTFY_UPDATE\r
+from BaseLib.Core.Subtitles.RichMetadataInterceptor import RichMetadataInterceptor\r
+from BaseLib.Core.CacheDB.MetadataDBHandler import MetadataDBHandler\r
+from BaseLib.Core.Subtitles.PeerHaveManager import PeersHaveManager\r
+from BaseLib.Core.Subtitles.SubtitlesSupport import SubtitlesSupport\r
+\r
+DEBUG = False\r
+\r
+NUM_OWN_RECENT_TORRENTS = 15\r
+NUM_OWN_RANDOM_TORRENTS = 10\r
+NUM_OTHERS_RECENT_TORRENTS = 15\r
+NUM_OTHERS_RECENT_TORRENTS = 10 # NOTE(review): overwrites the 15 assigned just above; likely meant NUM_OTHERS_RANDOM_TORRENTS — confirm intended value\r
+\r
+RELOAD_FREQUENCY = 2*60*60\r
+\r
+class ChannelCastCore:\r
+    __single = None\r
+    TESTASSERVER = False # for unit testing\r
+\r
+    def __init__(self, data_handler, overlay_bridge, session, buddycast_interval_function, log = '', dnsindb = None):\r
+        """ Returns an instance of this class """\r
+        #Keep reference to interval-function of BuddycastFactory\r
+        self.interval = buddycast_interval_function\r
+        self.data_handler = data_handler\r
+        self.dnsindb = dnsindb\r
+        self.log = log\r
+        self.overlay_bridge = overlay_bridge\r
+        self.channelcastdb = ChannelCastDBHandler.getInstance()\r
+        self.votecastdb = VoteCastDBHandler.getInstance()\r
+        self.rtorrent_handler = RemoteTorrentHandler.getInstance()\r
+        self.my_permid = self.channelcastdb.my_permid\r
+        self.session = session\r
+        \r
+        self.network_delay = 30\r
+        #Reference to buddycast-core, set by the buddycast-core (as it is created by the\r
+        #buddycast-factory after calling this constructor).\r
+        self.buddycast_core = None\r
+        \r
+        #Extend logging with ChannelCast-messages and status\r
+        if self.log:\r
+            self.overlay_log = OverlayLogger.getInstance(self.log)\r
+            self.dnsindb = self.data_handler.get_dns_from_peerdb\r
+        \r
+        self.hits = []\r
+        \r
+        self.notifier = Notifier.getInstance()\r
+\r
+        self.metadataDbHandler = MetadataDBHandler.getInstance()\r
+        \r
+        #subtitlesHandler = SubtitlesHandler.getInstance()\r
+        subtitleSupport = SubtitlesSupport.getInstance()\r
+        # better if an instance of RMDInterceptor was provided from the\r
+        # outside\r
+        self.peersHaveManger = PeersHaveManager.getInstance()\r
+        if not self.peersHaveManger.isRegistered():\r
+                self.peersHaveManger.register(self.metadataDbHandler, self.overlay_bridge)\r
+        self.richMetadataInterceptor = RichMetadataInterceptor(self.metadataDbHandler,self.votecastdb,\r
+                                                               self.my_permid, subtitleSupport, self.peersHaveManger,\r
+                                                               self.notifier)\r
+        \r
+        \r
+\r
+    \r
+    def initialized(self):\r
+        return self.buddycast_core is not None\r
\r
+\r
+\r
+    def getInstance(*args, **kw):\r
+        if ChannelCastCore.__single is None:\r
+            ChannelCastCore(*args, **kw)\r
+        return ChannelCastCore.__single\r
+    getInstance = staticmethod(getInstance)\r
+\r
+   \r
+    def createAndSendChannelCastMessage(self, target_permid, selversion):\r
+        """ Create and send a ChannelCast Message """\r
+        # ChannelCast feature starts from eleventh version; hence, do not send to lower version peers\r
+        # Arno, 2010-02-05: v12 uses a different on-the-wire format, ignore those.\r
+        \r
+        # Andrea, 2010-04-08: sending the "old-style" channelcast message to older\r
+        # peers, and enriched channelcast messages to new versions, for full backward\r
+        # compatibility\r
+        if selversion < OLPROTO_VER_THIRTEENTH:\r
+            if DEBUG:\r
+                print >> sys.stderr, "channelcast: Do not send to lower version peer:", selversion\r
+            return\r
+        \r
+        # 3/5/2010 Andrea: adding the destination parameters to createChannelCastMessage for\r
+        # logging reasons only. When logging will be disabled, that parameter will\r
+        # become useless\r
+        channelcast_data = self.createChannelCastMessage(selversion, target_permid)\r
+        if channelcast_data is None or len(channelcast_data)==0:\r
+            if DEBUG:\r
+                print >>sys.stderr, "channelcast: No channels there.. hence we do not send"\r
+            return\r
+        channelcast_msg = bencode(channelcast_data)\r
+        \r
+        if self.log:\r
+            dns = self.dnsindb(target_permid)\r
+            if dns:\r
+                ip,port = dns\r
+                MSG_ID = "CHANNELCAST"\r
+                msg = repr(channelcast_data)\r
+                self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID, msg)\r
+        \r
+        data = CHANNELCAST + channelcast_msg\r
+        self.overlay_bridge.send(target_permid, data, self.channelCastSendCallback)        \r
+        #if DEBUG: print >> sys.stderr, "channelcast: Sent channelcastmsg",repr(channelcast_data)\r
+    \r
+    def createChannelCastMessage(self, selversion, dest_permid=None):\r
+        """ \r
+        Create a ChannelCast Message \r
+        \r
+        @param selversion: the protocol version of the destination\r
+        @param dest_permid: the destination of the message. Actually this parameter is not really needed. If \r
+                            not none, it is used for logging purposes only\r
+                            \r
+        @return a channelcast message, possibly enrich with rich metadata content in the\r
+                case selversion is sufficiently high\r
+        """\r
+        # 09-04-2010 Andrea: I added the selversion param, to intercept and modify\r
+        # the ChannelCast message contents if the protocol version allows rich metadata\r
+        # enrichment\r
+        \r
+        if DEBUG: \r
+            print >> sys.stderr, "channelcast: Creating channelcastmsg..."\r
+        \r
+        hits = self.channelcastdb.getRecentAndRandomTorrents(NUM_OWN_RECENT_TORRENTS,NUM_OWN_RANDOM_TORRENTS,NUM_OTHERS_RECENT_TORRENTS,NUM_OTHERS_RECENT_TORRENTS)\r
+        # 3/5/2010 Andrea:  \r
+        # hits is of the form: [(mod_id, mod_name, infohash, torrenthash, torrent_name, time_stamp, signature)]\r
+        # adding the destination parameter to buildChannelcastMessageFrom Hits for\r
+        # logging reasons only. When logging will be disabled, that parameter will\r
+        # become useless\r
+        d = self.buildChannelcastMessageFromHits(hits, selversion, dest_permid)\r
+#        #assert validChannelCastMsg(d)\r
+        return d\r
+    \r
+    def channelCastSendCallback(self, exc, target_permid, other=0):\r
+        if DEBUG:\r
+            if exc is None:\r
+                print >> sys.stderr,"channelcast: *** msg was sent successfully to peer", show_permid_short(target_permid)\r
+            else:\r
+                print >> sys.stderr, "channelcast: *** warning - error in sending msg to", show_permid_short(target_permid), exc\r
\r
+    def gotChannelCastMessage(self, recv_msg, sender_permid, selversion):\r
+        """ Receive and handle a ChannelCast message """\r
+        # ChannelCast feature starts from eleventh version; hence, do not receive from lower version peers\r
+        # Arno, 2010-02-05: v12 uses a different on-the-wire format, ignore those.\r
+        \r
+        # Andrea: 2010-04-08: v14 can still receive v13 channelcast messages\r
+        if selversion < OLPROTO_VER_THIRTEENTH:\r
+            if DEBUG:\r
+                print >> sys.stderr, "channelcast: Do not receive from lower version peer:", selversion\r
+            return True\r
+                \r
+        if DEBUG:\r
+            print >> sys.stderr,'channelcast: Received a msg from ', show_permid_short(sender_permid)\r
+            print >> sys.stderr,"channelcast: my_permid=", show_permid_short(self.my_permid)\r
+\r
+        if not sender_permid or sender_permid == self.my_permid:\r
+            if DEBUG:\r
+                print >> sys.stderr, "channelcast: warning - got channelcastMsg from a None/Self peer", \\r
+                        show_permid_short(sender_permid), recv_msg\r
+            return False\r
+\r
+        #if len(recv_msg) > self.max_length:\r
+        #    if DEBUG:\r
+        #        print >> sys.stderr, "channelcast: warning - got large channelCastHaveMsg", len(recv_msg)\r
+        #    return False\r
+\r
+        channelcast_data = {}\r
+\r
+        try:\r
+            channelcast_data = bdecode(recv_msg)\r
+        except:\r
+            print >> sys.stderr, "channelcast: warning, invalid bencoded data"\r
+            return False\r
+\r
+        # check message-structure\r
+        if not validChannelCastMsg(channelcast_data):\r
+            print >> sys.stderr, "channelcast: invalid channelcast_message"\r
+            return False\r
+\r
+        # 19/02/10 Boudewijn: validChannelCastMsg passes when\r
+        # PUBLISHER_NAME and TORRENTNAME are either string or\r
+        # unicode-string.  However, all further code requires that\r
+        # these are unicode!\r
+        for ch in channelcast_data.values():\r
+            if isinstance(ch["publisher_name"], str):\r
+                ch["publisher_name"] = str2unicode(ch["publisher_name"])\r
+            if isinstance(ch["torrentname"], str):\r
+                ch["torrentname"] = str2unicode(ch["torrentname"])\r
+\r
+        self.handleChannelCastMsg(sender_permid, channelcast_data)\r
+        \r
+        #Log RECV_MSG of uncompressed message\r
+        if self.log:\r
+            dns = self.dnsindb(sender_permid)\r
+            if dns:\r
+                ip,port = dns\r
+                MSG_ID = "CHANNELCAST"\r
+                # 08/04/10 Andrea: representing the whole channelcast  + metadata message\r
+                msg = repr(channelcast_data)\r
+                self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg)\r
\r
+        if self.TESTASSERVER:\r
+            self.createAndSendChannelCastMessage(sender_permid, selversion)\r
+            \r
+        return True       \r
+\r
+    def handleChannelCastMsg(self, sender_permid, data):\r
+        self._updateChannelInternal(sender_permid, None, data)\r
+\r
+    def updateChannel(self,query_permid, query, hits):\r
+        """\r
+        This function is called when there is a reply from remote peer regarding updating of a channel\r
+        @param query_permid: the peer who returned the results\r
+        @param query: the query string (None if this is not the results of a query) \r
+        @param hits: details of all matching results related to the query\r
+        """\r
+        \r
+        return self._updateChannelInternal(query_permid, query, hits)\r
+    \r
+        \r
+        \r
+    def _updateChannelInternal(self, query_permid, query, hits):\r
+        listOfAdditions = list()\r
+        \r
+        # a single read from the db is more efficient\r
+        all_spam_channels = self.votecastdb.getPublishersWithNegVote(bin2str(self.session.get_permid()))\r
+        for k,v in hits.items():\r
+            #check if the record belongs to a channel who we have "reported spam" (negative vote)\r
+            if bin2str(v['publisher_id']) in all_spam_channels:\r
+                # if so, ignore the incoming record\r
+                continue\r
+            \r
+            # make everything into "string" format, if "binary"\r
+            hit = (bin2str(v['publisher_id']),v['publisher_name'],bin2str(v['infohash']),bin2str(v['torrenthash']),v['torrentname'],v['time_stamp'],bin2str(k))\r
+\r
+            listOfAdditions.append(hit)\r
+\r
+        # Arno, 2010-06-11: We're on the OverlayThread\r
+        self._updateChannelcastDB(query_permid, query, hits, listOfAdditions)\r
+        \r
+        ##return listOfAdditions\r
+                \r
+    \r
+    def _updateChannelcastDB(self, query_permid, query, hits, listOfAdditions):\r
+        \r
+        publisher_ids = Set()\r
+        \r
+        #08/04/10: Andrea: processing rich metadata part.\r
+        self.richMetadataInterceptor.handleRMetadata(query_permid, hits, fromQuery = query is not None)\r
+        \r
+        \r
+        tmp_hits = {} #"binary" key\r
+\r
+        def usercallback(infohash,metadata,filename):\r
+            if tmp_hits.has_key(infohash):\r
+                hit = tmp_hits[infohash]\r
+                if self.channelcastdb.addTorrent(hit):\r
+                    self.hits.append(hit)\r
+            else:\r
+                print >> sys.stderr, "channelcast: updatechannel: could not find infohash", bin2str(infohash)\r
+\r
+\r
+        for hit in listOfAdditions:\r
+            publisher_ids.add(hit[0])\r
+            infohash = str2bin(hit[2])\r
+            tmp_hits[infohash] = hit \r
+            # effectively v['infohash'] == str2bin(hit[2])\r
+\r
+\r
+            if self.channelcastdb.existsTorrent(infohash):\r
+                if self.channelcastdb.addTorrent(hit):\r
+                    self.hits.append(hit)\r
+            else:\r
+                self.rtorrent_handler.download_torrent(query_permid,infohash,usercallback)\r
+        \r
+        # Arno, 2010-02-24: Generate event\r
+        for publisher_id in publisher_ids:\r
+            try:\r
+                self.notifier.notify(NTFY_CHANNELCAST, NTFY_UPDATE, publisher_id)\r
+            except:\r
+                print_exc()\r
+\r
+                \r
+            \r
+\r
+\r
+    def updateMySubscribedChannels(self):\r
+        subscribed_channels = self.channelcastdb.getMySubscribedChannels()\r
+        for permid, channel_name, num_subscriptions, notused in subscribed_channels:\r
+            # query the remote peers, based on permid, to update the channel content\r
+            q = "CHANNEL p "+permid\r
+            self.session.query_connected_peers(q,usercallback=self.updateChannel)\r
+        \r
+        self.overlay_bridge.add_task(self.updateMySubscribedChannels, RELOAD_FREQUENCY)    \r
+\r
+\r
+    def buildChannelcastMessageFromHits(self, hits, selversion, dest_permid=None, fromQuery=False):\r
+        '''\r
+        Creates a channelcast message from database hits.\r
+        \r
+        This method is used to create channel results both when a channelcast message\r
+        is created in the "normal" buddycast epidemic protocol, and when a remote\r
+        query for channels arrives and is processed. It substitutes a lot of duplicated\r
+        code in the old versions.\r
+        \r
+        @param hits: a tuple (publisher_id, publisher_name, infohash, \r
+                     torrenthash, torrentname, time_stamp, signature) representing\r
+                     a channelcast entry in the db\r
+        @param selversion: the protocol version of the destination\r
+        @param dest_permid: the permid of the destination of the message. Actually this parameter\r
+                            is used for logging purposes only, when not None. If None, nothing\r
+                            bad happens.\r
+        '''\r
+        # 09-04-2010 Andrea : I introduced this separate method because this code was \r
+        # duplicated in RemoteQueryMessageHandler\r
+        enrichWithMetadata = False\r
+        \r
+        if selversion >= OLPROTO_VER_FOURTEENTH:\r
+            enrichWithMetadata = True\r
+            if DEBUG:\r
+                print >> sys.stderr, "channelcast: creating enriched messages"\\r
+                    "since peer has version: ", selversion\r
+        d = {}\r
+        for hit in hits:\r
+            # ARNOUNICODE: temp fixes until data is sent not Base64-encoded\r
+             \r
+            # 08/04/10 Andrea: I substituted the keys with constants, otherwise a change here\r
+            # would break my code in the RichMetadataInterceptor\r
+            r = {}\r
+            r['publisher_id'] = str(hit[0]) # ARNOUNICODE: must be str\r
+            r['publisher_name'] = hit[1].encode("UTF-8")  # ARNOUNICODE: must be explicitly UTF-8 encoded\r
+            r['infohash'] = str(hit[2])     # ARNOUNICODE: must be str\r
+            r['torrenthash'] = str(hit[3])  # ARNOUNICODE: must be str\r
+            r['torrentname'] = hit[4].encode("UTF-8") # ARNOUNICODE: must be explicitly UTF-8 encoded\r
+            r['time_stamp'] = int(hit[5])\r
+            # hit[6]: signature, which is unique for any torrent published by a user\r
+            signature = hit[6]\r
+            d[signature] = r\r
+            \r
+\r
+        # 08/04/10 Andrea: intercepting a channelcast message and enriching it with\r
+        # subtitles information\r
+        # 3/5/2010 Andrea: adding the destination parameter to addRichMetadataContent for\r
+        # logging reasons only. When logging will be disabled, that parameter will\r
+        # become useless\r
+        if enrichWithMetadata:\r
+            d = self.richMetadataInterceptor.addRichMetadataContent(d, dest_permid, fromQuery)\r
+    \r
+        return d\r
+    \r
+        \r
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/moderationcast_util.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/moderationcast_util.py
new file mode 100644 (file)
index 0000000..39c77df
--- /dev/null
@@ -0,0 +1,147 @@
+# Written by Vincent Heinink and Rameez Rahman
+# see LICENSE.txt for license information
+#
+#Utilities for moderationcast (including databases)
+#
+import sys
+
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str, str2bin
+#For validity-checks
+from types import StringType, ListType, DictType
+from time import time
+from BaseLib.Core.BitTornado.bencode import bencode
+from BaseLib.Core.Overlay.permid import verify_data
+from os.path import exists, isfile
+from BaseLib.Core.Subtitles.RichMetadataInterceptor import validMetadataEntry
+
+
+DEBUG = False
+
+TIMESTAMP_IN_FUTURE = 5 * 60    # 5 minutes is okay
+
+#*****************Validity-checks*****************
+def validInfohash(infohash):
+    """ Returns True iff infohash is a valid infohash """
+    r = isinstance(infohash, str) and len(infohash) == 20
+    if not r:
+        if DEBUG:
+            print >>sys.stderr, "Invalid infohash: type(infohash) ==", str(type(infohash))+\
+            ", infohash ==", `infohash`
+    return r
+
+def validPermid(permid):
+    """ Returns True iff permid is a valid Tribler Perm-ID """
+    r = type(permid) == str and len(permid) <= 125
+    if not r:
+        if DEBUG:
+            print >>sys.stderr, "Invalid permid: type(permid) ==", str(type(permid))+\
+            ", permid ==", `permid`
+    return r
+
+def now():
+    """ Returns current-system-time in UTC, seconds since the epoch (type==int) """
+    return int(time())
+
+def validTimestamp(timestamp):
+    """ Returns True iff timestamp is a valid timestamp """
+    r = timestamp is not None and type(timestamp) == int and timestamp > 0 and timestamp <= now() + TIMESTAMP_IN_FUTURE
+    if not r:
+        if DEBUG:
+            print >>sys.stderr, "Invalid timestamp"
+    return r
+
+def validVoteCastMsg(data):
+    """ Returns True if VoteCastMsg is valid, i.e. a dict {permid: {'vote': int, 'time_stamp': int}} """
+    if data is None:
+        print >> sys.stderr, "data is None"
+        return False
+     
+    if not type(data) == DictType:
+        print >> sys.stderr, "data is not Dictionary"
+        return False
+    
+    for key,value in data.items():
+        #if DEBUG: 
+        #    print >>sys.stderr, "validvotecastmsg: ", repr(record)
+        if not validPermid(key):
+            if DEBUG:
+                print >> sys.stderr, "not valid permid: ", repr(key) 
+            return False
+        if not ('vote' in value and 'time_stamp' in value):
+            if DEBUG:
+                print >> sys.stderr, "validVoteCastMsg: key missing, got", value.keys()
+            return False
+        if not type(value['vote']) == int:
+            if DEBUG:
+                print >> sys.stderr, "Vote is not int: ", repr(value['vote']) 
+            return False
+        if not(value['vote']==2 or value['vote']==-1):
+            if DEBUG:
+                print >> sys.stderr, "Vote is not -1 or 2: ", repr(value['vote']) 
+            return False
+        if not type(value['time_stamp']) == int:
+            if DEBUG:
+                print >> sys.stderr, "time_stamp is not int: ", repr(value['time_stamp']) 
+            return False    
+    return True
+
+
+def validChannelCastMsg(channelcast_data):
+    """ Returns true if ChannelCastMsg is valid,
+    format: {'signature':{'publisher_id':, 'publisher_name':, 'infohash':, 'torrenthash':, 'torrentname':, 'time_stamp':, 'signature':}} 
+     """
+     
+        
+    if not isinstance(channelcast_data,dict):
+        return False
+    for signature, ch in channelcast_data.items():
+        if not isinstance(ch,dict):
+            if DEBUG:
+                print >>sys.stderr,"validChannelCastMsg: value not dict"
+            return False
+        
+        # 08-04-2010 We accept both 6 and 7 fields to allow
+        # compatibility with messages from older versions 
+        # the rich metadata field
+        length = len(ch)
+        if not 6 <= length <= 7:
+            if DEBUG:
+                print >>sys.stderr,"validChannelCastMsg: #keys!=7"
+            return False
+        if not ('publisher_id' in ch and 'publisher_name' in ch and 'infohash' in ch and 'torrenthash' in ch \
+                and 'torrentname' in ch and 'time_stamp' in ch):
+            if DEBUG:
+                print >>sys.stderr,"validChannelCastMsg: key missing"
+            return False
+        
+        if length == 7:
+            if 'rich_metadata' not in ch: #enriched Channelcast
+                if DEBUG:
+                    print >>sys.stderr,"validChannelCastMsg: key missing"
+                return False  # NOTE(review): was nested under 'if DEBUG', so a 7th non-metadata key passed validation when DEBUG was off
+            else:
+                if not validMetadataEntry(ch['rich_metadata']):
+                    print >> sys.stderr, "validChannelCastMsg: invalid rich metadata"
+                    return False
+                
+        
+        
+        if not (validPermid(ch['publisher_id']) and isinstance(ch['publisher_name'],str) \
+                and validInfohash(ch['infohash']) and validInfohash(ch['torrenthash'])
+                and isinstance(ch['torrentname'],str) and validTimestamp(ch['time_stamp'])):
+            if DEBUG:
+                print >>sys.stderr,"validChannelCastMsg: something not valid"
+            return False
+        # now, verify signature
+        # Nitin on Feb 5, 2010: Signature is validated using binary forms of permid, infohash, torrenthash fields
+        l = (ch['publisher_id'],ch['infohash'], ch['torrenthash'], ch['time_stamp'])
+        if not verify_data(bencode(l),ch['publisher_id'],signature):
+            if DEBUG:
+                print >>sys.stderr, "validChannelCastMsg: verification failed!"
+            return False
+    return True
+     
+#*************************************************
+
+def voteCastMsgToString(data):
+    return repr(data)
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/similarity.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/similarity.py
new file mode 100644 (file)
index 0000000..e1d97a4
--- /dev/null
@@ -0,0 +1,139 @@
+# Written by Jun Wang, Jie Yang
+# see LICENSE.txt for license information
+
+__fool_epydoc = 481
+"""
+Formulas: 
+ P(I|U) = sum{U'<-I} P(U'|U)    # U' has I in his profile
+   P(U'|U) = Sum{I}Pbs(U'|I)Pml(I|U)  # P2PSim
+   Pbs(U|I) = (c(U,I) + mu*Pml(U))/(Sum{U}c(U,I) + mu)   # mu=1 by tuning on tribler dataset
+   Pml(I|U) = c(U,I)/Sum{I}c(U,I)         
+   Pml(U) = Sum{I}c(U,I) / Sum{U,I}c(U,I) 
+   
+Data Structure:
+    preferences - U:{I|c(U,I)>0}, # c(U,I)    # Sum{I}c(U,I) = len(preferences[U])
+    owners - I:{U|c(U,I)>0}    # I:I:Sum{U}c(U,I) = len(owners[I])
+    userSim - U':P(U'|U)
+    itemSim - I:P(I|U)
+    total - Sum{U,I}c(U,I)     # Pml(U) = len(preferences[U])/total
+    
+Test:
+    Using hash(permid) as user id, hash(infohash) as torrent id
+    Incremental change == overall change
+"""
+
+from sets import Set
+
+def P2PSim(pref1, pref2):
+    """ Calculate simple similarity between peers """
+    
+    cooccurrence = len(Set(pref1) & Set(pref2))
+    if cooccurrence == 0:
+        return 0
+    normValue = (len(pref1)*len(pref2))**0.5
+    _sim = cooccurrence/normValue
+    sim = int(_sim*1000)    # use integer for bencode
+    return sim
+
+def getCooccurrence(pref1, pref2):    # pref1 and pref2 are sorted
+    i = 0
+    j = 0
+    co = 0
+    size1 = len(pref1)
+    size2 = len(pref2)
+    if size1 == 0 or size2 == 0:
+        return 0
+    while 1:
+        if (i>= size1) or (j>=size2): break
+        Curr_ID1 = pref1[i]
+        Curr_ID2 = pref2[j]
+        if Curr_ID1 < Curr_ID2 :
+            i=i+1
+        elif Curr_ID1 > Curr_ID2 :
+            j=j+1
+        else:
+            co +=1
+            i+=1
+            j+=1
+    return co    
+
+def P2PSimSorted(pref1, pref2):
+    """ Calculate similarity between peers """
+    
+    cooccurrence = getCooccurrence(pref1, pref2)
+    if cooccurrence == 0:
+        return 0
+    normValue = (len(pref1)*len(pref2))**0.5
+    _sim = cooccurrence/normValue
+    sim = int(_sim*1000)    # use integer for bencode
+    return sim
+
+def P2PSimLM(peer_permid, my_pref, peer_pref, owners, total_prefs, mu=1.0):
+    """
+        Calculate similarity between two peers using Bayesian Smooth.
+        P(U|U') = Sum{I}Pbs(U|I)Pml(I|U')
+        Pbs(U|I) = (c(U,I) + mu*Pml(U))/(Sum{U}c(U,I) + mu)  
+        Pml(U) = Sum{I}c(U,I) / Sum{U,I}c(U,I) 
+        Pml(I|U') = c(U',I)/Sum{I}c(U',I) 
+    """
+
+    npeerprefs = len(peer_pref)
+    if npeerprefs == 0 or total_prefs == 0:
+        return 0
+
+    nmyprefs = len(my_pref)
+    if nmyprefs == 0:
+        return 0
+        
+    PmlU = float(npeerprefs) / total_prefs
+    PmlIU = 1.0 / nmyprefs
+    peer_sim = 0.0
+    for item in owners:
+        nowners = len(owners[item]) + 1    # add myself
+        cUI = item in peer_pref
+        PbsUI = float(cUI + mu*PmlU)/(nowners + mu)
+        peer_sim += PbsUI*PmlIU
+    return peer_sim * 100000
+
+
+def P2PSim_Single(db_row, nmyprefs):
+    sim = 0
+    if db_row:
+        peer_id, nr_items, overlap = db_row
+
+        # Arno, 2010-01-14: Safety catch for weird bug reported by Johan
+        if (nr_items is None) or (nmyprefs is None):
+            return sim
+        if (nr_items == 0) or (nmyprefs == 0):  # '==' not 'is': identity comparison on ints is an implementation detail
+            return sim
+        
+        #Cosine Similarity With Emphasis on users with profilelength >= 40
+        sim = overlap * ((1.0/(nmyprefs ** .5)) * (1.0/(nr_items ** .5)))
+        if nr_items < 40:
+            sim = (nr_items/40.0) * sim
+    return sim
+
+def P2PSim_Full(db_rows, nmyprefs):
+    similarity = {}  
+    for db_row in db_rows:
+        similarity[db_row[0]] = P2PSim_Single(db_row, nmyprefs)
+    return similarity
+
+def P2PSimColdStart(choose_from, not_in, nr):
+    """
+        choose_from has keys: ip port oversion num_torrents
+        not_in is [version, permid]
+        return a list containing [version, permid]
+    """
+    allready_choosen = [permid for version,sim,permid in not_in]
+    options = []
+    for permid in choose_from:
+        if permid not in allready_choosen:
+            options.append([choose_from[permid]['num_torrents'],[choose_from[permid]['oversion'],0.0,permid]])
+    options.sort()
+    options.reverse()
+    
+    options = [row[1] for row in options[:nr]]
+    return options
+    
+    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/BuddyCast/votecast.py b/instrumentation/next-share/BaseLib/Core/BuddyCast/votecast.py
new file mode 100644 (file)
index 0000000..3a7887b
--- /dev/null
@@ -0,0 +1,216 @@
+# Written by Rameez Rahman
+# see LICENSE.txt for license information
+#
+
+import sys
+from time import time
+from sets import Set
+
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.Statistics.Logger import OverlayLogger
+from BaseLib.Core.BitTornado.BT1.MessageID import VOTECAST
+from BaseLib.Core.CacheDB.CacheDBHandler import VoteCastDBHandler
+from BaseLib.Core.Utilities.utilities import *
+from BaseLib.Core.Overlay.permid import permid_for_user
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str, str2bin
+from BaseLib.Core.BuddyCast.moderationcast_util import *
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_THIRTEENTH
+from BaseLib.Core.CacheDB.Notifier import Notifier
+from BaseLib.Core.simpledefs import NTFY_VOTECAST, NTFY_UPDATE
+
+DEBUG_UI = False
+DEBUG = False    #Default debug
+debug = False    #For send-errors and other low-level stuff
+
+
+SINGLE_VOTECAST_LENGTH = 130
+
class VoteCastCore:
    """ VoteCastCore is responsible for sending and receiving VOTECAST-messages """

    TESTASSERVER = False # for unit testing: echo a VOTECAST back to the sender

    ################################
    def __init__(self, data_handler, secure_overlay, session, buddycast_interval_function, log = '', dnsindb = None):
        """ Returns an instance of this class

        @param data_handler: shared BuddyCast data handler
        @param secure_overlay: overlay used to send VOTECAST payloads
        @param session: Tribler Session; supplies the votecast vote counts
        @param buddycast_interval_function: interval-function of the
               BuddycastFactory (only kept as a reference)
        @param log: log file name; when non-empty, overlay logging is enabled
        @param dnsindb: callback permid -> (ip, port), used for logging only
        """
        #Keep reference to interval-function of BuddycastFactory
        self.interval = buddycast_interval_function
        self.data_handler = data_handler
        self.dnsindb = dnsindb
        self.log = log
        self.secure_overlay = secure_overlay
        self.votecastdb = VoteCastDBHandler.getInstance()
        self.my_permid = self.votecastdb.my_permid
        self.session = session
        # Upper bound on an incoming message: 130 bytes per vote times the
        # configured number of (random + recent) votes.
        self.max_length = SINGLE_VOTECAST_LENGTH * (session.get_votecast_random_votes() + session.get_votecast_recent_votes())

        self.network_delay = 30
        #Reference to buddycast-core, set by the buddycast-core (as it is created by the
        #buddycast-factory after calling this constructor).
        self.buddycast_core = None


        self.notifier = Notifier.getInstance()

        #Extend logging with VoteCast-messages and status
        if self.log:
            self.overlay_log = OverlayLogger.getInstance(self.log)

    def initialized(self):
        # True once the buddycast-core has registered itself (see __init__).
        return self.buddycast_core is not None

    ################################
    def createAndSendVoteCastMessage(self, target_permid, selversion):
        """ Creates and sends a VOTECAST message """
        # Arno, 2010-02-05: v12 uses a different on-the-wire format, ignore those.
        if selversion < OLPROTO_VER_THIRTEENTH:
            if DEBUG:
                print >> sys.stderr, "votecast: Do not send to lower version peer:", selversion
            return

        votecast_data = self.createVoteCastMessage()
        if len(votecast_data) == 0:
            # Nothing to gossip; skip sending an empty message.
            if DEBUG:
                print >>sys.stderr, "votecast: No votes there.. hence we do not send"
            return

        votecast_msg = bencode(votecast_data)

        # Log SEND_MSG of the (uncompressed) outgoing message.
        if self.log:
            dns = self.dnsindb(target_permid)
            if dns:
                ip,port = dns
                MSG_ID = "VOTECAST"
                # msg = voteCastReplyMsgToString(votecast_data)
                self.overlay_log('SEND_MSG', ip, port, show_permid(target_permid), selversion, MSG_ID)

        if DEBUG: print >> sys.stderr, "votecast: Sending votecastmsg",voteCastMsgToString(votecast_data)
#        data = VOTECAST + votecast_msg
#        self.secure_overlay.send(target_permid, data, self.voteCastSendCallback)
        self.secure_overlay.send(target_permid, VOTECAST + votecast_msg, self.voteCastSendCallback)


    ################################
    def createVoteCastMessage(self):
        """ Create a VOTECAST message

        @return: dict mapping publisher_id -> {'vote': ..., 'time_stamp': ...}
                 built from the recent/random votes in the votecast DB.
        """

        if DEBUG: print >> sys.stderr, "votecast: Creating votecastmsg..."

        # NOTE(review): these two locals are never used; the vote counts are
        # applied inside getRecentAndRandomVotes() — candidates for removal.
        NO_RANDOM_VOTES = self.session.get_votecast_random_votes()
        NO_RECENT_VOTES = self.session.get_votecast_recent_votes()
        records = self.votecastdb.getRecentAndRandomVotes()

        data = {}
        for record in records:
            # record is of the format: (publisher_id, vote, time_stamp)
            if DEBUG:
                print >>sys.stderr,"votecast: publisher id",`record[0]`,type(record[0])
            publisher_id = record[0]
            data[publisher_id] = {'vote':record[1], 'time_stamp':record[2]}
        if DEBUG: print >>sys.stderr, "votecast to be sent:", repr(data)
        return data


    ################################
    def voteCastSendCallback(self, exc, target_permid, other=0):
        # Send-completion callback from the secure overlay; exc is None on
        # success, otherwise the error that occurred during send.
        if DEBUG:
            if exc is None:
                print >> sys.stderr,"votecast: *** msg was sent successfully to peer", show_permid_short(target_permid)
            else:
                print >> sys.stderr, "votecast: *** warning - error in sending msg to", show_permid_short(target_permid), exc

    ################################
    def gotVoteCastMessage(self, recv_msg, sender_permid, selversion):
        """ Receives VoteCast message and handles it.

        @return: False when the message must be rejected (bad sender,
                 oversized, undecodable or invalid payload); True otherwise.
        """
        # VoteCast feature is renewed in eleventh version; hence, do not receive from lower version peers
        # Arno, 2010-02-05: v12 uses a different on-the-wire format, ignore those.
        if selversion < OLPROTO_VER_THIRTEENTH:
            if DEBUG:
                print >> sys.stderr, "votecast: Do not receive from lower version peer:", selversion
            return True

        if DEBUG:
            print >> sys.stderr,'votecast: Received a msg from ', show_permid_short(sender_permid)

        # Reject messages without a sender or looped back from ourselves.
        if not sender_permid or sender_permid == self.my_permid:
            if DEBUG:

                print >> sys.stderr, "votecast: error - got votecastMsg from a None peer", \
                        show_permid_short(sender_permid), recv_msg
            return False

        # Size guard: max_length is derived from the configured vote counts.
        if self.max_length > 0 and len(recv_msg) > self.max_length:
            if DEBUG:
                print >> sys.stderr, "votecast: warning - got large voteCastHaveMsg; msg_size:", len(recv_msg)
            return False

        votecast_data = {}

        try:
            votecast_data = bdecode(recv_msg)
        except:
            # Malformed bencoding from the network; drop the message.
            print >> sys.stderr, "votecast: warning, invalid bencoded data"
            return False

        # check message-structure
        # NOTE(review): validVoteCastMsg presumably comes from the wildcard
        # moderationcast_util import — verify.
        if not validVoteCastMsg(votecast_data):
            print >> sys.stderr, "votecast: warning, invalid votecast_message"
            return False

        self.handleVoteCastMsg(sender_permid, votecast_data)

        #Log RECV_MSG of uncompressed message
        if self.log:
            dns = self.dnsindb(sender_permid)
            if dns:
                ip,port = dns
                MSG_ID = "VOTECAST"
                msg = voteCastMsgToString(votecast_data)
                self.overlay_log('RECV_MSG', ip, port, show_permid(sender_permid), selversion, MSG_ID, msg)
        if self.TESTASSERVER:
            # Unit-test mode: echo our own votes back to the sender.
            self.createAndSendVoteCastMessage(sender_permid, selversion)
        return True

    ################################
    def handleVoteCastMsg(self, sender_permid, data):
        """ Handles VoteCast message: stores each received vote and fires
        one NTFY_VOTECAST/NTFY_UPDATE notification per distinct mod_id. """
        if DEBUG:
            print >> sys.stderr, "votecast: Processing VOTECAST msg from: ", show_permid_short(sender_permid), "; data: ", repr(data)

        # NOTE(review): sets.Set is deprecated (stdlib 'sets' module);
        # a builtin set would do — left unchanged for Python 2.3 compat.
        mod_ids = Set()
        for key, value in data.items():
            vote = {}
            vote['mod_id'] = bin2str(key)
            vote['voter_id'] = permid_for_user(sender_permid)
            vote['vote'] = value['vote']
            vote['time_stamp'] = value['time_stamp']
            self.votecastdb.addVote(vote)

            mod_ids.add(vote['mod_id'])

        # Arno, 2010-02-24: Generate event
        for mod_id in mod_ids:
            try:
                self.notifier.notify(NTFY_VOTECAST, NTFY_UPDATE, mod_id)
            except:
                # NOTE(review): print_exc is not imported explicitly here;
                # presumably provided by the wildcard utilities import — verify.
                print_exc()

        if DEBUG:
            print >> sys.stderr,"votecast: Processing VOTECAST msg from: ", show_permid_short(sender_permid), "DONE; data:"

    def showAllVotes(self):
        """ Currently this function is only for testing, to show all votes """
        if DEBUG:
            records = self.votecastdb.getAll()
            print >>sys.stderr, "Existing votes..."
            for record in records:
                print >>sys.stderr, "    mod_id:",record[0],"; voter_id:", record[1], "; votes:",record[2],"; timestamp:", record[3]
            print >>sys.stderr, "End of votes..."




    ################################
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/CacheDBHandler.py b/instrumentation/next-share/BaseLib/Core/CacheDB/CacheDBHandler.py
new file mode 100644 (file)
index 0000000..038ed32
--- /dev/null
@@ -0,0 +1,4 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+
+from SqliteCacheDBHandler import *
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/EditDist.py b/instrumentation/next-share/BaseLib/Core/CacheDB/EditDist.py
new file mode 100644 (file)
index 0000000..1244ece
--- /dev/null
@@ -0,0 +1,54 @@
+# Written by Maarten Clemens, Jelle Roozenburg
+# see LICENSE.txt for license information
+
+#http://en.wikipedia.org/wiki/Damerau-Levenshtein_distance
+
def editDist(str1, str2, maxlength=14):
    """
    Weighted Damerau-Levenshtein distance, normalized to [0, 1].

    http://en.wikipedia.org/wiki/Damerau-Levenshtein_distance

    Both strings are lowercased and truncated to maxlength characters.
    An edit at positions (i, j) costs 1/max(i+1, j+1), so differences near
    the start of the strings weigh more than differences near the end.

    @param str1: first string (empty/None yields maximum distance)
    @param str2: second string (empty/None yields maximum distance)
    @param maxlength: compare at most this many leading characters
    @return: 1.0 for empty input, otherwise the distance normalized by the
             maximum possible distance (0.0 means the strings are equal).
    """
    if not str1 or not str2: # protect against empty strings
        return 1.0

    str1 = str1[:maxlength].lower()
    str2 = str2[:maxlength].lower()

    lenStr1 = len(str1)
    lenStr2 = len(str2)

    d = [range(lenStr2+1)]
    row = []

    for i in range(lenStr1):
        row.append(i+1)
        for j in range(lenStr2):
            # position-dependent cost; use 'penalty = 1' for the classic
            # unweighted distance
            penalty = 1./max(i+1,j+1)
            if str1[i] == str2[j]:
                cost = 0
            else:
                cost = penalty
            deletion = d[i][j+1] + penalty
            insertion = row[j] + penalty
            substitution = d[i][j] + cost
            row.append(min(deletion,insertion,substitution))
            # (removed a no-op statement that built and discarded the tuple
            #  (deletion, insertion, substitution))
            if i>0 and j>0 and str1[i] == str2[j-1] and str1[i-1] == str2[j]:
                row[j+1] = min(row[j+1], d[i-1][j-1]+cost) # transposition
        d.append(row)
        row = []

    # normalize by the maximum possible distance: the harmonic sum of the
    # per-position penalties (use max(lenStr1, lenStr2) for penalty = 1)
    maxi = sum([1./j for j in range(max(lenStr1,lenStr2)+1)[1:]])
    return 1.*d[lenStr1][lenStr2]/ maxi
+    
+
if __name__ == '__main__':
    # Command-line smoke test: python EditDist.py <str1> <str2>
    # (Python 2 print statement; this module predates Python 3.)
    import sys
    str1 = sys.argv[1]
    str2 = sys.argv[2]
    print editDist(str1, str2)
+    
+    
+##    d,e = EditDist('mamamstein','levenstein')
+##    print e
+##    for i in d:
+##        print i
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/MetadataDBHandler.py b/instrumentation/next-share/BaseLib/Core/CacheDB/MetadataDBHandler.py
new file mode 100644 (file)
index 0000000..4545e44
--- /dev/null
@@ -0,0 +1,1125 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler
+import threading
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
+
+import sys
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import SignatureException, \
+    MetadataDBException
+from BaseLib.Core.Utilities.utilities import bin2str, str2bin
+import sqlite3
+import time
+
+
# --- Dictionary keys used when exchanging subtitle info ---
SUBTITLE_LANGUAGE_CODE = "lang"
SUBTITLE_PATH = "path"

# --- Metadata table and its column names ---
METADATA_TABLE = "Metadata"

MD_ID_KEY = "metadata_id"
MD_PUBLISHER_KEY = "publisher_id"
MD_INFOHASH_KEY = "infohash"
MD_DESCRIPTION_KEY = "description"
MD_TIMESTAMP_KEY = "timestamp"
MD_SIGNATURE_KEY = "signature"


# --- Subtitles table and its column names ---
SUBTITLES_TABLE = "Subtitles"

SUB_MD_FK_KEY = "metadata_id_fk"
SUB_LANG_KEY = "subtitle_lang"
SUB_LOCATION_KEY = "subtitle_location"
SUB_CHECKSUM_KEY = "checksum"

# --- SubtitlesHave table and its column names ---
SUBTITLES_HAVE_TABLE = "SubtitlesHave"

SH_MD_FK_KEY = "metadata_id_fk"
SH_PEER_ID_KEY = "peer_id"
SH_HAVE_MASK_KEY = "have_mask"
SH_TIMESTAMP = "received_ts"


# maximum number of have entries returned
# by the database (-1 for unlimited)
SH_RESULTS_LIMIT = 200

DEBUG = False

#it's good to have all of the queries in one place:
#the code is more easy to read, and if some query is wrong
#it is easier to correct them all
# Shared SELECT fragment: Metadata INNER JOIN Subtitles on the artificial key.
SELECT_SUBS_JOIN_BASE = "SELECT sub." + SUB_MD_FK_KEY + ", sub." + SUB_LANG_KEY \
             + ", sub." + SUB_LOCATION_KEY \
             + ", sub." + SUB_CHECKSUM_KEY \
             + " FROM " + METADATA_TABLE + " AS md " \
             + "INNER JOIN " \
             + SUBTITLES_TABLE + " AS sub " \
             + "ON md." + MD_ID_KEY + " = sub." + SUB_MD_FK_KEY

# Shared FROM fragment: Metadata INNER JOIN SubtitlesHave on the artificial key.
MD_SH_JOIN_CLAUSE = \
            METADATA_TABLE + " AS md " \
            + "INNER JOIN " \
            + SUBTITLES_HAVE_TABLE + " AS sh " \
            + "ON md." + MD_ID_KEY + " = sh." + SH_MD_FK_KEY


# All SQL used by MetadataDBHandler, keyed by a human-readable name.
# Placeholders are positional '?' parameters for SQLite.
# NOTE(review): several queries below contain a stray '+' inside the SQL
# text (e.g. "( SELECT + metadata_id" and "WHERE + publisher_id"). SQLite
# parses these as a unary plus on the column, so they work, but they look
# like typos — confirm and clean up against the live schema.
QUERIES = { 
           "SELECT SUBS JOIN HASH ALL" : 
           SELECT_SUBS_JOIN_BASE  
             + " WHERE md." + MD_INFOHASH_KEY + " = ?"\
             + " AND md." + MD_PUBLISHER_KEY + " = ?;",
           
           "SELECT SUBS JOIN HASH ONE" :
           SELECT_SUBS_JOIN_BASE 
             + " WHERE md." + MD_INFOHASH_KEY + " = ?"\
             + " AND md." + MD_PUBLISHER_KEY + " = ?"\
             + " AND sub." + SUB_LANG_KEY + " = ?;",
             
           "SELECT SUBS FK ALL" :
           "SELECT * FROM " + SUBTITLES_TABLE  
             + " WHERE " + SUB_MD_FK_KEY + " = ?;",
           
           "SELECT SUBS FK ONE" :
           "SELECT * FROM " + SUBTITLES_TABLE  
             + " WHERE " + SUB_MD_FK_KEY + " = ?"\
             + " AND " + SUB_LANG_KEY + " = ?;",
             
           "SELECT METADATA" : 
           "SELECT * FROM " \
             + METADATA_TABLE + " WHERE " + MD_INFOHASH_KEY + " = ?" \
             + " AND " + MD_PUBLISHER_KEY + " = ?;",
           
           "SELECT PUBLISHERS FROM INFOHASH":
           "SELECT " + MD_PUBLISHER_KEY + " FROM " + METADATA_TABLE \
             + " WHERE " + MD_INFOHASH_KEY + " = ?;",
             
           "UPDATE METADATA" : 
           "UPDATE " + METADATA_TABLE \
             + " SET "  \
             + MD_DESCRIPTION_KEY + " = ?, " \
             + MD_TIMESTAMP_KEY + " = ?, " \
             + MD_SIGNATURE_KEY + " = ?" \
             + " WHERE " + MD_INFOHASH_KEY + " = ?" \
             + " AND " + MD_PUBLISHER_KEY + " = ?;",
             
           "UPDATE SUBTITLES" : 
           "UPDATE " + SUBTITLES_TABLE \
             + " SET " + SUB_LOCATION_KEY + "= ?, " \
             + SUB_CHECKSUM_KEY + "= ?" \
             + " WHERE " + SUB_MD_FK_KEY + "= ?" \
              + " AND " + SUB_LANG_KEY + "= ?;",
            
           "DELETE ONE SUBTITLES" :
           "DELETE FROM " + SUBTITLES_TABLE \
            + " WHERE " + SUB_MD_FK_KEY + "= ? " \
            + " AND " + SUB_LANG_KEY + "= ?;",
            
            "DELETE ONE SUBTITLE JOIN" :
            "DELETE FROM " + SUBTITLES_TABLE \
            + " WHERE " + SUB_MD_FK_KEY  \
            + " IN ( SELECT " + MD_ID_KEY + " FROM " + METADATA_TABLE \
            + " WHERE " + MD_PUBLISHER_KEY + " = ?"  \
            + " AND " + MD_INFOHASH_KEY + " = ? )"  \
            + " AND " + SUB_LANG_KEY + "= ?;",
            
           "DELETE ALL SUBTITLES" :
           "DELETE FROM " + SUBTITLES_TABLE \
            + " WHERE " + SUB_MD_FK_KEY + "= ?;",
            
           "DELETE METADATA PK" :
           "DELETE FROM " + METADATA_TABLE \
            + " WHERE " + MD_ID_KEY + " = ?;",
           
           "INSERT METADATA" :
           "INSERT INTO " + METADATA_TABLE + " VALUES " \
             + "(NULL,?,?,?,?,?)",
             
           "INSERT SUBTITLES" : 
           "INSERT INTO " + SUBTITLES_TABLE + " VALUES (?, ?, ?, ?);",
           
           "SELECT SUBTITLES WITH PATH":
           "SELECT sub." + SUB_MD_FK_KEY + ", sub." + SUB_LOCATION_KEY  + ", sub." \
           + SUB_LANG_KEY + ", sub." + SUB_CHECKSUM_KEY \
           + ", m." + MD_PUBLISHER_KEY + ", m." + MD_INFOHASH_KEY \
           + " FROM " + METADATA_TABLE + " AS m " \
           +"INNER JOIN " + SUBTITLES_TABLE + " AS sub "\
           + "ON m." + MD_ID_KEY + " = " + " sub." + SUB_MD_FK_KEY \
           + " WHERE " \
           + SUB_LOCATION_KEY + " IS NOT NULL;",
           
           "SELECT SUBTITLES WITH PATH BY CHN INFO":
           "SELECT sub." + SUB_LOCATION_KEY  + ", sub." \
           + SUB_LANG_KEY + ", sub." + SUB_CHECKSUM_KEY \
           + " FROM " + METADATA_TABLE + " AS m " \
           +"INNER JOIN " + SUBTITLES_TABLE + " AS sub "\
           + "ON m." + MD_ID_KEY + " = " + " sub." + SUB_MD_FK_KEY \
           + " WHERE sub." \
           + SUB_LOCATION_KEY + " IS NOT NULL" \
           + " AND m." + MD_PUBLISHER_KEY + " = ?"\
           + " AND m." + MD_INFOHASH_KEY + " = ?;" ,
           
           "INSERT HAVE MASK":
           "INSERT INTO " + SUBTITLES_HAVE_TABLE + " VALUES " \
           + "(?, ?, ?, ?);",
           
           "GET ALL HAVE MASK":
           "SELECT sh." + SH_PEER_ID_KEY + ", sh." + SH_HAVE_MASK_KEY \
           + ", sh." + SH_TIMESTAMP \
           + " FROM " + MD_SH_JOIN_CLAUSE + " WHERE md." + MD_PUBLISHER_KEY \
           + " = ? AND md." + MD_INFOHASH_KEY + " = ? "\
           + "ORDER BY sh." + SH_TIMESTAMP + " DESC" \
           + " LIMIT " + str(SH_RESULTS_LIMIT) + ";",
           
           "GET ONE HAVE MASK":
           "SELECT sh." + SH_HAVE_MASK_KEY \
           + ", sh." + SH_TIMESTAMP \
           + " FROM " + MD_SH_JOIN_CLAUSE + " WHERE md." + MD_PUBLISHER_KEY \
           + " = ? AND md." + MD_INFOHASH_KEY + " = ? AND sh." + SH_PEER_ID_KEY \
           + " = ?;",
           
           "UPDATE HAVE MASK":
           "UPDATE " + SUBTITLES_HAVE_TABLE \
           + " SET " + SH_HAVE_MASK_KEY + " = ?, " \
           + SH_TIMESTAMP + " = ?" \
           + " WHERE " + SH_PEER_ID_KEY + " = ?" \
           + " AND " + SH_MD_FK_KEY + " IN " \
           + "( SELECT + " + MD_ID_KEY+  " FROM " \
           +  METADATA_TABLE + " WHERE + "\
           +  MD_PUBLISHER_KEY + " = ?"\
           + " AND " + MD_INFOHASH_KEY + " = ? );",
           
           "DELETE HAVE":
           "DELETE FROM " + SUBTITLES_HAVE_TABLE \
           + " WHERE " + SH_PEER_ID_KEY + " = ?" \
           + " AND " + SH_MD_FK_KEY + " IN " \
           + "( SELECT + " + MD_ID_KEY+  " FROM " \
           +  METADATA_TABLE + " WHERE + "\
           +  MD_PUBLISHER_KEY + " = ?"\
           + " AND " + MD_INFOHASH_KEY + " = ? );",
           
           "CLEANUP OLD HAVE":
           "DELETE FROM " + SUBTITLES_HAVE_TABLE \
           + " WHERE " + SH_TIMESTAMP + " < ? " \
           + " AND " + SH_PEER_ID_KEY + " NOT IN " \
           + "( SELECT md." + MD_PUBLISHER_KEY + " FROM " \
           + METADATA_TABLE + " AS md WHERE md." + MD_ID_KEY \
           + " = " + SH_MD_FK_KEY + " );"   
           }
+
+class MetadataDBHandler (object, BasicDBHandler):
+    
+    """
+    Data Access Layer for the subtitles database.
+    """
+    
+    __single = None    # used for multithreaded singletons pattern
+    _lock = threading.RLock()
+    
+    @staticmethod
+    def getInstance(*args, **kw):        
+        if MetadataDBHandler.__single is None:
+            MetadataDBHandler._lock.acquire()   
+            try:
+                if MetadataDBHandler.__single is None:
+                    MetadataDBHandler(*args, **kw)
+            finally:
+                MetadataDBHandler._lock.release()
+        return MetadataDBHandler.__single
+    
+    
+    def __init__(self, db=SQLiteCacheDB.getInstance()):
+        # notice that singleton pattern is not enforced.
+        # This way the code is more easy
+        # to test.
+        
+        try:
+            MetadataDBHandler._lock.acquire()
+            MetadataDBHandler.__single = self
+        finally:
+            MetadataDBHandler._lock.release()
+        
+        try:
+            self._db = db
+            # Don't know what those life should know. Assuming I don't need 
+            # them 'till a countrary proof! (Ask Nitin) 
+            # BasicDBHandler.__init__(self,db,METADATA_TABLE)
+            # BasicDBHandler.__init__(self,db,SUBTITLES_TABLE)
+            print >> sys.stderr, "Metadata: DB made" 
+        except: 
+            print >> sys.stderr, "Metadata: couldn't make the tables"
+        
+        
+        print >> sys.stderr, "Metadata DB Handler initialized"
+        
    def commit(self):
        # Flush any pending writes on the underlying database connection.
        self._db.commit()
+                
+#    Commented for the sake of API simplicity
+#    But then uncommented for coding simplicity :P
+    def getAllSubtitles(self, channel, infohash):
+        """
+        Get all the available subtitles for a channel and infohash.
+        
+        Returns a list representing subtitles that are available for
+        a givenchannel and infohash. 
+        
+        @param channel: the perm_id of the channel owner (binary)
+        @param infohash: the infhash of a channel elements as it
+                         is announced in ChannelCast (binary)
+        @return: a dictionary of { lang : SubtitleInfo instance}
+        """
+        
+        query = QUERIES["SELECT SUBS JOIN HASH ALL"]
+        infohash = bin2str(infohash)
+        channel = bin2str(channel)
+          
+        results = self._db.fetchall(query, (infohash, channel))
+        
+        subsDict = {}
+        for entry in results:
+            subsDict[entry[1]] = SubtitleInfo(entry[1], entry[2], entry[3])
+     
+        return subsDict
+    
+    def _deleteSubtitleByChannel(self, channel, infohash, lang):
+        '''
+        Remove a subtitle for a channel infohash
+        
+        @param channel: the channel where the subtitle is (binary)
+        @param infohash: the infohash of the torrent referred by the subtitle
+                        (binary)
+        @param lang: ISO-639-2 language code of the subtitle to remove
+        
+        '''
+        
+        query = QUERIES["DELETE ONE SUBTITLE JOIN"]
+        
+        infohash = bin2str(infohash)
+        channel = bin2str(channel)
+        
+        self._db.execute_write(query,(channel, infohash, lang))
+
+
+    def _getAllSubtitlesByKey(self, metadataKey):
+        '''
+        Retrieves every subtitles given a Metadata table key
+        
+        Given an instance of the Metadata table artificial key, retrieves
+        every subtitle instance associated to that key
+        
+        @param metadataKey: a value of an artificial key in the Metadata table
+        @return : a dictionary of type {lang : SubtitleInfo}, empty if no results
+        '''
+        query = QUERIES["SELECT SUBS FK ALL"]
+        
+          
+        results = self._db.fetchall(query, (metadataKey,))
+        subsDict = {}
+        for entry in results:
+            subsDict[entry[1]] = SubtitleInfo(entry[1], entry[2], str2bin(entry[3]))
+     
+        return subsDict
+
+        
+#    commented for the sake of API simplicity
+#    def hasSubtitleInLang(self,channel,infohash, lang):
+#        """
+#        Checks whether an item in a channel as available subitltles.
+#        
+#        @param channel: a perm_id identifying the owner of the channel.
+#        @param infohash: the infohash of an item, as announced in channelcast
+#                         messages.
+#        @param lang: a 3 characters ISO 639-2 language code, identifying
+#                     the desired subtitle langugage
+#        @return:  bool
+#        """
+#        sub = self.getSubtitle(channel, infohash, lang)
+#        return sub is not None
+#    
+    
+#    commented for the sake of api simplicity
+#    But then uncommented for coding simplicity :P
+    def getSubtitle(self, channel, infohash, lang):
+        """
+        Get a subtitle for a language for a given item in a given channel.
+        
+        Returns the details reguarding a subtitles in a given language for a
+        given item in a given channel, if it exists. Otherwise it returns
+        None.
+        
+        @param channel: a perm_id identifying the owner of the channel.
+        @param infohash: the infohash of an item, as announced in channelcast
+                         messages.
+        @param lang: a 3 characters ISO 639-2 language code, identifying
+                     the desired subtitle langugage
+        @return: a SubtitleInfo instance
+        """
+        query = QUERIES["SELECT SUBS JOIN HASH ONE"]
+        
+        infohash = bin2str(infohash)
+        channel = bin2str(channel)
+          
+          
+        res = self._db.fetchall(query, (infohash, channel, lang))
+        if len(res) == 0 :
+            return None
+        elif len(res) == 1 :
+            checksum = str2bin(res[0][3])
+            return SubtitleInfo(res[0][1], res[0][2], checksum)
+        else : 
+            # This should be not possible to database constraints
+            raise MetadataDBException("Metadata DB Constraint violeted!")
+    
+
+            
+    def _getSubtitleByKey(self, metadata_fk, lang):
+        """
+        Return a subtitle in a given language for a key of the Metadata table.
+        
+        Given an instance of the artificial key in the metadata table,
+        retrieves a SubtitleInfo instance for that key and the language passed in.
+        
+        @param metadata_fk: a key in the metadata table
+        @param lang: a language code for the subtitle to be retrieved
+        
+        @return: a SubtitleInfo instance, or None
+        """
+        query = QUERIES["SELECT SUBS FK ONE"]
+          
+          
+        res = self._db.fetchall(query, (metadata_fk, lang))
+        if len(res) == 0 :
+            return None
+        elif len(res) == 1 :
+            checksum = str2bin(res[0][3])
+            return SubtitleInfo(res[0][1], res[0][2], checksum)
+        else : 
+            # This should be not possible to database constraints
+            raise MetadataDBException("Metadata DB Constraint violeted!")
+        
+        
+    def getMetadata(self, channel, infohash):
+        """
+        Returns a MetadataDTO instance for channel/infohash if available in DB
+        
+        Given a channel/infhash couple returns a MetadataDTO instance, built
+        with the values retrieved from the Metadata and Subtitles DB. If
+        no result returns None
+        
+        @param channel: the permid of the channel's owner (binary)
+        @param infohash: the infohash of the item the metadata refers to
+                         (binary)
+        @return: a MetadataDTO instance comprehensive of subtitles if any
+                 metadata is found in the DB. None otherwise.
+        """
+        
+        query = QUERIES["SELECT METADATA"]
+        
+        infohash = bin2str(infohash)
+        channel = bin2str(channel)
+        
+        res = self._db.fetchall(query, (infohash, channel))
+        
+        if len(res) == 0:
+            return None
+        if len(res) > 1:
+            raise MetadataDBException("Metadata DB Constraint violated")
+        
+        metaTuple = res[0]
+        
+        subsDictionary = self._getAllSubtitlesByKey(metaTuple[0])
+        
+        publisher = str2bin(metaTuple[1])
+        infohash =  str2bin(metaTuple[2])
+        timestamp = int(metaTuple[4])
+        description = unicode(metaTuple[3])
+        signature = str2bin(metaTuple[5])
+        
+        toReturn = MetadataDTO(publisher, infohash,
+                               timestamp, description, None,
+                               signature)
+        
+        for sub in subsDictionary.itervalues():
+            toReturn.addSubtitle(sub)
+        
+        return toReturn
+
+    
+    def getAllMetadataForInfohash(self, infohash):
+        """
+        Returns a list of MetadataDTO instances for a given infohash
+        
+        Given a torrent infohash returns a list of MetadataDTO instances for
+        that infohash. Each one of the MetadataDTO refers to a different
+        channel.
+        
+        @param infohash: the infohash for the torrent (binary)
+        @return: a list of MetadataDTO isntances (or empty list if nothing
+                 is found)
+        """
+        
+        assert infohash is not None
+        
+        strinfohash = bin2str(infohash)
+        
+        query = QUERIES["SELECT PUBLISHERS FROM INFOHASH"]
+        
+        channels = self._db.fetchall(query, (strinfohash,))
+        
+        return [self.getMetadata(str2bin(entry[0]), infohash) for entry in channels]
+        
+      
+        
+    
+    def hasMetadata(self, channel, infohash):
+        """
+        Checks whether there exists some metadata for an item in a channel.
+        
+        @param channel: a perm_id identifying the owner of the channel.
+        @param infohash: the infohash of an item, as announced in channelcast
+                         messages.
+        @return boolean
+        """
+        query = QUERIES["SELECT METADATA"]
+        
+        infohash = bin2str(infohash)
+        channel = bin2str(channel)
+        
+        res = self._db.fetchall(query, (infohash, channel))
+        return len(res) != 0
+    
+    
    def insertMetadata(self, metadata_dto):
        '''
        Insert the metadata contained in a Metadata DTO in the database.

        If an entry relative to the same channel and infohash of the provided
        dto already exists in the db, the db is updated only if the timestamp
        of the new dto is newer than the entry in the database.
        If there is no such an entry, a new row in the Metadata DB is created
        along with the required entries in the SubtitleInfo DB.

        @type metadata_dto: MetadataDTO
        @param metadata_dto: an instance of MetadataDTO describing metadata

        @return: True if an existing entry was updated, False if a new entry
                was inserted. None if an entry existed but was at least as
                new as the dto (nothing written).

        @raise SignatureException: if the dto's signature does not verify
        '''
        assert metadata_dto is not None
        assert isinstance(metadata_dto, MetadataDTO)

        # refuse metadata that is not correctly signed
        if not metadata_dto.verifySignature():
            raise SignatureException("Metadata to insert is not properly" \
                                     "signed")

        select_query = QUERIES["SELECT METADATA"]

        signature = bin2str(metadata_dto.signature)
        infohash = bin2str(metadata_dto.infohash)
        channel = bin2str(metadata_dto.channel)

        # try to retrieve a corresponding record for (channel, infohash)
        res = self._db.fetchall(select_query,
                                (infohash, channel))

        isUpdate = False

        if len(res) != 0 :
            # entry exists: overwrite only if the incoming dto is newer
            # (res[0][4] is assumed to be the stored timestamp column --
            # verify against the SELECT METADATA column order)
            if metadata_dto.timestamp > res[0][4] :
                query = QUERIES["UPDATE METADATA"]


                self._db.execute_write(query,
                                    (metadata_dto.description,
                                    metadata_dto.timestamp,
                                    signature,
                                    infohash,
                                    channel,),
                                   False) #I don't want the transaction to commit now

                fk_key = res[0][0]

                isUpdate = True

            else:
                # stored entry is as new or newer: nothing to do
                return

        else: #if is this a whole new metadata item
            query = QUERIES["INSERT METADATA"]

            # NOTE(review): this write commits immediately (True), unlike the
            # update branch above which defers the commit -- confirm intended
            self._db.execute_write(query,
                                   (channel,
                                    infohash,
                                    metadata_dto.description,
                                    metadata_dto.timestamp,
                                    signature,
                                    ),
                                   True)

            if DEBUG:
                print >> sys.stderr, "Performing query on db: " + query

            # re-select to learn the primary key of the row just inserted
            newRows = self._db.fetchall(select_query,
                                (infohash, channel))


            if len(newRows) == 0 :
                raise IOError("No results, while there should be one")

            fk_key = newRows[0][0]


        # sync the SubtitleInfo table with the dto's subtitles (no commit yet)
        self._insertOrUpdateSubtitles(fk_key, metadata_dto.getAllSubtitles(), \
                                      False)

        self._db.commit() #time to commit everything

        return isUpdate
+        
+                
+                            
+    def _insertOrUpdateSubtitles(self, fk_key, subtitles, commitNow=True):
+        """
+        Given a dictionary of subtitles updates the corrisponding entries.
+        
+        This method takes as input a foreign key for the Metadata table,
+        and a dictionary of type {lang : SubtitleInfo}. Then it updates the 
+        SubtitleInfo table, updating existing entries, deleting entries that are
+        in the db but not in the passed dictionary, and inserting entries
+        that are in the dictionary but not in the db.
+        
+        @param fk_key: a foreign key from the Metadata table. Notice that
+                       sqlite does not enforce the fk constraint. Be careful!
+        @param subtitles: a dictionary {lang : SubtitleInfo} (subtitle must be
+                          an instance of SubtitleInfo)
+        @param commitNow: if False the transaction is not committed
+        """
+        
+        
+        allSubtitles = self._getAllSubtitlesByKey(fk_key)
+        oldSubsSet = frozenset(allSubtitles.keys())
+        newSubsSet = frozenset(subtitles.keys())
+        
+        commonLangs = oldSubsSet & newSubsSet
+        newLangs = newSubsSet - oldSubsSet
+        toDelete = oldSubsSet - newSubsSet
+        
+        #update existing subtitles
+        for lang in commonLangs:
+            self._updateSubtitle(fk_key, subtitles[lang], False)
+        
+        
+        #remove subtitles that are no more in the set
+        for lang in toDelete:
+            self._deleteSubtitle(fk_key, lang, False)
+            
+        #insert new subtitles
+        for lang in newLangs:
+            self._insertNewSubtitle(fk_key, subtitles[lang], False)
+        
+        if commitNow:
+            self._db.commit()
+            
+            
+            
+        
+    def _updateSubtitle(self, metadata_fk, subtitle, commitNow=True):
+        """
+        Update an entry in the Subtitles database.
+        
+        If the entry identified by metadata_fk, subtitle.lang does not exist
+        in the subtitle database this method does nothing.
+        
+        @param metadata_fk: foreign key of the metadata table
+        @param subtitle: instance of Subitle containing the data to insert
+        @param commitNow: if False, this method does not commit the changes to
+                          the database
+        """
+        assert metadata_fk is not None
+        assert subtitle is not None
+        assert isinstance(subtitle, SubtitleInfo)
+                 
+        toUpdate = self._getSubtitleByKey(metadata_fk, subtitle.lang)
+        
+        if toUpdate is None:
+            return
+        
+       
+        query = QUERIES["UPDATE SUBTITLES"]
+        
+        checksum = bin2str(subtitle.checksum)
+                            
+        self._db.execute_write(query, (subtitle.path,
+                        checksum, metadata_fk, subtitle.lang),
+                        commitNow) 
+        
    def updateSubtitlePath(self, channel, infohash, lang, newPath, commitNow=True):
        """
        Updates a subtitle entry in the database if it exists.

        Given the channel, the infohash, and a language, the path of the
        corresponding subtitle entry is updated. If an entry for the
        provided channel, infohash, and language does not already exist
        in the db, nothing is done.

        @param channel: the channel id (permid) of the channel for the
                        subtitle (binary)
        @param infohash: the infohash of the item the subtitle refers to
                        (binary)
        @param lang: the language of the subtitle to update
        @param newPath: the new path of the subtitle. None to indicate that
                    the subtitle is not available
        @param commitNow: if False the transaction is not committed

        @return: True if an entry was updated in the db. False if nothing
                got written on the db

        @raise MetadataDBException: if more than one row matches (db
                constraint violated)
        """
        query = QUERIES["SELECT SUBS JOIN HASH ONE"]

        channel = bin2str(channel)
        infohash = bin2str(infohash)

        res = self._db.fetchall(query, (infohash, channel, lang))

        if len(res) > 1 :
            raise MetadataDBException("Metadata DB constraint violated")
        elif len(res) == 0 :
            if DEBUG:
                print >> sys.stderr, "Nothing to update for channel %s, infohash %s, lang"\
                        " %s. Doing nothing." % (channel[-10:],\
                                                 infohash, lang)
            return False
        else:
            query = QUERIES["UPDATE SUBTITLES"]
            # res[0][3] is assumed to be the stored checksum and res[0][0]
            # the metadata fk -- verify against the join's column order
            self._db.execute_write(query, (newPath,
                        res[0][3], res[0][0], lang),
                        commitNow)
            return True
+        
+        
+        
+        
+        
+    
+    def _deleteSubtitle(self, metadata_fk, lang, commitNow=True):
+        """
+        Delete an entry from the subtitles table.
+        
+        Given a foreign key from the metadata table  and a language delets
+        the corresponding entry in the subtitle table. If the entry
+        is not found, it does nothing.
+        
+        @param metadata_fk: a foreign key from the Metadata table
+        @param lang: a 3 characters language code 
+        @param commitNow: if False does not commit the transaction
+        """
+        assert metadata_fk is not None
+        assert lang is not None
+        
+        query = QUERIES["DELETE ONE SUBTITLES"]
+        self._db.execute_write(query, (metadata_fk, lang), commitNow)
+        
+    
+    def _insertNewSubtitle(self, metadata_fk, subtitle, commitNow=True) :
+        """
+        Insert a new subtitle entry in the Subtitles table.
+        
+        Given a foreign key from the Metadata table, and a SubtitleInfo instance
+        describing the subtitle to insert, adds it to the metadata table.
+        This method assumes that that entry does not already exist in the
+        table.
+        NOTICE that sqlite  does not enforce the foreign key constraint,
+        so be careful about integrity
+        """
+        assert metadata_fk is not None
+        assert subtitle is not None
+        assert isinstance(subtitle, SubtitleInfo)
+        
+        query = QUERIES["INSERT SUBTITLES"]
+        
+        checksum = bin2str(subtitle.checksum)
+        self._db.execute_write(query, (metadata_fk, subtitle.lang,
+                                       subtitle.path, checksum),
+                                       commitNow)
+    
    def deleteMetadata(self, channel, infohash):
        """
        Removes all the metadata associated to a channel/infohash.

        Everything is dropped from both the Metadata and Subtitles db,
        committed as a single transaction.

        @param channel: the permid of the channel's owner (binary)
        @param infohash: the infohash of the entry (binary)

        @raise IOError: when more than one metadata row matches.
               NOTE(review): sibling methods raise MetadataDBException for
               the same condition -- consider unifying (callers may catch
               IOError, so not changed here).
        """

        assert channel is not None
        assert infohash is not None

        channel = bin2str(channel)
        infohash = bin2str(infohash)

        query = QUERIES["SELECT METADATA"]

        if DEBUG:
            print >> sys.stderr, "Performing query on db: " + query

        res = self._db.fetchall(query, (infohash, channel))

        if len(res) == 0 :
            return
        if len(res) > 1 :
            raise IOError("Metadata DB constraint violated")

        metadata_fk = res[0][0]

        # delete child subtitle rows first, then the metadata row itself;
        # both writes are deferred and committed together below
        self._deleteAllSubtitles(metadata_fk, False)

        query = QUERIES["DELETE METADATA PK"]

        self._db.execute_write(query, (metadata_fk,), False)

        self._db.commit()
+        
+        
+        
+        
+    
    def _deleteAllSubtitles(self, metadata_fk, commitNow):
        # Remove every SubtitleInfo row referencing the given Metadata
        # primary key; commit only when commitNow is truthy.
        query = QUERIES["DELETE ALL SUBTITLES"]

        self._db.execute_write(query, (metadata_fk,), commitNow)
+        
+    def getAllLocalSubtitles(self):
+        '''
+        Returns a structure containing all the subtitleInfos that are pointing
+        to a local path
+        
+        @return a dictionary like this:
+                { ...
+                  channel1 : { infohash1 : [ SubtitleInfo1, ...] }
+                  ...
+                }
+        '''
+        query = QUERIES["SELECT SUBTITLES WITH PATH"]
+        res = self._db.fetchall(query)
+        
+        result = {}
+        
+        for entry in res:
+            # fk = entry[0]
+            path = entry[1]
+            lang = entry[2]
+            checksum = str2bin(entry[3])
+            channel = str2bin(entry[4])
+            infohash = str2bin(entry[5])
+            
+            s = SubtitleInfo(lang, path, checksum)
+            
+            if channel not in result:
+                result[channel] = {}
+            if infohash not in result[channel]:
+                result[channel][infohash] = []
+            
+            result[channel][infohash].append(s)
+            
+        return result
+    
+    def getLocalSubtitles(self, channel, infohash):
+        '''
+        Returns a dictionary containing all the subtitles pointing
+        to a local pathm for the given channel, infohash
+        @param channel: binary channel_id(permid)
+        @param infohash: binary infohash
+        
+        @rtype: dict
+        @return: a dictionary like this:
+                {
+                 ...
+                 langCode : SubtitleInfo,
+                 ...
+                }
+                The dictionary will be empty if no local subtitle
+                is available.
+        '''
+        query = QUERIES["SELECT SUBTITLES WITH PATH BY CHN INFO"]
+        
+        channel = bin2str(channel)
+        infohash = bin2str(infohash)
+        res = self._db.fetchall(query,(channel,infohash))
+        
+        result  = {}
+        
+        for entry in res:
+            location = entry[0]
+            language = entry[1]
+            checksum = str2bin(entry[2])
+            subInfo = SubtitleInfo(language, location, checksum)
+            result[language] = subInfo
+        
+        return result
+            
+    
    def insertHaveMask(self, channel, infohash, peer_id, havemask, timestamp=None):
        '''
        Store a received have mask in the db.

        Each inserted row represents a declaration of subtitle
        availability from peer_id, for some subtitles for
        a torrent identified by infohash in a channel identified
        by channel.

        @type channel: str
        @param channel: channel_id (binary)

        @type infohash: str
        @param infohash: the infohash of a torrent (binary)

        @type peer_id: str
        @param peer_id: peer from whom the infomask was received (ie its binary permid)

        @type havemask: int
        @param havemask: a non-negative integer. It must be smaller
                        than 2**32.

        @param timestamp: reception time; defaults to the current time

        @precondition: an entry for (channel, infohash) must already
                       exist in the database
        @raise MetadataDBException: if the precondition is violated, or the
                       insert breaks an integrity constraint
        '''
        query = QUERIES["SELECT METADATA"]

        if timestamp is None:
            timestamp = int(time.time())

        channel = bin2str(channel)
        infohash = bin2str(infohash)
        peer_id = bin2str(peer_id)

        # resolve the metadata row this have mask refers to
        res = self._db.fetchall(query, (infohash, channel))

        if len(res) != 1:
            raise MetadataDBException("No entry in the MetadataDB for %s, %s" %\
                                      (channel[-10:],infohash))

        metadata_fk = res[0][0]

        insertQuery = QUERIES["INSERT HAVE MASK"]

        try:
            self._db.execute_write(insertQuery, (metadata_fk, peer_id, havemask, timestamp))
        except sqlite3.IntegrityError,e:
            # e.g. a duplicate (metadata_fk, peer_id) row: surface as a
            # MetadataDBException so callers handle one exception type
            raise MetadataDBException(str(e))
+            
+    
    def updateHaveMask(self,channel,infohash,peer_id, newMask, timestamp=None):
        '''
        Update an already-stored have mask in the db.

        (See insertHaveMask for a description of the have-mask semantics.)

        @type channel: str
        @param channel: channel_id (binary)

        @type infohash: str
        @param infohash: the infohash of a torrent (binary)

        @type peer_id: str
        @param peer_id: peer from whom the infomask was received (ie its binary permid)

        @type newMask: int
        @param newMask: the new have mask; a non-negative integer smaller
                        than 2**32.

        @param timestamp: update time; defaults to the current time
        '''
        channel = bin2str(channel)
        infohash = bin2str(infohash)
        peer_id = bin2str(peer_id)

        updateQuery = QUERIES["UPDATE HAVE MASK"]
        if timestamp is None:
            timestamp = int(time.time())
        self._db.execute_write(updateQuery,
                               (newMask,timestamp,peer_id, channel, infohash))
+    
+    def deleteHaveEntry(self, channel, infohash, peer_id):
+        '''
+        Delete a row from the SubtitlesHave db.
+        
+        If the row is not in the db nothing happens.
+        
+        @type channel: str
+        @param channel: channel_id (binary)
+        
+        @type infohash: str
+        @param infohash: the infohash of a torrent (binary)
+        
+        @type peer_id: str
+        @param peer_id: peer from whom the infomask was received.(ie its binary permid)
+        
+        @postcondition: if a row identified by channel, infohash, peer_id
+                        was in the database, it will no longer be there
+                        at the end of this method call
+        
+        '''
+        channel = bin2str(channel)
+        infohash = bin2str(infohash)
+        peer_id = bin2str(peer_id)
+        deleteQuery = QUERIES["DELETE HAVE"]
+        self._db.execute_write(deleteQuery,
+                               (peer_id,channel,infohash))
+    
+    def getHaveMask(self, channel, infohash, peer_id):
+        '''
+        Returns the have mask for a single peer if available.
+        
+        @type channel: str
+        @param channel: channel_id (binary)
+        
+        @type infohash: str
+        @param infohash: the infohash of a torrent (binary)
+        
+        @type peer_id: str
+        @param peer_id: peer from whom the infomask was received.(ie its binary permid)
+        
+        @rtype: int
+        @return: the have mask relative to channel, infohash, and peer.
+                 If not available returns None
+                 
+        @postcondition: the return value is either None or a non-negative
+                        integer smaller then 2**32
+        '''
+        
+        query = QUERIES["GET ONE HAVE MASK"]
+        
+        channel = bin2str(channel)
+        infohash = bin2str(infohash)
+        peer_id = bin2str(peer_id)
+        
+        res = self._db.fetchall(query,(channel,infohash,peer_id))
+        
+        if len(res) <= 0:
+            return None
+        elif len(res) > 1:
+            raise AssertionError("channel,infohash,peer_id should be unique")
+        else:
+            return res[0][0]
+        
+    
+    def getHaveEntries(self, channel, infohash):
+        '''
+        Return a list of have entries for subtitles for a torrent
+        in a channel.
+        
+        This method returns a list of tuple, like:
+        [ 
+          ...
+          (peer_id, haveMask, timestamp),
+          ...
+        ]
+        
+        (peer_id) is the perm_id of a Tribler
+        Peer, haveMask is an integer value representing a 
+        bitmask of subtitles owned by that peer. 
+        Timestamp is the timestamp at the time the havemask
+        was received. 
+        The results are ordered by descending timestamp.
+        If there are no
+        entris for the givenn channel,infohash pair, the returned
+        list will be empty
+        
+        @type channel: str
+        @param channel: channel_id (binary)
+        
+        @type infohash: str
+        @param infohash: the infohash of a torrent (binary)
+        
+        @rtype: list
+        @return: see description
+        
+        '''
+        query = QUERIES["GET ALL HAVE MASK"]
+        
+        channel = bin2str(channel)
+        infohash = bin2str(infohash)
+        
+        res = self._db.fetchall(query,(channel,infohash))
+        returnlist = list()
+        
+        for entry in res:
+            peer_id = str2bin(entry[0])
+            haveMask = entry[1]
+            timestamp = entry[2]
+            returnlist.append((peer_id, haveMask, timestamp))
+            
+        return returnlist
+    
+    def cleanupOldHave(self, limit_ts):
+        '''
+        Remove from the SubtitlesHave database every entry
+        received at a timestamp that is (strictly) less then limit_ts
+        
+        This method does not remove have messages sent by
+        the publisher of the channel.
+        
+        @type limit_ts: int
+        @param limit_ts: a timestamp. All the entries in the
+                         database having timestamp lessere then
+                         limit_ts will be removed, excpet if
+                         they were received by the publisher
+                         of the channel
+        '''
+        cleanupQuery = QUERIES["CLEANUP OLD HAVE"]
+        
+        self._db.execute_write(cleanupQuery,(limit_ts,))
+        
+    
+    def insertOrUpdateHave(self, channel, infohash, peer_id, havemask, timestamp=None):
+        '''
+        Store a received have mask in the db
+        
+        Each inserted rows represent a delcaration of subtitle 
+        availability from peer_id, for some subtitles for
+        a torrent identified by infohash in a channel identified
+        by channel.
+        
+        If a row for the given (channel, infohash, peer_id) it 
+        is updated accordingly to the parameters. Otherwise
+        a new row is added to the db
+        
+        @type channel: str
+        @param channel: channel_id (binary)
+        
+        @type infohash: str
+        @param infohash: the infohash of a torrent (binary)
+        
+        @type peer_id: str
+        @param peer_id: peer from whom the infomask was received.(ie its binary permid)
+        
+        @type havemask: int
+        @param havemask: a non-negative integer. It must be smaller
+                        then 2**32.
+                        
+        @precondition: an entry for (channel, infohash) must already
+                       exist in the database
+        '''
+        
+        if timestamp is None:
+            timestamp = int(time.time())
+
+            
+        if self.getHaveMask(channel, infohash, peer_id) is not None:
+            self.updateHaveMask(channel, infohash, peer_id, havemask, timestamp)
+        else:
+            self.insertHaveMask(channel, infohash, peer_id, havemask, timestamp)
+            
+        
+    
+    
+    
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/Notifier.py b/instrumentation/next-share/BaseLib/Core/CacheDB/Notifier.py
new file mode 100644 (file)
index 0000000..329a082
--- /dev/null
@@ -0,0 +1,90 @@
+# Written by Jelle Roozenburg 
+# see LICENSE.txt for license information
+
+import sys
+import threading
+from traceback import print_exc, print_stack
+
+from BaseLib.Core.simpledefs import *
+
class Notifier:
    """
    Singleton event dispatcher: observers register a callback for a
    (subject, changeTypes, id) filter and are invoked by notify(),
    either inline or through an optional task pool.
    """

    # subjects observers may subscribe to (NTFY_* constants come from
    # BaseLib.Core.simpledefs via the star import)
    SUBJECTS = [NTFY_PEERS, NTFY_TORRENTS, NTFY_PREFERENCES, NTFY_MYPREFERENCES, NTFY_ACTIVITIES, NTFY_REACHABLE, NTFY_CHANNELCAST, NTFY_VOTECAST, NTFY_RICH_METADATA, NTFY_SUBTITLE_CONTENTS]

    #. . .
    # todo: add all datahandler types+other observables
    __single = None

    def __init__(self, pool = None):
        # enforce the singleton: a second construction is a programming error
        if Notifier.__single:
            raise RuntimeError, "Notifier is singleton"
        self.pool = pool  # optional task pool; None means call observers inline
        self.observers = []  # list of (func, subject, changeTypes, id) tuples
        self.observerLock = threading.Lock()  # guards self.observers
        Notifier.__single = self

    def getInstance(*args, **kw):
        # lazily create the singleton on first access (no locking here,
        # unlike the double-checked handlers elsewhere in this package)
        if Notifier.__single is None:
            Notifier(*args, **kw)
        return Notifier.__single
    getInstance = staticmethod(getInstance)

    def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], id = None):
        """
        Add observer function which will be called upon certain event
        Example:
        addObserver(NTFY_PEERS, [NTFY_INSERT,NTFY_DELETE]) -> get callbacks
                    when peers are added or deleted
        addObserver(NTFY_PEERS, [NTFY_SEARCH_RESULT], 'a_search_id') -> get
                    callbacks when peer-searchresults of a search
                    with id=='a_search_id' come in

        (The mutable default for changeTypes is safe here: the list is
        stored but never mutated by this class.)
        """
        assert type(changeTypes) == list
        assert subject in self.SUBJECTS

        obs = (func, subject, changeTypes, id)
        self.observerLock.acquire()
        self.observers.append(obs)
        self.observerLock.release()

    def remove_observer(self, func):
        """ Remove all observers with function func
        """

        self.observerLock.acquire()
        i=0
        while i < len(self.observers):
            ofunc = self.observers[i][0]
            if ofunc == func:
                del self.observers[i]  # keep index: next element shifted down
            else:
                i+=1
        self.observerLock.release()

    def notify(self, subject, changeType, obj_id, *args):
        """
        Notify all interested observers about an event with threads from the pool
        """
        tasks = []
        assert subject in self.SUBJECTS

        # collect matching callbacks under the lock; invoke them after
        # releasing it so observers cannot deadlock on the registry
        self.observerLock.acquire()
        for ofunc, osubject, ochangeTypes, oid in self.observers:
            try:
                if (subject == osubject and
                    changeType in ochangeTypes and
                    (oid is None or oid == obj_id)):
                    tasks.append(ofunc)
            except:
                # a broken filter entry must not stop dispatching to others
                print_stack()
                print_exc()
                print >>sys.stderr,"notify: OIDs were",`oid`,`obj_id`

        self.observerLock.release()
        args = [subject, changeType, obj_id] + list(args)
        for task in tasks:
            if self.pool:
                self.pool.queueTask(task, args)
            else:
                task(*args) # call observer function in this thread
+
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteCacheDBHandler.py b/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteCacheDBHandler.py
new file mode 100644 (file)
index 0000000..c10e830
--- /dev/null
@@ -0,0 +1,4274 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+# Note for Developers: Please write a unittest in Tribler/Test/test_sqlitecachedbhandler.py 
+# for any function you add to database. 
+# Please reuse the functions in sqlitecachedb as much as possible
+
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin, NULL
+from copy import deepcopy,copy
+from traceback import print_exc
+from time import time
+from BaseLib.Core.Utilities.Crypto import sha
+from BaseLib.Core.TorrentDef import TorrentDef
+import sys
+import os
+import socket
+import threading
+import base64
+from random import randint, sample
+from sets import Set
+import math
+import re
+
+from maxflow import Network
+from math import atan, pi
+
+
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from Notifier import Notifier
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.BuddyCast.moderationcast_util import *
+from BaseLib.Core.Overlay.permid import sign_data, verify_data, permid_for_user
+from BaseLib.Core.Search.SearchManager import split_into_keywords
+from BaseLib.Core.Utilities.unicode import name2unicode, dunno2unicode
+from BaseLib.Category.Category import Category
+
+# maxflow constants
+MAXFLOW_DISTANCE = 2
+ALPHA = float(1)/30000
+
+DEBUG = False
+SHOW_ERROR = False
+
+MAX_KEYWORDS_STORED = 5
+MAX_KEYWORD_LENGTH = 50
+
+#Rahim:
+MAX_POPULARITY_REC_PER_TORRENT = 5 # maximum number of records in popularity table for each torrent
+MAX_POPULARITY_REC_PER_TORRENT_PEER = 3 # maximum number of records per each combination of torrent and peer
+
+from BaseLib.Core.Search.SearchManager import split_into_keywords
+
def show_permid_shorter(permid):
    # Human-readable abbreviation of a permid: the last 5 characters of
    # its base64 encoding (newlines stripped), or 'None' for empty input.
    if not permid:
        return 'None'
    encoded = base64.encodestring(permid).replace("\n", "")
    return encoded[-5:]
+
class BasicDBHandler:
    """
    Base class for the table-specific DB handlers: holds the shared
    SQLiteCacheDB instance, the handled table's name, and the Notifier
    used to broadcast change events.
    """

    def __init__(self,db, table_name): ## self, table_name
        self._db = db ## SQLiteCacheDB.getInstance()
        self.table_name = table_name
        self.notifier = Notifier.getInstance()

    def __del__(self):
        # best-effort flush on destruction; never raise from __del__
        try:
            self.sync()
        except:
            if SHOW_ERROR:
                print_exc()

    def close(self):
        # close the underlying db connection, swallowing errors
        try:
            self._db.close()
        except:
            if SHOW_ERROR:
                print_exc()

    def size(self):
        # number of rows in this handler's table
        return self._db.size(self.table_name)

    def sync(self):
        # commit pending writes (alias of commit)
        self._db.commit()

    def commit(self):
        self._db.commit()

    def getOne(self, value_name, where=None, conj='and', **kw):
        # fetch a single value from this table; kw are column=value filters
        return self._db.getOne(self.table_name, value_name, where=where, conj=conj, **kw)

    def getAll(self, value_name, where=None, group_by=None, having=None, order_by=None, limit=None, offset=None, conj='and', **kw):
        # fetch many rows/values from this table with optional SQL clauses
        return self._db.getAll(self.table_name, value_name, where=where, group_by=group_by, having=having, order_by=order_by, limit=limit, offset=offset, conj=conj, **kw)
+    
+            
class MyDBHandler(BasicDBHandler):
    """Key/value access to the MyInfo table (keys: version, torrent_dir)."""

    __single = None    # used for multithreaded singletons pattern
    lock = threading.Lock()

    def getInstance(*args, **kw):
        # Singleton pattern with double-checking
        if MyDBHandler.__single is None:
            MyDBHandler.lock.acquire()
            try:
                if MyDBHandler.__single is None:
                    MyDBHandler(*args, **kw)
            finally:
                MyDBHandler.lock.release()
        return MyDBHandler.__single

    getInstance = staticmethod(getInstance)

    def __init__(self):
        if MyDBHandler.__single is not None:
            raise RuntimeError, "MyDBHandler is singleton"
        MyDBHandler.__single = self
        db = SQLiteCacheDB.getInstance()
        BasicDBHandler.__init__(self,db,'MyInfo') ## self,db,'MyInfo'
        # keys: version, torrent_dir

    def get(self, key, default_value=None):
        # Return the stored value for key; fall back to default_value,
        # or raise KeyError when no default was supplied.
        value = self.getOne('value', entry=key)
        if value is not NULL:
            return value
        else:
            if default_value is not None:
                return default_value
            else:
                raise KeyError, key

    def put(self, key, value, commit=True):
        # Upsert the value for key.
        # NOTE(review): the check-then-act is not atomic; assumes callers
        # serialize writes to MyInfo -- confirm.
        if self.getOne('value', entry=key) is NULL:
            self._db.insert(self.table_name, commit=commit, entry=key, value=value)
        else:
            where = "entry=" + repr(key)
            self._db.update(self.table_name, where, commit=commit, value=value)
+
class FriendDBHandler(BasicDBHandler):
    """
    Handler for friend state, stored as the 'friend' flag of Peer rows.
    """

    __single = None    # used for multithreaded singletons pattern
    lock = threading.Lock()

    def getInstance(*args, **kw):
        # Singleton pattern with double-checking
        if FriendDBHandler.__single is None:
            FriendDBHandler.lock.acquire()
            try:
                if FriendDBHandler.__single is None:
                    FriendDBHandler(*args, **kw)
            finally:
                FriendDBHandler.lock.release()
        return FriendDBHandler.__single

    getInstance = staticmethod(getInstance)

    def __init__(self):
        if FriendDBHandler.__single is not None:
            raise RuntimeError, "FriendDBHandler is singleton"
        FriendDBHandler.__single = self
        db = SQLiteCacheDB.getInstance()
        BasicDBHandler.__init__(self,db, 'Peer') ## self,db,'Peer'

    def setFriendState(self, permid, state=1, commit=True):
        # flip the 'friend' column for this peer and broadcast the change
        self._db.update(self.table_name,  'permid='+repr(bin2str(permid)), commit=commit, friend=state)
        self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid, 'friend', state)

    def getFriends(self,state=1):
        # permids of all peers whose friend flag equals 'state'
        # (queries 'Friend' rather than self.table_name -- presumably a
        # view over Peer; verify against the schema)
        where = 'friend=%d ' % state
        res = self._db.getAll('Friend', 'permid',where=where)
        return [str2bin(p[0]) for p in res]
        #raise Exception('Use PeerDBHandler getGUIPeers(category = "friend")!')

    def getFriendState(self, permid):
        # current value of the friend flag for the given peer
        res = self.getOne('friend', permid=bin2str(permid))
        return res

    def deleteFriend(self,permid):
        # "deleting" a friend just clears the flag; the Peer row remains
        self.setFriendState(permid,0)

    def searchNames(self,kws):
        return doPeerSearchNames(self,'Friend',kws)

    def getRanks(self):
        # TODO
        return []

    def size(self):
        # number of rows in 'Friend', not in the Peer table
        return self._db.size('Friend')

    def addExternalFriend(self, peer):
        # register the peer (via PeerDBHandler) and mark it as a friend
        peerdb = PeerDBHandler.getInstance()
        peerdb.addPeer(peer['permid'], peer)
        self.setFriendState(peer['permid'])
+        
+NETW_MIME_TYPE = 'image/jpeg'
+
+class PeerDBHandler(BasicDBHandler):
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+
+    gui_value_name = ('permid', 'name', 'ip', 'port', 'similarity', 'friend',
+                      'num_peers', 'num_torrents', 'num_prefs', 
+                      'connected_times', 'buddycast_times', 'last_connected',
+                      'is_local')
+    
    def getInstance(*args, **kw):
        # Singleton pattern with double-checking: only the first caller
        # constructs the handler; later callers get the cached instance.
        if PeerDBHandler.__single is None:
            PeerDBHandler.lock.acquire()
            try:
                if PeerDBHandler.__single is None:
                    PeerDBHandler(*args, **kw)
            finally:
                PeerDBHandler.lock.release()
        return PeerDBHandler.__single
    
    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        if PeerDBHandler.__single is not None:
+            raise RuntimeError, "PeerDBHandler is singleton"
+        PeerDBHandler.__single = self
+        db = SQLiteCacheDB.getInstance()
+        BasicDBHandler.__init__(self, db,'Peer') ## self, db ,'Peer'
+        self.pref_db = PreferenceDBHandler.getInstance()
+        self.online_peers = set()
+
+
+    def __len__(self):
+        return self.size()
+
+    def getPeerID(self, permid):
+        return self._db.getPeerID(permid)
+
+    def getPeer(self, permid, keys=None):
+        if keys is not None:
+            res = self.getOne(keys, permid=bin2str(permid))
+            return res
+        else:
+            # return a dictionary
+            # make it compatible for calls to old bsddb interface
+            value_name = ('permid', 'name', 'ip', 'port', 'similarity', 'friend',
+                      'num_peers', 'num_torrents', 'num_prefs', 'num_queries', 
+                      'connected_times', 'buddycast_times', 'last_connected', 'last_seen', 'last_buddycast')
+
+            item = self.getOne(value_name, permid=bin2str(permid))
+            if not item:
+                return None
+            peer = dict(zip(value_name, item))
+            peer['permid'] = str2bin(peer['permid'])
+            return peer
+        
+    def getPeerSim(self, permid):
+        permid_str = bin2str(permid)
+        sim = self.getOne('similarity', permid=permid_str)
+        if sim is None:
+            sim = 0
+        return sim
+        
+    def getPeerList(self, peerids=None):    # get the list of all peers' permid
+        if peerids is None:
+            permid_strs = self.getAll('permid')
+            return [str2bin(permid_str[0]) for permid_str in permid_strs]
+        else:
+            if not peerids:
+                return []
+            s = str(peerids).replace('[','(').replace(']',')')
+#            if len(peerids) == 1:
+#                s = '(' + str(peerids[0]) + ')'    # tuple([1]) = (1,), syntax error for sql
+#            else:
+#                s = str(tuple(peerids))
+            sql = 'select permid from Peer where peer_id in ' + s
+            permid_strs = self._db.fetchall(sql)
+            return [str2bin(permid_str[0]) for permid_str in permid_strs]
+        
+
+    def getPeers(self, peer_list, keys):    # get a list of dictionaries given peer list
+        # BUG: keys must contain 2 entries, otherwise the records in all are single values??
+        value_names = ",".join(keys)
+        sql = 'select %s from Peer where permid=?;'%value_names
+        all = []
+        for permid in peer_list:
+            permid_str = bin2str(permid)
+            p = self._db.fetchone(sql, (permid_str,))
+            all.append(p)
+        
+        peers = []
+        for i in range(len(all)):
+            p = all[i]
+            peer = dict(zip(keys,p))
+            peer['permid'] = peer_list[i]
+            peers.append(peer)
+        
+        return peers
+    
+    def getLocalPeerList(self, max_peers,minoversion=None): # return a list of peer_ids
+        """Return a list of peerids for local nodes, friends first, then random local nodes"""
+        
+        sql = 'select permid from Peer where is_local=1 '
+        if minoversion is not None:
+            sql += 'and oversion >= '+str(minoversion)+' '
+        sql += 'ORDER BY friend DESC, random() limit %d'%max_peers
+        list = []
+        for row in self._db.fetchall(sql):
+            list.append(base64.b64decode(row[0]))
+        return list
+
+
+    def addPeer(self, permid, value, update_dns=True, update_connected=False, commit=True):
+        # add or update a peer
+        # ARNO: AAARGGH a method that silently changes the passed value param!!!
+        # Jie: deepcopy(value)?
+       
+        _permid = _last_seen = _ip = _port = None
+        if 'permid' in value:
+            _permid = value.pop('permid')
+            
+        if not update_dns:
+            if value.has_key('ip'):
+                _ip = value.pop('ip')
+            if value.has_key('port'):
+                _port = value.pop('port')
+                
+        if update_connected:
+            old_connected = self.getOne('connected_times', permid=bin2str(permid))
+            if not old_connected:
+                value['connected_times'] = 1
+            else:
+                value['connected_times'] = old_connected + 1
+            
+            
+        peer_existed = self._db.insertPeer(permid, commit=commit, **value)
+        
+        if _permid is not None:
+            value['permid'] = permid
+        if _last_seen is not None:
+            value['last_seen'] = _last_seen
+        if _ip is not None:
+            value['ip'] = _ip
+        if _port is not None:
+            value['port'] = _port
+        
+        if peer_existed:
+            self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid)
+        # Jie: only notify the GUI when a peer was connected
+        if 'connected_times' in value:
+            self.notifier.notify(NTFY_PEERS, NTFY_INSERT, permid)
+
+        #print >>sys.stderr,"sqldbhand: addPeer",`permid`,self._db.getPeerID(permid),`value`
+        #print_stack()
+            
+            
+    def hasPeer(self, permid):
+        return self._db.hasPeer(permid)
+    
+    
+    def findPeers(self, key, value):    
+        # only used by Connecter
+        if key == 'permid':
+            value = bin2str(value)
+        res = self.getAll('permid', **{key:value})
+        if not res:
+            return []
+        ret = []
+        for p in res:
+            ret.append({'permid':str2bin(p[0])})
+        return ret
+
+    def setPeerLocalFlag(self, permid, is_local, commit=True):
+        # argv = {"is_local":int(is_local)}
+        # updated = self._db.update(self.table_name, 'permid='+repr(bin2str(permid)), **argv)
+        # if commit:
+        #     self.commit()
+        # return updated
+        self._db.update(self.table_name, 'permid='+repr(bin2str(permid)), commit=commit, is_local=int(is_local))
+    
+    def updatePeer(self, permid, commit=True, **argv):
+        self._db.update(self.table_name, 'permid='+repr(bin2str(permid)), commit=commit, **argv)
+        self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid)
+
+        #print >>sys.stderr,"sqldbhand: updatePeer",`permid`,argv
+        #print_stack()
+
+    def deletePeer(self, permid=None, peer_id=None, force=False, commit=True):
+        # don't delete friend of superpeers, except that force is True
+        # to do: add transaction
+        #self._db._begin()    # begin a transaction
+        if peer_id is None:
+            peer_id = self._db.getPeerID(permid)
+        if peer_id is None:
+            return
+        deleted = self._db.deletePeer(permid=permid, peer_id=peer_id, force=force, commit=commit)
+        if deleted:
+            self.pref_db._deletePeer(peer_id=peer_id, commit=commit)
+        self.notifier.notify(NTFY_PEERS, NTFY_DELETE, permid)
+            
+    def updateTimes(self, permid, key, change=1, commit=True):
+        permid_str = bin2str(permid)
+        sql = "SELECT peer_id,%s FROM Peer WHERE permid==?"%key
+        find = self._db.fetchone(sql, (permid_str,))
+        if find:
+            peer_id,value = find
+            if value is None:
+                value = 1
+            else:
+                value += change
+            sql_update_peer = "UPDATE Peer SET %s=? WHERE peer_id=?"%key
+            self._db.execute_write(sql_update_peer, (value, peer_id), commit=commit)
+        self.notifier.notify(NTFY_PEERS, NTFY_UPDATE, permid)
+
+    def updatePeerSims(self, sim_list, commit=True):
+        sql_update_sims = 'UPDATE Peer SET similarity=? WHERE peer_id=?'
+        s = time()
+        self._db.executemany(sql_update_sims, sim_list, commit=commit)
+
+    def getPermIDByIP(self,ip):
+        permid = self.getOne('permid', ip=ip)
+        if permid is not None:
+            return str2bin(permid)
+        else:
+            return None
+        
+    def getPermid(self, peer_id):
+        permid = self.getOne('permid', peer_id=peer_id)
+        if permid is not None:
+            return str2bin(permid)
+        else:
+            return None
+        
+    def getNumberPeers(self, category_name = 'all'):
+        # 28/07/08 boudewijn: counting the union from two seperate
+        # select statements is faster than using a single select
+        # statement with an OR in the WHERE clause. Note that UNION
+        # returns a distinct list of peer_id's.
+        if category_name == 'friend':
+            sql = 'SELECT COUNT(peer_id) FROM Peer WHERE last_connected > 0 AND friend = 1'
+        else:
+            sql = 'SELECT COUNT(peer_id) FROM (SELECT peer_id FROM Peer WHERE last_connected > 0 UNION SELECT peer_id FROM Peer WHERE friend = 1)'
+        res = self._db.fetchone(sql)
+        if not res:
+            res = 0
+        return res
+    
+    def getGUIPeers(self, category_name = 'all', range = None, sort = None, reverse = False, get_online=False, get_ranks=True):
+        #
+        # ARNO: WHY DIFF WITH NORMAL getPeers??????
+        # load peers for GUI
+        #print >> sys.stderr, 'getGUIPeers(%s, %s, %s, %s)' % (category_name, range, sort, reverse)
+        """
+        db keys: peer_id, permid, name, ip, port, thumbnail, oversion, 
+                 similarity, friend, superpeer, last_seen, last_connected, 
+                 last_buddycast, connected_times, buddycast_times, num_peers, 
+                 num_torrents, num_prefs, num_queries, is_local,
+                 
+        @in: get_online: boolean: if true, give peers a key 'online' if there is a connection now
+        """
+        value_name = PeerDBHandler.gui_value_name
+        
+        where = '(last_connected>0 or friend=1 or friend=2 or friend=3) '
+        if category_name in ('friend', 'friends'):
+            # Show mutual, I invited and he invited 
+            where += 'and (friend=1 or friend=2 or friend=3) '
+        if range:
+            offset= range[0]
+            limit = range[1] - range[0]
+        else:
+            limit = offset = None
+        if sort:
+            # Arno, 2008-10-6: buggy: not reverse???
+            desc = (reverse) and 'desc' or ''
+            if sort in ('name'):
+                order_by = ' lower(%s) %s' % (sort, desc)
+            else:
+                order_by = ' %s %s' % (sort, desc)
+        else:
+            order_by = None
+
+        # Must come before query
+        if get_ranks:
+            ranks = self.getRanks()
+        # Arno, 2008-10-23: Someone disabled ranking of people, why?
+            
+        res_list = self.getAll(value_name, where, offset= offset, limit=limit, order_by=order_by)
+        
+        #print >>sys.stderr,"getGUIPeers: where",where,"offset",offset,"limit",limit,"order",order_by
+        #print >>sys.stderr,"getGUIPeers: returned len",len(res_list)
+        
+        peer_list = []
+        for item in res_list:
+            peer = dict(zip(value_name, item))
+            peer['name'] = dunno2unicode(peer['name'])
+            peer['simRank'] = ranksfind(ranks,peer['permid'])
+            peer['permid'] = str2bin(peer['permid'])
+            peer_list.append(peer)
+            
+        if get_online:
+            # Arno, 2010-01-28: Disabled this. Maybe something wrong with setOnline
+            # observer.
+            #self.checkOnline(peer_list)
+            raise ValueError("getGUIPeers get_online parameter currently disabled")
+            
+            
+        # peer_list consumes about 1.5M for 1400 peers, and this function costs about 0.015 second
+        
+        return  peer_list
+
+            
+    def getRanks(self):
+        value_name = 'permid'
+        order_by = 'similarity desc'
+        rankList_size = 20
+        where = '(last_connected>0 or friend=1) '
+        res_list = self._db.getAll('Peer', value_name, where=where, limit=rankList_size, order_by=order_by)
+        return [a[0] for a in res_list]
+        
+    def checkOnline(self, peerlist):
+        # Add 'online' key in peers when their permid
+        # Called by any thread, accesses single online_peers-dict
+        # Peers will never be sorted by 'online' because it is not in the db.
+        # Do not sort here, because then it would be sorted with a partial select (1 page in the grid)
+        self.lock.acquire()
+        for peer in peerlist:
+            peer['online'] = (peer['permid'] in self.online_peers)
+        self.lock.release()
+        
+        
+
+    def setOnline(self,subject,changeType,permid,*args):
+        """Called by callback threads
+        with NTFY_CONNECTION, args[0] is boolean: connection opened/closed
+        """
+        self.lock.acquire()
+        if args[0]: # connection made
+            self.online_peers.add(permid)
+        else: # connection closed
+            self.online_peers.remove(permid)
+        self.lock.release()
+        #print >> sys.stderr, (('#'*50)+'\n')*5+'%d peers online' % len(self.online_peers)
+
+    def registerConnectionUpdater(self, session):
+        # Arno, 2010-01-28: Disabled this. Maybe something wrong with setOnline
+        # observer. ThreadPool may somehow not be executing the calls to setOnline
+        # session.add_observer(self.setOnline, NTFY_PEERS, [NTFY_CONNECTION], None)
+        pass
+    
+    def updatePeerIcon(self, permid, icontype, icondata, commit = True):
+        # save thumb in db
+        self.updatePeer(permid, thumbnail=bin2str(icondata))
+        #if self.mm is not None:
+        #    self.mm.save_data(permid, icontype, icondata)
+    
+
+    def getPeerIcon(self, permid):
+        item = self.getOne('thumbnail', permid=bin2str(permid))
+        if item:
+            return NETW_MIME_TYPE, str2bin(item)
+        else:
+            return None, None
+        #if self.mm is not None:
+        #    return self.mm.load_data(permid)
+        #3else:
+        #    return None
+
+
+    def searchNames(self,kws):
+        return doPeerSearchNames(self,'Peer',kws)
+
+
+
class SuperPeerDBHandler(BasicDBHandler):
    # Handler for the SuperPeer table: parses the bundled superpeer text
    # file and mirrors its entries into the Peer table (superpeer=1).
    
    __single = None    # used for multithreaded singletons pattern
    lock = threading.Lock()
    
    def getInstance(*args, **kw):
        # Singleton pattern with double-checking
        if SuperPeerDBHandler.__single is None:
            SuperPeerDBHandler.lock.acquire()   
            try:
                if SuperPeerDBHandler.__single is None:
                    SuperPeerDBHandler(*args, **kw)
            finally:
                SuperPeerDBHandler.lock.release()
        return SuperPeerDBHandler.__single
    
    getInstance = staticmethod(getInstance)
    
    def __init__(self):
        if SuperPeerDBHandler.__single is not None:
            raise RuntimeError, "SuperPeerDBHandler is singleton"
        SuperPeerDBHandler.__single = self
        db = SQLiteCacheDB.getInstance()
        BasicDBHandler.__init__(self, db, 'SuperPeer')
        self.peer_db_handler = PeerDBHandler.getInstance()
        
    def loadSuperPeers(self, config, refresh=False):
        # Read the superpeer file shipped in install_dir and insert its entries.
        filename = os.path.join(config['install_dir'], config['superpeer_file'])
        superpeer_list = self.readSuperPeerList(filename)
        self.insertSuperPeers(superpeer_list, refresh)

    def readSuperPeerList(self, filename=u''):
        """ read (superpeer_ip, superpeer_port, permid [, name]) lines from a text file """
        
        try:
            filepath = os.path.abspath(filename)
            file = open(filepath, "r")
        except IOError:
            print >> sys.stderr, "superpeer: cannot open superpeer file", filepath
            return []
            
        superpeers = file.readlines()
        file.close()
        superpeers_info = []
        for superpeer in superpeers:
            if superpeer.strip().startswith("#"):    # skip commented lines
                continue
            superpeer_line = superpeer.split(',')
            superpeer_info = [a.strip() for a in superpeer_line]
            try:
                # permid is stored base64-encoded; decodestring wants a trailing newline
                superpeer_info[2] = base64.decodestring(superpeer_info[2]+'\n' )
            except:
                print_exc()
                continue
            try:
                # resolve the hostname; a DNS failure skips this entry
                ip = socket.gethostbyname(superpeer_info[0])
                superpeer = {'ip':ip, 'port':superpeer_info[1], 
                          'permid':superpeer_info[2], 'superpeer':1}
                if len(superpeer_info) > 3:
                    superpeer['name'] = superpeer_info[3]
                superpeers_info.append(superpeer)
            except:
                print_exc()
                
        return superpeers_info

    def insertSuperPeers(self, superpeer_list, refresh=False):
        # Insert all entries in one batch, committing once at the end.
        # refresh is currently unused here.
        for superpeer in superpeer_list:
            superpeer = deepcopy(superpeer)
            if not isinstance(superpeer, dict) or 'permid' not in superpeer:
                continue
            permid = superpeer.pop('permid')
            self.peer_db_handler.addPeer(permid, superpeer, commit=False)
        self.peer_db_handler.commit()
    
    def getSuperPeers(self):
        # return list with permids of superpeers
        res_list = self._db.getAll(self.table_name, 'permid')
        return [str2bin(a[0]) for a in res_list]
        
    def addExternalSuperPeer(self, peer):
        # Insert one superpeer dict directly into the Peer table.
        # deepcopy so the caller's dict is not mutated by the pop below.
        _peer = deepcopy(peer)
        permid = _peer.pop('permid')
        _peer['superpeer'] = 1
        self._db.insertPeer(permid, **_peer)
+
+
+class CrawlerDBHandler:
+    """
+    The CrawlerDBHandler is not an actual handle to a
+    database. Instead it uses a local file (usually crawler.txt) to
+    identify crawler processes.
+    """
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking
+        if CrawlerDBHandler.__single is None:
+            CrawlerDBHandler.lock.acquire()   
+            try:
+                if CrawlerDBHandler.__single is None:
+                    CrawlerDBHandler(*args, **kw)
+            finally:
+                CrawlerDBHandler.lock.release()
+        return CrawlerDBHandler.__single
+    
+    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        if CrawlerDBHandler.__single is not None:
+            raise RuntimeError, "CrawlerDBHandler is singleton"
+        CrawlerDBHandler.__single = self
+        self._crawler_list = []
+        
+    def loadCrawlers(self, config, refresh=False):
+        filename = os.path.join(config['crawler_file'])
+        self._crawler_list = self.readCrawlerList(filename)
+
+    def readCrawlerList(self, filename=''):
+        """
+        read (permid [, name]) lines from a text file
+        returns a list containing permids
+        """
+        
+        try:
+            filepath = os.path.abspath(filename)
+            file = open(filepath, "r")
+        except IOError:
+            print >> sys.stderr, "crawler: cannot open crawler file", filepath
+            return []
+            
+        crawlers = file.readlines()
+        file.close()
+        crawlers_info = []
+        for crawler in crawlers:
+            if crawler.strip().startswith("#"):    # skip commended lines
+                continue
+            crawler_info = [a.strip() for a in crawler.split(",")]
+            try:
+                crawler_info[0] = base64.decodestring(crawler_info[0]+'\n')
+            except:
+                print_exc()
+                continue
+            crawlers_info.append(str2bin(crawler))
+                    
+        return crawlers_info
+
+    def temporarilyAddCrawler(self, permid):
+        """
+        Because of security reasons we will not allow crawlers to be
+        added to the crawler.txt list. This temporarilyAddCrawler
+        method can be used to add one for the running session. Usefull
+        for debugging and testing.
+        """
+        if not permid in self._crawler_list:
+            self._crawler_list.append(permid)
+
+    def getCrawlers(self):
+        """
+        returns a list with permids of crawlers
+        """
+        return self._crawler_list
+
+
+        
class PreferenceDBHandler(BasicDBHandler):
    # Handler for the Preference table: which peer has which torrent in its
    # BuddyCast preference list, plus clicklog data (click position,
    # reranking strategy, search terms) and swarm-size popularity info.
    
    __single = None    # used for multithreaded singletons pattern
    lock = threading.Lock()
    
    def getInstance(*args, **kw):
        # Singleton pattern with double-checking
        if PreferenceDBHandler.__single is None:
            PreferenceDBHandler.lock.acquire()   
            try:
                if PreferenceDBHandler.__single is None:
                    PreferenceDBHandler(*args, **kw)
            finally:
                PreferenceDBHandler.lock.release()
        return PreferenceDBHandler.__single
    
    getInstance = staticmethod(getInstance)
    
    def __init__(self):
        if PreferenceDBHandler.__single is not None:
            raise RuntimeError, "PreferenceDBHandler is singleton"
        PreferenceDBHandler.__single = self
        db = SQLiteCacheDB.getInstance()
        BasicDBHandler.__init__(self,db, 'Preference') ## self,db,'Preference'
        
        self.popularity_db = PopularityDBHandler.getInstance()
        
            
    def _getTorrentOwnersID(self, torrent_id):
        # peer_ids of all peers that have this torrent in their preference list
        sql_get_torrent_owners_id = u"SELECT peer_id FROM Preference WHERE torrent_id==?"
        res = self._db.fetchall(sql_get_torrent_owners_id, (torrent_id,))
        return [t[0] for t in res]
    
    def getPrefList(self, permid, return_infohash=False):
        # get a peer's preference list of infohash or torrent_id according to return_infohash
        peer_id = self._db.getPeerID(permid)
        if peer_id is None:
            return []
        
        if not return_infohash:
            sql_get_peer_prefs_id = u"SELECT torrent_id FROM Preference WHERE peer_id==?"
            res = self._db.fetchall(sql_get_peer_prefs_id, (peer_id,))
            return [t[0] for t in res]
        else:
            sql_get_infohash = u"SELECT infohash FROM Torrent WHERE torrent_id IN (SELECT torrent_id FROM Preference WHERE peer_id==?)"
            res = self._db.fetchall(sql_get_infohash, (peer_id,))
            return [str2bin(t[0]) for t in res]
    
    def _deletePeer(self, permid=None, peer_id=None, commit=True):   # delete a peer from pref_db
        # should only be called by PeerDBHandler
        if peer_id is None:
            peer_id = self._db.getPeerID(permid)
            if peer_id is None:
                return
        
        self._db.delete(self.table_name, commit=commit, peer_id=peer_id)

    def addPreference(self, permid, infohash, data={}, commit=True):           
        # NOTE(review): 'data' is an unused mutable default argument.
        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
        # This function should be replaced by addPeerPreferences 
        # peer_permid and prefs are binaries, the peer must have been inserted in Peer table
        # Nicolas: did not change this function as it seems addPreference*s* is getting called
        peer_id = self._db.getPeerID(permid)
        if peer_id is None:
            print >> sys.stderr, 'PreferenceDBHandler: add preference of a peer which is not existed in Peer table', `permid`
            return
        
        sql_insert_peer_torrent = u"INSERT INTO Preference (peer_id, torrent_id) VALUES (?,?)"        
        torrent_id = self._db.getTorrentID(infohash)
        if not torrent_id:
            # unknown torrent: create a stub row so we get a torrent_id
            self._db.insertInfohash(infohash)
            torrent_id = self._db.getTorrentID(infohash)
        try:
            self._db.execute_write(sql_insert_peer_torrent, (peer_id, torrent_id), commit=commit)
        except Exception, msg:    # duplicated
            print_exc()
            
            

    def addPreferences(self, peer_permid, prefs, recvTime=0.0, is_torrent_id=False, commit=True):
        # peer_permid and prefs are binaries, the peer must have been inserted in Peer table
        # boudewijn: for buddycast version >= OLPROTO_VER_EIGTH the
        # prefs list may contain both strings (indicating an infohash)
        # or dictionaries (indicating an infohash with metadata)
        peer_id = self._db.getPeerID(peer_permid)
        if peer_id is None:
            print >> sys.stderr, 'PreferenceDBHandler: add preference of a peer which is not existed in Peer table', `peer_permid`
            return

        # normalize: plain infohash strings become {'infohash': ...} dicts
        prefs = [type(pref) is str and {"infohash":pref} or pref
                 for pref
                 in prefs]

        if __debug__:
            for pref in prefs:
                assert isinstance(pref["infohash"], str), "INFOHASH has invalid type: %s" % type(pref["infohash"])
                assert len(pref["infohash"]) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(pref["infohash"])

        torrent_id_swarm_size =[]
        torrent_id_prefs =[]
        if is_torrent_id:
            for pref in prefs:
                torrent_id_prefs.append((peer_id, 
                                 pref['torrent_id'], 
                                 pref.get('position', -1), 
                                 pref.get('reranking_strategy', -1)) 
                                )
                #Rahim : Since overlay version 11 swarm size information is 
                # appended and should be added to the database . The code below 
                # does this. torrent_id, recv_time, calc_age, num_seeders, 
                # num_leechers, num_sources
                #
                #torrent_id_swarm_size =[]
                if pref.get('calc_age') is not None:
                    tempAge= pref.get('calc_age')
                    tempSeeders = pref.get('num_seeders')
                    tempLeechers = pref.get('num_leechers') 
                    if tempAge > 0 and tempSeeders >= 0 and tempLeechers >= 0:
                        torrent_id_swarm_size.append([pref['torrent_id'],
                                         recvTime, 
                                         tempAge,  
                                         tempSeeders, 
                                         tempLeechers,
                                         pref.get('num_sources_seen', -1)])# -1 means invalud value 
        else:
            # Nicolas: do not know why this would be called, but let's handle 
            # it smoothly
            torrent_id_prefs = []
            #Rahim: I also don't know when this part is run, I just follow the 
            # way that Nicolas has done.
            #torrent_id_swarm_size = []
            for pref in prefs:
                infohash = pref["infohash"]
                torrent_id = self._db.getTorrentID(infohash)
                if not torrent_id:
                    self._db.insertInfohash(infohash)
                    torrent_id = self._db.getTorrentID(infohash)
                torrent_id_prefs.append((peer_id, torrent_id, -1, -1))
                #Rahim: Amended for handling and adding swarm size info.
                #torrent_id_swarm_size.append((torrent_id, recvTime,0, -1, -1, -1))
                
            
        sql_insert_peer_torrent = u"INSERT INTO Preference (peer_id, torrent_id, click_position, reranking_strategy) VALUES (?,?,?,?)"        
        if len(prefs) > 0:
            try:
                self._db.executemany(sql_insert_peer_torrent, torrent_id_prefs, commit=commit)
                popularity_db = PopularityDBHandler.getInstance()
                if len(torrent_id_swarm_size) > 0:
                    popularity_db.storePeerPopularity(peer_id, torrent_id_swarm_size, commit=commit)
            except Exception, msg:    # duplicated
                print_exc()
                print >> sys.stderr, 'dbhandler: addPreferences:', Exception, msg
                
        # now, store search terms
        
        # Nicolas: if maximum number of search terms is exceeded, abort storing them.
        # Although this may seem a bit strict, this means that something different than a genuine Tribler client
        # is on the other side, so we might rather err on the side of caution here and simply let clicklog go.
        # NOTE(review): max() raises ValueError when prefs is empty -- verify
        # that callers never pass an empty prefs list.
        nums_of_search_terms = [len(pref.get('search_terms',[])) for pref in prefs]
        if max(nums_of_search_terms)>MAX_KEYWORDS_STORED:
            if DEBUG:
                print >>sys.stderr, "peer %d exceeds max number %d of keywords per torrent, aborting storing keywords"  % \
                                    (peer_id, MAX_KEYWORDS_STORED)
            return  
        
        # union of all search terms across prefs
        all_terms_unclean = Set([])
        for pref in prefs:
            newterms = Set(pref.get('search_terms',[]))
            all_terms_unclean = all_terms_unclean.union(newterms)        
            
        # keep only the alphanumeric characters of each term
        all_terms = [] 
        for term in all_terms_unclean:
            cleanterm = ''
            for i in range(0,len(term)):
                c = term[i]
                if c.isalnum():
                    cleanterm += c
            if len(cleanterm)>0:
                all_terms.append(cleanterm)
        # maybe we haven't received a single key word, no need to loop again over prefs then
        if len(all_terms)==0:
            return
           
        termdb = TermDBHandler.getInstance()
        searchdb = SearchDBHandler.getInstance()
                
        # insert all unknown terms NOW so we can rebuild the index at once
        termdb.bulkInsertTerms(all_terms)         
        
        # get local term ids for terms.
        foreign2local = dict([(str(foreign_term), termdb.getTermID(foreign_term))
                              for foreign_term
                              in all_terms])        
        
        # process torrent data
        for pref in prefs:
            torrent_id = pref.get('torrent_id', None)
            search_terms = pref.get('search_terms', [])
            
            if search_terms==[]:
                continue
            if not torrent_id:
                if DEBUG:
                    print >> sys.stderr, "torrent_id not set, retrieving manually!"
                # NOTE(review): 'infohash' here is the leftover loop variable
                # from the is_torrent_id=False branch above; when
                # is_torrent_id=True it is unbound (NameError) and otherwise
                # stale -- looks buggy, verify against callers.
                torrent_id = TorrentDBHandler.getInstance().getTorrentID(infohash)
                
            term_ids = [foreign2local[str(foreign)] for foreign in search_terms if str(foreign) in foreign2local]
            searchdb.storeKeywordsByID(peer_id, torrent_id, term_ids, commit=False)
        if commit:
            searchdb.commit()
    
    def getAllEntries(self):
        """use with caution,- for testing purposes"""
        return self.getAll("rowid, peer_id, torrent_id, click_position,reranking_strategy", order_by="peer_id, torrent_id")


    def getRecentPeersPrefs(self, key, num=None):
        # get the recently seen peers' preference. used by buddycast
        sql = "select peer_id,torrent_id from Preference where peer_id in (select peer_id from Peer order by %s desc)"%key
        if num is not None:
            # strip the trailing ')' and re-append it after the limit clause
            sql = sql[:-1] + " limit %d)"%num
        res = self._db.fetchall(sql)
        return res
    
    def getPositionScore(self, torrent_id, keywords):
        """returns a tuple (num, positionScore) stating how many times the torrent id was found in preferences,
           and the average position score, where each click at position i receives 1-(1/i) points"""
           
        if not keywords:
            return (0,0)
           
        term_db = TermDBHandler.getInstance()
        term_ids = [term_db.getTermID(keyword) for keyword in keywords]
        # python list repr -> SQL tuple; strip the long-integer 'L' suffix
        s_term_ids = str(term_ids).replace("[","(").replace("]",")").replace("L","")
        
        # we're not really interested in the peer_id here,
        # just make sure we don't count twice if we hit more than one keyword in a search
        # ... one might treat keywords a bit more strictly here anyway (AND instead of OR)
        sql = """
SELECT DISTINCT Preference.peer_id, Preference.click_position 
FROM Preference 
INNER JOIN ClicklogSearch 
ON 
    Preference.torrent_id = ClicklogSearch.torrent_id 
  AND 
    Preference.peer_id = ClicklogSearch.peer_id 
WHERE 
    ClicklogSearch.term_id IN %s 
  AND
    ClicklogSearch.torrent_id = %s""" % (s_term_ids, torrent_id)
        res = self._db.fetchall(sql)
        scores = [1.0-1.0/float(click_position+1) 
                  for (peer_id, click_position) 
                  in res 
                  if click_position>-1]
        if len(scores)==0:
            return (0,0)
        score = float(sum(scores))/len(scores)
        return (len(scores), score)
+
+        
class TorrentDBHandler(BasicDBHandler):
    # DB access layer for the Torrent / CollectedTorrent tables.
    
    __single = None    # used for multithreaded singletons pattern
    lock = threading.Lock()
    
    def getInstance(*args, **kw):
        # Singleton pattern with double-checking
        if TorrentDBHandler.__single is None:
            TorrentDBHandler.lock.acquire()   
            try:
                if TorrentDBHandler.__single is None:
                    # __init__ stores the new instance in __single itself
                    TorrentDBHandler(*args, **kw)
            finally:
                TorrentDBHandler.lock.release()
        return TorrentDBHandler.__single
    
    getInstance = staticmethod(getInstance)
+
    def __init__(self):
        # Enforce the singleton contract: construction only via getInstance().
        if TorrentDBHandler.__single is not None:
            raise RuntimeError, "TorrentDBHandler is singleton"
        TorrentDBHandler.__single = self
        db = SQLiteCacheDB.getInstance()
        BasicDBHandler.__init__(self,db, 'Torrent') ## self,db,torrent
        
        self.mypref_db = MyPreferenceDBHandler.getInstance()
        
        # status name -> id; seeded with the defaults, then extended with
        # whatever the database already knows
        self.status_table = {'good':1, 'unknown':0, 'dead':2}
        self.status_table.update(self._db.getTorrentStatusTable())
        # reverse mapping: id -> status name
        self.id2status = dict([(x,y) for (y,x) in self.status_table.items()]) 
        self.torrent_dir = None    # set later via register()
        # 0 - unknown
        # 1 - good
        # 2 - dead
        
        # category name -> id (see numeric legend below)
        self.category_table  = {'Video':1,
                                'VideoClips':2,
                                'Audio':3,
                                'Compressed':4,
                                'Document':5,
                                'Picture':6,
                                'xxx':7,
                                'other':8,}
        self.category_table.update(self._db.getTorrentCategoryTable())
        self.category_table['unknown'] = 0 
        self.id2category = dict([(x,y) for (y,x) in self.category_table.items()])
        # 1 - Video
        # 2 - VideoClips
        # 3 - Audio
        # 4 - Compressed
        # 5 - Document
        # 6 - Picture
        # 7 - xxx
        # 8 - other
        
        # torrent source (local / buddycast / RSS feed URL) mappings
        self.src_table = self._db.getTorrentSourceTable()
        self.id2src = dict([(x,y) for (y,x) in self.src_table.items()])
        # 0 - ''    # local added
        # 1 - BC
        # 2,3,4... - URL of RSS feed
        # columns accepted by updateTorrent(); anything else is dropped there
        self.keys = ['torrent_id', 'name', 'torrent_file_name',
                'length', 'creation_date', 'num_files', 'thumbnail',
                'insert_time', 'secret', 'relevance',
                'source_id', 'category_id', 'status_id',
                'num_seeders', 'num_leechers', 'comment']
        # in-memory cache of infohashes known to exist (see hasTorrent)
        self.existed_torrents = Set()


        # default column list for getTorrent()/getTorrents() queries
        self.value_name = ['C.torrent_id', 'category_id', 'status_id', 'name', 'creation_date', 'num_files',
                      'num_leechers', 'num_seeders', 'length', 
                      'secret', 'insert_time', 'source_id', 'torrent_file_name',
                      'relevance', 'infohash', 'tracker', 'last_check']

        self.value_name_for_channel = ['C.torrent_id', 'infohash', 'name', 'torrent_file_name', 'length', 'creation_date', 'num_files', 'thumbnail', 'insert_time', 'secret', 'relevance', 'source_id', 'category_id', 'status_id', 'num_seeders', 'num_leechers', 'comment'] 
        
+        
+
    def register(self, category, torrent_dir):
        """Late-bind the category classifier and the .torrent storage directory."""
        self.category = category
        self.torrent_dir = torrent_dir
+        
    def getTorrentID(self, infohash):
        """Map a binary infohash to its Torrent rowid (None if unknown)."""
        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
        return self._db.getTorrentID(infohash)
+    
    def getInfohash(self, torrent_id):
        """Inverse of getTorrentID: map a Torrent rowid back to its infohash."""
        return self._db.getInfohash(torrent_id)
+
+    def hasTorrent(self, infohash):
+        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+        if infohash in self.existed_torrents:    #to do: not thread safe
+            return True
+        infohash_str = bin2str(infohash)
+        existed = self._db.getOne('CollectedTorrent', 'torrent_id', infohash=infohash_str)
+        if existed is None:
+            return False
+        else:
+            self.existed_torrents.add(infohash)
+            return True
+    
+    def addExternalTorrent(self, torrentdef, source="BC", extra_info={}, commit=True):
+        assert isinstance(torrentdef, TorrentDef), "TORRENTDEF has invalid type: %s" % type(torrentdef)
+        assert torrentdef.is_finalized(), "TORRENTDEF is not finalized"
+        if torrentdef.is_finalized():
+            infohash = torrentdef.get_infohash()
+            if not self.hasTorrent(infohash):
+                self._addTorrentToDB(torrentdef, source, extra_info, commit)
+                self.notifier.notify(NTFY_TORRENTS, NTFY_INSERT, infohash)
+        
+    def addInfohash(self, infohash, commit=True):
+        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+        if self._db.getTorrentID(infohash) is None:
+            self._db.insert('Torrent', commit=commit, infohash=bin2str(infohash))
+
+    def _getStatusID(self, status):
+        return self.status_table.get(status.lower(), 0)
+
+    def _getCategoryID(self, category_list):
+        if len(category_list) > 0:
+            category = category_list[0].lower()
+            cat_int = self.category_table[category]
+        else:
+            cat_int = 0
+        return cat_int
+
    def _getSourceID(self, src):
        """Translate a source string to its id, inserting a new source row
        (and updating both in-memory caches) when it is unseen."""
        if src in self.src_table:
            src_int = self.src_table[src]
        else:
            src_int = self._insertNewSrc(src)    # add a new src, e.g., a RSS feed
            # keep the forward and reverse caches in sync with the new row
            self.src_table[src] = src_int
            self.id2src[src_int] = src
        return src_int
+
    def _get_database_dict(self, torrentdef, source="BC", extra_info={}):
        """Build the column->value dict for inserting/updating a Torrent row.

        extra_info may carry 'filename', 'status', 'seeder' and 'leecher'
        overrides; it is only read here, never mutated.
        """
        assert isinstance(torrentdef, TorrentDef), "TORRENTDEF has invalid type: %s" % type(torrentdef)
        assert torrentdef.is_finalized(), "TORRENTDEF is not finalized"
        mime, thumb = torrentdef.get_thumbnail()

        return {"infohash":bin2str(torrentdef.get_infohash()),
                "name":torrentdef.get_name_as_unicode(),
                "torrent_file_name":extra_info.get("filename", None),
                "length":torrentdef.get_length(),
                "creation_date":torrentdef.get_creation_date(),
                "num_files":len(torrentdef.get_files()),
                "thumbnail":bool(thumb),
                "insert_time":long(time()),
                "secret":0, # todo: check if torrent is secret
                "relevance":0.0,
                "source_id":self._getSourceID(source),
                # todo: the category_id is calculated directly from
                # torrentdef.metainfo, the category checker should use
                # the proper torrentdef api
                "category_id":self._getCategoryID(self.category.calculateCategory(torrentdef.metainfo, torrentdef.get_name_as_unicode())),
                "status_id":self._getStatusID(extra_info.get("status", "unknown")),
                "num_seeders":extra_info.get("seeder", -1),
                "num_leechers":extra_info.get("leecher", -1),
                "comment":torrentdef.get_comment_as_unicode()}
+    
    def _addTorrentToDB(self, torrentdef, source, extra_info, commit):
        """Insert or update the Torrent row, (re)build its InvertedIndex
        keywords and its tracker rows; returns the torrent_id."""
        assert isinstance(torrentdef, TorrentDef), "TORRENTDEF has invalid type: %s" % type(torrentdef)
        assert torrentdef.is_finalized(), "TORRENTDEF is not finalized"

        # ARNO: protect against injection attacks
        # 27/01/10 Boudewijn: all inserts are done using '?' in the
        # sql query.  The sqlite database will ensure that the values
        # are properly escaped.

        infohash = torrentdef.get_infohash()
        torrent_name = torrentdef.get_name_as_unicode()
        database_dict = self._get_database_dict(torrentdef, source, extra_info)

        # see if there is already a torrent in the database with this
        # infohash
        torrent_id = self._db.getTorrentID(infohash)

        if torrent_id is None:  # not in database
            # NOTE(review): commit=True here ignores the 'commit' parameter;
            # presumably intentional so the new rowid is durable -- confirm
            self._db.insert("Torrent", commit=True, **database_dict)
            torrent_id = self._db.getTorrentID(infohash)

        else:    # infohash in db
            where = 'torrent_id = %d' % torrent_id
            self._db.update('Torrent', where=where, commit=False, **database_dict)

        # boudewijn: we are using a Set to ensure that all keywords
        # are unique.  no use having the database layer figuring this
        # out when we can do it now, in memory
        keywords = Set(split_into_keywords(torrent_name))

        # search through the .torrent file for potential keywords in
        # the filenames
        for filename in torrentdef.get_files_as_unicode():
            keywords.update(split_into_keywords(filename))

        # store the keywords in the InvertedIndex table in the database
        if len(keywords) > 0:
            values = [(keyword, torrent_id) for keyword in keywords]
            self._db.executemany(u"INSERT OR REPLACE INTO InvertedIndex VALUES(?, ?)", values, commit=False)
            if DEBUG:
                print >> sys.stderr, "torrentdb: Extending the InvertedIndex table with", len(values), "new keywords for", torrent_name
        
        self._addTorrentTracker(torrent_id, torrentdef, extra_info, commit=False)
        if commit:
            self.commit()    
        return torrent_id
+
+    def getInfohashFromTorrentName(self, name): ##
+        sql = "select infohash from Torrent where name='" + str2bin(name) + "'"
+        infohash = self._db.fetchone(sql)
+        return infohash
+
+    def _insertNewSrc(self, src, commit=True):
+        desc = ''
+        if src.startswith('http') and src.endswith('xml'):
+            desc = 'RSS'
+        self._db.insert('TorrentSource', commit=commit, name=src, description=desc)
+        src_id = self._db.getOne('TorrentSource', 'source_id', name=src)
+        return src_id
+
    def _addTorrentTracker(self, torrent_id, torrentdef, extra_info={}, add_all=False, commit=True):
        """Store tracker rows for a torrent: the main announce at tier 1 and,
        when add_all is True, the whole multi-tracker hierarchy.
        No-op if the torrent already has a tracker row."""
        # Set add_all to True if you want to put all multi-trackers into db.
        # In the current version (4.2) only the main tracker is used.
        exist = self._db.getOne('TorrentTracker', 'tracker', torrent_id=torrent_id)
        if exist:
            return
        
        # announce = data['announce']
        # ignore_number = data['ignore_number']
        # retry_number = data['retry_number']
        # last_check_time = data['last_check_time']
        # announce_list = data['announce-list']

        announce = torrentdef.get_tracker()
        announce_list = torrentdef.get_tracker_hierarchy()
        ignore_number = 0
        retry_number = 0
        last_check_time = 0
        if "last_check_time" in extra_info:
            # stored as "seconds ago", relative to now
            last_check_time = int(time() - extra_info["last_check_time"])
        
        sql_insert_torrent_tracker = """
        INSERT INTO TorrentTracker
        (torrent_id, tracker, announce_tier, 
        ignored_times, retried_times, last_check)
        VALUES (?,?,?, ?,?,?)
        """
        
        values = [(torrent_id, announce, 1, ignore_number, retry_number, last_check_time)]
        # each torrent only has one announce with tier number 1
        tier_num = 2
        trackers = {announce:None}   # de-dup set: tracker URL -> None
        if add_all:
            for tier in announce_list:
                for tracker in tier:
                    if tracker in trackers:
                        continue
                    value = (torrent_id, tracker, tier_num, 0, 0, 0)
                    values.append(value)
                    trackers[tracker] = None
                tier_num += 1
            
        self._db.executemany(sql_insert_torrent_tracker, values, commit=commit)
+        
    def updateTorrent(self, infohash, commit=True, **kw):    # watch the schema of database
        """Update arbitrary columns of a torrent row. Convenience keys
        ('category', 'status', 'progress', 'seeder', 'leecher', tracker
        statistics) are translated/dispatched first; anything not in
        self.keys is silently dropped. Fires NTFY_UPDATE."""
        if 'category' in kw:
            cat_id = self._getCategoryID(kw.pop('category'))
            kw['category_id'] = cat_id
        if 'status' in kw:
            status_id = self._getStatusID(kw.pop('status'))
            kw['status_id'] = status_id
        if 'progress' in kw:
            self.mypref_db.updateProgress(infohash, kw.pop('progress'), commit=False)# commit at end of function
        if 'seeder' in kw:
            kw['num_seeders'] = kw.pop('seeder')
        if 'leecher' in kw:
            kw['num_leechers'] = kw.pop('leecher')
        if 'last_check_time' in kw or 'ignore_number' in kw or 'retry_number' in kw \
          or 'retried_times' in kw or 'ignored_times' in kw:
            # tracker statistics live in TorrentTracker; updateTracker pops
            # those keys out of kw itself
            self.updateTracker(infohash, kw, commit=False)
        
        # drop anything that is not an actual Torrent column
        for key in kw.keys():
            if key not in self.keys:
                kw.pop(key)
                
        if len(kw) > 0:
            infohash_str = bin2str(infohash)
            where = "infohash='%s'"%infohash_str
            self._db.update(self.table_name, where, commit=False, **kw)
            
        if commit:
            self.commit()
            # to.do: update the torrent panel's number of seeders/leechers 
        self.notifier.notify(NTFY_TORRENTS, NTFY_UPDATE, infohash)
+        
    def updateTracker(self, infohash, kw, tier=1, tracker=None, commit=True):
        """Update TorrentTracker statistics for one torrent.

        kw is MUTATED: the tracker-related keys are popped out (both the
        '*_number' aliases and the column names themselves). The row is
        selected by tier unless an explicit tracker URL is given.
        """
        torrent_id = self._db.getTorrentID(infohash)
        if torrent_id is None:
            return
        update = {}
        assert type(kw) == dict and kw, 'updateTracker error: kw should be filled dict, but is: %s' % kw
        if 'last_check_time' in kw:
            update['last_check'] = kw.pop('last_check_time')
        if 'ignore_number' in kw:
            update['ignored_times'] = kw.pop('ignore_number')
        if 'ignored_times' in kw:
            update['ignored_times'] = kw.pop('ignored_times')
        if 'retry_number' in kw:
            update['retried_times'] = kw.pop('retry_number')
        if 'retried_times' in kw:
            update['retried_times'] = kw.pop('retried_times')
            
        if tracker is None:
            where = 'torrent_id=%d AND announce_tier=%d'%(torrent_id, tier)
        else:
            where = 'torrent_id=%d AND tracker=%s'%(torrent_id, repr(tracker))
        self._db.update('TorrentTracker', where, commit=commit, **update)
+
+    def deleteTorrent(self, infohash, delete_file=False, commit = True):
+        if not self.hasTorrent(infohash):
+            return False
+        
+        if self.mypref_db.hasMyPreference(infohash):  # don't remove torrents in my pref
+            return False
+
+        if delete_file:
+            deleted = self.eraseTorrentFile(infohash)
+        else:
+            deleted = True
+        
+        if deleted:
+            self._deleteTorrent(infohash, commit=commit)
+            
+        self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, infohash)
+        return deleted
+
    def _deleteTorrent(self, infohash, keep_infohash=True, commit=True):
        """Remove a torrent's DB data. With keep_infohash (default) the row
        survives with torrent_file_name cleared, preserving referential
        consistency with Preference; otherwise the row is dropped entirely.
        Tracker rows and the in-memory existence cache are always purged."""
        torrent_id = self._db.getTorrentID(infohash)
        if torrent_id is not None:
            if keep_infohash:
                self._db.update(self.table_name, where="torrent_id=%d"%torrent_id, commit=commit, torrent_file_name=None)
            else:
                self._db.delete(self.table_name, commit=commit, torrent_id=torrent_id)
            if infohash in self.existed_torrents:
                self.existed_torrents.remove(infohash)
            self._db.delete('TorrentTracker', commit=commit, torrent_id=torrent_id)
            #print '******* delete torrent', torrent_id, `infohash`, self.hasTorrent(infohash)
+            
+    def eraseTorrentFile(self, infohash):
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is not None:
+            torrent_dir = self.getTorrentDir()
+            torrent_name = self.getOne('torrent_file_name', torrent_id=torrent_id)
+            src = os.path.join(torrent_dir, torrent_name)
+            if not os.path.exists(src):    # already removed
+                return True
+            
+            try:
+                os.remove(src)
+            except Exception, msg:
+                print >> sys.stderr, "cachedbhandler: failed to erase torrent", src, Exception, msg
+                return False
+        
+        return True
+            
+    def getTracker(self, infohash, tier=0):
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is not None:
+            sql = "SELECT tracker, announce_tier FROM TorrentTracker WHERE torrent_id==%d"%torrent_id
+            if tier > 0:
+                sql += " AND announce_tier<=%d"%tier
+            return self._db.fetchall(sql)
+        
    def getSwarmInfo(self, torrent_id):
        """
        returns info about swarm size from Torrent and TorrentTracker tables.
        @author: Rahim
        @param torrentId: The index of the torrent.
        @return: A tuple of the form:(torrent_id, num_seeders, num_leechers, num_sources_seen, last_check)
        """
        if torrent_id is not None:
            # most recently checked tracker row wins (limit 1, ordered by last_check)
            sql = """SELECT  tr.torrent_id, tr.num_seeders, tr.num_leechers, tt.last_check 
            FROM TorrentTracker tt, Torrent tr  WHERE tr.torrent_id=tt.torrent_id AND tr.torrent_id==%d"""%torrent_id
            sql +=" order by tt.last_check DESC limit 1"
            sizeInfo = self._db.fetchall(sql)

            if len(sizeInfo) == 1:
                num_seeders  = sizeInfo[0][1]
                num_leechers = sizeInfo[0][2]
                last_check = sizeInfo[0][3]
            
                # how many peers we have seen with this torrent in their preferences
                sql1= """SELECT COUNT(*) FROM Preference WHERE torrent_id=%d"""%torrent_id
                mySeenSources = self._db.fetchone(sql1)
            
                return [(torrent_id, num_seeders, num_leechers, last_check, mySeenSources, sizeInfo)]

        # unknown torrent_id (or no tracker row): empty result marker
        return [()]  
+            
+    
+    def getLargestSourcesSeen(self, torrent_id, timeNow, freshness=-1):
+        """
+        Returns the largest number of the sources that have seen the torrent.
+        @author: Rahim
+        @param torrent_id: the id of the torrent.
+        @param freshness: A parameter that filters old records. The assumption is that those popularity reports that are
+        older than a rate are not reliable
+        @return: The largest number of the torrents that have seen the torrent.
+        """
+        
+        if freshness == -1:
+            sql2 = """SELECT MAX(num_of_sources) FROM Popularity WHERE torrent_id=%d"""%torrent_id
+        else:
+            latestValidTime = timeNow - freshness
+            sql2 = """SELECT MAX(num_of_sources) FROM Popularity WHERE torrent_id=%d AND msg_receive_time > %d"""%(torrent_id, latestValidTime) 
+        
+        othersSeenSources = self._db.fetchone(sql2)
+        if othersSeenSources is None:
+            othersSeenSources =0
+        return othersSeenSources 
+        
    def getTorrentDir(self):
        """Directory where collected .torrent files are stored (set via register())."""
        return self.torrent_dir
+    
    def getTorrent(self, infohash, keys=None, include_mypref=True):
        """Return one torrent as a dict (None if not collected). The numeric
        source/category/status ids are translated to names, and with
        include_mypref the user's own download stats are merged in."""
        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
        # to do: replace keys like source -> source_id and status-> status_id ??
        
        if keys is None:
            # deepcopy because the list may be mutated downstream
            keys = deepcopy(self.value_name)
            #('torrent_id', 'category_id', 'status_id', 'name', 'creation_date', 'num_files',
            # 'num_leechers', 'num_seeders',   'length', 
            # 'secret', 'insert_time', 'source_id', 'torrent_file_name',
            # 'relevance', 'infohash', 'torrent_id')
        else:
            keys = list(keys)
        where = 'C.torrent_id = T.torrent_id and announce_tier=1 '

        res = self._db.getOne('CollectedTorrent C, TorrentTracker T', keys, where=where, infohash=bin2str(infohash))
        if not res:
            return None
        torrent = dict(zip(keys, res))
        # translate the id columns into their user-facing equivalents
        if 'source_id' in torrent:
            torrent['source'] = self.id2src[torrent['source_id']]
            del torrent['source_id']
        if 'category_id' in torrent:
            torrent['category'] = [self.id2category[torrent['category_id']]]
            del torrent['category_id']
        if 'status_id' in torrent:
            torrent['status'] = self.id2status[torrent['status_id']]
            del torrent['status_id']
        torrent['infohash'] = infohash
        if 'last_check' in torrent:
            torrent['last_check_time'] = torrent['last_check']
            del torrent['last_check']
        
        if include_mypref:
            tid = torrent['C.torrent_id']
            stats = self.mypref_db.getMyPrefStats(tid)
            del torrent['C.torrent_id']
            if stats:
                torrent['myDownloadHistory'] = True
                torrent['creation_time'] = stats[tid][0]
                torrent['progress'] = stats[tid][1]
                torrent['destination_path'] = stats[tid][2]
                
              
        return torrent
+
    def getNumberTorrents(self, category_name = 'all', library = False):
        """Count collected torrents in a category; library counts the user's
        own downloads, otherwise only 'good' torrents (family-filtered)."""
        table = 'CollectedTorrent'
        value = 'count(torrent_id)'
        where = '1 '

        if category_name != 'all':
            where += ' and category_id= %d' % self.category_table.get(category_name.lower(), -1) # unkown category_name returns no torrents
        if library:
            where += ' and torrent_id in (select torrent_id from MyPreference where destination_path != "")'
        else:
            where += ' and status_id=%d ' % self.status_table['good']
            # add familyfilter
            where += self.category.get_family_filter_sql(self._getCategoryID)
        
        number = self._db.getOne(table, value, where)
        if not number:
            number = 0
        return number
+    
+    def getTorrents(self, category_name = 'all', range = None, library = False, sort = None, reverse = False):
+        """
+        get Torrents of some category and with alive status (opt. not in family filter)
+        
+        @return Returns a list of dicts with keys: 
+            torrent_id, infohash, name, category, status, creation_date, num_files, num_leechers, num_seeders,
+            length, secret, insert_time, source, torrent_filename, relevance, simRank, tracker, last_check
+            (if in library: myDownloadHistory, download_started, progress, dest_dir)
+            
+        """
+        
+        #print >> sys.stderr, 'TorrentDBHandler: getTorrents(%s, %s, %s, %s, %s)' % (category_name, range, library, sort, reverse)
+        s = time()
+        
+        value_name = deepcopy(self.value_name)
+            
+        where = 'T.torrent_id = C.torrent_id and announce_tier=1 '
+        
+        if category_name != 'all':
+            where += ' and category_id= %d' % self.category_table.get(category_name.lower(), -1) # unkown category_name returns no torrents
+        if library:
+            if sort in value_name:
+                where += ' and C.torrent_id in (select torrent_id from MyPreference where destination_path != "")'
+            else:
+                value_name[0] = 'C.torrent_id'
+                where += ' and C.torrent_id = M.torrent_id and announce_tier=1'
+        else:
+            where += ' and status_id=%d ' % self.status_table['good'] # if not library, show only good files
+            # add familyfilter
+            where += self.category.get_family_filter_sql(self._getCategoryID)
+        if range:
+            offset= range[0]
+            limit = range[1] - range[0]
+        else:
+            limit = offset = None
+        if sort:
+            # Arno, 2008-10-6: buggy: not reverse???
+            desc = (reverse) and 'desc' or ''
+            if sort in ('name'):
+                order_by = ' lower(%s) %s' % (sort, desc)
+            else:
+                order_by = ' %s %s' % (sort, desc)
+        else:
+            order_by = None
+            
+        #print >>sys.stderr,"TorrentDBHandler: GET TORRENTS val",value_name,"where",where,"limit",limit,"offset",offset,"order",order_by
+        #print_stack
+        
+        # Must come before query
+        ranks = self.getRanks()
+
+        #self._db.show_execute = True
+        if library and sort not in value_name:
+            res_list = self._db.getAll('CollectedTorrent C, MyPreference M, TorrentTracker T', value_name, where, limit=limit, offset=offset, order_by=order_by)
+        else:
+            res_list = self._db.getAll('CollectedTorrent C, TorrentTracker T', value_name, where, limit=limit, offset=offset, order_by=order_by)
+        #self._db.show_execute = False
+        
+        mypref_stats = self.mypref_db.getMyPrefStats()
+        
+        #print >>sys.stderr,"TorrentDBHandler: getTorrents: getAll returned ###################",len(res_list)
+        
+        torrent_list = self.valuelist2torrentlist(value_name,res_list,ranks,mypref_stats)
+        del res_list
+        del mypref_stats
+        return torrent_list
+
    def valuelist2torrentlist(self,value_name,res_list,ranks,mypref_stats):
        """Convert raw query rows into torrent dicts: translate id columns,
        attach simRank and (when present) the user's own download stats.
        NOTE: value_name[0] is overwritten in place with 'torrent_id'."""
        torrent_list = []
        for item in res_list:
            # normalise the first column name ('C.torrent_id' -> 'torrent_id')
            value_name[0] = 'torrent_id'
            torrent = dict(zip(value_name, item))
            
            try:
                torrent['source'] = self.id2src[torrent['source_id']]
            except:
                print_exc()
                # Arno: RSS subscription and id2src issue
                torrent['source'] = 'http://some/RSS/feed'
            
            torrent['category'] = [self.id2category[torrent['category_id']]]
            torrent['status'] = self.id2status[torrent['status_id']]
            torrent['simRank'] = ranksfind(ranks,torrent['infohash'])
            # the DB stores infohashes base64-encoded; callers expect binary
            torrent['infohash'] = str2bin(torrent['infohash'])
            #torrent['num_swarm'] = torrent['num_seeders'] + torrent['num_leechers']
            torrent['last_check_time'] = torrent['last_check']
            del torrent['last_check']
            del torrent['source_id']
            del torrent['category_id']
            del torrent['status_id']
            torrent_id = torrent['torrent_id']
            if mypref_stats is not None and torrent_id in mypref_stats:
                # add extra info for torrent in mypref
                torrent['myDownloadHistory'] = True
                data = mypref_stats[torrent_id]  #(create_time,progress,destdir)
                torrent['download_started'] = data[0]
                torrent['progress'] = data[1]
                torrent['destdir'] = data[2]
            
            #print >>sys.stderr,"TorrentDBHandler: GET TORRENTS",`torrent`
                
            torrent_list.append(torrent)
        return  torrent_list
+        
+    def getRanks(self):
+        value_name = 'infohash'
+        order_by = 'relevance desc'
+        rankList_size = 20
+        where = 'status_id=%d ' % self.status_table['good']
+        res_list = self._db.getAll('Torrent', value_name, where = where, limit=rankList_size, order_by=order_by)
+        return [a[0] for a in res_list]
+
    def getNumberCollectedTorrents(self): 
        """Total number of torrents whose .torrent file has been collected."""
        #return self._db.size('CollectedTorrent')
        return self._db.getOne('CollectedTorrent', 'count(torrent_id)')
+
    def freeSpace(self, torrents2del):
        """Evict up to torrents2del least-valuable collected torrents (by a
        relevance/seeders/age weight), deleting their DB rows and .torrent
        files. Returns the number of files actually removed, or False when
        nothing was eligible."""
#        if torrents2del > 100:  # only delete so many torrents each time
#            torrents2del = 100
        sql = """
            select torrent_file_name, torrent_id, infohash, relevance,
                min(relevance,2500) +  min(500,num_leechers) + 4*min(500,num_seeders) - (max(0,min(500,(%d-creation_date)/86400)) ) as weight
            from CollectedTorrent
            where  torrent_id not in (select torrent_id from MyPreference)
            order by weight  
            limit %d  
        """ % (int(time()), torrents2del)
        res_list = self._db.fetchall(sql)
        if len(res_list) == 0: 
            return False
        
        # delete torrents from db
        sql_del_torrent = "delete from Torrent where torrent_id=?"
        sql_del_tracker = "delete from TorrentTracker where torrent_id=?"
        sql_del_pref = "delete from Preference where torrent_id=?"
        tids = [(torrent_id,) for torrent_file_name, torrent_id, infohash, relevance, weight in res_list]

        self._db.executemany(sql_del_torrent, tids, commit=False)
        self._db.executemany(sql_del_tracker, tids, commit=False)
        self._db.executemany(sql_del_pref, tids, commit=False)
        
        self._db.commit()
        
        # but keep the infohash in db to maintain consistence with preference db
        #torrent_id_infohashes = [(torrent_id,infohash_str,relevance) for torrent_file_name, torrent_id, infohash_str, relevance, weight in res_list]
        #sql_insert =  "insert into Torrent (torrent_id, infohash, relevance) values (?,?,?)"
        #self._db.executemany(sql_insert, torrent_id_infohashes, commit=True)
        
        torrent_dir = self.getTorrentDir()
        deleted = 0 # deleted any file?
        for torrent_file_name, torrent_id, infohash, relevance, weight in res_list:
            torrent_path = os.path.join(torrent_dir, torrent_file_name)
            try:
                os.remove(torrent_path)
                print >> sys.stderr, "Erase torrent:", os.path.basename(torrent_path)
                deleted += 1
            except Exception, msg:
                #print >> sys.stderr, "Error in erase torrent", Exception, msg
                pass
        
        # NOTE(review): 'infohash' here is whatever the last loop iteration
        # left bound, so only one deletion is announced -- confirm intended
        self.notifier.notify(NTFY_TORRENTS, NTFY_DELETE, str2bin(infohash)) # refresh gui
        
        return deleted
+
    def hasMetaData(self, infohash):
        """Alias for hasTorrent(): metadata is available iff the torrent was collected."""
        return self.hasTorrent(infohash)
+    
+    def getTorrentRelevances(self, tids):
+        sql = 'SELECT torrent_id, relevance from Torrent WHERE torrent_id in ' + str(tuple(tids))
+        return self._db.fetchall(sql)
+    
    def updateTorrentRelevance(self, infohash, relevance):
        """Set a single torrent's relevance (delegates to updateTorrent, which commits and notifies)."""
        self.updateTorrent(infohash, relevance=relevance)
+
+    def updateTorrentRelevances(self, tid_rel_pairs, commit=True):
+        if len(tid_rel_pairs) > 0:
+            sql_update_sims = 'UPDATE Torrent SET relevance=? WHERE torrent_id=?'
+            self._db.executemany(sql_update_sims, tid_rel_pairs, commit=commit)
+    
+    def searchNames(self,kws,local=True):
+        # Full-text search: intersect InvertedIndex hits for all keywords,
+        # join channel info from ChannelCast, and keep the "best" channel per
+        # infohash based on VoteCast tallies.  Returns a list of torrent
+        # dicts sorted by num_seeders descending (capped at 20 for remote
+        # queries, i.e. local=False).
+        # NOTE(review): 'word' is concatenated straight into the SQL below --
+        # SQL injection risk; should use parameterized queries.
+        t1 = time()
+        value_name = ['torrent_id',
+                      'infohash',
+                      'name',
+                       'torrent_file_name',                        
+                       'length', 
+                       'creation_date', 
+                       'num_files',
+                       'thumbnail',                       
+                      'insert_time', 
+                      'secret', 
+                      'relevance',  
+                      'source_id', 
+                      'category_id', 
+                       'status_id',
+                       'num_seeders',
+                      'num_leechers', 
+                      'comment',
+                      'channel_permid',
+                      'channel_name']        
+        
+        # Build "select ... intersect select ..." so a torrent must match
+        # every keyword.
+        sql = ""
+        count = 0
+        for word in kws:
+            word = word.lower()
+            count += 1
+            sql += " select torrent_id from InvertedIndex where word='" + word + "' "
+            if count < len(kws):
+                sql += " intersect "
+        
+        mainsql = """select T.*, C.publisher_id as channel_permid, C.publisher_name as channel_name 
+                     from Torrent T LEFT OUTER JOIN ChannelCast C on T.infohash = C.infohash 
+                     where T.torrent_id in (%s) order by T.num_seeders desc """ % (sql)
+        if not local:
+            mainsql += " limit 20"
+            
+        results = self._db.fetchall(mainsql)
+        t2 = time()
+        # Aggregate votes per channel (mod_id): sum of votes and vote count.
+        sql = "select mod_id, sum(vote), count(*) from VoteCast group by mod_id order by 2 desc"
+        votecast_records = self._db.fetchall(sql)         
+        
+        votes = {}
+        for vote in votecast_records:
+            votes[vote[0]] = (vote[1], vote[2])
+        t3 = time()
+        
+        torrents_dict = {}
+        for result in results:
+            a = time()
+            torrent = dict(zip(value_name,result))
+            
+            #bug fix: If channel_permid and/or channel_name is None, it cannot bencode
+            #bencode(None) is an Error
+            if torrent['channel_permid'] is None:
+                torrent['channel_permid'] = ""
+            if torrent['channel_name'] is None:
+                torrent['channel_name'] = ""
+                            
+            # check if this torrent belongs to more than one channel
+            if torrent['infohash'] in torrents_dict:
+                old_record = torrents_dict[torrent['infohash']]
+                # check if this channel has votes and if so, is it better than previous channel
+                if torrent['channel_permid'] in votes:
+                    # NOTE: 'sum' and 'count' shadow the Python builtins here.
+                    sum, count = votes[torrent['channel_permid']] 
+                    numsubscriptions = (sum + count)/3
+                    negvotes = (2*count-sum)/3
+                    if numsubscriptions-negvotes > old_record['subscriptions'] - old_record['neg_votes']:
+                        #print >> sys.stderr, "overridden", torrent['channel_name'], old_record['channel_name']
+                        old_record['channel_permid'] = torrent['channel_permid']
+                        old_record['channel_name'] = torrent['channel_name']
+                        old_record['subscriptions'] = numsubscriptions
+                        old_record['neg_votes'] = negvotes
+                else:
+                    if old_record['subscriptions'] - old_record['neg_votes'] < 0: # SPAM cutoff
+                        old_record['channel_permid'] = torrent['channel_permid']
+                        old_record['channel_name'] = torrent['channel_name']
+                        old_record['subscriptions'] = 0
+                        old_record['neg_votes'] = 0
+                continue
+            
+            torrents_dict[torrent['infohash']] = torrent
+            try:
+                torrent['source'] = self.id2src[torrent['source_id']]
+            except:
+                print_exc()
+                # Arno: RSS subscription and id2src issue
+                torrent['source'] = 'http://some/RSS/feed'
+            
+            # Translate numeric ids to their display values, then drop the ids.
+            torrent['category'] = [self.id2category[torrent['category_id']]]
+            torrent['status'] = self.id2status[torrent['status_id']]
+            torrent['simRank'] = ranksfind(None,torrent['infohash'])
+            torrent['infohash'] = str2bin(torrent['infohash'])
+            #torrent['num_swarm'] = torrent['num_seeders'] + torrent['num_leechers']
+            torrent['last_check_time'] = 0 #torrent['last_check']
+            #del torrent['last_check']
+            del torrent['source_id']
+            del torrent['category_id']
+            del torrent['status_id']
+            torrent_id = torrent['torrent_id']
+            
+            # Derive subscription/neg-vote counts for the first-seen channel.
+            torrent['neg_votes']=0
+            torrent['subscriptions']=0
+            if torrent['channel_permid'] in votes:
+                sum, count = votes[torrent['channel_permid']]
+                numsubscriptions = (sum + count)/3
+                negvotes = (2*count-sum)/3                
+                torrent['neg_votes']=negvotes
+                torrent['subscriptions']=numsubscriptions
+            
+            #print >> sys.stderr, "hello.. %.3f,%.3f" %((time()-a), time())
+        def compare(a,b):
+            return -1*cmp(a['num_seeders'], b['num_seeders'])
+        torrent_list = torrents_dict.values()
+        torrent_list.sort(compare)
+        #print >> sys.stderr, "# hits:%d; search time:%.3f,%.3f,%.3f" % (len(torrent_list),t2-t1, t3-t2, time()-t3 )
+        return torrent_list
+
+
+    def selectTorrentToCollect(self, permid, candidate_list=None):
+        """ 
+        select a torrent to collect from a given candidate list
+        If candidate_list is not present or None, all torrents of 
+        this peer will be used for sampling.
+        Return: the infohashed of selected torrent
+        """
+        
+        # Phase 1: score not-yet-collected torrents by summing the similarity
+        # of the top-50 similar peers that prefer them.
+        if candidate_list is None:
+            sql = """SELECT similarity, infohash FROM Peer, Preference, Torrent
+                     WHERE Peer.peer_id = Preference.peer_id
+                     AND Torrent.torrent_id = Preference.torrent_id
+                     AND Peer.peer_id IN(Select peer_id from Peer WHERE similarity > 0 ORDER By similarity DESC,last_connected DESC Limit ?)
+                     AND Preference.torrent_id IN(Select torrent_id from Peer, Preference WHERE Peer.peer_id = Preference.peer_id AND Peer.permid = ?)
+                     AND torrent_file_name is NULL
+                  """
+            permid_str = bin2str(permid)
+            results = self._db.fetchall(sql, (50, permid_str))
+        else:
+            #print >>sys.stderr,"torrentdb: selectTorrentToCollect: cands",`candidate_list`
+            
+            # NOTE(review): the IN (...) list is built with repr(); this only
+            # works because bin2str output contains no quotes -- TODO confirm.
+            cand_str = [bin2str(infohash) for infohash in candidate_list]
+            s = repr(cand_str).replace('[','(').replace(']',')')
+            sql = """SELECT similarity, infohash FROM Peer, Preference, Torrent
+                     WHERE Peer.peer_id = Preference.peer_id
+                     AND Torrent.torrent_id = Preference.torrent_id
+                     AND Peer.peer_id IN(Select peer_id from Peer WHERE similarity > 0 ORDER By similarity DESC Limit ?)
+                     AND infohash in """+s+"""
+                     AND torrent_file_name is NULL
+                  """
+            results = self._db.fetchall(sql, (50,))
+        
+        res = None
+        #convert top-x similarities into item recommendations
+        infohashes = {}
+        for sim, infohash in results:
+            infohashes[infohash] = infohashes.get(infohash,0) + sim
+        
+        keys = infohashes.keys()
+        if len(keys) > 0:
+            keys.sort(lambda a,b: cmp(infohashes[b], infohashes[a]))
+            
+            #add all items with highest relevance to candidate_list
+            candidate_list = []
+            for infohash in keys:
+                if infohashes[infohash] == infohashes[keys[0]]:
+                    candidate_list.append(str2bin(infohash))
+
+            #if only 1 candidate use that as result
+            if len(candidate_list) == 1:
+                # keys[0] is the str-encoded infohash; decoded via str2bin at
+                # the end of this method.
+                res = keys[0]
+                candidate_list = None
+                        
+        #No torrent found with relevance, fallback to most downloaded torrent
+        if res is None:
+            if candidate_list is None or len(candidate_list) == 0:
+                sql = """SELECT infohash FROM Torrent, Peer, Preference
+                         WHERE Peer.permid == ?  
+                         AND Peer.peer_id == Preference.peer_id 
+                         AND Torrent.torrent_id == Preference.torrent_id
+                         AND torrent_file_name is NULL
+                         GROUP BY Preference.torrent_id
+                         ORDER BY Count(Preference.torrent_id) DESC
+                         LIMIT 1"""
+                permid_str = bin2str(permid)
+                res = self._db.fetchone(sql, (permid_str,))
+            else:
+                cand_str = [bin2str(infohash) for infohash in candidate_list]
+                s = repr(cand_str).replace('[','(').replace(']',')')
+                sql = """SELECT infohash FROM Torrent, Preference
+                         WHERE Torrent.torrent_id == Preference.torrent_id
+                         AND torrent_file_name is NULL
+                         AND infohash IN """ + s + """
+                         GROUP BY Preference.torrent_id
+                         ORDER BY Count(Preference.torrent_id) DESC
+                         LIMIT 1"""
+                res = self._db.fetchone(sql)
+        
+        if res is None:
+            return None
+        return str2bin(res)
+        
+    def selectTorrentToCheck(self, policy='random', infohash=None, return_value=None):    # for tracker checking
+        """ select a torrent to update tracker info (number of seeders and leechers)
+        based on the torrent checking policy.
+        RETURN: a dictionary containing all useful info.
+
+        Policy 1: Random [policy='random']
+           Randomly select a torrent to collect (last_check < 5 min ago)
+        
+        Policy 2: Oldest (unknown) first [policy='oldest']
+           Select the non-dead torrent which was not been checked for the longest time (last_check < 5 min ago)
+        
+        Policy 3: Popular first [policy='popular']
+           Select the non-dead most popular (3*num_seeders+num_leechers) one which has not been checked in last N seconds 
+           (The default N = 4 hours, so at most 4h/torrentchecking_interval popular peers)
+        """
+        
+        #import threading
+        #print >> sys.stderr, "****** selectTorrentToCheck", threading.currentThread().getName()
+        
+        # return_value is a caller-supplied dict used as an out-parameter:
+        # 'torrent' is set to the result dict (only when a row was found) and
+        # return_value['event'].set() signals completion in all cases.
+        if infohash is None:
+            # create a view?
+            sql = """select T.torrent_id, ignored_times, retried_times, torrent_file_name, infohash, status_id, num_seeders, num_leechers, last_check 
+                     from CollectedTorrent T, TorrentTracker TT
+                     where TT.torrent_id=T.torrent_id and announce_tier=1 """
+            if policy.lower() == 'random':
+                ntorrents = self.getNumberCollectedTorrents()
+                if ntorrents == 0:
+                    rand_pos = 0
+                else:                    
+                    rand_pos = randint(0, ntorrents-1)
+                last_check_threshold = int(time()) - 300
+                sql += """and last_check < %d 
+                        limit 1 offset %d """%(last_check_threshold, rand_pos)
+            elif policy.lower() == 'oldest':
+                last_check_threshold = int(time()) - 300
+                sql += """ and last_check < %d and status_id <> 2
+                         order by last_check
+                         limit 1 """%last_check_threshold
+            elif policy.lower() == 'popular':
+                last_check_threshold = int(time()) - 4*60*60
+                sql += """ and last_check < %d and status_id <> 2 
+                         order by 3*num_seeders+num_leechers desc
+                         limit 1 """%last_check_threshold
+            res = self._db.fetchone(sql)
+        else:
+            # Explicit infohash given: look that torrent up directly.
+            sql = """select T.torrent_id, ignored_times, retried_times, torrent_file_name, infohash, status_id, num_seeders, num_leechers, last_check 
+                     from CollectedTorrent T, TorrentTracker TT
+                     where TT.torrent_id=T.torrent_id and announce_tier=1
+                     and infohash=? 
+                  """
+            infohash_str = bin2str(infohash)
+            res = self._db.fetchone(sql, (infohash_str,))
+        
+        if res:
+            torrent_file_name = res[3]
+            torrent_dir = self.getTorrentDir()
+            torrent_path = os.path.join(torrent_dir, torrent_file_name)
+            # Redundant guard: res is already truthy at this point.
+            if res is not None:
+                res = {'torrent_id':res[0], 
+                       'ignored_times':res[1], 
+                       'retried_times':res[2], 
+                       'torrent_path':torrent_path,
+                       'infohash':str2bin(res[4])
+                      }
+            return_value['torrent'] = res
+        return_value['event'].set()
+
+
+    def getTorrentsFromSource(self,source):
+        """ Get all torrents from the specified Subscription source. 
+        Return a list of dictionaries. Each dict is in the NEWDBSTANDARD format.
+        """
+        id = self._getSourceID(source)
+
+        where = 'C.source_id = %d and C.torrent_id = T.torrent_id and announce_tier=1' % (id)
+        # add familyfilter
+        # NOTE(review): self._getCategoryID is passed as a bound method, not a
+        # category id -- TODO confirm get_family_filter_sql expects a callable.
+        where += self.category.get_family_filter_sql(self._getCategoryID)
+        
+        value_name = deepcopy(self.value_name)
+
+        res_list = self._db.getAll('Torrent C, TorrentTracker T', value_name, where)
+        
+        torrent_list = self.valuelist2torrentlist(value_name,res_list,None,None)
+        del res_list
+        
+        return torrent_list
+
+        
+    def setSecret(self,infohash,secret):
+        kw = {'secret': secret}
+        self.updateTorrent(infohash, commit=True, **kw)
+        
+
+class MyPreferenceDBHandler(BasicDBHandler):
+    # DB handler for the MyPreference table (the user's own downloads).
+    # Maintains three in-memory "recent preference" caches (plain, with
+    # clicklog, with swarm size) that must be refreshed via loadData()
+    # whenever the table changes.
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking
+        if MyPreferenceDBHandler.__single is None:
+            MyPreferenceDBHandler.lock.acquire()   
+            try:
+                if MyPreferenceDBHandler.__single is None:
+                    MyPreferenceDBHandler(*args, **kw)
+            finally:
+                MyPreferenceDBHandler.lock.release()
+        return MyPreferenceDBHandler.__single
+    
+    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        if MyPreferenceDBHandler.__single is not None:
+            raise RuntimeError, "MyPreferenceDBHandler is singleton"
+        MyPreferenceDBHandler.__single = self
+        db = SQLiteCacheDB.getInstance()
+        BasicDBHandler.__init__(self,db, 'MyPreference') ## self,db,'MyPreference'
+
+        # Map status names to ids; merged with the table stored in the DB.
+        self.status_table = {'good':1, 'unknown':0, 'dead':2}
+        self.status_table.update(self._db.getTorrentStatusTable())
+        self.status_good = self.status_table['good']
+        # Arno, 2010-02-04: ARNOCOMMENT ARNOTODO Get rid of this g*dd*mn caching
+        # or keep it consistent with the DB!
+        self.recent_preflist = None
+        self.recent_preflist_with_clicklog = None
+        self.recent_preflist_with_swarmsize = None
+        self.rlock = threading.RLock()
+        
+        self.popularity_db = PopularityDBHandler.getInstance()
+        
+        
+    def loadData(self):
+        """ Arno, 2010-02-04: Brute force update method for the self.recent_
+        caches, because people don't seem to understand that caches need
+        to be kept consistent with the database. Caches are evil in the first place.
+        """
+        self.rlock.acquire()
+        try:
+            self.recent_preflist = self._getRecentLivePrefList()
+            self.recent_preflist_with_clicklog = self._getRecentLivePrefListWithClicklog()
+            self.recent_preflist_with_swarmsize = self._getRecentLivePrefListOL11()
+        finally:
+            self.rlock.release()
+                
+    def getMyPrefList(self, order_by=None):
+        # All torrent_ids in MyPreference, optionally ordered.
+        res = self.getAll('torrent_id', order_by=order_by)
+        return [p[0] for p in res]
+
+    def getMyPrefListInfohash(self):
+        # Binary infohashes of all torrents in MyPreference.
+        sql = 'select infohash from Torrent where torrent_id in (select torrent_id from MyPreference)'
+        res = self._db.fetchall(sql)
+        return [str2bin(p[0]) for p in res]
+    
+    def getMyPrefStats(self, torrent_id=None):
+        # get the full {torrent_id:(create_time,progress,destdir)}
+        value_name = ('torrent_id','creation_time','progress','destination_path')
+        if torrent_id is not None:
+            where = 'torrent_id=%s' % torrent_id
+        else:
+            where = None
+        res = self.getAll(value_name, where)
+        mypref_stats = {}
+        for pref in res:
+            torrent_id,creation_time,progress,destination_path = pref
+            mypref_stats[torrent_id] = (creation_time,progress,destination_path)
+        return mypref_stats
+        
+    def getCreationTime(self, infohash):
+        # creation_time for the preference row of this infohash, or None if
+        # the torrent is unknown.
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is not None:
+            ct = self.getOne('creation_time', torrent_id=torrent_id)
+            return ct
+        else:
+            return None
+
+    def getRecentLivePrefListWithClicklog(self, num=0):
+        """returns OL 8 style preference list: a list of lists, with each of the inner lists
+           containing infohash, search terms, click position, and reranking strategy"""
+           
+        # Lazy-fill the cache under the rlock (double-checked).
+        if self.recent_preflist_with_clicklog is None:
+            self.rlock.acquire()
+            try:
+                if self.recent_preflist_with_clicklog is None:
+                    self.recent_preflist_with_clicklog = self._getRecentLivePrefListWithClicklog()
+            finally:
+                self.rlock.release()
+        if num > 0:
+            return self.recent_preflist_with_clicklog[:num]
+        else:
+            return self.recent_preflist_with_clicklog  
+
+    def getRecentLivePrefListOL11(self, num=0):
+        """
+        Returns OL 11 style preference list. It contains all info from previous 
+        versions like clickLog info and some additional info related to swarm size.
+        @author: Rahim
+        @param num: if num be equal to zero the lenghth of the return list is unlimited, otherwise it's maximum lenght will be num.
+        @return: a list of lists. Each inner list is like:
+        [previous info , num_seeders, num_leechers, swarm_size_calc_age, number_of_sources]
+        """
+        if self.recent_preflist_with_swarmsize is None:
+            self.rlock.acquire()
+            try:
+                #if self.recent_preflist_with_swarmsize is None:
+                self.recent_preflist_with_swarmsize = self._getRecentLivePrefListOL11()
+            finally:
+                self.rlock.release()
+        if num > 0:
+            return self.recent_preflist_with_swarmsize[:num]
+        else:
+            return self.recent_preflist_with_swarmsize  
+        
+        
+    def getRecentLivePrefList(self, num=0):
+        # Plain infohash list variant of the cached recent preferences.
+        if self.recent_preflist is None:
+            self.rlock.acquire()
+            try:
+                if self.recent_preflist is None:
+                    self.recent_preflist = self._getRecentLivePrefList()
+            finally:
+                self.rlock.release()
+        if num > 0:
+            return self.recent_preflist[:num]
+        else:
+            return self.recent_preflist
+
+
+        
+    def addClicklogToMyPreference(self, infohash, clicklog_data, commit=True):
+        # NOTE(review): returns False on failure but falls through (implicit
+        # None) on success -- callers apparently must not rely on the result.
+        torrent_id = self._db.getTorrentID(infohash)
+        clicklog_already_stored = False # equivalent to hasMyPreference TODO
+        if torrent_id is None or clicklog_already_stored:
+            return False
+
+        d = {}
+        # copy those elements of the clicklog data which are used in the update command
+        for clicklog_key in ["click_position", "reranking_strategy"]: 
+            if clicklog_key in clicklog_data: 
+                d[clicklog_key] = clicklog_data[clicklog_key]
+                                
+        if d=={}:
+            if DEBUG:
+                print >> sys.stderr, "no updatable information given to addClicklogToMyPreference"
+        else:
+            if DEBUG:
+                print >> sys.stderr, "addClicklogToMyPreference: updatable clicklog data: %s" % d
+            self._db.update(self.table_name, 'torrent_id=%d' % torrent_id, commit=commit, **d)
+                
+        # have keywords stored by SearchDBHandler
+        if 'keywords' in clicklog_data:
+            if not clicklog_data['keywords']==[]:
+                searchdb = SearchDBHandler.getInstance() 
+                searchdb.storeKeywords(peer_id=0, 
+                                       torrent_id=torrent_id, 
+                                       terms=clicklog_data['keywords'], 
+                                       commit=commit)   
+                    
+        
+    def _getRecentLivePrefListWithClicklog(self, num=0):
+        """returns a list containing a list for each torrent: [infohash, [seach terms], click position, reranking strategy]"""
+        
+        sql = """
+        select infohash, click_position, reranking_strategy, m.torrent_id from MyPreference m, Torrent t 
+        where m.torrent_id == t.torrent_id 
+        and status_id == %d
+        order by creation_time desc
+        """ % self.status_good
+        
+        recent_preflist_with_clicklog = self._db.fetchall(sql)
+        if recent_preflist_with_clicklog is None:
+            recent_preflist_with_clicklog = []
+        else:
+            recent_preflist_with_clicklog = [[str2bin(t[0]),
+                                              t[3],   # insert search terms in next step, only for those actually required, store torrent id for now
+                                              t[1], # click position
+                                              t[2]]  # reranking strategy
+                                             for t in recent_preflist_with_clicklog]
+
+        if num != 0:
+            recent_preflist_with_clicklog = recent_preflist_with_clicklog[:num]
+
+        # now that we only have those torrents left in which we are actually interested, 
+        # replace torrent id by user's search terms for torrent id
+        termdb = TermDBHandler.getInstance()
+        searchdb = SearchDBHandler.getInstance()
+        for pref in recent_preflist_with_clicklog:
+            torrent_id = pref[1]
+            search_terms = searchdb.getMyTorrentSearchTerms(torrent_id)
+            # Arno, 2010-02-02: Explicit encoding
+            pref[1] = self.searchterms2utf8pref(termdb,search_terms)
+
+        return recent_preflist_with_clicklog
+
+    def searchterms2utf8pref(self,termdb,search_terms):            
+        # Resolve term ids to term strings and encode each as UTF-8 bytes.
+        terms = [termdb.getTerm(search_term) for search_term in search_terms]
+        eterms = []
+        for term in terms:
+            eterms.append(term.encode("UTF-8"))
+        return eterms
+    
+    
+    def _getRecentLivePrefListOL11(self, num=0): 
+        """
+        first calls the previous method to get a list of torrents and related info from MyPreference db 
+        (_getRecentLivePrefListWithClicklog) and then appendes it with swarm size info or ( num_seeders, num_leechers, calc_age, num_seeders).
+        @author: Rahim
+        @param num: if num=0 it returns all items otherwise it restricts the return result to num.
+        @return: a list that each item conatins below info:
+        [infohash, [seach terms], click position, reranking strategy, num_seeders, num_leechers, calc_age, num_of_sources] 
+        """
+        
+        sql = """
+        select infohash, click_position, reranking_strategy, m.torrent_id from MyPreference m, Torrent t 
+        where m.torrent_id == t.torrent_id 
+        and status_id == %d
+        order by creation_time desc
+        """ % self.status_good
+        
+        recent_preflist_with_swarmsize = self._db.fetchall(sql)
+        if recent_preflist_with_swarmsize is None:
+            recent_preflist_with_swarmsize = []
+        else:
+            recent_preflist_with_swarmsize = [[str2bin(t[0]),
+                                              t[3],   # insert search terms in next step, only for those actually required, store torrent id for now
+                                              t[1], # click position
+                                              t[2]]  # reranking strategy
+                                             for t in recent_preflist_with_swarmsize]
+
+        if num != 0:
+            recent_preflist_with_swarmsize = recent_preflist_with_swarmsize[:num]
+
+        # now that we only have those torrents left in which we are actually interested, 
+        # replace torrent id by user's search terms for torrent id
+        termdb = TermDBHandler.getInstance()
+        searchdb = SearchDBHandler.getInstance()
+        tempTorrentList = []
+        for pref in recent_preflist_with_swarmsize:
+            torrent_id = pref[1]
+            tempTorrentList.append(torrent_id)
+            search_terms = searchdb.getMyTorrentSearchTerms(torrent_id)
+            # Arno, 2010-02-02: Explicit encoding
+            pref[1] = self.searchterms2utf8pref(termdb,search_terms)
+        
+        #Step 3: appending swarm size info to the end of the inner lists
+        swarmSizeInfoList= self.popularity_db.calculateSwarmSize(tempTorrentList, 'TorrentIds', toBC=True) # returns a list of items [torrent_id, num_seeders, num_leechers, num_sources_seen]
+
+        # NOTE(review): indexes up to [4] below, implying 5 fields per item,
+        # while the inline comment above lists only 4 -- TODO confirm the row
+        # shape returned by calculateSwarmSize.
+        index = 0
+        for  index in range(0,len(swarmSizeInfoList)):
+            recent_preflist_with_swarmsize[index].append(swarmSizeInfoList[index][1]) # number of seeders
+            recent_preflist_with_swarmsize[index].append(swarmSizeInfoList[index][2])# number of leechers
+            recent_preflist_with_swarmsize[index].append(swarmSizeInfoList[index][3])  # age of the report 
+            recent_preflist_with_swarmsize[index].append(swarmSizeInfoList[index][4]) # number of sources seen this torrent 
+        return recent_preflist_with_swarmsize
+        
+    def _getRecentLivePrefList(self, num=0):    # num = 0: all files
+        # get recent and live torrents
+        sql = """
+        select infohash from MyPreference m, Torrent t 
+        where m.torrent_id == t.torrent_id 
+        and status_id == %d
+        order by creation_time desc
+        """ % self.status_good
+
+        recent_preflist = self._db.fetchall(sql)
+        if recent_preflist is None:
+            recent_preflist = []
+        else:
+            recent_preflist = [str2bin(t[0]) for t in recent_preflist]
+
+        if num != 0:
+            return recent_preflist[:num]
+        else:
+            return recent_preflist
+
+    def hasMyPreference(self, infohash):
+        # True iff a MyPreference row exists for this infohash.
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is None:
+            return False
+        res = self.getOne('torrent_id', torrent_id=torrent_id)
+        if res is not None:
+            return True
+        else:
+            return False
+            
+    def addMyPreference(self, infohash, data, commit=True):
+        # keys in data: destination_path, progress, creation_time, torrent_id
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is None or self.hasMyPreference(infohash):
+            # Arno, 2009-03-09: Torrent already exists in myrefs.
+            # Hack for hiding from lib while keeping in myprefs.
+            # see standardOverview.removeTorrentFromLibrary()
+            #
+            self.updateDestDir(infohash,data.get('destination_path'),commit=commit)
+            return False
+        d = {}
+        d['destination_path'] = data.get('destination_path')
+        d['progress'] = data.get('progress', 0)
+        d['creation_time'] = data.get('creation_time', int(time()))
+        d['torrent_id'] = torrent_id
+
+        self._db.insert(self.table_name, commit=commit, **d)
+        self.notifier.notify(NTFY_MYPREFERENCES, NTFY_INSERT, infohash)
+        
+        # Arno, 2010-02-04: Update self.recent_ caches :-(
+        self.loadData()
+
+        return True
+
+    def deletePreference(self, infohash, commit=True):
+        # Arno: when deleting a preference, you may also need to do
+        # some stuff in BuddyCast: see delMyPref()
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is None:
+            return
+        self._db.delete(self.table_name, commit=commit, **{'torrent_id':torrent_id})
+        self.notifier.notify(NTFY_MYPREFERENCES, NTFY_DELETE, infohash)
+
+        # Arno, 2010-02-04: Update self.recent_ caches :-(
+        self.loadData()
+            
+            
+    def updateProgress(self, infohash, progress, commit=True):
+        # Persist download progress for this infohash; silently ignores
+        # unknown torrents.
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is None:
+            return
+        self._db.update(self.table_name, 'torrent_id=%d'%torrent_id, commit=commit, progress=progress)
+        #print >> sys.stderr, '********* update progress', `infohash`, progress, commit
+
+    def getAllEntries(self):
+        """use with caution,- for testing purposes"""
+        return self.getAll("torrent_id, click_position, reranking_strategy", order_by="torrent_id")
+
+    def updateDestDir(self, infohash, destdir, commit=True):
+        # Persist the download destination directory; silently ignores
+        # unknown torrents.
+        torrent_id = self._db.getTorrentID(infohash)
+        if torrent_id is None:
+            return
+        self._db.update(self.table_name, 'torrent_id=%d'%torrent_id, commit=commit, destination_path=destdir)
+    
+
+#    def getAllTorrentCoccurrence(self):
+#        # should be placed in PreferenceDBHandler, but put here to be convenient for TorrentCollecting
+#        sql = """select torrent_id, count(torrent_id) as coocurrency from Preference where peer_id in
+#            (select peer_id from Preference where torrent_id in 
+#            (select torrent_id from MyPreference)) and torrent_id not in 
+#            (select torrent_id from MyPreference)
+#            group by torrent_id
+#            """
+#        coccurrence = dict(self._db.fetchall(sql))
+#        return coccurrence
+
+        
+class BarterCastDBHandler(BasicDBHandler):
+
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Double-checked-locking singleton accessor (same pattern as the
+        # other *DBHandler classes); the constructor stores itself in
+        # __single as a side effect.
+        if BarterCastDBHandler.__single is None:
+            BarterCastDBHandler.lock.acquire()   
+            try:
+                if BarterCastDBHandler.__single is None:
+                    BarterCastDBHandler(*args, **kw)
+            finally:
+                BarterCastDBHandler.lock.release()
+        return BarterCastDBHandler.__single
+    
+    getInstance = staticmethod(getInstance)
+
+    def __init__(self):
+        # NOTE(review): unlike the sibling handlers, this __init__ does not
+        # raise when instantiated twice; it silently overwrites __single.
+        BarterCastDBHandler.__single = self
+        db = SQLiteCacheDB.getInstance()
+        BasicDBHandler.__init__(self, db,'BarterCast') ## self,db,'BarterCast'
+        self.peer_db = PeerDBHandler.getInstance()
+        
+        # create the maxflow network
+        self.network = Network({})
+        self.update_network()
+                   
+        if DEBUG:
+            print >> sys.stderr, "bartercastdb:"
+
+        
+    ##def registerSession(self, session):
+    ##    self.session = session
+
+        # Retrieve MyPermid
+    ##    self.my_permid = session.get_permid()
+
+
+    def registerSession(self, session):
+        # Bind the session, cache our permid/peer_id, and initialise the
+        # running upload/download totals that are included in BarterCast
+        # messages.
+        self.session = session
+
+        # Retrieve MyPermid
+        self.my_permid = session.get_permid()
+
+        if DEBUG:
+            print >> sys.stderr, "bartercastdb: MyPermid is ", `self.my_permid`
+
+        if self.my_permid is None:
+            raise ValueError('Cannot get permid from Session')
+
+        # Keep administration of total upload and download
+        # (to include in BarterCast message)
+        self.my_peerid = self.getPeerID(self.my_permid)
+        
+        if self.my_peerid != None:
+            where = "peer_id_from=%s" % (self.my_peerid)
+            item = self.getOne(('sum(uploaded)', 'sum(downloaded)'), where=where)
+        else:
+            item = None
+        
+        # sum() yields NULL (None) when there are no rows; default to zero.
+        if item != None and len(item) == 2 and item[0] != None and item[1] != None:
+            self.total_up = int(item[0])
+            self.total_down = int(item[1])
+        else:
+            self.total_up = 0
+            self.total_down = 0
+            
+#         if DEBUG:
+#             print >> sys.stderr, "My reputation: ", self.getMyReputation()
+            
+            
+    
+    def getTotals(self):
+        return (self.total_up, self.total_down)
+                        
+    def getName(self, permid):
+
+        if permid == 'non-tribler':
+            return "non-tribler"
+        elif permid == self.my_permid:
+            return "local_tribler"
+
+        name = self.peer_db.getPeer(permid, 'name')
+        
+        if name == None or name == '':
+            return 'peer %s' % show_permid_shorter(permid) 
+        else:
+            return name
+
+    def getNameByID(self, peer_id):
+        permid = self.getPermid(peer_id)
+        return self.getName(permid)
+
+
+    def getPermid(self, peer_id):
+
+        # by convention '-1' is the id of non-tribler peers
+        if peer_id == -1:
+            return 'non-tribler'
+        else:
+            return self.peer_db.getPermid(peer_id)
+
+
+    def getPeerID(self, permid):
+        
+        # by convention '-1' is the id of non-tribler peers
+        if permid == "non-tribler":
+            return -1
+        else:
+            return self.peer_db.getPeerID(permid)
+
+    def getItem(self, (permid_from, permid_to), default=False):
+
+        # ARNODB: now converting back to dbid! just did reverse in getItemList
+        peer_id1 = self.getPeerID(permid_from)
+        peer_id2 = self.getPeerID(permid_to)
+        
+        if peer_id1 is None:
+            self._db.insertPeer(permid_from) # ARNODB: database write
+            peer_id1 = self.getPeerID(permid_from) # ARNODB: database write
+        
+        if peer_id2 is None:
+            self._db.insertPeer(permid_to)
+            peer_id2 = self.getPeerID(permid_to)
+            
+        return self.getItemByIDs((peer_id1,peer_id2),default=default)
+
+
+    def getItemByIDs(self, (peer_id_from, peer_id_to), default=False):
+        if peer_id_from is not None and peer_id_to is not None:
+            
+            where = "peer_id_from=%s and peer_id_to=%s" % (peer_id_from, peer_id_to)
+            item = self.getOne(('downloaded', 'uploaded', 'last_seen'), where=where)
+        
+            if item is None:
+                return None
+        
+            if len(item) != 3:
+                return None
+            
+            itemdict = {}
+            itemdict['downloaded'] = item[0]
+            itemdict['uploaded'] = item[1]
+            itemdict['last_seen'] = item[2]
+            itemdict['peer_id_from'] = peer_id_from
+            itemdict['peer_id_to'] = peer_id_to
+
+            return itemdict
+
+        else:
+            return None
+
+
+    def getItemList(self):    # get the list of all peers' permid
+        
+        keys = self.getAll(('peer_id_from','peer_id_to'))
+        # ARNODB: this dbid -> permid translation is more efficiently done
+        # on the final top-N list.
+        keys = map(lambda (id_from, id_to): (self.getPermid(id_from), self.getPermid(id_to)), keys)
+        return keys
+
+
+    def addItem(self, (permid_from, permid_to), item, commit=True):
+
+#        if value.has_key('last_seen'):    # get the latest last_seen
+#            old_last_seen = 0
+#            old_data = self.getPeer(permid)
+#            if old_data:
+#                old_last_seen = old_data.get('last_seen', 0)
+#            last_seen = value['last_seen']
+#            value['last_seen'] = max(last_seen, old_last_seen)
+
+        # get peer ids
+        peer_id1 = self.getPeerID(permid_from)
+        peer_id2 = self.getPeerID(permid_to)
+                
+        # check if they already exist in database; if not: add
+        if peer_id1 is None:
+            self._db.insertPeer(permid_from)
+            peer_id1 = self.getPeerID(permid_from)
+        if peer_id2 is None:
+            self._db.insertPeer(permid_to)
+            peer_id2 = self.getPeerID(permid_to)
+            
+        item['peer_id_from'] = peer_id1
+        item['peer_id_to'] = peer_id2    
+            
+        self._db.insert(self.table_name, commit=commit, **item)
+
+    def updateItem(self, (permid_from, permid_to), key, value, commit=True):
+        
+        if DEBUG:
+            print >> sys.stderr, "bartercastdb: update (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value))
+
+        itemdict = self.getItem((permid_from, permid_to))
+
+        # if item doesn't exist: add it
+        if itemdict == None:
+            self.addItem((permid_from, permid_to), {'uploaded':0, 'downloaded': 0, 'last_seen': int(time())}, commit=True)
+            itemdict = self.getItem((permid_from, permid_to))
+
+        # get peer ids
+        peer_id1 = itemdict['peer_id_from']
+        peer_id2 = itemdict['peer_id_to']
+
+        if key in itemdict.keys():
+            
+            where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2)
+            item = {key: value}
+            self._db.update(self.table_name, where = where, commit=commit, **item)            
+
+    def incrementItem(self, (permid_from, permid_to), key, value, commit=True):
+        if DEBUG:
+            print >> sys.stderr, "bartercastdb: increment (%s, %s) [%s] += %s" % (self.getName(permid_from), self.getName(permid_to), key, str(value))
+
+        # adjust total_up and total_down
+        if permid_from == self.my_permid:
+            if key == 'uploaded':
+                self.total_up += int(value)
+            if key == 'downloaded':
+                self.total_down += int(value)
+    
+        itemdict = self.getItem((permid_from, permid_to))
+
+        # if item doesn't exist: add it
+        if itemdict == None:
+            self.addItem((permid_from, permid_to), {'uploaded':0, 'downloaded': 0, 'last_seen': int(time())}, commit=True)
+            itemdict = self.getItem((permid_from, permid_to))
+            
+        # get peer ids
+        peer_id1 = itemdict['peer_id_from']
+        peer_id2 = itemdict['peer_id_to']
+
+        if key in itemdict.keys():
+            old_value = itemdict[key]
+            new_value = old_value + value
+            
+            where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2)
+
+            item = {key: new_value}
+            self._db.update(self.table_name, where = where, commit=commit, **item)            
+            return new_value
+
+        return None
+
+    def addPeersBatch(self,permids):
+        """ Add unknown permids as batch -> single transaction """
+        if DEBUG:
+            print >> sys.stderr, "bartercastdb: addPeersBatch: n=",len(permids)
+        
+        for permid in permids:
+            peer_id = self.getPeerID(permid)
+            # check if they already exist in database; if not: add
+            if peer_id is None:
+                self._db.insertPeer(permid,commit=False)
+        self._db.commit()
+
+    def updateULDL(self, (permid_from, permid_to), ul, dl, commit=True):
+        """ Add ul/dl record to database as a single write """
+        
+        if DEBUG:
+            print >> sys.stderr, "bartercastdb: updateULDL (%s, %s) ['ul'] += %s ['dl'] += %s" % (self.getName(permid_from), self.getName(permid_to), str(ul), str(dl))
+
+        itemdict = self.getItem((permid_from, permid_to))
+
+        # if item doesn't exist: add it
+        if itemdict == None:
+            itemdict =  {'uploaded':ul, 'downloaded': dl, 'last_seen': int(time())}
+            self.addItem((permid_from, permid_to), itemdict, commit=commit)
+            return
+
+        # get peer ids
+        peer_id1 = itemdict['peer_id_from']
+        peer_id2 = itemdict['peer_id_to']
+
+        if 'uploaded' in itemdict.keys() and 'downloaded' in itemdict.keys():
+            where = "peer_id_from=%s and peer_id_to=%s" % (peer_id1, peer_id2)
+            item = {'uploaded': ul, 'downloaded':dl}
+            self._db.update(self.table_name, where = where, commit=commit, **item)            
+
+    def getPeerIDPairs(self):
+        keys = self.getAll(('peer_id_from','peer_id_to'))
+        return keys
+        
+    def getTopNPeers(self, n, local_only = False):
+        """
+        Return (sorted) list of the top N peers with the highest (combined) 
+        values for the given keys. This version uses batched reads and peer_ids
+        in calculation
+        @return a dict containing a 'top' key with a list of (permid,up,down) 
+        tuples, a 'total_up', 'total_down', 'tribler_up', 'tribler_down' field. 
+        Sizes are in kilobytes.
+        """
+        
+        # TODO: this won't scale to many interactions, as the size of the DB
+        # is NxN
+        
+        if DEBUG:
+            print >> sys.stderr, "bartercastdb: getTopNPeers: local = ", local_only
+            #print_stack()
+        
+        n = max(1, n)
+        my_peer_id = self.getPeerID(self.my_permid)
+        total_up = {}
+        total_down = {}
+        # Arno, 2008-10-30: I speculate this is to count transfers only once,
+        # i.e. the DB stored (a,b) and (b,a) and we want to count just one.
+        
+        processed =  Set()
+        
+
+        value_name = '*'
+        increment = 500
+        
+        nrecs = self.size()
+        #print >>sys.stderr,"NEXTtopN: size is",nrecs
+        
+        for offset in range(0,nrecs,increment):
+            if offset+increment > nrecs:
+                limit = nrecs-offset
+            else:
+                limit = increment
+            #print >>sys.stderr,"NEXTtopN: get",offset,limit
+        
+            reslist = self.getAll(value_name, offset=offset, limit=limit)
+            #print >>sys.stderr,"NEXTtopN: res len is",len(reslist),`reslist`
+            for res in reslist:
+                (peer_id_from,peer_id_to,downloaded,uploaded,last_seen,value) = res
+            
+                if local_only:
+                    if not (peer_id_to == my_peer_id or peer_id_from == my_peer_id):
+                        # get only items of my local dealings
+                        continue
+                        
+                if (not (peer_id_to, peer_id_from) in processed) and (not peer_id_to == peer_id_from):
+                #if (not peer_id_to == peer_id_from):
+        
+                    up = uploaded *1024 # make into bytes
+                    down = downloaded *1024
+    
+                    if DEBUG:
+                        print >> sys.stderr, "bartercastdb: getTopNPeers: DB entry: (%s, %s) up = %d down = %d" % (self.getNameByID(peer_id_from), self.getNameByID(peer_id_to), up, down)
+    
+                    processed.add((peer_id_from, peer_id_to))
+    
+                    # fix for multiple my_permids
+                    if peer_id_from == -1: # 'non-tribler':
+                        peer_id_to = my_peer_id
+                    if peer_id_to == -1: # 'non-tribler':
+                        peer_id_from = my_peer_id
+    
+                    # process peer_id_from
+                    total_up[peer_id_from] = total_up.get(peer_id_from, 0) + up
+                    total_down[peer_id_from] = total_down.get(peer_id_from, 0) + down
+    
+                    # process peer_id_to
+                    total_up[peer_id_to] = total_up.get(peer_id_to, 0) + down
+                    total_down[peer_id_to] = total_down.get(peer_id_to, 0) +  up
+
+                    
+        # create top N peers
+        top = []
+        min = 0
+
+        for peer_id in total_up.keys():
+
+            up = total_up[peer_id]
+            down = total_down[peer_id]
+
+            if DEBUG:
+                print >> sys.stderr, "bartercastdb: getTopNPeers: total of %s: up = %d down = %d" % (self.getName(peer_id), up, down)
+
+            # we know rank on total upload?
+            value = up
+
+            # check if peer belongs to current top N
+            if peer_id != -1 and peer_id != my_peer_id and (len(top) < n or value > min):
+
+                top.append((peer_id, up, down))
+
+                # sort based on value
+                top.sort(cmp = lambda (p1, u1, d1), (p2, u2, d2): cmp(u2, u1))
+
+                # if list contains more than N elements: remove the last (=lowest value)
+                if len(top) > n:
+                    del top[-1]
+
+                # determine new minimum of values    
+                min = top[-1][1]
+
+        # Now convert to permid
+        permidtop = []
+        for peer_id,up,down in top:
+            permid = self.getPermid(peer_id)
+            permidtop.append((permid,up,down))
+
+        result = {}
+
+        result['top'] = permidtop
+
+        # My total up and download, including interaction with non-tribler peers
+        result['total_up'] = total_up.get(my_peer_id, 0)
+        result['total_down'] = total_down.get(my_peer_id, 0)
+
+        # My up and download with tribler peers only
+        result['tribler_up'] = result['total_up'] - total_down.get(-1, 0) # -1 = 'non-tribler'
+        result['tribler_down'] = result['total_down'] - total_up.get(-1, 0) # -1 = 'non-tribler'
+
+        if DEBUG:
+            print >> sys.stderr, result
+
+        return result
+        
+        
+    ################################
+    def update_network(self):
+
+
+        keys = self.getPeerIDPairs() #getItemList()
+
+
+    ################################
+    def getMyReputation(self, alpha = ALPHA):
+
+        rep = atan((self.total_up - self.total_down) * alpha)/(0.5 * pi)
+        return rep   
+
+
+class VoteCastDBHandler(BasicDBHandler):
+    """Singleton handler for the VoteCast table: rows are (mod_id,
+    voter_id, vote, time_stamp). As used below, vote == 2 means
+    'subscribed' and vote == -1 means 'spam' (see subscribe()/spam())."""
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Double-checked locking; __init__ stores the instance in __single.
+        
+        if VoteCastDBHandler.__single is None:
+            VoteCastDBHandler.lock.acquire()   
+            try:
+                if VoteCastDBHandler.__single is None:
+                    VoteCastDBHandler(*args, **kw)
+            finally:
+                VoteCastDBHandler.lock.release()
+        return VoteCastDBHandler.__single
+    
+    getInstance = staticmethod(getInstance)
+
+    def __init__(self):
+        # Registers itself as the singleton (see getInstance).
+        VoteCastDBHandler.__single = self
+        try:
+            db = SQLiteCacheDB.getInstance()
+            BasicDBHandler.__init__(self,db,'VoteCast')
+            if DEBUG: print >> sys.stderr, "votecast: DB made" 
+        except: 
+            print >> sys.stderr, "votecast: couldn't make the table"
+        
+        self.peer_db = PeerDBHandler.getInstance()
+        if DEBUG:
+            print >> sys.stderr, "votecast: "
+    
+    def registerSession(self, session):
+        """Bind the Session and cache my permid."""
+        self.session = session
+        self.my_permid = session.get_permid()
+
+        if DEBUG:
+            print >> sys.stderr, "votecast: My permid is",`self.my_permid`
+
+    def getAllVotes(self, permid):
+        """Return all VoteCast rows cast on channel/moderator 'permid'."""
+        sql = 'select * from VoteCast where mod_id==?'
+        
+        records = self._db.fetchall(sql, (permid,))
+        return records
+    
+    def getAll(self):
+        """Return every row in the VoteCast table."""
+        sql = 'select * from VoteCast'
+        
+        records = self._db.fetchall(sql)
+        return records
+        
+    def getPosNegVotes(self, permid):
+        """Return (positive, negative) vote counts for a channel.
+        Votes equal to the string "1" count as positive; everything else
+        counts as negative."""
+        sql = 'select * from VoteCast where mod_id==?'
+        
+        # NOTE(review): queries with permid[0] (first element of the
+        # argument), unlike getAllVotes which uses permid directly --
+        # confirm the caller passes a sequence here.
+        records = self._db.fetchall(sql, (permid[0],))
+        pos_votes = 0
+        neg_votes = 0
+        
+        if records is None:
+            return(pos_votes,neg_votes)
+        
+        for vote in records:
+            
+            if vote[2] == "1":
+                pos_votes +=1
+            else:
+                neg_votes +=1
+        return (pos_votes, neg_votes)
+    
+
+    def hasVote(self, permid, voter_peerid):
+        """Return True iff voter_peerid has any vote on channel permid."""
+        sql = 'select mod_id, voter_id from VoteCast where mod_id==? and voter_id==?'
+        item = self._db.fetchone(sql,(permid,voter_peerid,))
+        #print >> sys.stderr,"well well well",infohash," sdd",item
+        if item is None:
+            return False
+        else:
+            return True
+    
+    def getBallotBox(self):
+        """Return all VoteCast rows (same result as getAll)."""
+        sql = 'select * from VoteCast'
+        items = self._db.fetchall(sql)
+        return items   
+    
+    def addVotes(self, votes):
+        """Bulk-insert an iterable of (mod_id, voter_id, vote, time_stamp)
+        tuples in a single committed transaction."""
+        sql = 'insert into VoteCast Values (?,?,?,?)'
+        self._db.executemany(sql,votes,commit=True)
+    
+    def addVote(self, vote, clone=True):
+        """Insert a vote dict, replacing any existing vote by the same
+        voter on the same channel. 'clone' is currently unused."""
+        if self.hasVote(vote['mod_id'],vote['voter_id']):
+            self.deleteVote(vote['mod_id'],vote['voter_id'])
+        self._db.insert(self.table_name, **vote)        
+    
+    def deleteVotes(self, permid):
+        """Delete all votes cast on channel permid."""
+        sql = 'Delete From VoteCast where mod_id==?'
+        self._db.execute_write(sql,(permid,))
+    
+    def deleteVote(self, permid, voter_id):
+        """Delete voter_id's vote on channel permid."""
+        sql = 'Delete From VoteCast where mod_id==? and voter_id==?'
+        self._db.execute_write(sql,(permid,voter_id,))
+    
+    def getPermid(self, peer_id):
+        """Map a database peer_id back to a permid."""
+
+        # by convention '-1' is the id of non-tribler peers
+        if peer_id == -1:
+            return 'non-tribler'
+        else:
+            return self.peer_db.getPermid(peer_id)
+
+
+    def getPeerID(self, permid):
+        """Map a permid to its database peer_id (None if unknown)."""
+        # by convention '-1' is the id of non-tribler peers
+        if permid == "non-tribler":
+            return -1
+        else:
+            return self.peer_db.getPeerID(permid)
+
+    
+    def hasPeer(self, permid):
+        """Return whether the Peer table knows this permid."""
+        return self.peer_db.hasPeer(permid)
+    
+    def getRecentAndRandomVotes(self, recent=25, random=25):
+        """Return up to 'recent' of my most recent votes plus up to
+        'random' older ones picked at random, as (binary mod_id, vote,
+        time_stamp) tuples. Used to fill outgoing VoteCast messages.
+        Note: the 'random' parameter shadows any module named random
+        within this method."""
+        allrecords = []
+        
+        sql = "SELECT mod_id, vote, time_stamp from VoteCast where voter_id==? order by time_stamp desc limit ?"
+        myrecentvotes = self._db.fetchall(sql,(permid_for_user(self.my_permid),recent,))
+        allrecords.extend(myrecentvotes)
+        
+        # only sample older votes when the recent window was filled
+        if myrecentvotes is not None and len(myrecentvotes)>=recent:
+            t = myrecentvotes[len(myrecentvotes)-1][2]
+            sql = "select mod_id, vote, time_stamp from VoteCast where voter_id==? and time_stamp<? order by random() limit ?"
+            myrandomvotes = self._db.fetchall(sql,(permid_for_user(self.my_permid),t,random,))
+            allrecords.extend(myrandomvotes)
+        
+        # Arno, 2010-02-04: Doesn't do anything. Nitin?
+        records = []
+        for record in allrecords:
+            records.append((str2bin(record[0]),record[1], record[2]))
+            
+        return records #allrecords... Nitin: should have been records
+    
+
+    def hasSubscription(self, permid, voter_peerid): ##
+        """Return True iff voter_peerid has a subscribe vote (vote=2)
+        on channel permid."""
+        sql = 'select mod_id, voter_id from VoteCast where mod_id==? and voter_id==? and vote=2'
+        item = self._db.fetchone(sql,(permid,voter_peerid,))
+        if item is None:
+            return False
+        else:
+            return True
+
+
+    def subscribe(self,permid):
+        """insert/change the vote status to 2"""
+        sql = "select vote from VoteCast where mod_id==? and voter_id==?"
+        vote = self._db.fetchone(sql,(permid,bin2str(self.my_permid),))
+        if vote is None:
+            sql = "insert into VoteCast Values(?,?,'2',?)"
+            self._db.execute_write(sql,(permid,bin2str(self.my_permid),now(),))
+        elif vote!=2:
+            sql = "update VoteCast set vote=2 where mod_id==? and voter_id==?"
+            self._db.execute_write(sql,(permid,bin2str(self.my_permid),))    
+
+    def unsubscribe(self, permid): ###
+        """ change the vote status to 0, if unsubscribed"""
+        # note: actually deletes the row rather than setting vote=0
+        sql = "select vote from VoteCast where mod_id==? and voter_id==?"
+        vote = self._db.fetchone(sql,(permid,bin2str(self.my_permid),))
+        if vote is not None and vote==2:
+            sql = "delete from VoteCast where mod_id==? and voter_id==?"
+            self._db.execute_write(sql,(permid,bin2str(self.my_permid),))
+    
+    def spam(self, permid):
+        """ insert/change the vote status to -1"""
+        sql = "select vote from VoteCast where mod_id==? and voter_id==?"
+        vote = self._db.fetchone(sql,(permid,bin2str(self.my_permid),))
+        if vote is None:
+            sql = "insert into VoteCast Values(?,?,'-1',?)"
+            self._db.execute_write(sql,(permid,bin2str(self.my_permid),now(),))
+        elif vote>=0 and vote<=2:
+            sql = "update VoteCast set vote=-1 where mod_id==? and voter_id==?"
+            self._db.execute_write(sql,(permid,bin2str(self.my_permid),))    
+    
+    def getVote(self,publisher_id,subscriber_id):
+        """ return the vote status if such record exists, otherwise None  """
+        sql = "select vote from VoteCast where mod_id==? and voter_id==?"
+        return self._db.fetchone(sql, (publisher_id,subscriber_id,))    
+    
+    def getPublishersWithNegVote(self, subscriber_id):
+        ''' return the publisher_ids having a negative vote from subscriber_id '''
+        sql = "select mod_id from VoteCast where voter_id==? and vote=-1"
+        res = self._db.fetchall(sql,(subscriber_id,))
+        result_list = Set()
+        for entry in res:
+            result_list.add(entry[0])
+        return result_list
+    
+    def getNegVotes(self,publisher_id):
+        """returns the number of negative votes in integer format"""
+        sql = "select count(*) from VoteCast where mod_id==? and vote=-1"
+        return self._db.fetchone(sql, (publisher_id,))
+    
+    def getNumSubscriptions(self,publisher_id): ###
+        """returns the number of subscribers in integer format"""
+        sql = "select count(*) from VoteCast where mod_id==? and vote=2" # before select vote
+        return self._db.fetchone(sql, (publisher_id,))
+    
+    def getVotes(self, publisher_id):
+        """ returns (sum, count) from VoteCast """
+        sql = "select sum(vote), count(*) from VoteCast where mod_id==?"
+        return self._db.fetchone(sql, (publisher_id,))
+
+    def getEffectiveVote(self, publisher_id):
+        """ returns positive - negative votes """
+        sql = "select count(*) from VoteCast where mod_id==? and vote=2" 
+        subscriptions = self._db.fetchone(sql, (publisher_id,))
+        sql = "select count(*) from VoteCast where mod_id==? and vote=-1" 
+        negative_votes = self._db.fetchone(sql, (publisher_id,))
+        return (subscriptions - negative_votes)
+          
+
+                        
+#end votes
+
+class ChannelCastDBHandler(BasicDBHandler):
+    """ """
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):        
+        # Double-checked locking; __init__ stores the instance in __single,
+        # so constructing once is enough.
+        if ChannelCastDBHandler.__single is None:
+            ChannelCastDBHandler.lock.acquire()   
+            try:
+                if ChannelCastDBHandler.__single is None:
+                    ChannelCastDBHandler(*args, **kw)
+            finally:
+                ChannelCastDBHandler.lock.release()
+        return ChannelCastDBHandler.__single
+    
+    getInstance = staticmethod(getInstance)
+
+    def __init__(self):
+        # Registers itself as the singleton (see getInstance) and binds
+        # to the ChannelCast table.
+        ChannelCastDBHandler.__single = self
+        try:
+            db = SQLiteCacheDB.getInstance()
+            BasicDBHandler.__init__(self,db,'ChannelCast')
+            print >> sys.stderr, "ChannelCast: DB made" 
+        except: 
+            print >> sys.stderr, "ChannelCast: couldn't make the table"
+        
+        self.peer_db = PeerDBHandler.getInstance()
+        # first-query flags / caches for subscription and popularity queries
+        self.firstQueryMySubscriptions=True
+        self.allRecordsMySubscriptions=None
+        self.firstQueryPopularChannels=True
+        self.allRecordsPopularChannels=None
+        
+        if DEBUG:
+            print >> sys.stderr, "ChannelCast: "
+            
+        # column order of a ChannelCast record as used by this handler
+        self.value_name = ['publisher_id','publisher_name','infohash','torrenthash','torrentname','time_stamp','signature'] ##
+    
+    def registerSession(self, session):
+        """Bind the Session, cache my permid, and warm up the channel
+        caches / normalise publisher names."""
+        self.session = session
+        self.my_permid = session.get_permid()
+        self.getMySubscribedChannels()
+        self.getMostPopularUnsubscribedChannels()
+        self.ensureRecentNames()
+        if DEBUG:
+            print >> sys.stderr, "ChannelCast: My permid is",`self.my_permid`
+        
+    def _sign(self, record):
+        """Append my signature to a 6-field ChannelCast record (mutates
+        'record' in place by appending the base64 signature string)."""
+        assert record is not None
+        # Nitin on Feb 5, 2010: Signature is generated using binary forms of permid, infohash, torrenthash fields 
+        r = (str2bin(record[0]),str2bin(record[2]),str2bin(record[3]),record[5])
+        bencoding = bencode(r)
+        signature = bin2str(sign_data(bencoding, self.session.keypair))
+        record.append(signature)
+
+    def ensureRecentNames(self):
+        """For every publisher, overwrite publisher_name on all of their
+        rows with the name from their most recent record, so each channel
+        shows a single, up-to-date name."""
+        sql = "select distinct publisher_id from ChannelCast"
+        publisher_ids = self._db.fetchall(sql)
+        for publisher_id in publisher_ids:
+            sql = "select publisher_name from ChannelCast where publisher_id==? order by time_stamp desc limit 1"
+            latest_publisher_name = self._db.fetchone(sql,(publisher_id[0],))
+            sql = "update ChannelCast set publisher_name==? where publisher_id==?"
+            self._db.execute_write(sql,(latest_publisher_name,publisher_id[0],))        
+
+    def addOwnTorrent(self, torrentdef): #infohash, torrentdata):
+        """Publish a torrent into my own channel: insert a signed
+        ChannelCast record if this infohash is not already published by
+        me, then refresh publisher_name on my rows. Returns True iff a
+        new record was inserted."""
+        assert isinstance(torrentdef, TorrentDef), "TORRENTDEF has invalid type: %s" % type(torrentdef)
+        flag = False
+        publisher_id = bin2str(self.my_permid)
+        infohash = bin2str(torrentdef.get_infohash())
+        sql = "select count(*) from ChannelCast where publisher_id==? and infohash==?"
+        num_records = self._db.fetchone(sql, (publisher_id, infohash,))
+        if num_records==0:
+            torrenthash = bin2str(sha(bencode(torrentdef.get_metainfo())).digest())
+            # Arno, 2010-01-27: sqlite don't like binary encoded names
+            unickname = self.session.get_nickname()
+            utorrentname = torrentdef.get_name_as_unicode()
+            record = [publisher_id,unickname,infohash,torrenthash,utorrentname,now()]
+            self._sign(record)
+            sql = "insert into ChannelCast Values(?,?,?,?,?,?,?)"
+            self._db.execute_write(sql,(record[0], record[1], record[2], record[3], record[4], record[5], record[6]), commit=True)
+            flag = True
+        # keep publisher_name consistent with my latest record
+        sql = "select publisher_name from ChannelCast where publisher_id==? order by time_stamp desc limit 1"
+        latest_publisher_name = self._db.fetchone(sql,(publisher_id,))
+        sql = "update ChannelCast set publisher_name==? where publisher_id==?"
+        self._db.execute_write(sql,(latest_publisher_name,publisher_id,))        
+        return flag
+        
+
+    def deleteOwnTorrent(self, infohash): ##
+        """Remove a torrent (binary infohash) from my own channel."""
+        sql = 'Delete From ChannelCast where infohash=? and publisher_id=?'
+        self._db.execute_write(sql,(bin2str(infohash),bin2str(self.my_permid),))
+
+
+    def deleteTorrentsFromPublisherId(self, permid): ##
+        """Remove every ChannelCast record of the given publisher
+        (binary permid)."""
+        sql = "Delete From ChannelCast where publisher_id==?"
+        self._db.execute_write(sql,(bin2str(permid),))
+
+    
+    def updateMyChannelName(self, name): ##
+        """Rename my channel on all of my ChannelCast records."""
+        sql = "update ChannelCast set publisher_name==? where publisher_id==?"
+        self._db.execute_write(sql,(name,bin2str(self.my_permid),))
+
+    
+    def addTorrent(self,record):
+        if __debug__:
+            assert len(record) == 7, "RECORD has invalid length: %d" % len(record)
+            publisher_id, publisher_name, infohash, torrenthash, torrentname, timestamp, signature = record
+            assert isinstance(publisher_id, str), "PUBLISHER_ID has invalid type: %s" % type(publisher_id)
+            assert isinstance(publisher_name, unicode), "PUBLISHER_NAME has invalid type: %s" % type(publisher_name)
+            assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+            assert isinstance(torrentname, unicode), "TORRENTNAME has invalid type: %s" % type(torrentname)
+            assert isinstance(timestamp, int), "TIMESTAMP has invalid type: %s" % type(timestamp)
+            assert isinstance(signature, str), "SIGNATURE has invalid type: %s" % type(signature)
+        flag = False
+        sql = "select count(*) from ChannelCast where publisher_id='" + record[0] + "' and infohash='" + record[2] + "'"
+        num_records = self._db.fetchone(sql)
+        if num_records==0:
+            sql = "insert into ChannelCast (publisher_id, publisher_name, infohash, torrenthash, torrentname, time_stamp, signature) Values(?,?,?,?,?,?,?)"
+            self._db.execute_write(sql,(record[0], record[1], record[2], record[3], record[4], record[5], record[6]), commit=True)
+            flag = True
+        sql = "select publisher_name from ChannelCast where publisher_id==? order by time_stamp desc limit 1"
+        latest_publisher_name = self._db.fetchone(sql,(record[0],))
+        sql = "update ChannelCast set publisher_name==? where publisher_id==?"
+        self._db.execute_write(sql,(latest_publisher_name,record[0],)) 
+        return flag
+        
+    def existsTorrent(self, infohash):
+        """Return True iff the Torrent table has a named entry for this
+        binary infohash."""
+        sql = "select count(*) from Torrent where infohash==? and name<>''"
+        num_records = self._db.fetchone(sql, (bin2str(infohash),))
+        if num_records > 0:
+            return True
+        return False
+    
+    def getRecentAndRandomTorrents(self,NUM_OWN_RECENT_TORRENTS=15,NUM_OWN_RANDOM_TORRENTS=10,NUM_OTHERS_RECENT_TORRENTS=15,NUM_OTHERS_RANDOM_TORRENTS=10):
+        """Collect records to gossip: recent + random torrents from my own
+        channel and from channels I subscribed to (vote=2). Returns tuples
+        in self.value_name order with permid/infohash/torrenthash/signature
+        decoded to binary."""
+        allrecords = []
+        
+        sql = "select * from ChannelCast where publisher_id==? order by time_stamp desc limit ?"
+        myrecenttorrents = self._db.fetchall(sql,(permid_for_user(self.my_permid),NUM_OWN_RECENT_TORRENTS,))
+        allrecords.extend(myrecenttorrents)
+        
+        # only sample older own torrents when the recent window was filled
+        if myrecenttorrents is not None and len(myrecenttorrents)>=NUM_OWN_RECENT_TORRENTS:
+            t = myrecenttorrents[len(myrecenttorrents)-1][5]
+            sql = "select * from ChannelCast where publisher_id==? and time_stamp<? order by random() limit ?"
+            myrandomtorrents = self._db.fetchall(sql,(permid_for_user(self.my_permid),t,NUM_OWN_RANDOM_TORRENTS,))
+            allrecords.extend(myrandomtorrents)
+        
+        # torrents from channels I subscribed to (vote=2 in VoteCast)
+        sql = "select * from ChannelCast where publisher_id in (select mod_id from VoteCast where voter_id=? and vote=2) order by time_stamp desc limit ?"
+        othersrecenttorrents = self._db.fetchall(sql,(permid_for_user(self.my_permid),NUM_OTHERS_RECENT_TORRENTS,))
+        if othersrecenttorrents is not None and len(othersrecenttorrents)>0: 
+            allrecords.extend(othersrecenttorrents)
+        
+        if othersrecenttorrents is not None and len(othersrecenttorrents)>=NUM_OTHERS_RECENT_TORRENTS:
+            t = othersrecenttorrents[len(othersrecenttorrents)-1][5]
+            sql = "select * from ChannelCast where publisher_id in (select mod_id from VoteCast where voter_id=? and vote=2) and time_stamp<? order by random() limit ?"
+            othersrandomtorrents = self._db.fetchall(sql,(permid_for_user(self.my_permid),t,NUM_OTHERS_RANDOM_TORRENTS,))
+            allrecords.extend(othersrandomtorrents)
+        
+        # decode the base64 columns back to binary for the wire format
+        records = []
+        for record in allrecords:
+            records.append((str2bin(record[0]), record[1], str2bin(record[2]), str2bin(record[3]), record[4], record[5], str2bin(record[6])))
+        
+        return records
+    
+
+
+    def getTorrentsFromPublisherId(self, publisher_id): ##
+        """Return Torrent rows for a channel's torrents, skipping entries
+        whose name is all spaces, with the infohash column (index 1)
+        converted to binary."""
+        sql = "select * from Torrent where infohash in (select infohash from ChannelCast where publisher_id==? ) and name<>'' "
+        torrent_results = self._db.fetchall(sql,(publisher_id,))
+        results=[]
+
+        # convert infohashes to binary
+        for torrent in torrent_results: 
+            t_name = torrent[2]
+
+            # check if torrent name contains only white spaces
+            index=0
+            while index <= len(t_name) - 1 and t_name[index] == ' ':
+                index += 1
+            if index == len(t_name):
+                continue
+
+            t_list = list(torrent)
+            infohash = str2bin(t_list[1])
+            t_list[1] = infohash
+            t_tuple = tuple(t_list)
+            results.append(t_tuple)
+        return results
+
+    def searchChannels(self,query):
+        # query would be of the form: "k barack obama" or "p 4fw342d23re2we2w3e23d2334d" permid
+        value_name = deepcopy(self.value_name) ##
+        if query[0] == 'k': 
+            # search torrents based on keywords
+
+            kwlist = split_into_keywords(query[2:])
+            sql = "select publisher_id, publisher_name from ChannelCast where "
+            count = 0
+            for kw in kwlist:
+                count += 1
+                if kw is None or len(kw)==0:
+                    continue
+                sql += " publisher_name like '%" + kw + "%' "
+                if count<len(kwlist):
+                    sql += " and "
+
+            
+            channellist = self._db.fetchall(sql)
+            channels = {}
+            allrecords = []
+            for channel in channellist:
+                if channel[0] in channels:
+                    continue
+                channels[channel[0]] = channel[1]
+                #print >>sys.stderr, "channel:", repr(channel)
+                # now, retrieve the last 20 of each of these channels' torrents                             
+                s = "select * from ChannelCast where publisher_id==? order by time_stamp desc limit 20"
+                record = self._db.fetchall(s,(channel[0],))
+                if record is not None and len(record)>0:
+                    allrecords.extend(record)
+
+            records = []
+            for record in allrecords:
+                records.append((str2bin(record[0]), record[1], str2bin(record[2]), str2bin(record[3]), record[4], record[5], str2bin(record[6])))
+            return records         
+        elif query[0] == 'p': 
+            # search channel's torrents based on permid
+            q = query[2:]
+            #print>>sys.stderr, "ChannelCastDB: searchChannels: This is a permid-based search:", `q`            
+            s = "select * from ChannelCast where publisher_id==? order by time_stamp desc limit 20"
+            allrecords = self._db.fetchall(s,(q,)) ## before records = {'torrents':self._db.fetchall(s)}
+            #channelList = self.valuelist2channellist(records,value_name)  
+            records = []
+            for record in allrecords:
+                records.append((str2bin(record[0]), record[1], str2bin(record[2]), str2bin(record[3]), record[4], record[5], str2bin(record[6])))
+            
+            return records #channelList # 
+        else:
+            # Query is invalid: hence, it should not even come here
+            return None
+        
+    def getTorrents(self, publisher_id):
+        sql = "select * from Torrent where infohash in (select infohash from ChannelCast where publisher_id==?)"
+        return self._db.fetchall(sql,(publisher_id,))
+    
+    def getInfohashesForChannel(self, publisher_id):
+        sql = "select infohash from ChannelCast where publisher_id==? ;"
+        return self._db.fetchall(sql,(publisher_id,))
+    
+    def isItemInChannel(self,publisher_id,infohash):
+        sql = "select count(*) from ChannelCast where publisher_id=? and infohash=? ;"
+        
+        isAvailable = self._db.fetchone(sql,(publisher_id,infohash))
+        if isAvailable:
+            return True
+        else:
+            return False
+        
+    def valuelist2channellist(self,res_list,value_name): ##
+        
+        channel_list = []
+        for item in res_list:
+            channel = dict(zip(value_name, item))
+                        
+            channel['infohash'] = str2bin(channel['infohash'])
+            channel['torrenthash'] = str2bin(channel['torrenthash'])
+               
+            channel_list.append(channel)
+        return  channel_list        
+        
    def getMostPopularChannels(self):
        """Return all channels (except my own) ranked by vote count.

        @return: list of tuples (permid, channel_name, #votes, {}) in
                 descending vote order; channels I am subscribed to are
                 included (see Richard's note below).
        """
        records = []
        # NOTE(review): votecastdb is obtained but never used in this method;
        # kept because getInstance() may lazily create the singleton.
        votecastdb = VoteCastDBHandler.getInstance()
        # Inner query: First, identify the publishers you are subscribed to
        # Outer query: Get all publishers that are not in your publishers' list, along with the number of subscriptions
        ## sql = "select mod_id, count(*) from VoteCast where mod_id not in (select mod_id from VoteCast where voter_id='"+ bin2str(self.my_permid)+"' and vote=2) and mod_id<>'"+bin2str(self.my_permid)+"' group by mod_id order by 2 desc"
        sql = "select mod_id, count(*) from VoteCast where mod_id<>? group by mod_id order by 2 desc" ## Richard : for now popular channels can contain channels i am subscribed to
        votes = self._db.fetchall(sql,(bin2str(self.my_permid),))
        # One publisher_name lookup per voted channel (N+1 query pattern).
        for vote in votes:
            sql = "select publisher_name, time_stamp from ChannelCast where publisher_id==? order by 2 desc" 
            record = self._db.fetchone(sql, (vote[0],))
            if not record is None:
                mod_name = record[0]
                records.append((vote[0],mod_name,vote[1], {}))
        return records
+
+
+
+    def getMostPopularUnsubscribedChannels(self,from_channelcast=False): ##
+        """return a list of tuples: [(permid,channel_name,#votes)]"""
+        
+        #if not self.firstQueryPopularChannels and not from_channelcast:
+        #    self.firstQueryPopularChannels=True
+        #    return self.allRecordsPopularChannels
+
+        votecastdb = VoteCastDBHandler.getInstance()
+        allrecords = []
+
+
+        sql = "select distinct publisher_id, publisher_name from ChannelCast"
+        channel_records = self._db.fetchall(sql)
+
+#        sql = "select mod_id, (2*sum(vote)-count(*))/3 from VoteCast group by mod_id order by 2 desc"
+        sql = "select mod_id, sum(vote-1) from VoteCast group by mod_id order by 2 desc" # only subscriptions, not spam votes
+
+        votecast_records = self._db.fetchall(sql)
+
+        sql = "select distinct mod_id from VoteCast where voter_id==? and vote=2"
+        subscribed_channels = self._db.fetchall(sql,(bin2str(self.my_permid),))
+        
+        subscribers = {}
+        for record in subscribed_channels:
+            subscribers[record[0]]="12"
+
+        publishers = {}
+        for publisher_id, publisher_name in channel_records:
+            if publisher_id not in publishers and publisher_id!=bin2str(self.my_permid):
+                publishers[publisher_id]=[publisher_name, 0]
+
+        for mod_id, vote in votecast_records:
+            if vote < -5: # it is considered SPAM
+                if mod_id in publishers:
+                    del publishers[mod_id]
+                continue
+            if mod_id in publishers: 
+                if mod_id not in subscribers:
+                    publishers[mod_id][1] = vote
+                else:
+                    del publishers[mod_id]
+        for k, v in publishers.items():
+            if votecastdb.getVote(k, bin2str(self.my_permid)) != -1:
+                allrecords.append((k, v[0], v[1], {}))
+        def compare(a,b):
+            if a[2]>b[2] : return -1
+            if a[2]<b[2] : return 1
+            return 0
+        allrecords.sort(compare)
+        #print >> sys.stderr, "getMostPopularUnsubscribedChannels: execution times %.3f, %.3f, %.3f" %(t2-t1, t3-t2, time()-t3)
+        
+        
+        #if not from_channelcast:
+        #    if self.allRecordsPopularChannels is None:
+        #        self.firstQueryPopularChannels=False
+        #    self.allRecordsPopularChannels=allrecords
+        return allrecords
+    
+
+    def getMyChannel(self):
+        mychannel = []
+        votecastdb = VoteCastDBHandler.getInstance()
+        sql = "select publisher_id, publisher_name from ChannelCast where publisher_id==? group by publisher_id"
+        res = self._db.fetchall(sql,(bin2str(self.my_permid),)) 
+        if res is not None:
+            # mychannel.append((self.my_permid,"MyChannel" , votecastdb.getNumSubscriptions(bin2str(self.my_permid)) - votecastdb.getNegVotes(bin2str(self.my_permid)), {}))
+            # for now only count subscriptions, not negative votes
+            mychannel.append((self.my_permid,"MyChannel" , votecastdb.getNumSubscriptions(bin2str(self.my_permid)),{}))
+
+
+        else:
+            mychannel.append((self.my_permid,"MyChannel" , 0, {}))
+        return mychannel
+
+
+
+    def getSubscribersCount(self,permid):
+        """returns the number of subscribers in integer format"""
+        sql = "select count(*) from VoteCast where mod_id==? and vote=2"
+        numrecords = self._db.fetchone(sql, (permid,))
+        return numrecords
+
+    def getMyNumberSubscriptions(self): ##
+        """returns the number of subscribers in integer format"""
+        sql = "select count(*) from VoteCast where voter_id==? and vote=2"
+        numrecords = self._db.fetchone(sql, (bin2str(self.my_permid),))
+        return numrecords
+    
+
+    def getOtherChannels(self): ##
+        """Returns all the channels different from my channel
+           Returns a list of tuples: [(permid,channel_name,#votes)]
+        """
+        records = []
+        votecastdb = VoteCastDBHandler.getInstance()
+        sql = "select distinct publisher_id, publisher_name from ChannelCast" 
+        channels = self._db.fetchall(sql)
+        for channel in channels:
+            if channel[0] != bin2str(self.my_permid):
+                num_votes = self.getSubscribersCount(channel[0])
+                records.append((channel[0], channel[1], num_votes, {}))
+        if DEBUG: print >> sys.stderr , "records" , records
+        return records
+
+
+    
    def getMySubscribedChannels(self, from_channelcast=False):
        """Return the channels I am subscribed to.

        @param from_channelcast: True when called by the ChannelCast overlay
               code; bypasses the cached result and refreshes it.
        @return: list of tuples (permid, channel_name, vote_score, {})
                 sorted by descending vote_score.
        """
#        records = []
#        votecastdb = VoteCastDBHandler.getInstance()
        #sql = "select mod_id, count(*) from VoteCast where mod_id in (select mod_id from VoteCast where voter_id='"+ bin2str(self.my_permid)+"' and vote=2) and mod_id<>'"+bin2str(self.my_permid)+"' group by mod_id order by 2 desc"

#        t1 = time()
#        sql = "select mod_id, count(*) from VoteCast where mod_id <>'"+bin2str(self.my_permid)+"'" + " and vote=2 and voter_id='" + bin2str(self.my_permid) + "'" + " group by mod_id order by 2 desc"
#        votes = self._db.fetchall(sql)
#        for vote in votes:
#            sql = "select publisher_name, time_stamp from ChannelCast where publisher_id='"+vote[0]+"' order by 2 desc" 
#            record = self._db.fetchone(sql)
#            mod_name = record[0]
#            records.append((vote[0],mod_name,vote[1]))
#        t2 = time()
#        print >> sys.stderr , "subscribed" , t2 - t1

#        return records

        if DEBUG and from_channelcast:
            print >> sys.stderr , "FROM CHANNELCAST"

        # Serve the cached result for GUI calls once it has been filled in
        # (see the bottom of this method).  NOTE(review): the flag handling
        # means the cache is only used on alternating GUI calls -- confirm
        # this is intentional.
        if not self.firstQueryMySubscriptions and not from_channelcast:
            self.firstQueryMySubscriptions=True
            return self.allRecordsMySubscriptions

        if DEBUG:
            print >> sys.stderr , "getMySubscribedChannels"
        allrecords = []

        # All known channels.
        sql = "select distinct publisher_id, publisher_name from ChannelCast"
        channel_records = self._db.fetchall(sql)

#        sql = "select mod_id, (2*sum(vote)-count(*))/3 from VoteCast group by mod_id order by 2 desc"
        sql = "select mod_id, sum(vote-1) from VoteCast group by mod_id order by 2 desc" # only subscriptions, not spam votes
        votecast_records = self._db.fetchall(sql)

        # Channels I subscribed to (vote==2).
        sql = "select distinct mod_id from VoteCast where voter_id==? and vote=2"
        subscribed_channels = self._db.fetchall(sql,(bin2str(self.my_permid),))

        # Only the keys of this dict are used (membership tests below).
        subscribers = {}
        for record in subscribed_channels:
            subscribers[record[0]]="12"

        # Keep only channels I subscribe to, excluding my own channel.
        publishers = {}
        for publisher_id, publisher_name in channel_records:
            if publisher_id not in publishers and publisher_id in subscribers and publisher_id!=bin2str(self.my_permid):
                publishers[publisher_id]=[publisher_name, 0]

        # Attach the aggregate vote score to each remaining channel.
        for mod_id, vote in votecast_records:
            if mod_id in publishers: 
                publishers[mod_id][1] = vote

        for k, v in publishers.items():
            allrecords.append((k, v[0], v[1], {}))
        # Sort by vote score, highest first.
        def compare(a,b):
            if a[2]>b[2] : return -1
            if a[2]<b[2] : return 1
            return 0
        allrecords.sort(compare)

        # Refresh the cache (GUI path only).
        if not from_channelcast:
            if self.allRecordsMySubscriptions is None:            
                self.firstQueryMySubscriptions=False
            self.allRecordsMySubscriptions=allrecords

        return allrecords
+
+    def getMostPopularChannelFromTorrent(self, infohash): ##
+        """Returns name of most popular channel if any"""
+        vcdb = VoteCastDBHandler.getInstance()
+        sql = "select * from ChannelCast where infohash==?" 
+        publishers = self._db.fetchall(sql,(bin2str(infohash),))
+        if len(publishers) == 0:
+            return None
+        else:
+            maxvote = -1
+            for publisher_item in publishers:
+                num_subscribers = vcdb.getEffectiveVote(publisher_item[0])
+                if num_subscribers > maxvote:
+                    publisher_id = publisher_item[0]
+                    publisher_name = publisher_item[1]
+                    maxvote = num_subscribers
+            channel = (publisher_id, publisher_name, maxvote, {})
+            return channel
+
+    
+            
class GUIDBHandler:
    """ All the functions of this class are only (or mostly) used by GUI.
        It is not associated with any db table, but will use any of them.
    """

    __single = None    # used for multithreaded singletons pattern
    lock = threading.Lock()

    def getInstance(*args, **kw):
        # Singleton pattern with double-checked locking.
        if GUIDBHandler.__single is None:
            GUIDBHandler.lock.acquire()
            try:
                if GUIDBHandler.__single is None:
                    GUIDBHandler(*args, **kw)
            finally:
                GUIDBHandler.lock.release()
        return GUIDBHandler.__single

    getInstance = staticmethod(getInstance)

    def __init__(self):
        # Do not call directly; use getInstance().
        if GUIDBHandler.__single is not None:
            raise RuntimeError("GUIDBHandler is singleton")
        self._db = SQLiteCacheDB.getInstance()
        self.notifier = Notifier.getInstance()
        GUIDBHandler.__single = self

    def getCommonFiles(self, permid):
        """Return the names of live torrents both the given peer and I prefer.

        @param permid: binary permid of the remote peer.
        @return: list of torrent names; empty for unknown peers.
        """
        peer_id = self._db.getPeerID(permid)
        if peer_id is None:
            return []

        # status_id <> 2 excludes dead torrents; get_family_filter_sql()
        # appends an extra category restriction when the filter is enabled.
        sql_get_common_files = """select name from CollectedTorrent where torrent_id in (
                                    select torrent_id from Preference where peer_id=?
                                      and torrent_id in (select torrent_id from MyPreference)
                                    ) and status_id <> 2
                               """ + self.get_family_filter_sql()
        res = self._db.fetchall(sql_get_common_files, (peer_id,))
        return [t[0] for t in res]

    def getOtherFiles(self, permid):
        """Return (infohash, name) of the peer's preferred torrents I do not prefer.

        @param permid: binary permid of the remote peer.
        @return: list of (binary_infohash, name) tuples; empty for unknown peers.
        """
        peer_id = self._db.getPeerID(permid)
        if peer_id is None:
            return []

        sql_get_other_files = """select infohash,name from CollectedTorrent where torrent_id in (
                                    select torrent_id from Preference where peer_id=?
                                      and torrent_id not in (select torrent_id from MyPreference)
                                    ) and status_id <> 2
                              """ + self.get_family_filter_sql()
        res = self._db.fetchall(sql_get_other_files, (peer_id,))
        return [(str2bin(t[0]),t[1]) for t in res]

    def getSimItems(self, infohash, limit):
        """Collaborative-filtering recommendation: torrents most often
        co-preferred with the given one, excluding my own preferences.

        @return: list of (binary_infohash, name, status_id, co_occurrence_count).
        """
        torrent_id = self._db.getTorrentID(infohash)
        if torrent_id is None:
            return []

        sql_get_sim_files = """
            select infohash, name, status_id, count(P2.torrent_id) c 
             from Preference as P1, Preference as P2, CollectedTorrent as T
             where P1.peer_id=P2.peer_id and T.torrent_id=P2.torrent_id 
             and P2.torrent_id <> P1.torrent_id
             and P1.torrent_id=?
             and P2.torrent_id not in (select torrent_id from MyPreference)
             %s
             group by P2.torrent_id
             order by c desc
             limit ?    
        """ % self.get_family_filter_sql('T')

        res = self._db.fetchall(sql_get_sim_files, (torrent_id,limit))
        return [(str2bin(t[0]),t[1], t[2], t[3]) for t in res]

    def getSimilarTitles(self, name, limit, infohash, prefix_len=5):
        """Title-based recommendation: torrents whose name starts with the
        same prefix as `name`, excluding the torrent itself and my own
        preferences.

        @param prefix_len: number of leading characters used for matching.
        @return: list of (binary_infohash, name, status_id).
        """
        # The prefix and infohash are bound as SQL parameters instead of
        # being interpolated into the statement: the old interpolation was a
        # SQL-injection vector that the "'" -> "`" replacement only partly
        # mitigated (and which corrupted titles containing quotes).
        sql_get_sim_files = """
            select infohash, name, status_id from Torrent
            where name like ?
             and infohash <> ?
             and torrent_id not in (select torrent_id from MyPreference)
             %s
            order by name
             limit ?
        """ % self.get_family_filter_sql()

        prefix_pattern = name[:prefix_len] + '%'
        res = self._db.fetchall(sql_get_sim_files, (prefix_pattern, bin2str(infohash), limit))
        return [(str2bin(t[0]),t[1], t[2]) for t in res]

    def _how_many_prefix(self):
        """ test how long the prefix is enough to find similar titles """
        # Jie: I found 5 is the best value.
        # Debug helper: for several prefix lengths, print the ten most common
        # torrent-name prefixes to stderr.
        sql = "select name from Torrent where name is not NULL order by name"
        names = self._db.fetchall(sql)

        for top in range(3, 10):
            sta = {}
            for line in names:
                prefix = line[0][:top]
                if prefix not in sta:
                    sta[prefix] = 1
                else:
                    sta[prefix] += 1

            res = [(v,k) for k,v in sta.items()]
            res.sort()
            res.reverse()

            print >> sys.stderr, '------------', top, '-------------'
            for k in res[:10]:
                print >> sys.stderr, k

    def get_family_filter_sql(self, table_name=''):
        """Return the extra SQL clause implementing the category family filter."""
        torrent_db_handler = TorrentDBHandler.getInstance()
        return torrent_db_handler.category.get_family_filter_sql(torrent_db_handler._getCategoryID, table_name=table_name)
+
+
class PopularityDBHandler(BasicDBHandler):
    '''
    @author: Rahim    04-2009
    This class handles access to the Popularity table, which is used for 
    keeping swarm size info received through BuddyCast messages.
    '''
    __single = None    # used for multithreaded singletons pattern
    lock = threading.Lock()
    
    def getInstance(*args, **kw):
        # Singleton pattern with double-checking: only the first caller
        # constructs the instance; everyone else gets the cached one.
        if PopularityDBHandler.__single is None:
            PopularityDBHandler.lock.acquire()   
            try:
                if PopularityDBHandler.__single is None:
                    PopularityDBHandler(*args, **kw)
            finally:
                PopularityDBHandler.lock.release()
        return PopularityDBHandler.__single
    getInstance = staticmethod(getInstance)

    def __init__(self):
        # Do not call directly; use getInstance().
        if PopularityDBHandler.__single is not None:
            raise RuntimeError, "PopularityDBHandler is singleton"
        PopularityDBHandler.__single = self
        db = SQLiteCacheDB.getInstance()        
        BasicDBHandler.__init__(self,db, 'Popularity')
        
        # define local handlers to access Peer and Torrent tables.
        self.peer_db = PeerDBHandler.getInstance()
        self.torrent_db = TorrentDBHandler.getInstance()
+        
+    ###--------------------------------------------------------------------------------------------------------------------------
+        
    def calculateSwarmSize(self, torrentList, content, toBC=True):
        """
        Calculate the swarm size for a list of torrents.
        @author: Rahim
        @param torrentList: a list of torrent_ids or infohashes (see content).
        @param content: 'Infohash' if torrentList contains infohashes,
               'TorrentIds' if it contains torrent_ids; any other value
               yields an empty result.
        @param toBC: True when the result will be placed in a new BuddyCast
               message.  Nodes only forward first-hand information (to stop
               the spread of contamination when a sender reported an
               incorrect value), so with toBC=True only the Torrent table is
               consulted; with toBC=False the most recent report from both
               the Popularity and Torrent tables is used.
        @return: a list the same size as the input; each item is
                 [torrent_id, num_seeders, num_leechers, age_of_report,
                 num_sources_seen], with -1 for unknown fields.
        """
        if content=='Infohash':
            # NOTE(review): getTorrentID returns None for unknown infohashes;
            # such entries propagate as torrent_id=None -- confirm callers cope.
            torrentList = [self.torrent_db.getTorrentID(infohash) for infohash in torrentList ]
        elif content=='TorrentIds':
            pass
        else:
            return []
        
        trackerSizeList =[]
        popularityList=[]
        for torrentId in torrentList:
            trackerSizeList.append(self.torrent_db.getSwarmInfo(torrentId))
            if not toBC:
                popularityList.append(self.getPopularityList(torrent_id=torrentId))
        result =[]
        timeNow=int(time())
        
        averagePeerUpTime = 2 * 60 * 60  # we suppose that the average uptime is roughly two hours.
        listIndex = 0
        for id in torrentList:
            result.append([id, -1, -1, -1, -1])  # [torrent_id, num_seeders, num_leechers, age, num_sources]
            if not toBC and len(popularityList[listIndex]) > 0 :
                #if popularityList[listIndex][0] is not None:
                latest = self.getLatestPopularityReport(popularityList[listIndex], timeNow)
                result[listIndex][1] = latest[4]  # num_seeders
                result[listIndex][2] = latest[5]  # num_leechers
                result[listIndex][3] = timeNow - latest[2]+latest[3]  # age of the report
                result[listIndex][4] = latest[6]   # num_sources
                    # print latest
                # Prefer the tracker-reported numbers when they are younger
                # than the best popularity report.
                if len(trackerSizeList[listIndex]) > 0 and len(trackerSizeList[listIndex][0]) > 0:
                    #if trackerSizeList[listIndex][0] is not None:
                    temp=trackerSizeList[listIndex][0]
                    tempAge = timeNow - temp[3]
                    if tempAge < result[listIndex][3]:
                        result[listIndex][1] = temp[1] #num_seeders
                        result[listIndex][2] = temp[2] #num_leechers
                        result[listIndex][3] = tempAge # Age of the tracker asking 
                        othersSeenSources = self.torrent_db.getLargestSourcesSeen(id, timeNow, averagePeerUpTime)
                        result[listIndex][4] = max(temp[4], othersSeenSources) # num_sources

            elif len(trackerSizeList[listIndex]) > 0 and len(trackerSizeList[listIndex][0]) > 0:
                #if trackerSizeList[listIndex][0] is not None:
               temp=trackerSizeList[listIndex][0]
               result[listIndex][1] = temp[1] #num seeders
               result[listIndex][2] = temp[2] #num leechers
               result[listIndex][3] = timeNow - temp[3] # age of check
               result[listIndex][4] = temp[4] # num_sources
            listIndex +=1
                    
        return result
+    
+    def getLatestPopularityReport(self, reportList, timeNow):
+       
+        """
+        gets a list of list and then returns on of the them that has highest value in the specified index.
+        @author: Rahim    
+        @param reportList: A list that contains popularity report for specified torrent. The first item contains torrent_id.
+        @param index: The index of item that comparision is done based on it.
+        @param timeNow: Indictes local time of the node that runs this process.
+       
+        """
+        if len(reportList) ==0:
+            return []
+       
+        result=reportList.pop(0)
+        listLength = len(reportList)
+        
+        for i in range(0,listLength):
+            if (timeNow - reportList[i][2] + reportList[i][3])  < (timeNow - result[2] + result[3]): #it selects the youngest report
+                result = reportList[i]
+       
+        return result
+       
+        
+    ###--------------------------------------------------------------------------------------------------------------------------         
+    def checkPeerValidity(self, peer_id):
+        '''
+        checks whether the peer_id is valid or not, in other word it is in the Peer table or not?
+        @param peer_id: the id of the peer to be checked.
+        @return: True if the peer_is is valid, False if not.
+        '''
+        if self.peer_db.getPermid(peer_id) is None:
+            return False
+        else: 
+            return True  
+    ###--------------------------------------------------------------------------------------------------------------------------            
+    def checkTorrentValidity(self, torrent_id):
+        '''
+        checks whether the torrent_id is valid or not, in other word it is in the Torrent table or not?
+        @param torrent_id: the id of the torrent to be checked.
+        @return: True if the torrent_is is valid, False if not.
+        '''        
+        if self.torrent_db.getInfohash(torrent_id) is None:
+            return False
+        else:
+            return True
+    ###--------------------------------------------------------------------------------------------------------------------------        
    def addPopularity(self, torrent_id, peer_id, recv_time, calc_age=sys.maxint, num_seeders=-1, num_leechers=-1, num_sources=-1, validatePeerId=False, validateTorrentId=False,
                       checkNumRecConstraint=True, commit=True):
        '''
        Adds a new popularity record to the Popularity table, replacing any
        record with the same (torrent_id, peer_id, recv_time) key.
        @param torrent_id: The id of the torrent that is added to the table.
        @param peer_id: The id of the peer that is added to the table.
        @param recv_time: The time that the peer received the message.
        @param calc_age: The time at which the remote peer calculated (or sent) the swarm size.
        @param num_seeders: Number of seeders reported by the remote peer.
        @param num_leechers: Number of leechers reported by the remote peer.
        @param num_sources: Number of Tribler sources that have seen this torrent, reported by the remote peer.
        @param validatePeerId: If True, check that the peer exists first.
        @param validateTorrentId: If True, check that the torrent exists first.
        @param checkNumRecConstraint: If True, prune old records so the
               per-torrent and per-(torrent, peer) limits are respected.
        @param commit: If True, commit each write immediately.
        @return: None when validation fails; otherwise no meaningful value.
        '''
        if validatePeerId: # checks whether the peer is valid or not
            if not self.checkPeerValidity(peer_id):
                return None
        if validateTorrentId: #checks whether the torrent is valid or not
            if not self.checkTorrentValidity(torrent_id):
                return None
               
        # Replace any record with the same (torrent, peer, receive time) key.
        sql_delete_already_existing_record = u"""DELETE FROM Popularity WHERE torrent_id=? AND peer_id=? AND msg_receive_time=?"""
        self._db.execute_write(sql_delete_already_existing_record, (torrent_id, peer_id, recv_time), commit=commit)

        
        sql_insert_new_populairty = u"""INSERT INTO Popularity (torrent_id, peer_id, msg_receive_time, size_calc_age, num_seeders,
                                        num_leechers, num_of_sources) VALUES (?,?,?,?,?,?,?)"""
        try:
            self._db.execute_write(sql_insert_new_populairty, (torrent_id, peer_id, recv_time, calc_age, num_seeders, num_leechers, num_sources), commit=commit)
        except Exception, msg:    
            # NOTE(review): failed inserts are logged and swallowed -- the
            # record is silently dropped.  Consider narrowing this handler.
            print_exc() 
        
        timeNow = int(time())
        if checkNumRecConstraint: # Removes old records. The number of records should not exceed defined limitations.
            availableRecsT = self.countTorrentPopularityRec(torrent_id, timeNow)
            if availableRecsT[0] > MAX_POPULARITY_REC_PER_TORRENT:
                self.deleteOldTorrentRecords(torrent_id, availableRecsT[0] - MAX_POPULARITY_REC_PER_TORRENT, timeNow, commit=commit)
    
    
            availableRecsTP = self.countTorrentPeerPopularityRec(torrent_id, peer_id, timeNow)
            if availableRecsTP[0] > MAX_POPULARITY_REC_PER_TORRENT_PEER:
                self.deleteOldTorrentPeerRecords(torrent_id,peer_id, availableRecsTP[0] - MAX_POPULARITY_REC_PER_TORRENT_PEER, timeNow, commit=commit)
+    
+    ###--------------------------------------------------------------------------------------------------------------------------            
+    def storePeerPopularity(self, peer_id, popularityList, validatePeerId=False, validateTorrentId=False, commit=True):
+        '''
+        Insert all popularity info received through BuddyCast message. popularityList is a tuple of 
+        @param peer_id: The id of the popularity info sender.
+        @param popularityList: A list of tuple (torrent_id, recv_time, calc_age, num_seeders, num_leechers, num_sources), usually received through BuddyCast message.
+        '''
+        if validatePeerId:
+           if not self.checkPeerValidity(peer_id):
+               return None
+        
+        for item in popularityList[:-1]:
+           self.addPopularity(item[0], peer_id, item[1], item[2], item[3], item[4], item[5], validateTorrentId=validateTorrentId, commit=False)
+        
+        if len(popularityList)>0:
+            item = popularityList[-1]
+            self.addPopularity(item[0], peer_id, item[1], item[2], item[3], item[4], item[5], validateTorrentId=validateTorrentId, commit=commit)
+    ###--------------------------------------------------------------------------------------------------------------------------        
+    def countTorrentPopularityRec(self, torrent_id, timeNow):
+        '''
+        This method counts the number of logged popularity for the input torrrent.
+        @param torrent_id: the id of the torrent
+        @return: (number_of_records, oldest_record_time)
+        '''     
+        
+        count_sql = "SELECT count(*) FROM Popularity WHERE torrent_id=?" 
+        num_of_popularity = self._db.fetchone(count_sql,(torrent_id, ))
+                    
+        if num_of_popularity > 0:
+            sql_oldest_record = "SELECT size_calc_age FROM Popularity WHERE torrent_id=? ORDER BY ( ? - msg_receive_time+size_calc_age) DESC LIMIT ?"
+            oldest_record_age = self._db.fetchone(sql_oldest_record, (torrent_id, timeNow, 1))
+            return (num_of_popularity, oldest_record_age)
+        else:
+            if DEBUG:
+                print >> sys.stderr, "The torrent with the id ", torrent_id, " does not have any popularity record."
+            return (0 , sys.maxint) 
+    ###--------------------------------------------------------------------------------------------------------------------------        
+    def countTorrentPeerPopularityRec(self, torrent_id, peer_id, timeNow):
+        '''
+        counts the number of popularity records done for the input torrent_id by the input peer_id.
+        @param torrent_id: the id of the torrent.
+        @param peer_id: the id of the peer.
+        @return: (number_of_records, oldest_record_time) with same torrent_id and peer_id as input.
+        '''
+        count_sql = "SELECT count(*) FROM Popularity WHERE torrent_id=? AND peer_id=?" 
+        num_of_popularity = self._db.fetchone(count_sql,(torrent_id, peer_id))
+        
+        if num_of_popularity > 0:
+            sql_oldest_record = "SELECT size_calc_age FROM Popularity WHERE torrent_id=? AND peer_id=? ORDER BY ( ? - msg_receive_time+size_calc_age) DESC LIMIT ?"
+            oldest_record_age = self._db.fetchone(sql_oldest_record, (torrent_id, peer_id, timeNow, 1))
+            return (num_of_popularity, oldest_record_age)
+        else:
+            if DEBUG:
+                print >> sys.stderr, "The peer with the id ", peer_id, "has not reported any thing about the torrent: ", torrent_id
+            return (0 , sys.maxint) 
+    ###--------------------------------------------------------------------------------------------------------------------------     
+    def deleteOldTorrentRecords(self, torrent_id, num_rec_to_delete, timeNow, commit=True):
+         '''
+         Deletes the oldest num_rec_to_del popularity records about the torrect_id from popularity table.
+         @param torrent_id: the id of the torrent.
+         @param num_rec_to_delete: Number of the oldest records that should be removed from the table.
+         '''
+         
+         sql_delete = u""" DELETE FROM Popularity WHERE torrent_id=? AND size_calc_age IN 
+                           (SELECT size_calc_age FROM Popularity WHERE torrent_id=? 
+                           ORDER BY (? - msg_receive_time+size_calc_age) DESC LIMIT ?)"""
+         
+         self._db.execute_write(sql_delete, (torrent_id, torrent_id, timeNow, num_rec_to_delete), commit=commit)
+
+    ###--------------------------------------------------------------------------------------------------------------------------
+    def deleteOldTorrentPeerRecords(self, torrent_id, peer_id, num_rec_to_delete, timeNow, commit=True):
+         '''
+         Deletes the oldest num_rec_to_del popularity records about the torrect_id repported by peer_id from popularity table.
+         @param torrent_id: the id of the torrent.
+         @param peer_id: the id of the popularity sender.
+         @param num_rec_to_delete: Number of the oldest records that should be removed from the table.
+         '''
+         
+         sql_delete = u""" DELETE FROM Popularity where torrent_id=? AND peer_id=? AND size_calc_age IN 
+                           (SELECT size_calc_age FROM popularity WHERE torrent_id=? AND peer_id=?
+                           ORDER BY (? - msg_receive_time+size_calc_age) DESC LIMIT ?)"""
+         
+         self._db.execute_write(sql_delete, (torrent_id, peer_id,torrent_id, peer_id,timeNow, num_rec_to_delete), commit=commit)
+         
+    ###--------------------------------------------------------------------------------------------------------------------------
+    def getPopularityList(self, torrent_id=None, peer_id=None , recv_time_lbound=0, recv_time_ubound=sys.maxint):
+         '''
+         Returns a list of the records from the Popularity table, by using input parameters.
+         @param torremt_id: The id of the torrent.
+         @param peer_id: The id of the peer.
+         @param recv_time_lbound: Lower bound for the message receive time. Default value is 0.
+         @param recv_time_ubound: Upper bound for the message receive time. Default value is 0x10000000L
+         @return: A list of tuple (torrent_id, recv_time, calc_age, num_seeders, num_leechers, num_sources)
+         '''
+         sql_getPopList=" SELECT * FROM Popularity"
+         
+         if (torrent_id is not None) or (peer_id is not None) or (not recv_time_lbound==0) or (not recv_time_ubound==sys.maxint):
+             sql_getPopList += " WHERE "
+         
+         if torrent_id is not None:
+             sql_getPopList += "torrent_id = %s" % torrent_id 
+             if (peer_id is not None) or (not recv_time_lbound==0) or (not recv_time_ubound==sys.maxint):
+                 sql_getPopList += " AND "
+         
+         if peer_id is not None:
+             sql_getPopList += "peer_id = %d" % peer_id 
+             if (not recv_time_lbound==0) or (not recv_time_ubound==sys.maxint):
+                 sql_getPopList += " AND "
+             
+         if not recv_time_lbound==0:
+             sql_getPopList += "msg_receive_time >= %d" % recv_time_lbound
+             if not recv_time_ubound==sys.maxint: 
+                 sql_getPopList += " AND " 
+         
+         if not recv_time_ubound==sys.maxint:
+             sql_getPopList += "msg_receive_time <= %d" % recv_time_ubound 
+          
+         print sql_getPopList 
+         popularityList = self._db.fetchall(sql_getPopList)
+         
+         return popularityList 
+     
+    ###----------------------------------------------------------------------------------------------------------------------
+    
    def addPopularityRecord(self, peer_permid, pops, selversion, recvTime, is_torrent_id=False, commit=True):
        """
        Store swarm-size ("popularity") info received from a peer.

        @param peer_permid: permid of the sending peer; must already exist in
               the Peer table, otherwise the whole record is dropped.
        @param pops: list of entries; each entry is a dict or (legacy format)
               a bare infohash string, which is wrapped into {"infohash": ...}.
        @param selversion: overlay protocol version of the sender (not used in
               this method).
        @param recvTime: local receive time of the message.
        @param is_torrent_id: True when entries carry 'torrent_id' plus swarm
               size fields; False for infohash-based entries.
        @param commit: whether to commit the transaction immediately.
        """
        peer_id = self._db.getPeerID(peer_permid)
        if peer_id is None:
            print >> sys.stderr, 'PopularityDBHandler: update received popularity list from a peer that is not existed in Peer table', `peer_permid`
            return

        # Normalize legacy entries: a bare infohash string becomes a dict.
        pops = [type(pop) is str and {"infohash":pop} or pop
                 for pop
                 in pops]
        
        if __debug__:
            for pop in pops:
                assert isinstance(pop["infohash"], str), "INFOHASH has invalid type: %s" % type(pop["infohash"])
                assert len(pop["infohash"]) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(pop["infohash"])
        
        if is_torrent_id:
            #Rahim : Since overlay version 11 swarm size information is 
            # appended and should be added to the database . The codes below 
            # does this. torrent_id, recv_time, calc_age, num_seeders, 
            # num_leechers, num_sources
            #
            torrent_id_swarm_size =[]
            for pop in pops:
                if pop is not None:
                    tempAge = pop.get('calc_age')
                    tempSeeders = pop.get('num_seeders')
                    tempLeechers = pop.get('num_leechers')
                    # Only keep entries whose age and swarm counts are sane.
                    if tempAge > 0 and tempSeeders >= 0 and tempLeechers >= 0:
                        torrent_id_swarm_size.append( [pop['torrent_id'],
                                     recvTime, 
                                     tempAge,  
                                     tempSeeders,
                                     tempLeechers,
                                     pop.get('num_sources_seen', -1)]# -1 means invalid value 
                                     )
        else:
            # NOTE(review): in this branch torrent_id_swarm_size is never
            # appended to (the append below is commented out), so nothing is
            # stored for infohash-based entries -- only the insertInfohash()
            # side effect happens.  Confirm this is intentional.
            torrent_id_swarm_size = []
            for pop in pops:
                if type(pop)==dict:
                    infohash = pop["infohash"]
                else:
                    # Nicolas: from wherever this might come, we even handle 
                    # old list of infohashes style
                    infohash = pop 
                torrent_id = self._db.getTorrentID(infohash)
                if not torrent_id:
                    self._db.insertInfohash(infohash)
                    torrent_id = self._db.getTorrentID(infohash)
                #Rahim: Amended for handling and adding swarm size info.
                #torrent_id_swarm_size.append((torrent_id, timeNow,0, -1, -1, -1))
        if len(torrent_id_swarm_size) > 0:
            try:
                #popularity_db = PopularityDBHandler.getInstance()
                #popularity_db.storePeerPopularity(peer_id, torrent_id_swarm_size, commit=commit)
                self.storePeerPopularity(peer_id, torrent_id_swarm_size, commit=commit)
            except Exception, msg:    
                print_exc()
                print >> sys.stderr, 'dbhandler: updatePopularity:', Exception, msg 
+
+class TermDBHandler(BasicDBHandler):
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking
+        if TermDBHandler.__single is None:
+            TermDBHandler.lock.acquire()   
+            try:
+                if TermDBHandler.__single is None:
+                    TermDBHandler(*args, **kw)
+            finally:
+                TermDBHandler.lock.release()
+        return TermDBHandler.__single
+    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        if TermDBHandler.__single is not None:
+            raise RuntimeError, "TermDBHandler is singleton"
+        TermDBHandler.__single = self
+        db = SQLiteCacheDB.getInstance()        
+        BasicDBHandler.__init__(self,db, 'ClicklogTerm') 
+        
+        
+    def getNumTerms(self):
+        """returns number of terms stored"""
+        return self.getOne("count(*)")
+    
+    
+    def bulkInsertTerms(self, terms, commit=True):
+        for term in terms:
+            term_id = self.getTermIDNoInsert(term)
+            if not term_id:
+                self.insertTerm(term, commit=False) # this HAS to commit, otherwise last_insert_row_id() won't work. 
+            # if you want to avoid committing too often, use bulkInsertTerm
+        if commit:         
+            self.commit()
+            
+    def getTermIDNoInsert(self, term):
+        return self.getOne('term_id', term=term[:MAX_KEYWORD_LENGTH].lower())
+            
+    def getTermID(self, term):
+        """returns the ID of term in table ClicklogTerm; creates a new entry if necessary"""
+        term_id = self.getTermIDNoInsert(term)
+        if term_id:
+            return term_id
+        else:
+            self.insertTerm(term, commit=True) # this HAS to commit, otherwise last_insert_row_id() won't work. 
+            return self.getOne("last_insert_rowid()")
+    
+    def insertTerm(self, term, commit=True):
+        """creates a new entry for term in table Term"""
+        self._db.insert(self.table_name, commit=commit, term=term[:MAX_KEYWORD_LENGTH])
+    
+    def getTerm(self, term_id):
+        """returns the term for a given term_id"""
+        return self.getOne("term", term_id=term_id)
+        # if term_id==-1:
+        #     return ""
+        # term = self.getOne('term', term_id=term_id)
+        # try:
+        #     return str2bin(term)
+        # except:
+        #     return term
+    
+    def getTermsStartingWith(self, beginning, num=10):
+        """returns num most frequently encountered terms starting with beginning"""
+        
+        # request twice the amount of hits because we need to apply
+        # the familiy filter...
+        terms = self.getAll('term', 
+                            term=("like", u"%s%%" % beginning),
+                            order_by="times_seen DESC",
+                            limit=num * 2)
+
+        if terms:
+            # terms is a list containing lists. We only want the first
+            # item of the inner lists.
+            terms = [term for (term,) in terms]
+
+            catobj = Category.getInstance()
+            if catobj.family_filter_enabled():
+                return filter(lambda term: not catobj.xxx_filter.foundXXXTerm(term), terms)[:num]
+            else:
+                return terms[:num]
+
+        else:
+            return []
+    
+    def getAllEntries(self):
+        """use with caution,- for testing purposes"""
+        return self.getAll("term_id, term", order_by="term_id")
+    
+class SimilarityDBHandler:
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking
+        if SimilarityDBHandler.__single is None:
+            SimilarityDBHandler.lock.acquire()   
+            try:
+                if SimilarityDBHandler.__single is None:
+                    SimilarityDBHandler(*args, **kw)
+            finally:
+                SimilarityDBHandler.lock.release()
+        return SimilarityDBHandler.__single
+    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        if SimilarityDBHandler.__single is not None:
+            raise RuntimeError, "SimilarityDBHandler is singleton"
+        SimilarityDBHandler.__single = self
+        self._db = SQLiteCacheDB.getInstance()
+    
+    def getOverlapWithPeer(self, peer_id, myprefs):
+        sql_get_overlap_with_peer = """SELECT Peer.peer_id, num_prefs, COUNT(torrent_id) FROM Peer
+                                        JOIN Preference ON Peer.peer_id = Preference.peer_id 
+                                        WHERE torrent_id IN("""+','.join(map(str,myprefs))+""") 
+                                        AND Peer.peer_id = ? GROUP BY Peer.peer_id"""
+        row = self._db.fetchone(sql_get_overlap_with_peer, (peer_id,))
+        return row
+    
+    def getPeersWithOverlap(self, not_peer_id, myprefs):
+        sql_get_peers_with_overlap = """SELECT Peer.peer_id, num_prefs, COUNT(torrent_id) FROM Peer
+                                        JOIN Preference ON Peer.peer_id = Preference.peer_id 
+                                        WHERE torrent_id IN("""+','.join(map(str,myprefs))+""") 
+                                        AND Peer.peer_id <> ? GROUP BY Peer.peer_id"""
+        row = self._db.fetchall(sql_get_peers_with_overlap, (not_peer_id,))
+        return row
+    
+    def getTorrentsWithSimilarity(self, myprefs, top_x):
+        sql_get_torrents_with_similarity = """SELECT similarity, torrent_id FROM Peer
+                                              JOIN Preference ON Peer.peer_id = Preference.peer_id
+                                              WHERE Peer.peer_id IN(Select peer_id from Peer WHERE similarity > 0 ORDER By similarity DESC Limit ?)
+                                              AND torrent_id NOT IN(""" + ','.join(map(str,myprefs))+""")"""
+        row = self._db.fetchall(sql_get_torrents_with_similarity, (top_x,))
+        return row
+    
+                 
+                                        
+
+class SearchDBHandler(BasicDBHandler):
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking
+        if SearchDBHandler.__single is None:
+            SearchDBHandler.lock.acquire()   
+            try:
+                if SearchDBHandler.__single is None:
+                    SearchDBHandler(*args, **kw)
+            finally:
+                SearchDBHandler.lock.release()
+        return SearchDBHandler.__single
+    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        if SearchDBHandler.__single is not None:
+            raise RuntimeError, "SearchDBHandler is singleton"
+        SearchDBHandler.__single = self
+        db = SQLiteCacheDB.getInstance()
+        BasicDBHandler.__init__(self,db, 'ClicklogSearch') ## self,db,'Search'
+        
+        
+    ### write methods
+    
+    def storeKeywordsByID(self, peer_id, torrent_id, term_ids, commit=True):
+        sql_insert_search = u"INSERT INTO ClicklogSearch (peer_id, torrent_id, term_id, term_order) values (?, ?, ?, ?)"
+        
+        if len(term_ids)>MAX_KEYWORDS_STORED:
+            term_ids= term_ids[0:MAX_KEYWORDS_STORED]
+
+        # TODO before we insert, we should delete all potentially existing entries
+        # with these exact values
+        # otherwise, some strange attacks might become possible
+        # and again we cannot assume that user/torrent/term only occurs once
+
+        # create insert data
+        values = [(peer_id, torrent_id, term_id, term_order) 
+                  for (term_id, term_order) 
+                  in zip(term_ids, range(len(term_ids)))]
+        self._db.executemany(sql_insert_search, values, commit=commit)        
+        
+        # update term popularity
+        sql_update_term_popularity= u"UPDATE ClicklogTerm SET times_seen = times_seen+1 WHERE term_id=?"        
+        self._db.executemany(sql_update_term_popularity, [[term_id] for term_id in term_ids], commit=commit)        
+        
+    def storeKeywords(self, peer_id, torrent_id, terms, commit=True):
+        """creates a single entry in Search with peer_id and torrent_id for every term in terms"""
+        terms = [term.strip() for term in terms if len(term.strip())>0]
+        term_db = TermDBHandler.getInstance()
+        term_ids = [term_db.getTermID(term) for term in terms]
+        self.storeKeywordsByID(peer_id, torrent_id, term_ids, commit)
+
+    def getAllEntries(self):
+        """use with caution,- for testing purposes"""
+        return self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", order_by="rowid")
+    
+    def getAllOwnEntries(self):
+        """use with caution,- for testing purposes"""
+        return self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", where="peer_id=0", order_by="rowid")
+    
+
+    
+    ### read methods
+    
+    def getNumTermsPerTorrent(self, torrent_id):
+        """returns the number of terms associated with a given torrent"""
+        return self.getOne("COUNT (DISTINCT term_id)", torrent_id=torrent_id)
+        
+    def getNumTorrentsPerTerm(self, term_id):
+        """returns the number of torrents stored with a given term."""
+        return self.getOne("COUNT (DISTINCT torrent_id)", term_id=term_id)
+    
+    def getNumTorrentTermCooccurrences(self, term_id, torrent_id):
+        """returns the number of times a torrent has been associated with a term"""
+        return self.getOne("COUNT (*)", term_id=term_id, torrent_id=torrent_id)    
+    
+    def getRelativeTermFrequency(self, term_id, torrent_id):
+        """returns the relative importance of a term for a torrent
+        This is basically tf/idf 
+        term frequency tf = # keyword used per torrent/# keywords used with torrent at all
+        inverse document frequency = # of torrents associated with term at all
+        
+        normalization in tf ensures that a torrent cannot get most important for all keywords just 
+        by, e.g., poisoning the db with a lot of keywords for this torrent
+        idf normalization ensures that returned values are meaningful across several keywords 
+        """
+        
+        terms_per_torrent = self.getNumTermsPerTorrent(torrent_id)
+        if terms_per_torrent==0:
+            return 0
+        
+        torrents_per_term = self.getNumTorrentsPerTerm(term_id)
+        if torrents_per_term == 0:
+            return 0
+        
+        coocc = self.getNumTorrentTermCooccurrences(term_id, torrent_id)
+        
+        tf = coocc/float(terms_per_torrent)
+        idf = 1.0/math.log(torrents_per_term+1)
+        
+        return tf*idf
+    
+    
+    def getTorrentSearchTerms(self, torrent_id, peer_id):
+        return self.getAll("term_id", "torrent_id=%d AND peer_id=%s" % (torrent_id, peer_id), order_by="term_order")
+    
+    def getMyTorrentSearchTerms(self, torrent_id):
+        return [x[0] for x in self.getTorrentSearchTerms(torrent_id, peer_id=0)]
+        
+                
+    ### currently unused
+                  
+    def numSearchesWithTerm(self, term_id):
+        """returns the number of searches stored with a given term. 
+        I feel like I might miss something, but this should simply be the number of rows containing
+        the term"""
+        return self.getOne("COUNT (*)", term_id=term_id)
+    
+    def getNumTorrentPeers(self, torrent_id):
+        """returns the number of users for a given torrent. if this should be used 
+        extensively, an index on torrent_id might be in order"""
+        return self.getOne("COUNT (DISTINCT peer_id)", torrent_id=torrent_id)
+    
+    def removeKeywords(self, peer_id, torrent_id, commit=True):
+        """removes records of keywords used by peer_id to find torrent_id"""
+        # TODO
+        # would need to be called by deletePreference
+        pass
+    
+    
+    
+    
def doPeerSearchNames(self,dbname,kws):
    """ Get all peers that have the specified keywords in their name. 
    Return a list of dictionaries. Each dict is in the NEWDBSTANDARD format.

    @param dbname: 'Peer' or 'Friend' -- the table to search.
    @param kws: list of keyword strings; a peer matches when its name
           contains every keyword.
    @raise Exception: when dbname is neither 'Peer' nor 'Friend'.
    """
    if dbname == 'Peer':
        where = '(Peer.last_connected>0 or Peer.friend=1) and '
    elif dbname == 'Friend':
        where  = ''
    else:
        raise Exception('unknown dbname: %s' % dbname)

    # Must come before query
    ranks = self.getRanks()

    for i in range(len(kws)):
        # Keywords may come from user input: double any embedded double-quote
        # so a keyword cannot terminate the quoted SQL literal below.
        kw = kws[i].replace('"', '""')
        where += ' name like "%'+kw+'%"'
        if (i+1) != len(kws):
            where += ' and'

    # See getGUIPeers()
    value_name = PeerDBHandler.gui_value_name

    res_list = self._db.getAll(dbname, value_name, where)

    peer_list = []
    for item in res_list:
        # Zip the selected column names with the row to build the peer dict.
        peer = dict(zip(value_name, item))
        peer['name'] = dunno2unicode(peer['name'])
        peer['simRank'] = ranksfind(ranks,peer['permid'])
        peer['permid'] = str2bin(peer['permid'])
        peer_list.append(peer)
    return peer_list
+
def ranksfind(ranks, key):
    """Return the 1-based rank of key within ranks; -1 when ranks is None or
    key does not occur in it."""
    if ranks is None:
        return -1
    try:
        position = ranks.index(key)
    except:
        return -1
    return position + 1
+    
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteFriendshipStatsCacheDB.py b/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteFriendshipStatsCacheDB.py
new file mode 100644 (file)
index 0000000..a368148
--- /dev/null
@@ -0,0 +1,201 @@
+# Written by Ali Abbas\r
+# see LICENSE.txt for license information\r
+\r
+import sys\r
+import os\r
+import threading\r
+\r
+from BaseLib.__init__ import LIBRARYNAME\r
+from BaseLib.Core.CacheDB.sqlitecachedb import *\r
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler\r
+\r
# Schema/location constants for the friendship-statistics database.
CREATE_FRIENDSHIP_STATS_SQL_FILE = None   # resolved at runtime in init_friendship_stats()
CREATE_FRIENDSHIP_STATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', 'tribler_friendship_stats_sdb.sql')
DB_FILE_NAME = 'tribler_friendship_stats.sdb'
DB_DIR_NAME = 'sqlite'    # db file path = DB_DIR_NAME/DB_FILE_NAME
CURRENT_DB_VERSION = 2    # see FSCacheDBBaseV2.updateDB for the 1 -> 2 upgrade

DEBUG = False   # set True for verbose diagnostics on stderr
+\r
def init_friendship_stats(config, db_exception_handler = None):
    """ create friendship statistics database """
    global CREATE_FRIENDSHIP_STATS_SQL_FILE
    # Resolve the schema file relative to the installation directory.
    CREATE_FRIENDSHIP_STATS_SQL_FILE = os.path.join(config['install_dir'],
                                                    CREATE_FRIENDSHIP_STATS_SQL_FILE_POSTFIX)
    db_path = os.path.join(config['state_dir'], DB_DIR_NAME, DB_FILE_NAME)
    sqlitedb = SQLiteFriendshipStatsCacheDB.getInstance(db_exception_handler)
    # the first place to create db in Tribler
    sqlitedb.initDB(db_path, CREATE_FRIENDSHIP_STATS_SQL_FILE, current_db_version=CURRENT_DB_VERSION)
    return sqlitedb
+\r
+\r
class FSCacheDBBaseV2(SQLiteCacheDBBase):
    """ See Tribler/Core/Statistics/tribler_friendship_stats_sdb.sql 
    for a description of the various versions
    """

    def updateDB(self, fromver, tover):
        """Migrate the friendship-stats schema from version fromver to tover."""
        if DEBUG:
            print >>sys.stderr,"fscachedb2: Upgrading",fromver,tover
        if fromver == 1 and tover == 2:
            # v1 -> v2: add the crawler_permid column.
            self.execute_write(
                "ALTER TABLE FriendshipStatistics ADD COLUMN crawled_permid TEXT DEFAULT client NOT NULL;",
                commit=False)
            # Update the version stepwise so that if this works, it is stored
            # regardless of later, potentially failing updates.
            self.writeDBVersion(2, commit=False)
            self.commit()
+\r
+class SQLiteFriendshipStatsCacheDB(FSCacheDBBaseV2):\r
+    __single = None    # used for multithreaded singletons pattern\r
+    lock = threading.RLock()\r
+\r
+    @classmethod\r
+    def getInstance(cls, *args, **kw):\r
+        # Singleton pattern with double-checking to ensure that it can only create one object\r
+        if cls.__single is None:\r
+            cls.lock.acquire()   \r
+            try:\r
+                if cls.__single is None:\r
+                    cls.__single = cls(*args, **kw)\r
+            finally:\r
+                cls.lock.release()\r
+        return cls.__single\r
+    \r
+    def __init__(self, *args, **kw):\r
+        # always use getInstance() to create this object\r
+        if self.__single != None:\r
+            raise RuntimeError, "SQLiteFriendshipStatsCacheDB is singleton"\r
+        \r
+        FSCacheDBBaseV2.__init__(self, *args, **kw)\r
+    \r
+    \r
+    \r
+class FriendshipStatisticsDBHandler(BasicDBHandler):\r
+    \r
+    __single = None    # used for multi-threaded singletons pattern\r
+    lock = threading.Lock()\r
+    \r
+    def getInstance(*args, **kw):\r
+        # Singleton pattern with double-checking\r
+        if FriendshipStatisticsDBHandler.__single is None:\r
+            FriendshipStatisticsDBHandler.lock.acquire()   \r
+            try:\r
+                if FriendshipStatisticsDBHandler.__single is None:\r
+                    FriendshipStatisticsDBHandler(*args, **kw)\r
+            finally:\r
+                FriendshipStatisticsDBHandler.lock.release()\r
+        return FriendshipStatisticsDBHandler.__single\r
+    \r
+    getInstance = staticmethod(getInstance)\r
+    \r
+    def __init__(self):\r
+        if FriendshipStatisticsDBHandler.__single is not None:\r
+            raise RuntimeError, "FriendshipStatisticsDBHandler is singleton"\r
+        FriendshipStatisticsDBHandler.__single = self\r
+        db = SQLiteFriendshipStatsCacheDB.getInstance()\r
+        BasicDBHandler.__init__(self, db, 'FriendshipStatistics')\r
+        #BasicDBHandler.__init__(self, 'Peer')\r
+        #self.tableName = 'FriendshipStatistics'\r
+\r
+       \r
+    def getAllFriendshipStatistics(self, permid, last_update_time = None, range = None, sort = None, reverse = False):\r
+    \r
+        """\r
+        db keys: 'source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', \r
+                 'no_of_attempts', 'no_of_helpers' \r
+                 \r
+        @in: get_online: boolean: if true, give peers a key 'online' if there is a connection now\r
+        """\r
+        \r
+        value_name = ('source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', 'no_of_attempts',\r
+                      'no_of_helpers', 'modified_on')\r
+        where = 'request_time > '+str(last_update_time) # source_permid done below\r
+        \r
+        if range:\r
+            offset= range[0]\r
+            limit = range[1] - range[0]\r
+        else:\r
+            limit = offset = None\r
+        if sort:\r
+            desc = (not reverse) and 'desc' or ''\r
+            if sort in ('name'):\r
+                order_by = ' lower(%s) %s' % (sort, desc)\r
+            else:\r
+                order_by = ' %s %s' % (sort, desc)\r
+        else:\r
+            order_by = None\r
+            \r
+        permidstr = bin2str(permid)\r
+        res_list = self.getAll(value_name, where=where, offset= offset, limit=limit, order_by=order_by, source_permid=permidstr)\r
+\r
+        if DEBUG:\r
+            print >>sys.stderr,"FriendshipStatisticsDBHandler: getAll: result is",res_list\r
+        \r
+        return res_list\r
+    \r
+    def saveFriendshipStatisticData (self, data):\r
+        \r
+        self._db.insertMany('FriendshipStatistics', data)\r
+    \r
+    def insertFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True):\r
+       \r
+#        db keys: 'source_permid', 'target_permid', 'isForwarder', 'request_time', 'response_time', \r
+#                 'no_of_attempts', 'no_of_helpers'\r
+#        self._db.insert(self.table_name, entry=key, value=value)\r
+        \r
+        sql_insert_friendstatistics = "INSERT INTO FriendshipStatistics (source_permid, target_permid, isForwarder, request_time, response_time, no_of_attempts, no_of_helpers, modified_on) VALUES ('"+my_permid+"','"+target_permid+"',"+str(isForwarder)+","+str(current_time)+", 0 , "+str(no_of_attempts)+","+str(no_of_helpers)+","+str(current_time)+")"\r
+        \r
+        self._db.execute_write(sql_insert_friendstatistics,commit=commit)\r
+        \r
+    def updateFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True):\r
+       \r
+        sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET request_time = "+str(current_time) +", no_of_attempts = "+str(no_of_attempts)+", no_of_helpers = "+str(no_of_helpers)+", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'"\r
+        \r
+        self._db.execute_write(sql_insert_friendstatistics,commit=commit)\r
+    \r
+    def updateFriendshipResponseTime(self, my_permid, target_permid, current_time, commit = True):\r
+        \r
+                   \r
+        sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET response_time = "+str(current_time)+ ", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'"\r
+        \r
+        if DEBUG:\r
+            print >> sys.stderr, sql_insert_friendstatistics\r
+        \r
+        self._db.execute_write(sql_insert_friendstatistics,commit=commit)\r
+        \r
+    def insertOrUpdateFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True):\r
+        \r
+#        sql_entry_exists_of_the_peer = "SELECT souce_permid FROM FriendshipStatistics where source_permid = " + my_permid\r
+        if DEBUG:\r
+            print >> sys.stderr, 'Friendship record being inserted of permid'\r
+            print >> sys.stderr,  target_permid\r
+        res = self._db.getOne('FriendshipStatistics', 'target_permid', target_permid=target_permid)\r
+        \r
+        if not res:\r
+            sql_insert_friendstatistics = "INSERT INTO FriendshipStatistics (source_permid, target_permid, isForwarder, request_time, response_time, no_of_attempts, no_of_helpers, modified_on) VALUES ('"+my_permid+"','"+target_permid+"',"+str(isForwarder)+","+str(current_time)+", 0 , "+str(no_of_attempts)+","+str(no_of_helpers)+","+str(current_time)+")"\r
+        else:\r
+            sql_insert_friendstatistics = "UPDATE FriendshipStatistics SET no_of_attempts = "+str(no_of_attempts)+", no_of_helpers = "+str(no_of_helpers)+", modified_on = "+str(current_time)+" where source_permid = '"+my_permid+"' and target_permid = '"+target_permid+"'"\r
+        \r
+        if DEBUG:\r
+            print >> sys.stderr, 'result is ', res\r
+            print >> sys.stderr, sql_insert_friendstatistics\r
+            \r
+        try:    \r
+            self._db.execute_write(sql_insert_friendstatistics,commit=commit)\r
+        except:\r
+            print >> sys.stderr\r
+\r
+    \r
+    def getLastUpdateTimeOfThePeer(self, permid):\r
+        \r
+        res = self._db.getAll('FriendshipStatistics', 'source_permid', order_by='modified_on desc', limit=1)\r
+        \r
+        if not res:\r
+            return 0\r
+        else:\r
+            # todo!\r
+            return 0 # bug??? res['modified_on']\r
+             \r
+        \r
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteSeedingStatsCacheDB.py b/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteSeedingStatsCacheDB.py
new file mode 100644 (file)
index 0000000..06adfcc
--- /dev/null
@@ -0,0 +1,202 @@
+# Written by Boxun Zhang
+# see LICENSE.txt for license information
+
+import os
+from time import time
+import threading
+from traceback import print_exc
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.CacheDB.sqlitecachedb import *
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler
+from BaseLib.Core.simpledefs import *
+
+CREATE_SEEDINGSTATS_SQL_FILE = None
+CREATE_SEEDINGSTATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', 'tribler_seedingstats_sdb.sql')
+DB_FILE_NAME = 'tribler_seedingstats.sdb'
+DB_DIR_NAME = 'sqlite'    # db file path = DB_DIR_NAME/DB_FILE_NAME
+CURRENT_DB_VERSION = 1
+DEFAULT_BUSY_TIMEOUT = 10000
+MAX_SQL_BATCHED_TO_TRANSACTION = 1000   # don't change it unless carefully tested. A transaction with 1000 batched updates took 1.5 seconds
+SHOW_ALL_EXECUTE = False
+costs = []
+cost_reads = []
+
+DEBUG = False
+
+def init_seeding_stats(config, db_exception_handler = None):
+    """ create SeedingStats database
+
+    config: dict with at least 'state_dir' (where the .sdb file lives) and
+    'install_dir' (where the schema .sql file is found).
+    db_exception_handler: optional handler forwarded to the DB singleton.
+
+    Side effect: sets the module-global CREATE_SEEDINGSTATS_SQL_FILE.
+    Returns the SQLiteSeedingStatsCacheDB singleton.
+    """
+    global CREATE_SEEDINGSTATS_SQL_FILE
+    config_dir = config['state_dir']
+    install_dir = config['install_dir']
+    CREATE_SEEDINGSTATS_SQL_FILE = os.path.join(install_dir,CREATE_SEEDINGSTATS_SQL_FILE_POSTFIX)
+    sqlitedb = SQLiteSeedingStatsCacheDB.getInstance(db_exception_handler)   
+    sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME)
+    sqlitedb.initDB(sqlite_db_path, CREATE_SEEDINGSTATS_SQL_FILE)  # the first place to create db in Tribler
+    return sqlitedb
+
+class SQLiteSeedingStatsCacheDB(SQLiteCacheDBBase):
+    """Thread-safe singleton wrapper around the seeding-stats sqlite DB."""
+
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.RLock()
+    
+    @classmethod
+    def getInstance(cls, *args, **kw):
+        # Singleton pattern with double-checking to ensure that it can only create one object
+        if cls.__single is None:
+            cls.lock.acquire()   
+            try:
+                if cls.__single is None:
+                    cls.__single = cls(*args, **kw)
+            finally:
+                cls.lock.release()
+        return cls.__single
+
+    def __init__(self, *args, **kw):
+        # always use getInstance() to create this object
+        if self.__single != None:
+            raise RuntimeError, "SQLiteSeedingStatsCacheDB is singleton"
+        
+        SQLiteCacheDBBase.__init__(self, *args, **kw)
+    
+    
+class SeedingStatsDBHandler(BasicDBHandler):
+    """Singleton handler for the 'SeedingStats' table.
+
+    Records per-torrent seeding time and upload amount so seeding
+    statistics can later be crawled.
+    """
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking.  The constructor stores
+        # itself in __single, so the bare constructor call below suffices.
+        if SeedingStatsDBHandler.__single is None:
+            SeedingStatsDBHandler.lock.acquire()   
+            try:
+                if SeedingStatsDBHandler.__single is None:
+                    SeedingStatsDBHandler(*args, **kw)
+            finally:
+                SeedingStatsDBHandler.lock.release()
+        return SeedingStatsDBHandler.__single
+    
+    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        # NOTE(review): the error message says "SeedingStatDBHandler"
+        # (missing 's'); left as-is because it is a runtime string.
+        if SeedingStatsDBHandler.__single is not None:
+            raise RuntimeError, "SeedingStatDBHandler is singleton"
+        SeedingStatsDBHandler.__single = self
+        db = SQLiteSeedingStatsCacheDB.getInstance()
+        BasicDBHandler.__init__(self, db, 'SeedingStats')
+    
+    def updateSeedingStats(self, permID, reputation, dslist, interval):
+        """Insert or update one SeedingStats row per currently-seeding download.
+
+        permID: binary permid, converted with bin2str() before storage.
+        reputation: unused here -- the upload total (stats.upTotal) is stored
+                    in its place (see the NAT/Firewall comments below).
+        dslist: download-state objects; only DLSTATUS_SEEDING entries count.
+        interval: seconds added to the stored seeding time per call.
+        """
+        permID = bin2str(permID)
+        
+        seedings = []
+        
+        for item in dslist:
+            if item.get_status() == DLSTATUS_SEEDING:
+                seedings.append(item)
+                
+        commit = False
+        for i in range(0, len(seedings)):
+            ds = seedings[i]
+            
+            infohash = bin2str(ds.get_download().get_def().get_infohash())
+            
+            stats = ds.stats['stats']
+            ul = stats.upTotal
+                
+            # commit only on the last row so all writes land in one transaction
+            if i == len(seedings)-1:
+                commit = True
+                
+            res = self.existedInfoHash(infohash)
+                
+            if res is not None:
+                # res is list of ONE tuple
+                #self.updateSeedingStat(infohash, reputation, res[0][0], interval, commit)
+                
+                # NAT/Firewall & Seeding Behavior
+                # Store upload amount instead peer reputation
+                self.updateSeedingStat(infohash, ul, res[0][0], interval, commit)
+            else:
+                # Insert new record
+                #self.insertSeedingStat(infohash, permID, reputation, interval, commit)
+                
+                # NAT/Firewall & Seeding Behavior
+                # Store upload amount instead peer reputation
+                self.insertSeedingStat(infohash, permID, ul, interval, commit)
+            
+    
+    def existedInfoHash(self, infohash):
+        """Return the [(seeding_time,)] row(s) for an un-crawled infohash, or None.
+
+        None is returned both when no row exists and when the query raises;
+        callers cannot distinguish the two cases.
+        """
+                
+        sql = "SELECT seeding_time FROM SeedingStats WHERE info_hash='%s' and crawled=0"%infohash
+        
+        try:
+            cursor = self._db.execute_read(sql)
+            if cursor:
+                res = list(cursor)
+                
+                if len(res) > 0:
+                    return res
+                else:
+                    return None
+            else:
+                # something wrong, throw an exception?
+                return None
+        except:
+            return None
+    
+    def updateSeedingStat(self, infohash, reputation, seedingtime, interval, commit): 
+        # NOTE(review): SQL built by string interpolation.  infohash comes
+        # from bin2str() so it is not free-form text here, but parameterized
+        # queries would be safer.
+        try:
+            sql_update = "UPDATE SeedingStats SET seeding_time=%s, reputation=%s WHERE info_hash='%s' AND crawled=0"%(seedingtime + interval, reputation, infohash)
+            self._db.execute_write(sql_update, None, commit)
+        except:
+            print_exc()
+    
+    def insertSeedingStat(self, infohash, permID, reputation, interval, commit):
+        # New row layout: (timestamp, permID, infohash, seeding_time=interval,
+        # reputation, crawled=0)
+        try:
+            sql_insert = "INSERT INTO SeedingStats VALUES(%s, '%s', '%s', %s, %s, %s)"%(time(), permID, infohash, interval, reputation, 0)
+            self._db.execute_write(sql_insert, None, commit)
+        except:
+            print_exc()
+
+
+class SeedingStatsSettingsDBHandler(BasicDBHandler):
+    """Singleton handler for seeding-statistics crawl settings.
+
+    Bound to the 'CrawlingSettings' table; reads and writes the
+    SeedingStatsSettings table (crawling interval + enabled flag).
+    """
+    
+    __single = None    # used for multithreaded singletons pattern
+    lock = threading.Lock()
+    
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking; the constructor registers
+        # itself in __single, so the bare constructor call below suffices.
+        if SeedingStatsSettingsDBHandler.__single is None:
+            SeedingStatsSettingsDBHandler.lock.acquire()   
+            try:
+                if SeedingStatsSettingsDBHandler.__single is None:
+                    SeedingStatsSettingsDBHandler(*args, **kw)
+            finally:
+                SeedingStatsSettingsDBHandler.lock.release()
+        return SeedingStatsSettingsDBHandler.__single
+    
+    getInstance = staticmethod(getInstance)
+    
+    def __init__(self):
+        # NOTE(review): error message reuses "SeedingStatDBHandler" from the
+        # sibling class; left as-is because it is a runtime string.
+        if SeedingStatsSettingsDBHandler.__single is not None:
+            raise RuntimeError, "SeedingStatDBHandler is singleton"
+        SeedingStatsSettingsDBHandler.__single = self
+        db = SQLiteSeedingStatsCacheDB.getInstance()
+        BasicDBHandler.__init__(self, db, 'CrawlingSettings')
+    
+    def loadCrawlingSettings(self):
+        """Return all SeedingStatsSettings rows as a list, or None.
+
+        Returns None when the cursor is falsy; on exception the traceback is
+        printed and None falls out implicitly.
+        """
+        try:
+            sql_query = "SELECT * FROM SeedingStatsSettings"
+            cursor = self._db.execute_read(sql_query)
+            
+            if cursor:
+                return list(cursor)
+            else:
+                return None
+        except:
+            print_exc()
+    
+    def updateCrawlingSettings(self, args):
+        """Update crawling_interval (args[0]) and crawling_enabled (args[1])."""
+        try:
+            sql_update = "UPDATE SeedingStatsSettings SET crawling_interval=%s, crawling_enabled=%s WHERE version=1"%(args[0], args[1])
+            cursor = self._db.execute_write(sql_update)
+        except:
+            print_exc()
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py b/instrumentation/next-share/BaseLib/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py
new file mode 100644 (file)
index 0000000..0eb7c85
--- /dev/null
@@ -0,0 +1,154 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+"""
+Database wrapper to add and retrieve Video playback statistics
+"""
+
+import sys
+import os
+import thread
+from base64 import b64encode
+from time import time
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDBBase
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler
+
+CREATE_VIDEOPLAYBACK_STATS_SQL_FILE = None
+CREATE_VIDEOPLAYBACK_STATS_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'Core', 'Statistics', "tribler_videoplayback_stats.sql")
+DB_FILE_NAME = 'tribler_videoplayback_stats.sdb'
+DB_DIR_NAME = 'sqlite'    # db file path = DB_DIR_NAME/DB_FILE_NAME
+CURRENT_DB_VERSION = 2
+
+ENABLE_LOGGER = False
+DEBUG = False
+
+def init_videoplayback_stats(config, db_exception_handler = None):
+    """ create VideoPlayback statistics database
+
+    config: dict with 'state_dir' (db file location) and 'install_dir'
+    (schema .sql location).  Side effect: sets the module-global
+    CREATE_VIDEOPLAYBACK_STATS_SQL_FILE.  Returns the singleton DB wrapper,
+    with the schema migrated to CURRENT_DB_VERSION via initDB().
+    """
+    global CREATE_VIDEOPLAYBACK_STATS_SQL_FILE
+    config_dir = config['state_dir']
+    install_dir = config['install_dir']
+    CREATE_VIDEOPLAYBACK_STATS_SQL_FILE = os.path.join(install_dir,CREATE_VIDEOPLAYBACK_STATS_SQL_FILE_POSTFIX)
+    sqlitedb = SQLiteVideoPlaybackStatsCacheDB.get_instance(db_exception_handler)   
+    sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME)
+    sqlitedb.initDB(sqlite_db_path, CREATE_VIDEOPLAYBACK_STATS_SQL_FILE,current_db_version=CURRENT_DB_VERSION)  # the first place to create db in Tribler
+    return sqlitedb
+
+class SQLiteVideoPlaybackStatsCacheDBV2(SQLiteCacheDBBase):
+    """Schema migration: upgrades the playback-stats DB to version 2."""
+
+    def updateDB(self, fromver, tover):
+        # convert database version 1 --> 2
+        # 'tover' is accepted for interface compatibility but not consulted;
+        # the stored version is always CURRENT_DB_VERSION.
+        if fromver < 2:
+            sql = """
+-- Simplify the database. All info is now an event.
+
+DROP TABLE IF EXISTS playback_info;
+DROP INDEX IF EXISTS playback_info_idx;
+
+-- Simplify the database. Events are simplified to key/value
+-- pairs. Because sqlite is unable to remove a column, we are forced
+-- to DROP and re-CREATE the event table.
+--
+-- Note that this will erase previous statistics... 
+
+DROP TABLE IF EXISTS playback_event;
+DROP INDEX IF EXISTS playback_event_idx;
+
+CREATE TABLE playback_event (
+  key                   text NOT NULL,
+  timestamp             real NOT NULL,
+  event                 text NOT NULL
+);  
+
+CREATE INDEX playback_event_idx 
+  ON playback_event (key, timestamp);
+"""
+
+            self.execute_write(sql, commit=False)
+
+        # updating version stepwise so if this works, we store it
+        # regardless of later, potentially failing updates
+        self.writeDBVersion(CURRENT_DB_VERSION, commit=False)
+        self.commit()
+
+class SQLiteVideoPlaybackStatsCacheDB(SQLiteVideoPlaybackStatsCacheDBV2):
+    """
+    Wrapper around Database engine. Used to perform raw SQL queries
+    and ensure that Database schema is correct.
+
+    Thread-safe singleton; always obtain it via get_instance().
+    """
+
+    __single = None    # used for multithreaded singletons pattern
+    lock = thread.allocate_lock()
+
+    @classmethod
+    def get_instance(cls, *args, **kw):
+        # Singleton pattern with double-checking to ensure that it can only create one object
+        if cls.__single is None:
+            cls.lock.acquire()   
+            try:
+                if cls.__single is None:
+                    cls.__single = cls(*args, **kw)
+            finally:
+                cls.lock.release()
+        return cls.__single
+    
+    def __init__(self, *args, **kw):
+        # always use get_instance() to create this object
+        # NOTE(review): calls SQLiteCacheDBBase.__init__ directly; equivalent
+        # here since the V2 parent defines no __init__ of its own.
+        if self.__single != None:
+            raise RuntimeError, "SQLiteVideoPlaybackStatsCacheDB is singleton"
+        SQLiteCacheDBBase.__init__(self, *args, **kw)
+    
+class VideoPlaybackDBHandler(BasicDBHandler):
+    """
+    Interface to add and retrieve events from the database.
+
+    Manages the playback_event table. This table may contain several
+    entries for events that occur during playback such as when it was
+    started and when it was paused.
+
+    The interface of this class should match that of
+    VideoPlaybackReporter in Tribler.Player.Reporter which is used to
+    report the same information through HTTP callbacks when there is
+    no overlay network
+    """
+
+    __single = None    # used for multi-threaded singletons pattern
+    lock = thread.allocate_lock()
+
+    @classmethod
+    def get_instance(cls, *args, **kw):
+        # Singleton pattern with double-checking to ensure that it can only create one object
+        if cls.__single is None:
+            cls.lock.acquire()   
+            try:
+                if cls.__single is None:
+                    cls.__single = cls(*args, **kw)
+            finally:
+                cls.lock.release()
+        return cls.__single
+    
+    def __init__(self):
+        if VideoPlaybackDBHandler.__single is not None:
+            raise RuntimeError, "VideoPlaybackDBHandler is singleton"
+        BasicDBHandler.__init__(self, SQLiteVideoPlaybackStatsCacheDB.get_instance(), 'playback_event')
+            
+    def add_event(self, key, event):
+        """Record one (key, timestamp, event) row.
+
+        No-op unless the module-level ENABLE_LOGGER flag is set (it defaults
+        to False).  'key' is base64-encoded before storage, which also keeps
+        quote characters out of the interpolated SQL below; 'event' is
+        asserted to contain no single quotes for the same reason.
+        """
+        if ENABLE_LOGGER:
+            assert type(key) in (str, unicode)
+            # assert not "'" in key # TODO: This assert is unnecessary and breaks for certain infohashes? (Raynor Vliegendhart)
+            assert type(event) in (str, unicode)
+            assert not "'" in event
+
+            # because the key usually an infohash, and because this is
+            # usually (and incorrectly) stored in a string instead of a
+            # unicode string, this will crash the database wrapper.
+            key = b64encode(key)
+
+            if DEBUG: print >>sys.stderr, "VideoPlaybackDBHandler add_event", key, event
+            self._db.execute_write("INSERT INTO %s (key, timestamp, event) VALUES ('%s', %s, '%s')" % (self.table_name, key, time(), event))
+
+    def flush(self):
+        """
+        Flush the statistics. This is not used for database-based logging
+        """
+        pass
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/__init__.py b/instrumentation/next-share/BaseLib/Core/CacheDB/__init__.py
new file mode 100644 (file)
index 0000000..57fd4af
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/cachedb.py b/instrumentation/next-share/BaseLib/Core/CacheDB/cachedb.py
new file mode 100644 (file)
index 0000000..ff18cf4
--- /dev/null
@@ -0,0 +1,7 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+
+from sqlitecachedb import *
+from SqliteSeedingStatsCacheDB import *
+from SqliteFriendshipStatsCacheDB import *
+from SqliteVideoPlaybackStatsCacheDB import *
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/friends.py b/instrumentation/next-share/BaseLib/Core/CacheDB/friends.py
new file mode 100644 (file)
index 0000000..dc8a5a3
--- /dev/null
@@ -0,0 +1,142 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+import sys
+from time import time
+import os
+import base64
+from traceback import print_exc
+
+from BaseLib.Core.Utilities.utilities import validIP, validPort, validPermid, validName, show_permid
+from CacheDBHandler import FriendDBHandler
+from BaseLib.Core.simpledefs import NTFY_FRIENDS,NTFY_PEERS
+
+default_friend_file = 'friends.txt'
+
+DEBUG = False
+
+def init(session):
+    """Load friends.txt from the session state dir into the friends DB."""
+    friend_db = session.open_dbhandler(NTFY_FRIENDS)
+    peer_db = session.open_dbhandler(NTFY_PEERS)
+    filename = make_filename(session.get_state_dir(), default_friend_file)
+    ExternalFriendList(friend_db,peer_db,filename).updateFriendList()
+    
+def done(session):
+    """Write the current DB friends back out to friends.txt on shutdown."""
+    friend_db = session.open_dbhandler(NTFY_FRIENDS)
+    peer_db = session.open_dbhandler(NTFY_PEERS)
+    filename = make_filename(session.get_state_dir(), default_friend_file)
+    ExternalFriendList(friend_db,peer_db,filename).writeFriendList()
+    
+def make_filename(config_dir,filename):
+    """Join filename onto config_dir; return filename as-is when dir is None."""
+    if config_dir is None:
+        return filename
+    else:
+        return os.path.join(config_dir,filename)    
+
+class ExternalFriendList:
+    """Synchronize the friends database with a 'friends.txt' text file.
+
+    Each non-comment line of the file holds: name, base64 permid, ip, port
+    (comma-separated).
+    """
+    def __init__(self,friend_db,peer_db,friend_file=default_friend_file):
+        self.friend_file = friend_file
+        self.friend_db = friend_db
+        self.peer_db = peer_db
+        
+    def clean(self):    # delete friend file
+        try:
+            os.remove(self.friend_file)
+        except Exception:
+            # best effort -- the file may simply not exist
+            pass
+
+    def updateFriendList(self, friend_file=''):
+        """Read the friend file (default: self.friend_file) into the DB."""
+        if not friend_file:
+            friend_file = self.friend_file
+        self.friend_list = self.readFriendList(friend_file)
+        self.updateDB(self.friend_list)
+        #self.clean()
+        
+    def updateDB(self, friend_list):
+        # push each parsed friend dict into the friends database
+        if not friend_list:
+            return
+        for friend in friend_list:
+            self.friend_db.addExternalFriend(friend)
+
+    def getFriends(self):
+        """Return full peer records for all permids marked as friends."""
+        friends = []
+        permids = self.friend_db.getFriends()
+        for permid in permids:
+            friend = self.peer_db.getPeer(permid)
+            friends.append(friend)
+        return friends
+    
+    def deleteFriend(self, permid):
+        self.friend_db.deleteFriend(permid)
+    
+    def readFriendList(self, filename=''):
+        """ read (name, permid, friend_ip, friend_port) lines from a text file
+
+        Returns a list of {'name','permid','ip','port'} dicts; lines that
+        fail base64 decoding or validation are silently skipped.  A missing
+        file is created empty and [] is returned.
+        """
+        
+        if not filename:
+            filename = self.friend_file
+        try:
+            file = open(filename, "r")
+            friends = file.readlines()
+            file.close()
+        except IOError:    # create a new file
+            file = open(filename, "w")
+            file.close()
+            return []
+        
+        friends_info = []
+        for friend in friends:
+            if friend.strip().startswith("#"):    # skip commented lines
+                continue
+            friend_line = friend.split(',')
+            friend_info = []
+            for i in range(len(friend_line)):
+                friend_info.append(friend_line[i].strip())
+            try:
+                # permid is stored base64-encoded; decodestring wants the
+                # trailing newline
+                friend_info[1] = base64.decodestring( friend_info[1]+'\n' )
+            except:
+                continue
+            if self.validFriendList(friend_info):
+                friend = {'name':friend_info[0], 'permid':friend_info[1], 
+                          'ip':friend_info[2], 'port':int(friend_info[3])}
+                friends_info.append(friend)
+        return friends_info
+    
+    def validFriendList(self, friend_info):
+        """Return True iff friend_info has valid name/permid/ip/port fields."""
+        try:
+            if len(friend_info) < 4:
+                raise RuntimeError, "one line in friends.txt can only contain at least 4 elements"
+            validName(friend_info[0])
+            validPermid(friend_info[1])
+            validIP(friend_info[2])
+            validPort(int(friend_info[3]))
+        except Exception, msg:
+            if DEBUG:
+                print "======== reading friend list error ========"
+                print friend_info
+                print msg
+                print "==========================================="
+            return False
+        else:
+            return True
+    
+    def writeFriendList(self, filename=''):
+        """Write the current DB friends back out in friends.txt format."""
+        if not filename:
+            filename = self.friend_file
+        try:
+            file = open(filename, "w")
+        except IOError:
+            print_exc()
+            return
+        
+        friends = self.getFriends()
+        friends_to_write = self.formatForText(friends)
+        file.writelines(friends_to_write)
+        file.close()
+
+    def formatForText(self, friends):
+        # one "name, permid, ip, port" line per friend
+        lines = []
+        for friend in friends:
+            permid = show_permid(friend['permid'])
+            line = ', '.join([friend['name'], permid, friend['ip'], str(friend['port'])])
+            line += '\n'
+            lines.append(line)
+        return lines
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/maxflow.py b/instrumentation/next-share/BaseLib/Core/CacheDB/maxflow.py
new file mode 100644 (file)
index 0000000..15afa91
--- /dev/null
@@ -0,0 +1,162 @@
+import sets
+
+# Computes maximal flow in a graph
+# Adam Langley <agl@imperialviolet.org> http://www.imperialviolet.org
+# Creative Commons http://creativecommons.org/licenses/by-sa/2.0/
+
+# Adapted for Tribler
+from copy import deepcopy
+
+class Network(object):
+        """Compute the maximal flow between two nodes of a directed graph.
+
+        A network consists of nodes and arcs (edges) that link them; each arc
+        has a capacity (the maximum flow down that arc).  Implements the
+        iterative Ford-Fulkerson labeling algorithm described at
+        http://carbon.cudenver.edu/~hgreenbe/glossary/notes/maxflow-FF.pdf
+
+        'arcs' is a dict-of-dicts: arcs[source][dest] = {'cap': c, 'flow': f}.
+        Uses the deprecated Python 2 'sets' module for the scanned set.
+        """
+        
+        __slots__ = ['arcs', 'backarcs', 'nodes', 'labels']
+
+        def __init__ (self, arcs):
+                # Collect the node set and build the reverse-adjacency map
+                # (backarcs) so predecessors can be scanned in maxflow().
+                self.nodes = []
+                self.labels = {}
+
+                self.arcs = arcs
+                self.backarcs = {}
+
+                for source in arcs:
+                
+                        if not source in self.nodes:
+                                self.nodes.append(source)
+
+                        if not source in self.backarcs:
+                                self.backarcs[source] = {}
+
+                        for dest in arcs[source]:
+
+                                if not dest in self.nodes:
+                                        self.nodes.append(dest)
+
+                                if not dest in self.backarcs:
+                                        self.backarcs[dest] = {}
+                                
+                                self.backarcs[dest][source] = {'cap' : arcs[source][dest]['cap'], 'flow' : 0}
+
+
+        def min (a, b):
+                """Minimum of a and b, where -1 stands for 'unbounded'."""
+                # Inside this body 'min' resolves to the builtin, not this
+                # method (the name only exists as a class attribute).
+                if (a == -1):
+                        return b
+                if (b == -1):
+                        return a
+                return min (a, b)
+        
+        min = staticmethod (min)
+        
+        def maxflow (self, source, sink, max_distance = 10000):
+                """Return the maximum flow from the source to the sink.
+
+                Returns 0.0 when either endpoint is absent from the graph.
+                Augmenting paths longer than max_distance hops are ignored.
+                Works on deep copies, so self.arcs is left unmodified.
+                """
+                
+                if not source in self.nodes or not sink in self.nodes:
+                        return 0.0
+                
+                arcscopy = deepcopy(self.arcs)
+                backarcscopy = deepcopy(self.backarcs)
+
+                DEBUG = False
+                
+                while 1:
+                        # Labeling phase: labels[n] = ((pred, sense), slack)
+                        # where sense +1 = forward arc, -1 = backward arc, and
+                        # slack uses -1 as 'unbounded' (see Network.min).
+                        labels = {}
+                        labels[source] = ((0, 0), -1)
+                
+                        unscanned = {source: 0} # sets.Set ([source])  -- maps node -> distance from source
+                        scanned = sets.Set()
+
+                        while 1:
+                                # Select any node, x, that is labeled and unscanned
+                                
+                                for node in unscanned:
+                                    
+                                        if DEBUG:
+                                                print "Unscanned: " + str(node)
+
+                                        # To all unlabeled succ nodes
+                                        for outnode in arcscopy[node]:
+                                                
+                                                if DEBUG:
+                                                        print "to ", outnode
+                                                
+                                                if (outnode in unscanned or outnode in scanned):
+                                                        continue
+                                                arc = arcscopy[node][outnode]
+                                                # skip saturated arcs and paths beyond max_distance
+                                                if (arc['flow'] >= arc['cap']) or (unscanned[node] + 1) > max_distance:
+                                                        continue
+                                                
+                                                labels[outnode] = ((node, 1), Network.min(labels[node][1], arc['cap'] - arc['flow']))
+                                                
+                                                if DEBUG:
+                                                        print labels[outnode]
+                                                    
+                                                unscanned[outnode] = unscanned[node] + 1 
+                                                #unscanned.add(outnode)
+                                        
+                                        # To all predecessor nodes
+                                        for innode in backarcscopy[node]:
+                                                
+                                                if DEBUG:
+                                                        print "from ", innode
+                                                        
+                                                if (innode in unscanned or innode in scanned):
+                                                        continue
+                                                arc = arcscopy[innode][node]
+                                                # backward arcs can only cancel flow already present
+                                                if (arc['flow'] == 0) or (unscanned[node] + 1) > max_distance:
+                                                        continue
+                                                labels[innode] = ((node, -1), Network.min(labels[node][1], arc['flow']))
+                                                
+                                                if DEBUG:
+                                                        print labels[innode]
+                                                                                                
+                                                unscanned[innode] = unscanned[node] + 1
+                                                #unscanned.add(innode)
+                                                
+                                        del unscanned[node]
+                                        #unscanned.remove(node)
+                                        
+                                        scanned.add(node)
+        
+                                        # print labels
+                                        # restart iteration: 'unscanned' was just mutated
+                                        break;
+                                        
+                                else:
+                                        # for/else: no labeled-unscanned node remains, so no
+                                        # augmenting path exists and the flow is maximal.
+                                        # total the incoming flows to the sink
+                                        sum = 0    # NB: locally shadows the builtin 'sum'
+                                        for innode in backarcscopy[sink]:
+                                                sum += arcscopy[innode][sink]['flow']
+                                        return sum
+        
+                                if (sink in unscanned):
+                                        # sink is labeled and unscanned
+                                        break;
+
+                        # Routine B: augment flow along the labeled path by the
+                        # path slack 'et', walking back from sink to source.
+                        s = sink
+                        ((node, sense), et) = labels[s]
+                        # print "et: " + str (et)
+                        while 1:
+                                if (s == source):
+                                        break
+                                ((node, sense), epi) = labels[s]
+                                # If the first part of the label is y+
+                                if (sense == 1):
+                                        # print "  add " + str(node) + " " + str(s)
+                                        arcscopy[node][s]['flow'] += et
+                                else:
+                                        # print "  rm " + str(s) + " " + str(node)
+                                        arcscopy[s][node]['flow'] -= et
+                                s = node
+                        ##print self.arcs
+
+if (__name__ == "__main__"):
+        # Ad-hoc smoke test.  Note: sink 'q' is not a node of this graph, so
+        # maxflow() hits its endpoint guard and the second print emits 0.0.
+        n = Network ({'s' : {'a': {'cap': 20, 'flow': 0}, 'x' : {'cap' : 1, 'flow' : 0}, 'y' : {'cap' : 3, 'flow' : 0}}, 'x' : {'y' : {'cap' : 1, 'flow' : 0}, 't' : {'cap' : 3, 'flow' : 0}}, 'y' : {'x' : {'cap' : 1, 'flow' : 0}, 't' : {'cap' : 1, 'flow' : 0}}, 'a': {'b': {'cap': 20, 'flow': 0}}, 'b': {'c': {'cap': 20, 'flow': 0}}, 'c': {'t': {'cap': 20, 'flow': 0}}})
+        
+        print n.nodes
+        print n.maxflow ('s', 'q', max_distance = 2)
+        
diff --git a/instrumentation/next-share/BaseLib/Core/CacheDB/sqlitecachedb.py b/instrumentation/next-share/BaseLib/Core/CacheDB/sqlitecachedb.py
new file mode 100644 (file)
index 0000000..b9103f4
--- /dev/null
@@ -0,0 +1,1221 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+
+import sys
+import os
+from time import sleep
+from base64 import encodestring, decodestring
+import threading
+from traceback import print_exc, print_stack
+
+from BaseLib.Core.simpledefs import INFOHASH_LENGTH
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.Utilities.unicode import dunno2unicode
+
+# ONLY USE APSW >= 3.5.9-r1
+import apsw
+#support_version = (3,5,9)
+#support_version = (3,3,13)
+#apsw_version = tuple([int(r) for r in apsw.apswversion().split('-')[0].split('.')])
+##print apsw_version
+#assert apsw_version >= support_version, "Required APSW Version >= %d.%d.%d."%support_version + " But your version is %d.%d.%d.\n"%apsw_version + \
+#                        "Please download and install it from http://code.google.com/p/apsw/"
+
+##Changed from 4 to 5 by andrea for subtitles support
+CURRENT_MAIN_DB_VERSION = 5
+
+TEST_SQLITECACHEDB_UPGRADE = False
+CREATE_SQL_FILE = None
+CREATE_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'schema_sdb_v'+str(CURRENT_MAIN_DB_VERSION)+'.sql')
+DB_FILE_NAME = 'tribler.sdb'
+DB_DIR_NAME = 'sqlite'    # db file path = DB_DIR_NAME/DB_FILE_NAME
+DEFAULT_BUSY_TIMEOUT = 10000
+MAX_SQL_BATCHED_TO_TRANSACTION = 1000   # don't change it unless carefully tested. A transaction with 1000 batched updates took 1.5 seconds
+NULL = None
+icon_dir = None
+SHOW_ALL_EXECUTE = False
+costs = []
+cost_reads = []
+torrent_dir = None
+config_dir = None
+TEST_OVERRIDE = False
+
+
+DEBUG = False
+
+class Warning(Exception):
+    """Benign, expected condition (e.g. a fresh database must be created),
+    distinguished from real errors in safelyOpenTriblerDB's except clause."""
+    pass
+
+def init(config, db_exception_handler = None):
+    """ create sqlite database
+
+    Module entry point: derives file locations from the session config dict,
+    stores them in module globals, and opens/creates the cache database.
+    @config                dict with 'torrent_collecting_dir', 'state_dir',
+                           'install_dir', 'superpeer' and 'peer_icon_path' keys.
+    @db_exception_handler  optional callable passed to SQLiteCacheDB.
+    Returns the SQLiteCacheDB singleton.
+    """
+    global CREATE_SQL_FILE
+    global icon_dir
+    global torrent_dir
+    global config_dir
+    torrent_dir = os.path.abspath(config['torrent_collecting_dir'])
+    config_dir = config['state_dir']
+    install_dir = config['install_dir']
+    CREATE_SQL_FILE = os.path.join(install_dir,CREATE_SQL_FILE_POSTFIX)
+    sqlitedb = SQLiteCacheDB.getInstance(db_exception_handler)
+    
+    if config['superpeer']:
+        # superpeers keep the cache DB in RAM only
+        sqlite_db_path = ':memory:'
+    else:   
+        sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME)
+    print >>sys.stderr,"cachedb: init: SQL FILE",sqlite_db_path        
+
+    icon_dir = os.path.abspath(config['peer_icon_path'])
+
+    sqlitedb.initDB(sqlite_db_path, CREATE_SQL_FILE)  # the first place to create db in Tribler
+    return sqlitedb
+        
+def done(config_dir):
+    """Close the singleton's connection for the calling thread.
+    The config_dir argument is unused; kept for interface compatibility."""
+    SQLiteCacheDB.getInstance().close()
+
+def make_filename(config_dir,filename):
+    if config_dir is None:
+        return filename
+    else:
+        return os.path.join(config_dir,filename)    
+    
+def bin2str(bin):
+    # Full BASE64-encoded 
+    # Encode a binary value (permid/infohash) as a single-line base64 string
+    # suitable for storing in a TEXT column.
+    return encodestring(bin).replace("\n","")
+    
+def str2bin(str):
+    # Inverse of bin2str: decode a base64 string back to its binary value.
+    return decodestring(str)
+
+def print_exc_plus():
+    """
+    Print the usual traceback information, followed by a listing of all the
+    local variables in each frame.
+    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52215
+    http://initd.org/pub/software/pysqlite/apsw/3.3.13-r1/apsw.html#augmentedstacktraces
+    """
+
+    # Walk the traceback of the exception currently being handled, collecting
+    # frames from outermost to innermost.
+    tb = sys.exc_info()[2]
+    stack = []
+    
+    while tb:
+        stack.append(tb.tb_frame)
+        tb = tb.tb_next
+
+    print_exc()
+    print >> sys.stderr, "Locals by frame, innermost last"
+
+    for frame in stack:
+        print >> sys.stderr
+        print >> sys.stderr, "Frame %s in %s at line %s" % (frame.f_code.co_name,
+                                             frame.f_code.co_filename,
+                                             frame.f_lineno)
+        for key, value in frame.f_locals.items():
+            print >> sys.stderr, "\t%20s = " % key,
+            #We have to be careful not to cause a new error in our error
+            #printer! Calling str() on an unknown object could cause an
+            #error we don't want.
+            try:                   
+                print >> sys.stderr, value
+            except:
+                print >> sys.stderr, "<ERROR WHILE PRINTING VALUE>"
+
+class safe_dict(dict): 
+    """dict wrapper that serialises the most common operations with an RLock.
+    NOTE(review): only the methods overridden below are locked; iteration and
+    methods such as keys()/items()/get() fall through to plain dict and are
+    NOT protected — confirm callers rely only on the locked operations.
+    """
+    def __init__(self, *args, **kw): 
+        self.lock = threading.RLock() 
+        dict.__init__(self, *args, **kw) 
+        
+    def __getitem__(self, key): 
+        self.lock.acquire()
+        try:
+            return dict.__getitem__(self, key) 
+        finally:
+            self.lock.release()
+            
+    def __setitem__(self, key, value): 
+        self.lock.acquire()
+        try:
+            dict.__setitem__(self, key, value) 
+        finally:
+            self.lock.release()
+            
+    def __delitem__(self, key): 
+        self.lock.acquire()
+        try:
+            dict.__delitem__(self, key) 
+        finally:
+            self.lock.release()
+
+    def __contains__(self, key):
+        self.lock.acquire()
+        try:
+            return dict.__contains__(self, key) 
+        finally:
+            self.lock.release()
+            
+    def values(self):
+        self.lock.acquire()
+        try:
+            return dict.values(self) 
+        finally:
+            self.lock.release()
+
+class SQLiteCacheDBBase:
+    # Class-wide lock serialising first-time database initialisation (initDB)
+    # across threads.
+    lock = threading.RLock()
+
+    def __init__(self,db_exception_handler=None):
+        """Cache-DB helper keeping one apsw cursor per thread.
+        @db_exception_handler  optional callable notified of DB errors.
+        """
+        self.exception_handler = db_exception_handler
+        self.cursor_table = safe_dict()    # {thread_name:cur}
+        self.cache_transaction_table = safe_dict()   # {thread_name:[sql]
+        self.class_variables = safe_dict({'db_path':None,'busytimeout':None})  # busytimeout is in milliseconds
+        
+        self.permid_id = safe_dict()    # cache: binary permid -> Peer.peer_id
+        self.infohash_id = safe_dict()  # cache: binary infohash -> Torrent.torrent_id
+        self.show_execute = False
+        
+        #TODO: All global variables must be protected to be thread safe?
+        self.status_table = None
+        self.category_table = None
+        self.src_table = None
+        self.applied_pragma_sync_norm = False
+        
+    def __del__(self):
+        # Best-effort cleanup of this thread's connection on garbage collection.
+        self.close()
+    
+    def close(self, clean=False):
+        """Close the calling thread's cursor/connection and drop its cached state.
+        @clean  when True (test suites), also reset all shared caches so the
+                instance can be re-initialised from scratch.
+        """
+        # only close the connection object in this thread, don't close other thread's connection object
+        thread_name = threading.currentThread().getName()
+        cur = self.getCursor(create=False)
+        
+        if cur:
+            con = cur.getconnection()
+            cur.close()
+            con.close()
+            con = None
+            del self.cursor_table[thread_name]
+            # Arno, 2010-01-25: Remove entry in cache_transaction_table for this thread
+            try:
+                if thread_name in self.cache_transaction_table.keys(): 
+                    del self.cache_transaction_table[thread_name]
+            except:
+                print_exc()
+        if clean:    # used for test suite
+            self.permid_id = safe_dict()
+            self.infohash_id = safe_dict()
+            self.exception_handler = None
+            self.class_variables = safe_dict({'db_path':None,'busytimeout':None})
+            self.cursor_table = safe_dict()
+            self.cache_transaction_table = safe_dict()
+            
+            
+    # --------- static functions --------
+    def getCursor(self, create=True):
+        """Return the calling thread's cursor; with create=True, open a new
+        connection for this thread on demand (using the stored db_path)."""
+        thread_name = threading.currentThread().getName()
+        curs = self.cursor_table
+        cur = curs.get(thread_name, None)    # return [cur, cur, lib] or None
+        #print >> sys.stderr, '-------------- getCursor::', len(curs), time(), curs.keys()
+        if cur is None and create:
+            self.openDB(self.class_variables['db_path'], self.class_variables['busytimeout'])    # create a new db obj for this thread
+            cur = curs.get(thread_name)
+        
+        return cur
+       
+    def openDB(self, dbfile_path=None, busytimeout=DEFAULT_BUSY_TIMEOUT):
+        """ 
+        Open a SQLite database. Only one and the same database can be opened.
+        @dbfile_path       The path to store the database file. 
+                           Set dbfile_path=':memory:' to create a db in memory.
+        @busytimeout       Set the maximum time, in milliseconds, that SQLite will wait if the database is locked. 
+        Returns the (possibly pre-existing) cursor for the calling thread.
+        """
+
+        # already opened a db in this thread, reuse it
+        thread_name = threading.currentThread().getName()
+        #print >>sys.stderr,"sqlcachedb: openDB",dbfile_path,thread_name
+        if thread_name in self.cursor_table:
+            #assert dbfile_path == None or self.class_variables['db_path'] == dbfile_path
+            return self.cursor_table[thread_name]
+
+        assert dbfile_path, "You must specify the path of database file"
+        
+        if dbfile_path.lower() != ':memory:':
+            # make sure the parent directory exists before apsw opens the file
+            db_dir,db_filename = os.path.split(dbfile_path)
+            if db_dir and not os.path.isdir(db_dir):
+                os.makedirs(db_dir)            
+        
+        con = apsw.Connection(dbfile_path)
+        con.setbusytimeout(busytimeout)
+
+        cur = con.cursor()
+        self.cursor_table[thread_name] = cur
+        
+        if not self.applied_pragma_sync_norm:
+            # http://www.sqlite.org/pragma.html
+            # When synchronous is NORMAL, the SQLite database engine will still
+            # pause at the most critical moments, but less often than in FULL 
+            # mode. There is a very small (though non-zero) chance that a power
+            # failure at just the wrong time could corrupt the database in 
+            # NORMAL mode. But in practice, you are more likely to suffer a 
+            # catastrophic disk failure or some other unrecoverable hardware 
+            # fault.
+            #
+            self.applied_pragma_sync_norm = True 
+            cur.execute("PRAGMA synchronous = NORMAL;")
+            
+        return cur
+    
+    def createDBTable(self, sql_create_table, dbfile_path, busytimeout=DEFAULT_BUSY_TIMEOUT):
+        """ 
+        Create a SQLite database.
+        @sql_create_table  The sql statements to create tables in the database. 
+                           Every statement must end with a ';'.
+        @dbfile_path       The path to store the database file. Set dbfile_path=':memory:' to creates a db in memory.
+        @busytimeout       Set the maximum time, in milliseconds, that SQLite will wait if the database is locked.
+                           Default = 10000 milliseconds   
+        """
+        cur = self.openDB(dbfile_path, busytimeout)
+        print dbfile_path
+        cur.execute(sql_create_table)  # it is suggested to include begin & commit in the script
+
+    def initDB(self, sqlite_filepath,
+               create_sql_filename = None, 
+               busytimeout = DEFAULT_BUSY_TIMEOUT,
+               check_version = True,
+               current_db_version = CURRENT_MAIN_DB_VERSION):
+        """ 
+        Create and initialize a SQLite database given a sql script. 
+        Only one db can be opened. If the given dbfile_path is different with the opened DB file, warn and exit
+        @configure_dir     The directory containing 'bsddb' directory 
+        @sql_filename      The path of sql script to create the tables in the database
+                           Every statement must end with a ';'. 
+        @busytimeout       Set the maximum time, in milliseconds, to wait and retry 
+                           if failed to acquire a lock. Default = 5000 milliseconds  
+        Returns the cursor for the calling thread.
+        """
+        if create_sql_filename is None:
+            create_sql_filename=CREATE_SQL_FILE
+        try:
+            # class-level lock: only one thread may perform first-time init
+            self.lock.acquire()
+
+            # verify db path identity
+            class_db_path = self.class_variables['db_path']
+            if sqlite_filepath is None:     # reuse the opened db file?
+                if class_db_path is not None:   # yes, reuse it
+                    # reuse the busytimeout
+                    return self.openDB(class_db_path, self.class_variables['busytimeout'])
+                else:   # no db file opened
+                    raise Exception, "You must specify the path of database file when open it at the first time"
+            else:
+                if class_db_path is None:   # the first time to open db path, store it
+
+                    #print 'quit now'
+                    #sys.exit(0)
+                    # open the db if it exists (by converting from bsd) and is not broken, otherwise create a new one
+                    # it will update the db if necessary by checking the version number
+                    self.safelyOpenTriblerDB(sqlite_filepath, create_sql_filename, busytimeout, check_version=check_version, current_db_version=current_db_version)
+                    
+                    self.class_variables = {'db_path': sqlite_filepath, 'busytimeout': int(busytimeout)}
+                    
+                    return self.openDB()    # return the cursor, won't reopen the db
+                    
+                elif sqlite_filepath != class_db_path:  # not the first time to open db path, check if it is the same
+                    raise Exception, "Only one database file can be opened. You have opened %s and are trying to open %s." % (class_db_path, sqlite_filepath) 
+                        
+        finally:
+            self.lock.release()
+
+    def safelyOpenTriblerDB(self, dbfile_path, sql_create, busytimeout=DEFAULT_BUSY_TIMEOUT, check_version=False, current_db_version=None):
+        """
+        open the db if possible, otherwise create a new one
+        update the db if necessary by checking the version number
+        
+        safeOpenDB():    
+            try:
+                if sqlite db doesn't exist:
+                    raise Error
+                open sqlite db
+                read sqlite_db_version
+                if sqlite_db_version dosen't exist:
+                    raise Error
+            except:
+                close and delete sqlite db if possible
+                create new sqlite db file without sqlite_db_version
+                write sqlite_db_version at last
+                commit
+                open sqlite db
+                read sqlite_db_version
+                # must ensure these steps after except will not fail, otherwise force to exit
+            
+            if sqlite_db_version < current_db_version:
+                updateDB(sqlite_db_version, current_db_version)
+                commit
+                update sqlite_db_version at last
+                commit
+        """
+        try:
+            if not os.path.isfile(dbfile_path):
+                raise Warning("No existing database found. Attempting to creating a new database %s" % repr(dbfile_path))
+            
+            cur = self.openDB(dbfile_path, busytimeout)
+            if check_version:
+                sqlite_db_version = self.readDBVersion()
+                if sqlite_db_version == NULL or int(sqlite_db_version)<1:
+                    # unreadable/invalid version: treat the db as broken and rebuild
+                    raise NotImplementedError
+        except Exception, exception:
+            if isinstance(exception, Warning):
+                # user friendly warning to log the creation of a new database
+                print >>sys.stderr, exception
+
+            else:
+                # user unfriendly exception message because something went wrong
+                print_exc()
+            
+            # recovery path: discard any broken db file and recreate from the sql script
+            if os.path.isfile(dbfile_path):
+                self.close(clean=True)
+                os.remove(dbfile_path)
+            
+            if os.path.isfile(sql_create):
+                f = open(sql_create)
+                sql_create_tables = f.read()
+                f.close()
+            else:
+                raise Exception, "Cannot open sql script at %s" % os.path.realpath(sql_create)
+            
+            self.createDBTable(sql_create_tables, dbfile_path, busytimeout)  
+            if check_version:
+                sqlite_db_version = self.readDBVersion()
+            
+        if check_version:
+            self.checkDB(sqlite_db_version, current_db_version)
+
+    def checkDB(self, db_ver, curr_ver):
+        # read MyDB and check the version number.
+        # Trigger an upgrade when versions differ, are missing, or when a
+        # previous upgrade was interrupted (marker file 'upgradingdb.txt').
+        if not db_ver or not curr_ver:
+            self.updateDB(db_ver,curr_ver)
+            return
+        db_ver = int(db_ver)
+        curr_ver = int(curr_ver)
+        #print "check db", db_ver, curr_ver
+        if db_ver != curr_ver or \
+               (not config_dir is None and os.path.exists(os.path.join(config_dir, "upgradingdb.txt"))): 
+            self.updateDB(db_ver,curr_ver)
+            
+    def updateDB(self,db_ver,curr_ver):
+        # Schema migration hook; intentionally a no-op here — presumably
+        # overridden by a subclass (TODO in original).
+        pass    #TODO
+
+    def readDBVersion(self):
+        cur = self.getCursor()
+        sql = u"select value from MyInfo where entry='version'"
+        res = self.fetchone(sql)
+        if res:
+            find = list(res)
+            return find[0]    # throw error if something wrong
+        else:
+            return None
+    
+    def writeDBVersion(self, version, commit=True):
+        """Persist the schema version into MyInfo (entry='version')."""
+        sql = u"UPDATE MyInfo SET value=? WHERE entry='version'"
+        self.execute_write(sql, [version], commit=commit)
+    
+    def show_sql(self, switch):
+        # temporary show the sql executed
+        # (debug aid: when True, _execute prints every statement to stderr)
+        self.show_execute = switch 
+    
+    # --------- generic functions -------------
+        
+    def commit(self):
+        # Flush this thread's queued statements as one batched transaction.
+        self.transaction()
+
+    def _execute(self, sql, args=None):
+        """Run sql on the calling thread's cursor, logging it when tracing is
+        on; on failure, print diagnostics and re-raise so _transaction can
+        roll back or retry."""
+        cur = self.getCursor()
+
+        if SHOW_ALL_EXECUTE or self.show_execute:
+            thread_name = threading.currentThread().getName()
+            print >> sys.stderr, '===', thread_name, '===\n', sql, '\n-----\n', args, '\n======\n'
+        try:
+            if args is None:
+                return cur.execute(sql)
+            else:
+                return cur.execute(sql, args)
+        except Exception, msg:
+            if True:
+                print_exc()
+                print_stack()
+                print >> sys.stderr, "cachedb: execute error:", Exception, msg 
+                thread_name = threading.currentThread().getName()
+                print >> sys.stderr, '===', thread_name, '===\nSQL Type:', type(sql), '\n-----\n', sql, '\n-----\n', args, '\n======\n'
+                #return None
+                # ARNODB: this is incorrect, it should reraise the exception
+                # such that _transaction can rollback or recommit. 
+                # This bug already reported by Johan
+            raise msg
+        
+
+    def execute_read(self, sql, args=None):
+        # this is only called for reading. If you want to write the db, always use execute_write or executemany
+        return self._execute(sql, args)
+    
+    def execute_write(self, sql, args=None, commit=True):
+        """Queue a write statement for this thread; flush immediately when
+        commit=True, otherwise it stays queued until the next commit()."""
+        self.cache_transaction(sql, args)
+        if commit:
+            self.commit()
+            
+    def executemany(self, sql, args, commit=True):
+        """Queue the same statement once per argument tuple in args; flush as
+        a batched transaction when commit=True."""
+        thread_name = threading.currentThread().getName()
+        if thread_name not in self.cache_transaction_table:
+            self.cache_transaction_table[thread_name] = []
+        all = [(sql, arg) for arg in args]
+        self.cache_transaction_table[thread_name].extend(all)
+
+        if commit:
+            self.commit()
+            
+    def cache_transaction(self, sql, args=None):
+        # Append (sql, args) to the calling thread's pending-statement queue.
+        thread_name = threading.currentThread().getName()
+        if thread_name not in self.cache_transaction_table:
+            self.cache_transaction_table[thread_name] = []
+        self.cache_transaction_table[thread_name].append((sql, args))
+                    
+    def transaction(self, sql=None, args=None):
+        """Drain this thread's queued statements, concatenating them into
+        batched transactions of at most MAX_SQL_BATCHED_TO_TRANSACTION
+        statements each, and execute the batches via _transaction."""
+        if sql:
+            self.cache_transaction(sql, args)
+        
+        thread_name = threading.currentThread().getName()
+        
+        n = 0
+        sql_full = ''
+        arg_list = []
+        sql_queue = self.cache_transaction_table.get(thread_name,None)
+        if sql_queue:
+            while True:
+                try:
+                    _sql,_args = sql_queue.pop(0)
+                except IndexError:
+                    break
+                
+                _sql = _sql.strip()
+                if not _sql:
+                    continue
+                if not _sql.endswith(';'):
+                    _sql += ';'
+                sql_full += _sql + '\n'
+                if _args != None:
+                    arg_list += list(_args)
+                n += 1
+                
+                # if too many sql in cache, split them into batches to prevent processing and locking DB for a long time
+                # TODO: optimize the value of MAX_SQL_BATCHED_TO_TRANSACTION
+                if n % MAX_SQL_BATCHED_TO_TRANSACTION == 0:
+                    self._transaction(sql_full, arg_list)
+                    sql_full = ''
+                    arg_list = []
+                    
+            # flush the final (possibly partial) batch
+            self._transaction(sql_full, arg_list)
+            
+    def _transaction(self, sql, args=None):
+        # Wrap the concatenated statements in BEGIN/COMMIT; on failure defer
+        # to the busy-retry/rollback handler.
+        if sql:
+            sql = 'BEGIN TRANSACTION; \n' + sql + 'COMMIT TRANSACTION;'
+            try:
+                self._execute(sql, args)
+            except Exception,e:
+                self.commit_retry_if_busy_or_rollback(e,0,sql=sql)
+            
+    def commit_retry_if_busy_or_rollback(self,e,tries,sql=None):
+        """ 
+        Arno:
+        SQL_BUSY errors happen at the beginning of the experiment,
+        very quickly after startup (e.g. 0.001 s), so the busy timeout
+        is not honoured for some reason. After the initial errors,
+        they no longer occur.
+        Retries COMMIT up to 5 times with exponential backoff on BusyError;
+        any other error rolls back and re-raises.
+        """
+        print >>sys.stderr,"sqlcachedb: commit_retry: after",str(e),repr(sql)
+        
+        if str(e).startswith("BusyError"):
+            try:
+                self._execute("COMMIT")
+            except Exception,e2: 
+                if tries < 5:   #self.max_commit_retries
+                    # Spec is unclear whether next commit will also has 
+                    # 'busytimeout' seconds to try to get a write lock.
+                    sleep(pow(2.0,tries+2)/100.0)
+                    self.commit_retry_if_busy_or_rollback(e2,tries+1)
+                else:
+                    self.rollback(tries)
+                    raise Exception,e2
+        else:
+            self.rollback(tries)
+            m = "cachedb: TRANSACTION ERROR "+threading.currentThread().getName()+' '+str(e)
+            raise Exception, m
+            
+            
+    def rollback(self, tries):
+        """Attempt ROLLBACK on the current connection; re-raise with context
+        if even the rollback fails (which may itself be harmless, see note)."""
+        print_exc()
+        try:
+            self._execute("ROLLBACK")
+        except Exception, e:
+            # May be harmless, see above. Unfortunately they don't specify
+            # what the error is when an attempt is made to roll back
+            # an automatically rolled back transaction.
+            m = "cachedb: ROLLBACK ERROR "+threading.currentThread().getName()+' '+str(e)
+            #print >> sys.stderr, 'SQLite Database', m
+            raise Exception, m
+   
+        
+    # -------- Write Operations --------
+    def insert(self, table_name, commit=True, **argv):
+        """INSERT one row; keyword arguments map column names to values.
+        The single-column case is special-cased because str(tuple) of a
+        1-tuple would emit a trailing comma in the column list."""
+        if len(argv) == 1:
+            sql = 'INSERT INTO %s (%s) VALUES (?);'%(table_name, argv.keys()[0])
+        else:
+            questions = '?,'*len(argv)
+            sql = 'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(argv.keys()), questions[:-1])
+        self.execute_write(sql, argv.values(), commit)
+    
+    def insertMany(self, table_name, values, keys=None, commit=True):
+        """ values must be a list of tuples """
+
+        # one '?' placeholder per column in the first tuple
+        questions = u'?,'*len(values[0])
+        if keys is None:
+            sql = u'INSERT INTO %s VALUES (%s);'%(table_name, questions[:-1])
+        else:
+            sql = u'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(keys), questions[:-1])
+        self.executemany(sql, values, commit=commit)
+    
+    def update(self, table_name, where=None, commit=True, **argv):
+        """UPDATE table_name; each kwarg is either column=value (SET col=?)
+        or column=(operator, value) for expressions like col=('+', 1)."""
+        sql = u'UPDATE %s SET '%table_name
+        arg = []
+        for k,v in argv.iteritems():
+            if type(v) is tuple:
+                sql += u'%s %s ?,' % (k, v[0])
+                arg.append(v[1])
+            else:
+                sql += u'%s=?,' % k
+                arg.append(v)
+        sql = sql[:-1]
+        if where != None:
+            sql += u' where %s'%where
+        self.execute_write(sql, arg, commit)
+        
+    def delete(self, table_name, commit=True, **argv):
+        sql = u'DELETE FROM %s WHERE '%table_name
+        arg = []
+        for k,v in argv.iteritems():
+            if type(v) is tuple:
+                sql += u'%s %s ? AND ' % (k, v[0])
+                arg.append(v[1])
+            else:
+                sql += u'%s=? AND ' % k
+                arg.append(v)
+        sql = sql[:-5]
+        self.execute_write(sql, argv.values(), commit)
+    
+    # -------- Read Operations --------
+    def size(self, table_name):
+        """Return the number of rows in table_name."""
+        num_rec_sql = u"SELECT count(*) FROM %s;"%table_name
+        result = self.fetchone(num_rec_sql)
+        return result
+
+    def fetchone(self, sql, args=None):
+        # returns NULL: if the result is null 
+        # return None: if it doesn't found any match results
+        # Single-column first rows are unwrapped to a scalar; multi-column
+        # rows are returned as the row sequence itself.
+        find = self.execute_read(sql, args)
+        if not find:
+            return NULL
+        else:
+            find = list(find)
+            if len(find) > 0:
+                find = find[0]
+            else:
+                return NULL
+        if len(find)>1:
+            return find
+        else:
+            return find[0]
+           
+    def fetchall(self, sql, args=None, retry=0):
+        """Return all result rows as a list ([] when the read returns None).
+        The retry parameter is unused; kept for interface compatibility."""
+        res = self.execute_read(sql, args)
+        if res != None:
+            find = list(res)
+            return find
+        else:
+            return []   # should it return None?
+    
+    def getOne(self, table_name, value_name, where=None, conj='and', **kw):
+        """ value_name could be a string, a tuple of strings, or '*' 
+        Build and run a single-row SELECT; kwargs become conj-joined
+        conditions (value or (operator, value) tuple), combined with the
+        optional raw 'where' clause. Returns fetchone()'s result.
+        NOTE(review): table/column names and 'where' are interpolated into
+        the SQL string — callers must not pass untrusted input here.
+        """
+
+        if isinstance(value_name, tuple):
+            value_names = u",".join(value_name)
+        elif isinstance(value_name, list):
+            value_names = u",".join(value_name)
+        else:
+            value_names = value_name
+            
+        if isinstance(table_name, tuple):
+            table_names = u",".join(table_name)
+        elif isinstance(table_name, list):
+            table_names = u",".join(table_name)
+        else:
+            table_names = table_name
+            
+        sql = u'select %s from %s'%(value_names, table_names)
+
+        if where or kw:
+            sql += u' where '
+        if where:
+            sql += where
+            if kw:
+                sql += u' %s '%conj
+        if kw:
+            arg = []
+            for k,v in kw.iteritems():
+                if type(v) is tuple:
+                    operator = v[0]
+                    arg.append(v[1])
+                else:
+                    operator = "="
+                    arg.append(v)
+                sql += u' %s %s ? ' % (k, operator)
+                sql += conj
+            sql = sql[:-len(conj)]
+        else:
+            arg = None
+
+        # print >> sys.stderr, 'SQL: %s %s' % (sql, arg)
+        return self.fetchone(sql,arg)
+    
+    def getAll(self, table_name, value_name, where=None, group_by=None, having=None, order_by=None, limit=None, offset=None, conj='and', **kw):
+        """ value_name could be a string, or a tuple of strings 
+            order by is represented as order_by
+            group by is represented as group_by
+        Multi-row counterpart of getOne; also supports GROUP BY / HAVING /
+        ORDER BY / LIMIT / OFFSET. Returns a list of rows ([] on no match).
+        NOTE(review): identifiers and raw clauses are string-interpolated —
+        do not pass untrusted input.
+        """
+        if isinstance(value_name, tuple):
+            value_names = u",".join(value_name)
+        elif isinstance(value_name, list):
+            value_names = u",".join(value_name)
+        else:
+            value_names = value_name
+        
+        if isinstance(table_name, tuple):
+            table_names = u",".join(table_name)
+        elif isinstance(table_name, list):
+            table_names = u",".join(table_name)
+        else:
+            table_names = table_name
+            
+        sql = u'select %s from %s'%(value_names, table_names)
+        
+        if where or kw:
+            sql += u' where '
+        if where:
+            sql += where
+            if kw:
+                sql += u' %s '%conj
+        if kw:
+            arg = []
+            for k,v in kw.iteritems():
+                if type(v) is tuple:
+                    operator = v[0]
+                    arg.append(v[1])
+                else:
+                    operator = "="
+                    arg.append(v)
+
+                sql += u' %s %s ?' % (k, operator)
+                sql += conj
+            sql = sql[:-len(conj)]
+        else:
+            arg = None
+        
+        if group_by != None:
+            sql += u' group by ' + group_by
+        if having != None:
+            sql += u' having ' + having
+        if order_by != None:
+            sql += u' order by ' + order_by    # you should add desc after order_by to reversely sort, i.e, 'last_seen desc' as order_by
+        if limit != None:
+            sql += u' limit %d'%limit
+        if offset != None:
+            sql += u' offset %d'%offset
+
+        try:
+            return self.fetchall(sql, arg) or []
+        except Exception, msg:
+            print >> sys.stderr, "sqldb: Wrong getAll sql statement:", sql
+            raise Exception, msg
+    
+    # ----- Tribler DB operations ----
+
+    #------------- useful functions for multiple handlers ----------
+    def insertPeer(self, permid, update=True, commit=True, **argv):
+        """ Insert a peer. permid is the binary permid.
+        If the peer is already in db and update is True, update the peer.
+        Returns True iff the peer already existed.
+        """
+        peer_id = self.getPeerID(permid)
+        peer_existed = False
+        if 'name' in argv:
+            # normalise the peer name to unicode before storing
+            argv['name'] = dunno2unicode(argv['name'])
+        if peer_id != None:
+            peer_existed = True
+            if update:
+                where=u'peer_id=%d'%peer_id
+                self.update('Peer', where, commit=commit, **argv)
+        else:
+            self.insert('Peer', permid=bin2str(permid), commit=commit, **argv)
+        return peer_existed
+                
+    def deletePeer(self, permid=None, peer_id=None, force=True, commit=True):
+        """Delete a peer by permid or peer_id. With force=False only peers
+        that are neither friends nor superpeers are removed. Returns True
+        iff the peer is gone afterwards; also evicts the permid cache entry."""
+        if peer_id is None:
+            peer_id = self.getPeerID(permid)
+            
+        deleted = False
+        if peer_id != None:
+            if force:
+                self.delete('Peer', peer_id=peer_id, commit=commit)
+            else:
+                self.delete('Peer', peer_id=peer_id, friend=0, superpeer=0, commit=commit)
+            deleted = not self.hasPeer(permid, check_db=True)
+            if deleted and permid in self.permid_id:
+                self.permid_id.pop(permid)
+
+        return deleted
+                
+    def getPeerID(self, permid):
+        assert isinstance(permid, str), permid
+        # permid must be binary
+        # Fast path: in-memory cache of permid -> peer_id.
+        if permid in self.permid_id:
+            return self.permid_id[permid]
+        
+        sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?"
+        peer_id = self.fetchone(sql_get_peer_id, (bin2str(permid),))
+        if peer_id != None:
+            self.permid_id[permid] = peer_id
+        
+        return peer_id
+    
+    def hasPeer(self, permid, check_db=False):
+        if not check_db:
+            return bool(self.getPeerID(permid))
+        else:
+            permid_str = bin2str(permid)
+            sql_get_peer_id = "SELECT peer_id FROM Peer WHERE permid==?"
+            peer_id = self.fetchone(sql_get_peer_id, (permid_str,))
+            if peer_id is None:
+                return False
+            else:
+                return True
+    
+    def insertInfohash(self, infohash, check_dup=False, commit=True):
+        """ Insert an infohash. infohash is binary """
+        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+        # already cached => already in the Torrent table; skip the insert
+        if infohash in self.infohash_id:
+            if check_dup:
+                print >> sys.stderr, 'sqldb: infohash to insert already exists', `infohash`
+            return
+        
+        infohash_str = bin2str(infohash)
+        sql_insert_torrent = "INSERT INTO Torrent (infohash) VALUES (?)"
+        self.execute_write(sql_insert_torrent, (infohash_str,), commit)
+    
+    def deleteInfohash(self, infohash=None, torrent_id=None, commit=True):
+        assert infohash is None or isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert infohash is None or len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+        if torrent_id is None:
+            torrent_id = self.getTorrentID(infohash)
+            
+        if torrent_id != None:
+            self.delete('Torrent', torrent_id=torrent_id, commit=commit)
+            if infohash in self.infohash_id:
+                self.infohash_id.pop(infohash)
+    
+    def getTorrentID(self, infohash):
+        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+        if infohash in self.infohash_id:
+            return self.infohash_id[infohash]
+        
+        sql_get_torrent_id = "SELECT torrent_id FROM Torrent WHERE infohash==?"
+        tid = self.fetchone(sql_get_torrent_id, (bin2str(infohash),))
+        if tid != None:
+            self.infohash_id[infohash] = tid
+        return tid
+        
+    def getInfohash(self, torrent_id):
+        sql_get_infohash = "SELECT infohash FROM Torrent WHERE torrent_id==?"
+        arg = (torrent_id,)
+        ret = self.fetchone(sql_get_infohash, arg)
+        ret = str2bin(ret)
+        return ret
+    
+    def getTorrentStatusTable(self):
+        if self.status_table is None:
+            st = self.getAll('TorrentStatus', ('lower(name)', 'status_id'))
+            self.status_table = dict(st)
+        return self.status_table
+    
+    def getTorrentCategoryTable(self):
+        # The key is in lower case
+        if self.category_table is None:
+            ct = self.getAll('Category', ('lower(name)', 'category_id'))
+            self.category_table = dict(ct)
+        return self.category_table
+    
+    def getTorrentSourceTable(self):
+        # Don't use lower case because some URLs are case sensitive
+        if self.src_table is None:
+            st = self.getAll('TorrentSource', ('name', 'source_id'))
+            self.src_table = dict(st)
+        return self.src_table
+
+    def test(self):
+        res1 = self.getAll('Category', '*')
+        res2 = len(self.getAll('Peer', 'name', 'name is not NULL'))
+        return (res1, res2)
+
+
class SQLiteCacheDBV5(SQLiteCacheDBBase):
    # Migration layer: upgrades an on-disk cache database, schema version
    # by schema version, up to version 5.
    def updateDB(self, fromver, tover):
        """Upgrade the database schema from version `fromver` up to v5.

        Each `if fromver < N` block applies the SQL patch that brings an
        N-1 schema up to version N; the patches are cumulative and are
        committed together once the stored version number is bumped.
        """

        # bring database up to version 2, if necessary        
        if fromver < 2:
            sql = """

-- Patch for BuddyCast 4

ALTER TABLE MyPreference ADD COLUMN click_position INTEGER DEFAULT -1;
ALTER TABLE MyPreference ADD COLUMN reranking_strategy INTEGER DEFAULT -1;
ALTER TABLE Preference ADD COLUMN click_position INTEGER DEFAULT -1;
ALTER TABLE Preference ADD COLUMN reranking_strategy INTEGER DEFAULT -1;
CREATE TABLE ClicklogSearch (
                     peer_id INTEGER DEFAULT 0,
                     torrent_id INTEGER DEFAULT 0,
                     term_id INTEGER DEFAULT 0,
                     term_order INTEGER DEFAULT 0
                     );
CREATE INDEX idx_search_term ON ClicklogSearch (term_id);
CREATE INDEX idx_search_torrent ON ClicklogSearch (torrent_id);


CREATE TABLE ClicklogTerm (
                    term_id INTEGER PRIMARY KEY AUTOINCREMENT DEFAULT 0,
                    term VARCHAR(255) NOT NULL,
                    times_seen INTEGER DEFAULT 0 NOT NULL
                    );
CREATE INDEX idx_terms_term ON ClicklogTerm(term);  
    
"""       
            
            self.execute_write(sql, commit=False)

        
        if fromver < 3:
            sql = """
-- Patch for Local Peer Discovery
            
ALTER TABLE Peer ADD COLUMN is_local integer DEFAULT 0;
"""       
            self.execute_write(sql, commit=False)

        if fromver < 4:
            sql="""
-- V2: Patch for VoteCast

DROP TABLE IF EXISTS ModerationCast;
DROP INDEX IF EXISTS moderationcast_idx;

DROP TABLE IF EXISTS Moderators;
DROP INDEX IF EXISTS moderators_idx;

DROP TABLE IF EXISTS VoteCast;
DROP INDEX IF EXISTS votecast_idx;

CREATE TABLE VoteCast (
mod_id text,
voter_id text,
vote integer,
time_stamp integer
);

CREATE INDEX mod_id_idx
on VoteCast 
(mod_id);

CREATE INDEX voter_id_idx
on VoteCast 
(voter_id);

CREATE UNIQUE INDEX votecast_idx
ON VoteCast
(mod_id, voter_id);
            
--- patch for BuddyCast 5 : Creation of Popularity table and relevant stuff

CREATE TABLE Popularity (
                         torrent_id INTEGER,
                         peer_id INTEGER,
                         msg_receive_time NUMERIC,
                         size_calc_age NUMERIC,
                         num_seeders INTEGER DEFAULT 0,
                         num_leechers INTEGER DEFAULT 0,
                         num_of_sources INTEGER DEFAULT 0
                     );

CREATE INDEX Message_receive_time_idx 
  ON Popularity 
   (msg_receive_time);

CREATE INDEX Size_calc_age_idx 
  ON Popularity 
   (size_calc_age);

CREATE INDEX Number_of_seeders_idx 
  ON Popularity 
   (num_seeders);

CREATE INDEX Number_of_leechers_idx 
  ON Popularity 
   (num_leechers);

CREATE UNIQUE INDEX Popularity_idx
  ON Popularity
   (torrent_id, peer_id, msg_receive_time);

-- v4: Patch for ChannelCast, Search

CREATE TABLE ChannelCast (
publisher_id text,
publisher_name text,
infohash text,
torrenthash text,
torrentname text,
time_stamp integer,
signature text
);

CREATE INDEX pub_id_idx
on ChannelCast
(publisher_id);

CREATE INDEX pub_name_idx
on ChannelCast
(publisher_name);

CREATE INDEX infohash_ch_idx
on ChannelCast
(infohash);

----------------------------------------

CREATE TABLE InvertedIndex (
word               text NOT NULL,
torrent_id         integer
);

CREATE INDEX word_idx
on InvertedIndex
(word);

CREATE UNIQUE INDEX invertedindex_idx
on InvertedIndex
(word,torrent_id);

----------------------------------------

-- Set all similarity to zero because we are using a new similarity
-- function and the old values no longer correspond to the new ones
UPDATE Peer SET similarity = 0;
UPDATE Torrent SET relevance = 0;

"""
            self.execute_write(sql, commit=False)
        if fromver < 5:
            sql=\
"""
--------------------------------------
-- Creating Subtitles (future RichMetadata) DB
----------------------------------
CREATE TABLE Metadata (
  metadata_id integer PRIMARY KEY ASC AUTOINCREMENT NOT NULL,
  publisher_id text NOT NULL,
  infohash text NOT NULL,
  description text,
  timestamp integer NOT NULL,
  signature text NOT NULL,
  UNIQUE (publisher_id, infohash),
  FOREIGN KEY (publisher_id, infohash) 
    REFERENCES ChannelCast(publisher_id, infohash) 
    ON DELETE CASCADE -- the fk constraint is not enforced by sqlite
);

CREATE INDEX infohash_md_idx
on Metadata(infohash);

CREATE INDEX pub_md_idx
on Metadata(publisher_id);


CREATE TABLE Subtitles (
  metadata_id_fk integer,
  subtitle_lang text NOT NULL,
  subtitle_location text,
  checksum text NOT NULL,
  UNIQUE (metadata_id_fk,subtitle_lang),
  FOREIGN KEY (metadata_id_fk) 
    REFERENCES Metadata(metadata_id) 
    ON DELETE CASCADE, -- the fk constraint is not enforced by sqlite
  
  -- ISO639-2 uses 3 characters for lang codes
  CONSTRAINT lang_code_length 
    CHECK ( length(subtitle_lang) == 3 ) 
);


CREATE INDEX metadata_sub_idx
on Subtitles(metadata_id_fk);

-- Stores the subtitles that peers have as an integer bitmask
 CREATE TABLE SubtitlesHave (
    metadata_id_fk integer,
    peer_id text NOT NULL,
    have_mask integer NOT NULL,
    received_ts integer NOT NULL, --timestamp indicating when the mask was received
    UNIQUE (metadata_id_fk, peer_id),
    FOREIGN KEY (metadata_id_fk)
      REFERENCES Metadata(metadata_id)
      ON DELETE CASCADE, -- the fk constraint is not enforced by sqlite

    -- 32 bit unsigned integer
    CONSTRAINT have_mask_length
      CHECK (have_mask >= 0 AND have_mask < 4294967296)
);

CREATE INDEX subtitles_have_idx
on SubtitlesHave(metadata_id_fk);

-- this index can boost queries
-- ordered by timestamp on the SubtitlesHave DB
CREATE INDEX subtitles_have_ts
on SubtitlesHave(received_ts);

"""
            self.execute_write(sql, commit=False)
            
        # updating version stepwise so if this works, we store it
        # regardless of later, potentially failing updates
        self.writeDBVersion(CURRENT_MAIN_DB_VERSION, commit=False)
        self.commit()
        
        # now the start the process of parsing the torrents to insert into 
        # InvertedIndex table. 
        if TEST_SQLITECACHEDB_UPGRADE:
            state_dir = "."
        else:
            from BaseLib.Core.Session import Session
            session = Session.get_instance()
            state_dir = session.get_state_dir()
        # NOTE(review): in the TEST_SQLITECACHEDB_UPGRADE branch 'session' is
        # never bound; upgradeTorrents() reads it inside a bare try/except and
        # so falls back to name-based keywords -- confirm this is intended.
        tmpfilename = os.path.join(state_dir,"upgradingdb.txt")
        if fromver < 4 or os.path.exists(tmpfilename):
            def upgradeTorrents():
                # fetch some un-inserted torrents to put into the InvertedIndex
                sql = """
                SELECT torrent_id, name, torrent_file_name
                FROM Torrent
                WHERE torrent_id NOT IN (SELECT DISTINCT torrent_id FROM InvertedIndex)
                AND torrent_file_name IS NOT NULL
                LIMIT 20"""
                records = self.fetchall(sql)
                
                if len(records) == 0:
                    # upgradation is complete and hence delete the temp file
                    os.remove(tmpfilename) 
                    if DEBUG: print >> sys.stderr, "DB Upgradation: temp-file deleted", tmpfilename
                    return 
                    
                for torrent_id, name, torrent_file_name in records:
                    try:
                        abs_filename = os.path.join(session.get_torrent_collecting_dir(), torrent_file_name)
                        if not os.path.exists(abs_filename):
                            raise RuntimeError(".torrent file not found. Use fallback.")
                        torrentdef = TorrentDef.load(abs_filename)
                        torrent_name = torrentdef.get_name_as_unicode()
                        keywords = Set(split_into_keywords(torrent_name))
                        for filename in torrentdef.get_files_as_unicode():
                            keywords.update(split_into_keywords(filename))

                    except:
                        # failure... most likely the .torrent file
                        # is invalid

                        # use keywords from the torrent name
                        # stored in the database
                        torrent_name = dunno2unicode(name)
                        keywords = Set(split_into_keywords(torrent_name))

                    # store the keywords in the InvertedIndex
                    # table in the database
                    if len(keywords) > 0:
                        values = [(keyword, torrent_id) for keyword in keywords]
                        self.executemany(u"INSERT OR REPLACE INTO InvertedIndex VALUES(?, ?)", values, commit=False)
                        if DEBUG:
                            print >> sys.stderr, "DB Upgradation: Extending the InvertedIndex table with", len(values), "new keywords for", torrent_name

                # now commit, after parsing the batch of torrents
                self.commit()
                
                # upgradation not yet complete; comeback after 5 sec
                tqueue.add_task(upgradeTorrents, 5) 


            # Create an empty file to mark the process of upgradation.
            # In case this process is terminated before completion of upgradation,
            # this file remains even though fromver >= 4 and hence indicating that 
            # rest of the torrents need to be inserted into the InvertedIndex!
            
            # ensure the temp-file is created, if it is not already
            try:
                open(tmpfilename, "w")
                if DEBUG: print >> sys.stderr, "DB Upgradation: temp-file successfully created"
            except:
                if DEBUG: print >> sys.stderr, "DB Upgradation: failed to create temp-file"
            
            if DEBUG: print >> sys.stderr, "Upgrading DB .. inserting into InvertedIndex"
            # These names are locals of updateDB captured by the upgradeTorrents
            # closure; they are resolved when the task runs, not at definition
            # time, so importing them after the def is safe.
            from BaseLib.Utilities.TimedTaskQueue import TimedTaskQueue
            from sets import Set
            from BaseLib.Core.Search.SearchManager import split_into_keywords
            from BaseLib.Core.TorrentDef import TorrentDef

            # start the upgradation after 10 seconds
            tqueue = TimedTaskQueue("UpgradeDB")
            tqueue.add_task(upgradeTorrents, 10)
+
class SQLiteCacheDB(SQLiteCacheDBV5):
    # Process-wide singleton facade over the cache database.
    __single = None    # used for multithreaded singletons pattern

    @classmethod
    def getInstance(cls, *args, **kw):
        # Singleton pattern with double-checking to ensure that it can only create one object
        # NOTE(review): 'cls.lock' is presumably defined on a base class
        # (not visible in this chunk) -- confirm.
        if cls.__single is None:
            cls.lock.acquire()   
            try:
                if cls.__single is None:
                    cls.__single = cls(*args, **kw)
                    #print >>sys.stderr,"SqliteCacheDB: getInstance: created is",cls,cls.__single
            finally:
                cls.lock.release()
        return cls.__single
    
    def __init__(self, *args, **kargs):
        # always use getInstance() to create this object
        
        # ARNOCOMMENT: why isn't the lock used on this read?!
        
        if self.__single != None:
            raise RuntimeError, "SQLiteCacheDB is singleton"
        # SQLiteCacheDBV5 defines no __init__ of its own, so the base class
        # initializer is invoked directly.
        SQLiteCacheDBBase.__init__(self, *args, **kargs)
+    
if __name__ == '__main__':
    # Minimal manual smoke test: point the cache DB at the state dir given
    # on the command line and run the built-in self test.
    config = {
        'state_dir': sys.argv[1],
        'install_dir': u'.',
        'peer_icon_path': u'.',
    }
    sqlite_test = init(config)
    sqlite_test.test()
+
diff --git a/instrumentation/next-share/BaseLib/Core/ClosedSwarm/ClosedSwarm.py b/instrumentation/next-share/BaseLib/Core/ClosedSwarm/ClosedSwarm.py
new file mode 100644 (file)
index 0000000..614e954
--- /dev/null
@@ -0,0 +1,605 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+import time
+import os.path
+
+from base64 import encodestring, decodestring
+from M2Crypto.EC import pub_key_from_der
+
+from BaseLib.Core.Overlay import permid
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+
+# Constants to be put into BaseLib.Core.BitTornado.BT1.MessageID.py
+# Also update all the protocol stuff there (flag the extension)
+
+
+# Parent exception - all exceptions thrown by the ClosedSwarm class
+# are children of this class
class ClosedSwarmException(Exception):
    """Root of the exception hierarchy used by the closed swarm code."""
    pass
+
+# Specialized exceptions
class MissingKeyException(ClosedSwarmException):
    """Raised when no key is available for the requested operation."""
    pass
+
class MissingCertificateException(ClosedSwarmException):
    """Raised when a required certificate (POA) is not available."""
    pass
+
class BadMessageException(ClosedSwarmException):
    """Raised when a malformed closed swarm message is received."""
    pass
+
class WrongSwarmException(ClosedSwarmException):
    """Raised when a message refers to a different swarm than expected."""
    pass
+
class InvalidSignatureException(ClosedSwarmException):
    """Raised when a signature check fails."""
    pass
+
class InvalidPOAException(ClosedSwarmException):
    """Raised when a Proof-of-Access fails validation."""
    pass
+
class POAExpiredException(ClosedSwarmException):
    """Raised when a Proof-of-Access is past its expiry time."""
    pass
+    
+# Some helper functions
+
def pubkey_from_der(der_key):
    """
    Build an EC public key object from a base64-encoded DER key.
    """
    raw_der = decodestring(der_key)
    return pub_key_from_der(raw_der)
+
def generate_cs_keypair(keypair_filename=None, pubkey_filename=None):
    """
    Generate an EC keypair suitable for a Closed Swarm.

    If filenames are given, the keypair and/or public key are also
    written to disk.  Returns (keypair, base64-encoded public key).
    """
    keypair = permid.generate_keypair()
    if keypair_filename:
        permid.save_keypair(keypair, keypair_filename)

    der_pub = str(keypair.pub().get_der())
    pubkey = encodestring(der_pub).replace("\n", "")
    if pubkey_filename:
        permid.save_pub_key(keypair, pubkey_filename)

    return keypair, pubkey
+
def read_cs_keypair(keypair_filename):
    """
    Load a Closed Swarm keypair from the given file.
    """
    return permid.read_keypair(keypair_filename)
+
def save_cs_keypair(keypair, keypairfilename):
    """
    Write a Closed Swarm keypair to the given file (unencrypted).
    """
    return keypair.save_key(keypairfilename, None)
+
def read_cs_pubkey(pubkey_filename):
    """
    Read and return the public key of a torrent from a file.

    Fix: the original left the file handle open (relying on garbage
    collection to close it); close it explicitly.
    """
    pubkey_file = open(pubkey_filename, "r")
    try:
        return pubkey_file.read()
    finally:
        pubkey_file.close()
+
def write_poa_to_file(filename, poa):
    """
    Dump the POA to the given file in serialized form; returns filename.

    Fix: the original never closed the file handle, so the data was only
    flushed whenever the handle happened to be garbage collected.
    """
    target = open(filename, "wb")
    try:
        target.write(poa.serialize())
    finally:
        target.close()
    return filename
+
def read_poa_from_file(filename):
    """
    Read and return a POA object from a file. Throws exception if
    the file was not found or the POA could not be deserialized.

    Fix: close the file handle explicitly instead of leaking it.
    """
    if not os.path.exists(filename):
        raise Exception("File '%s' not found"%filename)

    source = open(filename, "rb")
    try:
        data = source.read()
    finally:
        source.close()
    return POA.deserialize(data)
+
+# Some POA helpers
def trivial_get_poa(path, perm_id, swarm_id):
    """
    Look for a POA file for the given permid,swarm_id.

    The file name is '<b64(perm_id)>.<b64(swarm_id)>.poa' with newlines,
    slashes and backslashes stripped from the base64 parts.
    """
    filename = encodestring(perm_id).replace("\n","")
    filename = filename.replace("/","")
    filename = filename.replace("\\","")

    t_id  = encodestring(swarm_id).replace("\n","")
    t_id = t_id.replace("/","")
    # Fix: the original repeated the "/" replace and never stripped
    # backslashes (copy/paste slip); mirror the perm_id sanitization.
    # Base64 output never contains "\\", so file names are unchanged.
    t_id = t_id.replace("\\","")

    poa_path = os.path.join(path, filename + "." + t_id + ".poa")

    return read_poa_from_file(poa_path)
+        
def trivial_save_poa(path, perm_id, swarm_id, poa):
    """
    Save a POA under '<b64(perm_id)>.<b64(swarm_id)>.poa' in `path`,
    creating the directory if needed.  Returns the file name written.
    """
    filename = encodestring(perm_id).replace("\n","")
    filename = filename.replace("/","")
    filename = filename.replace("\\","")

    t_id  = encodestring(swarm_id).replace("\n","")
    t_id = t_id.replace("/","")
    # Fix: the original repeated the "/" replace and never stripped
    # backslashes (copy/paste slip); mirror the perm_id sanitization.
    # Base64 output never contains "\\", so file names are unchanged.
    t_id = t_id.replace("\\","")

    # if the path does not exist, try to create it
    if not os.path.exists(path):
        os.makedirs(path)

    poa_path = os.path.join(path, filename + "." + t_id + ".poa")
    
    return write_poa_to_file(poa_path, poa)
+
+
class POA:
    """
    Proof of access wrapper
    """
    # Wire format (see serialize/serialize_to_list):
    # [torrent_id, torrent_pub_key, node_pub_key, expire_time, signature]
    
    def __init__(self, torrent_id, torrent_pub_key, node_pub_key,
                 signature="", expire_time=0):
        """
        Create a new POA for this torrent.

        expire_time == 0 means the POA never expires (see verify()).
        """
        self.torrent_id = torrent_id
        self.torrent_pub_key = torrent_pub_key
        self.node_pub_key = node_pub_key
        self.signature = signature
        self.expire_time = expire_time

    def serialize_to_list(self):
        """
        Serialize to a list of entries
        """
        return [self.torrent_id,
                self.torrent_pub_key,
                self.node_pub_key,
                self.expire_time,
                self.signature]
        
    def deserialize_from_list(lst):
        """
        Deserialize a POA from a list of elements.

        The POA object should be verified after deserializing
        """
        if not lst or len(lst) < 5:
            raise InvalidPOAException("Bad list")

        torrent_id = lst[0]
        torrent_pub_key = lst[1]
        node_pub_key = lst[2]
        expire_time = lst[3]
        signature = lst[4]
        return POA(torrent_id, torrent_pub_key,
                   node_pub_key, signature, expire_time)

    # pre-decorator staticmethod registration (old-style, matches the file)
    deserialize_from_list = staticmethod(deserialize_from_list)
    
    def serialize(self):
        """
        Return a bencoded, serialized POA
        """
        lst = [self.torrent_id,
               self.torrent_pub_key,
               self.node_pub_key,
               self.expire_time,
               self.signature]
        return bencode(lst)

    def deserialize(encoded):
        """
        De-serialize a serialized POA. Returns a POA object, or raises
        InvalidPOAException if the POA was bad
        """
        if not encoded:
            raise InvalidPOAException("Cannot deserialize nothing")
        
        try:
            lst = bdecode(encoded)
            if len(lst) < 5:
                raise InvalidPOAException("Too few entries (got %d, "
                                          "expected 5)"%len(lst))
            return POA(lst[0], lst[1],
                       lst[2], expire_time=lst[3], signature=lst[4])
        except Exception, e:
            raise InvalidPOAException("De-serialization failed (%s)"%e)


    deserialize = staticmethod(deserialize)

        
    def get_torrent_pub_key(self):
        """
        Return the base64 encoded torrent pub key for this POA
        """
        return encodestring(self.torrent_pub_key).replace("\n","")
        
    def verify(self):
        """
        Throws an exception if the POA does not hold or has expired
        """

        # expire_time == 0 disables the expiry check entirely
        if self.expire_time and \
               self.expire_time < time.mktime(time.gmtime()):
            raise POAExpiredException("Expired")
        
        # NOTE(review): the signature covers only (torrent_id,
        # torrent_pub_key, node_pub_key); expire_time is NOT signed and
        # could be altered without detection -- confirm this is intended.
        try:
            lst = [self.torrent_id, 
                   self.torrent_pub_key, 
                   self.node_pub_key]
            b_list = bencode(lst)
            digest = permid.sha(b_list).digest()
            pub = pub_key_from_der(self.torrent_pub_key)
            if not pub.verify_dsa_asn1(digest, self.signature):
                raise InvalidPOAException("Proof of access verification failed")
        except Exception, e:
            raise InvalidPOAException("Bad POA: %s"%e)
        
    def sign(self, torrent_key_pair):
        """
        Sign the POA
        """
        
        # Must bencode exactly the same triple that verify() checks.
        lst = [self.torrent_id, 
                self.torrent_pub_key, 
                self.node_pub_key]
        b_list = bencode(lst)
        digest = permid.sha(b_list).digest()

        self.signature = torrent_key_pair.sign_dsa_asn1(digest)

    def save(self, filename):
        # Write the serialized POA to disk; returns the filename.
        target = open(filename,"wb")
        target.write(self.serialize())
        target.close()
        return filename

    def load(filename):
        """
        Read and return a POA object from a file. Throws exception if
        the file was not found or the POA could not be deserialized
        """
        if not os.path.exists(filename):
            raise Exception("File '%s' not found"%filename)
    
        data = open(filename,"rb").read()
        return POA.deserialize(data)

    load = staticmethod(load)
+
+        
+        
def create_poa(torrent_id, torrent_keypair, pub_permid, expire_time=0):
    """
    Create and return a signed 'proof of access' (POA) for the given node.
    Notice that this function requires the full keypair of the torrent.
    An expire_time of 0 means the POA never expires.
    """
    poa = POA(torrent_id, 
              str(torrent_keypair.pub().get_der()),
              pub_permid,
              expire_time=expire_time)
    poa.sign(torrent_keypair)
    return poa
+
+
+
class ClosedSwarm:
    """
    This is a class that can authenticate two peers to participate in
    a closed swarm.
    The certificate given must be the "proof of access" to the torrent
    in question

    How to use:
    For the initiator:
    cs = ClosedSwarm(my_keypair, torrent_id, torrent_pubkey, poa)

    node a (initiator)                | node b
    msg1 = cs_a.a_create_challenge()  |  
    send(msg1)                        | msg1 = recv()
                                      | msg2 = cs.b_create_challenge(msg1)
    msg2 = recv()                     | send(msg2)
    msg3 = cs.a_provide_poa_message(msg2)|
    send(msg3)                        | msg3 = recv()
                                      | msg4 = b_provide_poa_message(msg3)
    msg4 = recv()                     | send(msg4)
    cs.a_check_poa_message(msg4)      |
    if cs.is_remote_node_authorized():| if cs.is_remote_node_authorized():
    ...

    The protocol is allowed to stop after msg3, which means that node b
    will not be authorized. This will happen if node b is seeding, or is
    not authorized.
    
    If something bad happens (bad messages, invalid signatures or keys etc),
    exceptions are thrown.
    
    All exceptions thrown are children of ClosedSwarmException
        
    """
    # Protocol state machine values
    IDLE = 0
    EXPECTING_RETURN_CHALLENGE = 1   # A sent challenge to B, expects challenge
    EXPECTING_INITIATOR_RESPONSE = 2 # B sent challenge to A, expects POA
    SEND_INITIATOR_RESPONSE = 3      # A sent POA to B, expects POA
    COMPLETED = 4                    # Nothing more expected
    
    def __init__(self, my_keypair, 
                 torrent_id, torrent_pubkeys,
                 poa): 

        if poa:
            if not poa.__class__ == POA:
                raise Exception("POA is not of class POA, but of class %s"%poa.__class__)
            
        assert torrent_pubkeys.__class__ == list

        self.state = self.IDLE

        self.my_keypair = my_keypair
        self.pub_permid = str(my_keypair.pub().get_der())

        self.torrent_id = torrent_id
        self.torrent_pubkeys = torrent_pubkeys
        self.poa = poa
        self.remote_node_authorized = False

        self.nonce_a = None
        self.nonce_b = None
        self.remote_nonce = None # Shortcut
        self.my_nonce = None # Shortcut
        
        # A POA signed with a key that is not one of the torrent's keys is
        # silently dropped here (the node then behaves as if it had none).
        if self.poa: # Allow nodes to support CS but not have a POA (e.g. if they are seeding)
            if self.poa.get_torrent_pub_key() not in self.torrent_pubkeys:
                import sys
                print >> sys.stderr, "Bad POA for this torrent (wrong torrent key!)"
                self.poa = None
                
    def is_remote_node_authorized(self):
        """
        Returns True iff the remote node is authorized to receive traffic
        """
        return self.remote_node_authorized

    def set_poa(self, poa):
        """
        Set the POA of the closed swarm run
        """
        assert poa.__class__ == POA
        self.poa = poa
        
    def give_up(self):
        """
        Give up the protocol - set to completed
        """
        self.state = self.COMPLETED
        
    def is_incomplete(self):
        """
        Not completed the CS exchange yet
        """
        return self.state != self.COMPLETED

    def _create_challenge_msg(self, msg_id):
        """
        Create the first message (from both nodes)
        """
        [self.my_nonce, my_nonce_bencoded] = permid.generate_challenge()
        # Serialize this message
        return [msg_id,
                self.torrent_id,
                self.my_nonce]
        

    def a_create_challenge(self): 
        """
        1st message
        Initiate a challenge, returns a list
        """
        assert self.state == self.IDLE
        self.state = self.EXPECTING_RETURN_CHALLENGE
        return self._create_challenge_msg(CS_CHALLENGE_A)

    def b_create_challenge(self, lst):
        """
        2nd message
        Return a message that can be sent in reply to the given challenge.
        Throws exception on bad message or if this cannot be done
        BadMessageException - Message was bad
        MissingKeyException - Don't have the necessary keys
        MissingCertificateException - Don't have a certificate
        """
        assert self.state == self.IDLE
        self.state = self.EXPECTING_INITIATOR_RESPONSE

        # List should be [INITIAL_CHALLENGE, torrent_id, nonce]
        # NOTE(review): the message text below says "expected 2" although
        # the check requires 3 elements.
        if len(lst) != 3:
            raise BadMessageException("Bad number of elements in message, expected 2, got %d"%len(lst))
        if lst[0] != CS_CHALLENGE_A:
            raise BadMessageException("Expected initial challenge, got something else")
        [torrent_id, nonce_a] = lst[1:]

        # Now we generate the response
        if self.torrent_id != torrent_id:
            raise WrongSwarmException("Expected %s, got %s"%(self.torrent_id,
                                                             torrent_id))

        # Save the remote nonce too
        self.remote_nonce = nonce_a
        
        # We got a correct challenge for the correct torrent, make our message
        return self._create_challenge_msg(CS_CHALLENGE_B)

    def _create_poa_message(self, msg_id, nonce_a, nonce_b):
        """
        Create the POA exchange message (messages 3 and 4).
        """
        assert msg_id
        assert nonce_a
        assert nonce_b
        # NOTE(review): with asserts enabled this makes the
        # MissingCertificateException branch below unreachable.
        assert self.poa
        
        # Provide the certificate 
        if not self.poa:
            raise MissingCertificateException("Missing certificate")

        msg = [msg_id] + self.poa.serialize_to_list()

        # Add signature
        lst = [nonce_a,
               nonce_b,
               self.poa.serialize()]
        
        b_list = bencode(lst)
        digest = permid.sha(b_list).digest()
        sig = self.my_keypair.sign_dsa_asn1(digest)
        msg.append(sig)

        return msg

    def _validate_poa_message(self, lst, nonce_a, nonce_b):
        """
        Validate an incoming POA message - throw exception if bad.
        Returns the POA if successful
        """
        assert nonce_a
        assert nonce_b
        
        if len(lst) != 7:
            raise BadMessageException("Require 7 elements, got %d"%len(lst))
        
        poa = POA.deserialize_from_list(lst[1:-1])
        sig = lst[-1]
        assert poa.node_pub_key
        
        if poa.torrent_id != self.torrent_id:
            raise WrongSwarmException("Wrong swarm")

        if poa.get_torrent_pub_key() not in self.torrent_pubkeys:
            raise InvalidPOAException("Bad POA for this torrent")

        # Check the signature
        lst = [nonce_a,
               nonce_b,
               poa.serialize()]
        import sys
        b_list = bencode(lst)
        digest = permid.sha(b_list).digest()
        try:
            pub = pub_key_from_der(poa.node_pub_key)
        except:
            print >> sys.stderr, "The node_pub_key is no good"
            print >> sys.stderr, poa.node_pub_key
            raise Exception("Node's public key is no good...")
            
        if not pub.verify_dsa_asn1(digest, sig):
            raise InvalidSignatureException("Freshness test failed")
            
        # Passed the freshness test, now check the certificate
        poa.verify() # Throws exception if bad
        return poa

    
    def a_provide_poa_message(self, lst):
        """
        3rd message
        Got a reply to an initial challenge.  Returns
        the challenge sent by the remote node
        """
        assert self.state == self.EXPECTING_RETURN_CHALLENGE
        #self.state = self.SEND_INITIATOR_RESPONSE
        # NOTE(review): jumping straight to COMPLETED here means
        # a_check_poa_message()'s SEND_INITIATOR_RESPONSE assertion can
        # never hold -- confirm the intended message flow.
        self.state = self.COMPLETED # Not sure we get a POA from the remote node
        if len(lst) != 3:
            raise BadMessageException("Require 3 elements, got %d"%\
                                     len(lst))
        if lst[0] != CS_CHALLENGE_B:
            raise BadMessageException("Expected RETURN_CHALLENGE, got '%s'"%lst[0])
        if lst[1] != self.torrent_id:
            raise WrongSwarmException("POA for wrong swarm")

        self.remote_nonce = lst[2]
        msg = self._create_poa_message(CS_POA_EXCHANGE_A, self.my_nonce, self.remote_nonce)
        return msg
            

    def b_provide_poa_message(self, lst, i_am_seeding=False):
        """
        4rd message
        Got a reply to an initial challenge.  Returns
        the challenge sent by the remote node or None if i_am_seeding is true
        """
        assert self.state == self.EXPECTING_INITIATOR_RESPONSE
        self.state = self.COMPLETED

        if lst[0] != CS_POA_EXCHANGE_A:
            import sys
            print >> sys.stderr, "Not CS_POA_EXCHANGE_A"
            raise BadMessageException("Expected POA EXCHANGE")

        # Validation failure is deliberately non-fatal: B keeps going so A
        # can still decide whether to authorize B.
        try:
            self._validate_poa_message(lst, self.remote_nonce,
                                       self.my_nonce)
            self.remote_node_authorized = True
        except Exception, e:
            self.remote_node_authorized = False
            import sys
            print >> sys.stderr, "POA could not be validated:",e
            #raise e // The remote node failed, but we can still make it!

        if i_am_seeding:
            return None
        
        msg = self._create_poa_message(CS_POA_EXCHANGE_B, self.remote_nonce, self.my_nonce)
        return msg

    def a_check_poa_message(self, lst):
        """
        Verify receiption of 4th message
        """
        assert self.state == self.SEND_INITIATOR_RESPONSE
        self.state = self.COMPLETED

        if lst[0] != CS_POA_EXCHANGE_B:
            raise BadMessageException("Expected POA EXCHANGE")

        self._validate_poa_message(lst, self.my_nonce, self.remote_nonce)

        # Remote node authorized!
        self.remote_node_authorized = True
+
diff --git a/instrumentation/next-share/BaseLib/Core/ClosedSwarm/PaymentIntegration.py b/instrumentation/next-share/BaseLib/Core/ClosedSwarm/PaymentIntegration.py
new file mode 100644 (file)
index 0000000..89132a0
--- /dev/null
@@ -0,0 +1,267 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+#
+# Arno TODO: move this to ../Tools/, wx not allowed in ../Core
+#
+
+import wx
+import sys
+import urllib
+import re
+
+import xmlrpclib # Not needed for proper payment system integration
+from base64 import encodestring,decodestring
+
+from BaseLib.Core.ClosedSwarm import ClosedSwarm
+
+
+class PaymentSystem:
+
+    def __init__(self, perm_id, swarm_id, mobile_number=None):
+        self.mobile_number = mobile_number
+        self.perm_id = perm_id
+        self.swarm_id = swarm_id
+        self.request_sent = None
+        
+    def set_phone_number(self, mobile_number):
+        self.mobile_number = mobile_number
+
+
+    def request_code(self):
+        if self.request_sent == self.mobile_number:
+            import sys
+            print >> sys.stderr, "Refusing to send new request to same number"
+            
+        data = urllib.urlencode({"mobile": self.mobile_number, "request": "code", "swarm_id":self.swarm_id, "nosend": "off", "debug": "off"})
+        
+        f = urllib.urlopen("http://daccer.for-the.biz/smps.php", data)
+        s = f.read()
+        f.close()
+        p = re.compile(r"error=(\S+)", re.MULTILINE)
+        m = p.search(s)
+        error = m.group(1)
+        self.request_sent = self.mobile_number
+        
+        # TODO: Check for errors and throw exceptions
+        return error
+
+    def verify_code(self, code):
+        import sys
+        print >> sys.stderr, {"request": "validate", "code": code, "mobile": self.mobile_number, "perm_id": self.perm_id, "swarm_id": self.swarm_id}
+        
+        data = urllib.urlencode({"request": "validate", "code": code, "mobile": self.mobile_number, "perm_id": self.perm_id, "swarm_id": self.swarm_id})
+        f = urllib.urlopen("http://daccer.for-the.biz/smps.php", data)
+        s = f.read()
+        f.close()
+        p = re.compile(r"code=(\w+)", re.MULTILINE)
+        m = p.search(s)
+        if m != None:
+            validation = m.group(1)
+        else:
+            validation = None
+        p = re.compile(r"poa=(.*)..error=", re.DOTALL)
+        m = p.search(s)
+        if m != None:
+            poa = m.group(1)
+        else:
+            poa = None
+        p = re.compile(r"error=(\S+)", re.MULTILINE)
+        m = p.search(s)
+        if m != None:
+            error = m.group(1)
+        else:
+            error = ",no_error_return"
+
+        print >>sys.stderr,"Verify Code returned ",s,"with error:",error
+
+        # TODO: Check for errors and throw exceptions
+        return [validation, poa, error]
+
+    
+
+
+class PaymentDialog(wx.Dialog):
+    """
+    The dialog to interact with the payment service
+    TODO: Do some design here... :)
+    """
+    def __init__(self, swarm_title, payment_url, swarm_id, node_id):
+        kwds = {"style":wx.DEFAULT_DIALOG_STYLE}
+        wx.Dialog.__init__(self, None)
+
+        self.payment_url = payment_url
+        self.swarm_id = swarm_id
+        self.node_id = node_id
+        self.phone_number = None
+        self.poa = None
+
+        self.label_0 = wx.StaticText(self, -1, "\nRegister your phone number (with country code) to get \nhigh speed access to the resource '" + swarm_title + "'\n")
+        
+        self.label_1 = wx.StaticText(self, -1, "Phone number")
+        self.txt_phone_number = wx.TextCtrl(self, -1, "")
+        self.btn_request_code = wx.Button(self, -1, "Request code")
+
+        self.label_2 = wx.StaticText(self, -1, "Code")
+        self.txt_code = wx.TextCtrl(self, -1, "")
+        self.btn_send_code = wx.Button(self, -1, "Send code")
+        
+        self.status = wx.StatusBar(self, -1)
+        
+        self.__set_properties()
+        self.__do_layout()
+
+        self.Bind(wx.EVT_BUTTON, self._request_code, self.btn_request_code)
+        self.Bind(wx.EVT_BUTTON, self._request_token, self.btn_send_code)
+
+        self.status.SetStatusText("Please enter your phone number")
+
+        self._payment_service = PaymentSystem(node_id, swarm_id)
+
+    def __set_properties(self):
+        self.SetTitle("NextShare payment test")
+
+        self.txt_code.Enable(False)
+        self.btn_send_code.Enable(False)
+        
+    def __do_layout(self):
+
+        # begin wxGlade: MyDialog.__do_layout
+        sizer_1 = wx.BoxSizer(wx.VERTICAL)
+        grid_sizer_1 = wx.GridSizer(2, 3, 0, 0)
+        sizer_1.Add(self.label_0, 0, 0, 0)
+        grid_sizer_1.Add(self.label_1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
+        grid_sizer_1.Add(self.txt_phone_number, 0, wx.EXPAND, 0)
+        grid_sizer_1.Add(self.btn_request_code, 0, wx.EXPAND, 0)
+        grid_sizer_1.Add(self.label_2, 0, wx.ALIGN_CENTER_VERTICAL, 0)
+        grid_sizer_1.Add(self.txt_code, 0, wx.EXPAND, 0)
+        grid_sizer_1.Add(self.btn_send_code, 0, wx.EXPAND, 0)
+        sizer_1.Add(grid_sizer_1, 1, wx.EXPAND, 0)
+        sizer_1.Add(self.status, 0, 0, 0)
+        self.SetSizer(sizer_1)
+        sizer_1.Fit(self)
+        self.Layout()
+
+
+
+    def _request_code(self, event):
+
+        num = self.txt_phone_number.Value
+        if not num:
+            # TODO: Error handling
+            return
+        try:
+            self._payment_service.set_phone_number(num.strip())
+            self.status.SetStatusText("Requesting code...")
+            error = self._payment_service.request_code()
+
+            if error != "0":
+                if error.count("mobile_number_wrong") > 0:
+                    txt = "Bad mobile number"
+                elif error.count("swarm_id_unavailable") > 0:
+                    txt = "Unknown resource"
+                else:
+                    txt = "Unknown error: " + error
+                    
+                self.status.SetStatusText("Got errors:" + txt)
+                return
+            
+        except Exception,e:
+            print >>sys.stderr,"Error contacting payment system:",e
+            # TODO: Handle errors properly
+            return
+
+        # TODO: Figure out why the payment system doesn't want the swarm ID
+        # TODO: to figure out the price/availability etc.
+        
+        #s = xmlrpclib.ServerProxy(self.payment_url)
+        #s.initialize_payment(num, self.swarm_id)
+
+        # Enable code field and button
+        self.phone_number = num
+        self.txt_code.Enable(True)
+        self.btn_send_code.Enable(True)
+        
+        self.status.SetStatusText("Please enter the code")
+        
+    def _request_token(self, event):
+
+        code = self.txt_code.Value
+        if not code:
+            # TODO: Error handling
+            return
+
+        [validation, poa, error] = self._payment_service.verify_code(code)
+        
+        if error != "0":
+            if error.count("no_such_code"):
+                txt = "Bad code"
+            elif error.count("code_to_old"):
+                txt = "Code has expired"
+            elif error.count("code_already_consumed"):
+                txt = "This code has already been used"
+            elif error.count("mobile_number_different"):
+                txt = "INTERNAL: phone number has changed..."
+            elif error.count("invalid_request"):
+                txt = "The request was invalid"
+            else:
+                txt = "Unknown error: " + error
+            self.status.SetStatusText("Got error: " + txt)
+            return
+        
+        self.poa = poa
+        self.EndModal(0)
+
+    def get_poa(self):
+        return self.poa
+        
+
+def wx_get_poa(url, swarm_id, perm_id, root_window=None, swarm_title="Unknown content"):
+    """
+    Pop up a WX interface for the payment system
+    """
+    
+    d = PaymentDialog(swarm_title,
+                      url,
+                      swarm_id,
+                      perm_id)
+    
+    retval = d.ShowModal()
+    try:
+        poa_b64 = d.get_poa()
+        poa_serialized = decodestring(poa_b64)
+        poa = ClosedSwarm.POA.deserialize(poa_serialized)
+        poa.verify()
+        return poa
+    except:
+        return None
+
+    
+
+# Test
+if __name__ == "__main__":
+
+    app = wx.PySimpleApp()            
+    import threading
+    t = threading.Thread(target=app.MainLoop)
+    t.start()
+
+    
+
+    d = PaymentDialog("Test file",
+                      "http://127.0.0.1:9090",
+                      "1234",
+                      "myid")
+
+    retval = d.ShowModal()
+    print "Modal returned"
+    poa_b64 = d.get_poa()
+    if poa_b64:
+        poa_serialized = decodestring(poa_b64)
+        from BaseLib.Core.ClosedSwarm import ClosedSwarm
+        poa = ClosedSwarm.POA.deserialize(poa_serialized)
+        poa.verify()
+        print "Got valid poa"
+        
+    else:
+        print "No POA for us..."
+    
diff --git a/instrumentation/next-share/BaseLib/Core/ClosedSwarm/Tools.py b/instrumentation/next-share/BaseLib/Core/ClosedSwarm/Tools.py
new file mode 100644 (file)
index 0000000..377028d
--- /dev/null
@@ -0,0 +1,166 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+#
+# Arno TODO: move this to ../Tools/, wx not allowed in ../Core
+#
+
+import re
+import urllib
+import urllib2
+
+from ClosedSwarm import read_poa_from_file
+
+def wx_get_poa(root_window=None):
+    """
+    Pop up a graphical file selector
+    """
+    import wx
+    import sys
+    print >>sys.stderr, "Using GUI poa browser"
+    fd = wx.FileDialog(root_window, "Select Proof of Access", wildcard="*.poa", style=wx.OPEN)
+    if fd.ShowModal() == wx.ID_OK:
+        return read_poa_from_file(fd.GetPath())
+    raise Exception("User aborted")
+    
+
+def wx_get_http_poa(url, swarm_id, perm_id, root_window=None):
+    """
+    Pop up a graphical authorization thingy if required by the
+    web server 
+    """
+
+    def auth_handler(realm):
+        """
+        As for username,password
+        """
+        import wx
+        import sys
+        print >>sys.stderr, "Using GUI poa browser"
+        
+        pw = wx.Dialog(root_window, -1, "Authenticate")
+
+        vert = wx.BoxSizer(wx.VERTICAL)
+        label_1 = wx.StaticText(pw, -1, "Authentication for %s required"%realm)
+        vert.Add(label_1, 0, wx.EXPAND | wx.LEFT, 10)
+    
+        horiz = wx.BoxSizer(wx.HORIZONTAL)
+        vert.Add(horiz, 0, 0, 0)
+        label_2 = wx.StaticText(pw, -1, "Username")
+        label_2.SetMinSize((70,15))
+        horiz.Add(label_2, 0, wx.LEFT, 0)
+        pw.username = wx.TextCtrl(pw, -1, "")
+        horiz.Add(pw.username, 0, wx.LEFT, 0)
+
+        horiz = wx.BoxSizer(wx.HORIZONTAL)
+        vert.Add(horiz, 0, 0, 0)
+        pw.pwd = wx.TextCtrl(pw, -1, "", style=wx.TE_PASSWORD)
+        label_3  = wx.StaticText(pw, -1, "Password")
+        label_3.SetMinSize((70,15))
+        horiz.Add(label_3, 0, wx.LEFT, 0)
+        horiz.Add(pw.pwd, 0, wx.LEFT, 0)
+
+        horiz = wx.BoxSizer(wx.HORIZONTAL)
+        vert.Add(horiz, 0, wx.LEFT, 0)
+
+        horiz.Add(wx.Button(pw, wx.ID_CANCEL), 0,0,0)
+        ok = wx.Button(pw, wx.ID_OK)
+        ok.SetDefault()
+        horiz.Add(ok, 0,0,0)
+
+        pw.username.SetFocus()
+        order = (pw.username, pw.pwd, ok)
+        for i in xrange(len(order) - 1):
+            order[i+1].MoveAfterInTabOrder(order[i])
+
+        pw.SetSizer(vert)
+        vert.Fit(pw)
+        pw.Layout()
+
+        try:
+            if pw.ShowModal() == wx.ID_OK:
+                return (pw.username.GetValue(), pw.pwd.GetValue())
+        finally:
+            pw.Destroy()
+
+        raise Exception("User aborted")
+
+    w = web_get_poa(url, swarm_id, perm_id, auth_handler)    
+    return w.get_poa()
+    
+
+class web_get_poa:
+    """
+    Class that will call the auth_handler if authentication
+    is required
+    """
+    
+    def __init__(self, url, swarm_id, perm_id, auth_handler=None):
+
+        self.url = url
+        self.swarm_id = swarm_id
+        self.perm_id = perm_id
+        self.auth_handler = auth_handler
+
+        
+    def get_poa(self, credentials=None):
+        """
+        Try to fetch a POA
+        """
+
+        if credentials and len(credentials) == 4:
+            (protocol, realm, name, password) = credentials
+            password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
+            password_mgr.add_password(realm, self.url, name, password)
+            if protocol.lower() == "digest":
+                handler = urllib2.HTTPDigestAuthHandler(password_mgr)
+            elif protocol.lower() == "basic":
+                handler = urllib2.HTTPBasicAuthHandler(password_mgr)
+            else:
+                raise Exception("Unknown authorization protocol: '%s'"%protocol)
+                
+            opener = urllib2.build_opener(handler)
+            urllib2.install_opener(opener)
+
+        values = {'swarm_id':self.swarm_id,
+                  'perm_id':self.perm_id}
+
+        try:
+            data = urllib.urlencode(values)
+            req = urllib2.Request(self.url, data)
+            response = urllib2.urlopen(req)
+        except urllib2.HTTPError,e:
+            # Need authorization?
+            if e.code == 401 and not credentials:
+                try:
+                    type, realm = e.headers["WWW-Authenticate"].split()
+                    m = re.match('realm="(.*)"', realm)
+                    if m:
+                        realm = m.groups()[0]
+                    else:
+                        raise Exception("Bad www-authenticate response")
+                except Exception,e:
+                    raise Exception("Authentication failed: %s"%e)
+                        
+                if self.auth_handler:
+                    name, passwd = self.auth_handler(realm)
+                    if name and passwd:
+                        credentials = (type, realm, name, passwd)
+                        return self.get_poa(credentials)
+                    
+            raise Exception("Could not get POA: %s"%e)
+        except urllib2.URLError,e:
+            raise Exception("Could not get POA: %s"%e.reason)
+        
+
+        # Connected ok, now get the POA
+        try:
+            poa_str = response.read()
+            from BaseLib.Core.ClosedSwarm import ClosedSwarm
+            return ClosedSwarm.POA.deserialize(poa_str)
+        except Exception,e:
+            raise Exception("Could not fetch POA: %s"%e)
+    
+        raise Exception("Could not get POA: Unknown reason")
+
+
+    
diff --git a/instrumentation/next-share/BaseLib/Core/ClosedSwarm/__init__.py b/instrumentation/next-share/BaseLib/Core/ClosedSwarm/__init__.py
new file mode 100644 (file)
index 0000000..94c71b3
--- /dev/null
@@ -0,0 +1,3 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/MagnetLink.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/MagnetLink.py
new file mode 100644 (file)
index 0000000..a111994
--- /dev/null
@@ -0,0 +1,208 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+"""
+The MagnetLink module handles the retrieval of the 'info' part of a
+.torrent file given a magnet link.
+
+Ideally we should use the regular BitTorrent connection classes to
+make connection to peers, but all these classes assume that the
+.torrent information is already available.
+
+Hence, this module will make BitTorrent connection for the sole
+purpose of retrieving the .torrent info part.  After retrieval has
+finished all connections are closed and a regular download will begin.
+"""
+import sys
+from binascii import unhexlify
+from urlparse import urlsplit
+from traceback import print_exc
+from threading import Lock
+
+try:
+    # parse_qsl requires python 2.6 or higher
+    from urlparse import parse_qsl
+except ImportError:
+    from urllib import unquote_plus
+    def parse_qsl(query):
+        """
+        'foo=bar&moo=milk' --> [('foo', 'bar'), ('moo', 'milk')]
+        """
+        query = unquote_plus(query)
+        for part in query.split("&"):
+            if "=" in part:
+                yield part.split("=", 1)
+
+from BaseLib.Core.DecentralizedTracking.kadtracker.identifier import Id, IdError
+from BaseLib.Core.DecentralizedTracking.MagnetLink.MiniBitTorrent import MiniSwarm, MiniTracker
+import BaseLib.Core.DecentralizedTracking.mainlineDHT as mainlineDHT
+
+DEBUG = False
+
+class Singleton:
+    _singleton_lock = Lock()
+
+    @classmethod
+    def get_instance(cls, *args, **kargs):
+        if hasattr(cls, "_singleton_instance"):
+            return getattr(cls, "_singleton_instance")
+
+        else:
+            cls._singleton_lock.acquire()
+            try:
+                if not hasattr(cls, "_singleton_instance"):
+                    setattr(cls, "_singleton_instance", cls(*args, **kargs))
+                return getattr(cls, "_singleton_instance")
+                
+            finally:
+                cls._singleton_lock.release()
+
+class MagnetHandler(Singleton):
+    def __init__(self, raw_server):
+        self._raw_server = raw_server
+        self._magnets = []
+
+    def get_raw_server(self):
+        return self._raw_server
+
+    def add_magnet(self, magnet_link):
+        self._magnets.append(magnet_link)
+
+    def remove_magnet(self, magnet_link):
+        self._magnets.remove(magnet_link)
+
+    def get_magnets(self):
+        return self._magnets
+
+class MagnetLink:
+    def __init__(self, url, callback):
+        """
+        If the URL conforms to a magnet link, the .torrent info is
+        downloaded and returned to CALLBACK.
+        """
+        # _callback is called when the metadata is retrieved.
+        self._callback = callback
+
+        dn, xt, tr = self._parse_url(url)
+
+        # _name is the unicode name suggested for the swarm.
+        assert dn is None or isinstance(dn, unicode), "DN has invalid type: %s" % type(dn)
+        self._name = dn
+
+        # _info_hash is the 20 byte binary info hash that identifies
+        # the swarm.
+        assert isinstance(xt, str), "XT has invalid type: %s" % type(xt)
+        assert len(xt) == 20, "XT has invalid length: %d" % len(xt)
+        self._info_hash = xt
+
+        # _tracker is an optional tracker address.
+        self._tracker = tr
+
+        # _swarm is a MiniBitTorrent.MiniSwarm instance that connects
+        # to peers to retrieve the metadata.
+        magnet_handler = MagnetHandler.get_instance()
+        magnet_handler.add_magnet(self)
+        self._swarm = MiniSwarm(self._info_hash, magnet_handler.get_raw_server(), self.metainfo_retrieved)
+
+    def get_infohash(self):
+        return self._info_hash
+
+    def get_name(self):
+        return self._name
+
+    def retrieve(self):
+        """
+        Start retrieving the metainfo
+        
+        Returns True when attempting to obtain the metainfo, in this
+        case CALLBACK will always be called.  Otherwise False is
+        returned, in this case CALLBACK will not be called.
+        """
+        if self._info_hash:
+            # todo: catch the result from get_peers and call its stop
+            # method.  note that this object does not yet contain a
+            # stop method...
+            dht = mainlineDHT.dht
+            dht.get_peers(Id(self._info_hash), self._swarm.add_potential_peers)
+
+            try:
+                if self._tracker:
+                    MiniTracker(self._swarm, self._tracker)
+            except:
+                print_exc()
+
+            return True
+        else:
+            return False
+
+    def metainfo_retrieved(self, metainfo, peers=[]):
+        """
+        Called when info part for metadata is retrieved.  If we have
+        more metadata, we will add it at this point.
+
+        PEERS optionally contains a list of valid BitTorrent peers,
+        found during metadata download, to help bootstrap the
+        download.
+        """
+        assert isinstance(metainfo, dict)
+        assert isinstance(peers, list)
+        if __debug__:
+            for address in peers:
+                assert isinstance(address[0], str)
+                assert isinstance(address[1], int)
+
+        # create metadata
+        metadata = {"info":metainfo}
+        if self._tracker:
+            metadata["announce"] = self._tracker
+        else:
+            metadata["nodes"] = []
+        if peers:
+            metadata["initial peers"] = peers
+
+        self._callback(metadata)
+        self.close()
+
+    def close(self):
+        magnet_handler = MagnetHandler.get_instance()
+        magnet_handler.remove_magnet(self)
+        
+        # close all MiniBitTorrent activities
+        self._swarm.close()
+
+    @staticmethod
+    def _parse_url(url):
+        # url must be a magnet link
+        dn = None
+        xt = None
+        tr = None
+
+        if DEBUG: print >> sys.stderr, "Magnet._parse_url()", url
+
+        schema, netloc, path, query, fragment = urlsplit(url)
+        if schema == "magnet":
+            # magnet url's do not conform to regular url syntax (they
+            # do not have a netloc.)  This causes path to contain the
+            # query part.
+            if "?" in path:
+                pre, post = path.split("?", 1)
+                if query:
+                    query = "&".join((post, query))
+                else:
+                    query = post
+
+            for key, value in parse_qsl(query):
+                if key == "dn":
+                    # convert to unicode
+                    dn = value.decode()
+
+                elif key == "xt" and value.startswith("urn:btih:"):
+                    xt = unhexlify(value[9:49])
+
+                elif key == "tr":
+                    tr = value
+
+            if DEBUG: print >> sys.stderr, "Magnet._parse_url() NAME:", dn
+            if DEBUG: print >> sys.stderr, "Magnet._parse_url() HASH:", xt
+            if DEBUG: print >> sys.stderr, "Magnet._parse_url() TRAC:", tr
+
+        return (dn, xt, tr)
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/MiniBitTorrent.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/MiniBitTorrent.py
new file mode 100644 (file)
index 0000000..38420de
--- /dev/null
@@ -0,0 +1,560 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+"""
+The MiniBitTorrent module sets up connections to BitTorrent peers with
+the sole purpose of obtaining the .Torrent metadata.
+
+The peers are obtained though either the tracker, PEX, or the DHT
+provided in the MagnetLink.  All connections will be closed once the
+metadata is obtained.
+"""
+
+from cStringIO import StringIO
+from random import getrandbits 
+from threading import Lock, Event, Thread
+from time import time
+from traceback import print_exc
+from urllib import urlopen, urlencode
+import sys
+
+from BaseLib.Core.BitTornado.BT1.MessageID import protocol_name, EXTEND
+from BaseLib.Core.BitTornado.BT1.convert import toint, tobinary
+from BaseLib.Core.BitTornado.RawServer import RawServer
+from BaseLib.Core.BitTornado.SocketHandler import SocketHandler
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.Utilities.Crypto import sha
+
+UT_EXTEND_HANDSHAKE = chr(0)
+UT_PEX_ID = chr(1)
+UT_METADATA_ID = chr(2)
+METADATA_PIECE_SIZE = 16 * 1024
+MAX_CONNECTIONS = 30
+MAX_TIME_INACTIVE = 30
+
+DEBUG = True
+
+# todo: extend testcases
+# todo: add tracker support
+# todo: stop the dht
+
+class Connection:
+    """
+    A single BitTorrent connection.
+    """
+    def __init__(self, swarm, raw_server, address):
+        self._swarm = swarm
+        self._closed = False
+        self._in_buffer = StringIO()
+        self._next_len = 1
+        self._next_func = self.read_header_len
+        self._address = address
+        self._last_activity = time()
+
+        self._her_ut_metadata_id = chr(0)
+
+        # outstanding requests for pieces
+        self._metadata_requests = []
+
+        if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent: New connection"
+        self._socket = raw_server.start_connection(address, self)
+        self.write_handshake()
+
+    def write_handshake(self):
+        # if DEBUG: print >> sys.stderr, "MiniBitTorrent.write_handshake()"
+        self._socket.write("".join((chr(len(protocol_name)), protocol_name,
+                                    "\x00\x00\x00\x00\x00\x30\x00\x00",
+                                    self._swarm.get_info_hash(),
+                                    self._swarm.get_peer_id())))
+
+    def write_extend_message(self, metadata_message_id, payload):
+        assert isinstance(payload, dict), "PAYLOAD has invalid type: %s" % type(payload)
+        assert isinstance(metadata_message_id, str), "METADATA_MESSAGE_ID has invalid type: %s" % type(metadata_message_id)
+        assert len(metadata_message_id) == 1, "METADATA_MESSAGE_ID has invalid length: %d" % len(metadata_message_id)
+        if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.write_extend_message()"
+        payload = bencode(payload)
+        self._socket.write("".join((tobinary(2 + len(payload)), # msg len
+                                    EXTEND,                     # msg id
+                                    metadata_message_id,        # extend msg id
+                                    payload)))                  # bencoded msg
+
+    def read_header_len(self, s):
+        if ord(s) != len(protocol_name):
+            return None
+        return len(protocol_name), self.read_header
+
+    def read_header(self, s):
+        if s != protocol_name:
+            return None
+        return 8, self.read_reserved
+
+    def read_reserved(self, s):
+        if ord(s[5]) & 16:
+            # extend module is enabled
+            if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.read_reserved() extend module is supported"
+            self.write_extend_message(UT_EXTEND_HANDSHAKE, {"m":{"ut_pex":ord(UT_PEX_ID), "ut_metadata":ord(UT_METADATA_ID), "metadata_size":self._swarm.get_metadata_size()}})
+            return 20, self.read_download_id
+        else:
+            if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.read_reserved() extend module not supported"
+            return None
+
+    def read_download_id(self, s):
+        if s != self._swarm.get_info_hash():
+            if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.read_download_id() invalid info hash"
+            return None
+        return 20, self.read_peer_id
+
+    def read_peer_id(self, s):
+        self._swarm.add_good_peer(self._address)
+        return 4, self.read_len
+
+    def read_len(self, s):
+        l = toint(s)
+        # if l > self.Encoder.max_len:
+        #     return None
+        # if DEBUG: print >> sys.stderr, "waiting for", l, "bytes"
+        return l, self.read_message
+
+    def read_message(self, s):
+        if s != '':
+            if not self.got_message(s):
+                return None
+        return 4, self.read_len
+
+    def got_message(self, data):
+        if data[0] == EXTEND and len(data) > 2:
+
+            # we only care about EXTEND messages.  So all other
+            # messages will NOT reset the _last_activity timestamp.
+            self._last_activity = time()
+
+            return self.got_extend_message(data)
+
+        # ignore all other messages, but stay connected
+        return True
+
+    def _request_some_metadata_piece(self):
+        if not self._closed:
+            piece = self._swarm.reserve_metadata_piece()
+            if isinstance(piece, int):
+                if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Requesting metadata piece", piece
+                self._metadata_requests.append(piece)
+                self.write_extend_message(self._her_ut_metadata_id, {"msg_type":0, "piece":piece})
+
+            else:
+                self._swarm._raw_server.add_task(self._request_some_metadata_piece, 1)
+
+    def got_extend_message(self, data):
+        try:
+            message = bdecode(data[2:], sloppy=True)
+            if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message()", len(message), "bytes as payload"
+            # if DEBUG: print >> sys.stderr, message
+        except:
+            if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Received invalid UT_METADATA message"
+            return False
+
+        if data[1] == UT_EXTEND_HANDSHAKE: # extend handshake
+            if "metadata_size" in message and isinstance(message["metadata_size"], int) and message["metadata_size"] > 0:
+                self._swarm.add_metadata_size_opinion(message["metadata_size"])
+
+            if "m" in message and isinstance(message["m"], dict) and "ut_metadata" in message["m"] and isinstance(message["m"]["ut_metadata"], int):
+                self._her_ut_metadata_id = chr(message["m"]["ut_metadata"])
+                self._request_some_metadata_piece()
+
+            else:
+                # other peer does not support ut_metadata.  Try to get
+                # some PEX peers, otherwise close connection
+                if not ("m" in message and isinstance(message["m"], dict) and "ut_pex" in message["m"]):
+                    return False
+
+        elif data[1] == UT_PEX_ID: # our ut_pex id
+            if "added" in message and isinstance(message["added"], str) and len(message["added"]) % 6 == 0:
+                added = message["added"]
+                addresses = []
+                for offset in xrange(0, len(added), 6):
+                    address = ("%s.%s.%s.%s" % (ord(added[offset]), ord(added[offset+1]), ord(added[offset+2]), ord(added[offset+3])), ord(added[offset+4]) << 8 | ord(added[offset+5]))
+                    addresses.append(address)
+                if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message()", len(addresses), "peers from PEX"
+                self._swarm.add_potential_peers(addresses)
+
+                # when this peer does not support ut_metadata we can
+                # close the connection after receiving a PEX message
+                if self._her_ut_metadata_id == chr(0):
+                    return False
+
+        elif data[1] == UT_METADATA_ID: # our ut_metadata id
+            if "msg_type" in message:
+                if message["msg_type"] == 0 and "piece" in message and isinstance(message["piece"], int):
+                    # She send us a request.  However, since
+                    # MiniBitTorrent disconnects after obtaining the
+                    # metadata, we can not provide any pieces
+                    # whatsoever.
+                    # So... send reject
+                    if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Rejecting request for piece", message["piece"]
+                    self.write_extend_message(self._her_ut_metadata_id, {"msg_type":2, "piece":message["piece"]})
+
+                elif message["msg_type"] == 1 and \
+                         "piece" in message and (isinstance(message["piece"], int) or isinstance(message["piece"], long)) and message["piece"] in self._metadata_requests and \
+                         "total_size" in message and (isinstance(message["total_size"], int) or isinstance(message["total_size"], long)) and message["total_size"] <= METADATA_PIECE_SIZE:
+                    # Received a metadata piece
+                    if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Received metadata piece", message["piece"]
+                    self._metadata_requests.remove(message["piece"])
+                    self._swarm.add_metadata_piece(message["piece"], data[-message["total_size"]:])
+                    self._request_some_metadata_piece()
+
+                elif message["msg_type"] == 2 and "piece" in message and isinstance(message["piece"], int) and message["piece"] in self._metadata_requests:
+                    # Received a reject
+                    if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Our request for", message["piece"], "was rejected"
+                    self._metadata_requests.remove(message["piece"])
+                    self._swarm.unreserve_metadata_piece(message["piece"])
+
+                    # Register a task to run in 'some time' to start
+                    # requesting again (reject is usually caused by
+                    # flood protection)
+                    self._swarm._raw_server.add_task(self._request_some_metadata_piece, 5)
+
+                else:
+                    if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Received unknown message"
+                    return False
+
+            else:
+                if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Received invalid extend message (no msg_type)"
+                return False
+
+        else:
+            if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.got_extend_message() Received unknown extend message"
+            return False
+                    
+        return True
+
+    def data_came_in(self, socket, data):
+        # Feed DATA from the socket into the current-message buffer.
+        # _next_len is how many bytes the active parser (_next_func)
+        # still expects; loop because one chunk can complete several
+        # messages.
+        while not self._closed:
+            left = self._next_len - self._in_buffer.tell()
+            # if DEBUG: print >> sys.stderr, self._in_buffer.tell() + len(data), "/", self._next_len
+            if left > len(data):
+                # not enough bytes yet for a complete message; buffer
+                # what we have and wait for the next chunk
+                self._in_buffer.write(data)
+                return
+            self._in_buffer.write(data[:left])
+            data = data[left:]
+            message = self._in_buffer.getvalue()
+            # reset + truncate empties the cStringIO buffer for reuse
+            self._in_buffer.reset()
+            self._in_buffer.truncate()
+            next_ = self._next_func(message)
+            if next_ is None:
+                # the parser rejected the message; drop the connection
+                self.close()
+                return
+            # parser returns (expected-length, handler) for the next message
+            self._next_len, self._next_func = next_
+
+    def connection_lost(self, socket):
+        if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.connection_lost()"
+        self._closed = True
+        self._swarm.connection_lost(self)
+
+    def connection_flushed(self, socket):
+        # Called by the raw server when the outgoing buffer has been
+        # fully sent; nothing to do here.
+        pass
+
+    def check_for_timeout(self, deadline):
+        """
+        Close when no activity since DEADLINE
+        """
+        if self._last_activity < deadline:
+            if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.check_for_timeout() Timeout!"
+            self.close()
+
+    def close(self):
+        if DEBUG: print >> sys.stderr, self._address, "MiniBitTorrent.close()"
+        self._closed = True
+        if self._socket.connected:
+            self._socket.close()
+        else:
+            self._swarm.connection_lost(self)
+    
+class MiniSwarm:
+    """
+    A MiniSwarm instance maintains an overview of what is going on in
+    a single BitTorrent swarm.
+    """
+    def __init__(self, info_hash, raw_server, callback):
+        # _info_hash is the 20 byte binary info hash that identifies
+        # the swarm.
+        assert isinstance(info_hash, str), str
+        assert len(info_hash) == 20
+        self._info_hash = info_hash
+
+        # _raw_server provides threading support.  All socket events
+        # will run in this thread.
+        self._raw_server = raw_server
+
+        # _callback is called with the raw metadata string when it is
+        # retrieved
+        self._callback = callback
+
+        # _peer_id contains 20 semi random bytes
+        self._peer_id = "-ST0100-" + "".join([chr(getrandbits(8)) for _ in range(12)])
+        assert isinstance(self._peer_id, str)
+        assert len(self._peer_id) == 20, len(self._peer_id)
+
+        # _lock protects several member variables that are accessed
+        # from our RawServer and other threads.
+        self._lock = Lock()
+
+        # _connections contains all open socket connections.  This
+        # variable is protected by _lock.
+        self._connections = []
+
+        # _metadata_blocks contains the blocks that form the metadata
+        # that we want to download.  This variable is protected by
+        # _lock.
+        self._metadata_blocks = [] # [requested, piece, data]
+
+        # _metadata_size contains the size in bytes of the metadata.
+        # This value is based on the opinions of other peers which is
+        # accumulated in _metadata_size_opinions.
+        self._metadata_size = 0
+        self._metadata_size_opinions = {} # size:number-of-votes
+
+        # _potential_peers contains a dictionary of address::timestamp
+        # pairs where potential BitTorrent peers can be found
+        self._potential_peers = {}
+
+        # _good_peers contains a dictionary of address:timestamp pairs
+        # where valid BitTorrent peers can be found
+        self._good_peers = {}
+
+        # _closed indicates that we no longer need this swarm instance
+        self._closed = False
+
+        # scan for old connections
+        self._raw_server.add_task(self._timeout_connections, 5)
+
+    def add_good_peer(self, address):
+        assert isinstance(address, tuple)
+        assert len(address) == 2
+        assert isinstance(address[0], str)
+        assert isinstance(address[1], int)
+        self._good_peers[address] = time()
+
+    def get_info_hash(self):
+        return self._info_hash
+
+    def get_peer_id(self):
+        return self._peer_id
+
+    def get_metadata_size(self):
+        return self._metadata_size
+
+    def add_metadata_size_opinion(self, metadata_size):
+        """
+        A peer told us the metadata size.  Assume it is correct,
+        however, keep track of potential differences.
+        """
+        if metadata_size in self._metadata_size_opinions:
+            self._metadata_size_opinions[metadata_size] += 1
+        else:
+            self._metadata_size_opinions[metadata_size] = 1
+
+        # what do we believe the metadata size is
+        if len(self._metadata_size_opinions) == 1:
+            metadata_size = self._metadata_size_opinions.keys()[0]
+            if DEBUG: print >> sys.stderr, "MiniBitTorrent.add_metadata_size_opinion() Metadata size is:", metadata_size, "(%d unanimous vote)" % sum(self._metadata_size_opinions.values())
+
+        else:
+            options = [(weight, metadata_size) for metadata_size, weight in self._metadata_size_opinions.iteritems()]
+            options.sort(reverse=True)
+            if DEBUG: print >> sys.stderr, "MiniBitTorrent.add_metadata_size_opinion() Choosing metadata size from multiple options:", options
+            metadata_size = options[0][1]
+
+        if self._metadata_size != metadata_size:
+            self._metadata_size = metadata_size
+
+            pieces = metadata_size / METADATA_PIECE_SIZE
+            if metadata_size % METADATA_PIECE_SIZE != 0:
+                pieces += 1
+
+            # we were led to believe that there are more blocks than
+            # there actually are, remove some
+            if len(self._metadata_blocks) > pieces:
+                if DEBUG: print >> sys.stderr, "MiniBitTorrent.add_metadata_size_opinion() removing some blocks..."
+                self._metadata_blocks = [block_tuple for block_tuple in self._metadata_blocks if block_tuple[1] < pieces]
+
+            # we were led to believe that there are fewer blocks than
+            # there actually are, add some
+            elif len(self._metadata_blocks) < pieces:
+                blocks = [[0, piece, None] for piece in xrange(len(self._metadata_blocks), pieces)]
+                if DEBUG: print >> sys.stderr, "MiniBitTorrent.add_metadata_size_opinion() adding", len(blocks), "blocks..."
+                self._metadata_blocks.extend(blocks)
+
+    def reserve_metadata_piece(self):
+        """
+        A metadata piece request can be made.  Find the most usefull
+        piece to request.
+        """
+        for block_tuple in self._metadata_blocks:
+            if block_tuple[2] is None:
+                block_tuple[0] += 1
+                self._metadata_blocks.sort()
+                return block_tuple[1]
+        return None
+
+    def unreserve_metadata_piece(self, piece):
+        """
+        A metadata piece request is refused or cancelled.  Update the
+        priorities.
+        """
+        for index, block_tuple in zip(xrange(len(self._metadata_blocks)), self._metadata_blocks):
+            if block_tuple[1] == piece:
+                block_tuple[0] = max(0, block_tuple[0] - 1)
+                self._metadata_blocks.sort()
+                break
+
+    def add_metadata_piece(self, piece, data):
+        """
+        A metadata piece was received
+        """
+        if not self._closed:
+
+            for index, block_tuple in zip(xrange(len(self._metadata_blocks)), self._metadata_blocks):
+                if block_tuple[1] == piece:
+                    block_tuple[0] = max(0, block_tuple[0] - 1)
+                    block_tuple[2] = data
+                    self._metadata_blocks.sort()
+                    break
+
+            # def p(s):
+            #     if s is None: return 0
+            #     return len(s)
+            # if DEBUG: print >> sys.stderr, "Progress:", [p(t[2]) for t in self._metadata_blocks]
+
+            # see if we are done
+            for requested, piece, data in self._metadata_blocks:
+                if data is None:
+                    break
+
+            else:
+                metadata = "".join([data for requested, piece, data in self._metadata_blocks])
+                info_hash = sha(metadata).digest()
+
+                if info_hash == self._info_hash:
+                    if DEBUG: print >> sys.stderr, "MiniBitTorrent.add_metadata_piece() Done!"
+
+                    # get nice list with recent BitTorrent peers, sorted
+                    # by most recently connected
+                    peers = [(timestamp, address) for address, timestamp in self._good_peers.iteritems()]
+                    peers.sort(reverse=True)
+                    peers = [address for timestamp, address in peers]
+
+                    self._callback(bdecode(metadata), peers)
+
+                else:
+                    # todo: hash failed... now what?
+                    # quick solution... remove everything and try again
+                    self._metadata_blocks = [(requested, piece, None) for requested, piece, data in self._metadata_blocks]
+    def add_potential_peers(self, addresses):
+        if not self._closed:
+            for address in addresses:
+                if not address in self._potential_peers:
+                    self._potential_peers[address] = 0
+
+            if len(self._connections) < MAX_CONNECTIONS:
+                self._create_connections()
+
+    def _create_connections(self):
+        # order by last connection attempt
+        addresses = [(timestamp, address) for address, timestamp in self._potential_peers.iteritems()]
+        addresses.sort()
+
+        now = time()
+
+        # print >> sys.stderr, len(self._connections), "/", len(self._potential_peers)
+
+        for timestamp, address in addresses:
+            if len(self._connections) >= MAX_CONNECTIONS:
+                break
+
+            if address in self._connections:
+                continue
+
+            try:
+                self._potential_peers[address] = now
+                connection = Connection(self, self._raw_server, address)
+
+            except:
+                if DEBUG: print >> sys.stderr, "MiniBitTorrent.add_potential_peers() ERROR"
+                print_exc()
+
+            else:
+                self._lock.acquire()
+                try:
+                    self._connections.append(connection)
+                finally:
+                    self._lock.release()
+
+    def _timeout_connections(self):
+        deadline = time() - MAX_TIME_INACTIVE
+        for connection in self._connections:
+            connection.check_for_timeout(deadline)
+
+        if not self._closed:
+            self._raw_server.add_task(self._timeout_connections, 1)
+
+    def connection_lost(self, connection):
+        try:
+            self._connections.remove(connection)
+        except:
+            # it is possible that a connection timout occurs followed
+            # by another connection close from the socket handler when
+            # the connection can not be established.
+            pass
+        if not self._closed:
+            self._create_connections()
+
+    def close(self):
+        self._closed = True
+        for connection in self._connections:
+            connection.close()
+
+class MiniTracker(Thread):
+    """
+    A MiniTracker instance makes a single connection to a tracker to
+    attempt to obtain peer addresses.
+    """
+    def __init__(self, swarm, tracker):
+        Thread.__init__(self)
+        self._swarm = swarm
+        self._tracker = tracker
+        self.start()
+
+    def run(self):
+        announce = self._tracker + "?" + urlencode({"info_hash":self._swarm.get_info_hash(),
+                                                    "peer_id":self._swarm.get_peer_id(),
+                                                    "port":"12345",
+                                                    "compact":"1",
+                                                    "uploaded":"0",
+                                                    "downloaded":"0",
+                                                    "left":"-1",
+                                                    "event":"started"})
+        handle = urlopen(announce)
+        if handle:
+            body = handle.read()
+            if body:
+                try:
+                    body = bdecode(body)
+
+                except:
+                    pass
+                
+                else:
+                    # using low-bandwidth binary format
+                    peers = []
+                    peer_data = body["peers"]
+                    for x in range(0, len(peer_data), 6):
+                        key = peer_data[x:x+6]
+                        ip = ".".join([str(ord(i)) for i in peer_data[x:x+4]])
+                        port = (ord(peer_data[x+4]) << 8) | ord(peer_data[x+5])
+                        peers.append((ip, port))
+
+                    if DEBUG: print >> sys.stderr, "MiniTracker.run() received", len(peers), "peer addresses from tracker"
+                    self._swarm.add_potential_peers(peers)
+                                                                                                            
+                                                    
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/__init__.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/MagnetLink/__init__.py
new file mode 100644 (file)
index 0000000..0038dec
--- /dev/null
@@ -0,0 +1,6 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+# The extension id for the 'ut_metadata' message (metadata exchange).
+# NOTE(review): 224 appears to be a locally chosen id -- confirm it
+# against the extend-handshake code that negotiates extension ids.
+EXTEND_MSG_METADATA     = 'ut_metadata'
+EXTEND_MSG_METADATA_ID  = chr(224)
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/__init__.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/__init__.py
new file mode 100644 (file)
index 0000000..8a8e66a
--- /dev/null
@@ -0,0 +1,2 @@
+# written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/MDHT_Spec.txt b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/MDHT_Spec.txt
new file mode 100644 (file)
index 0000000..b2ce812
--- /dev/null
@@ -0,0 +1,73 @@
+M24 release. BitTorrent and Mainline DHT protocol extensions.\r
+\r
+Mainline DHT is used in the Nextshare content delivery platform for the peer discovery process.\r
+Currently, the performance of the protocol is very poor, with the median lookup time taking up to 1 minute.\r
+We believe this poor performance is due to the bad management of the peer routing tables.\r
+\r
+Therefore, we propose a modification to the current MDHT protocol with a particular\r
+focus on a better routing table management, while maintaining its backward compatibility.\r
+Our MDHT protocol extensions are a follow-up of our previous experiments on more than 3 million deployed nodes.\r
+\r
+The extensions in MDHT include (general):\r
+\r
+- A main routing table and a replacement routing table. The main routing table contains best known nodes;\r
+such nodes are considered best nodes when they are reachable from us and when our requests to them\r
+do not time out.\r
+- The nodes in the replacement table are nodes with relatively good attributes; for example, they\r
+demonstrate a significant number of responses to our requests and they have a relatively low number of\r
+timeouts. Such nodes are used (moved to the main routing table) as main nodes, when the current nodes \r
+in the main routing table fail to respond to our queries. \r
+\r
+- A node always starts in quarantine, no matter in which routing table the node is in.\r
+A node is in quarantine for as long as there is no response from it, after having sent us a query for about\r
+3 minutes ago. The quarantine ends when there is a 3 minutes window between a query from the node and \r
+the next response. This quarantine period is designed to detect possible NATed nodes. If a node is in quarantine, \r
+we are not sure whether the node is behind a NAT, but if the node is not in the quarantine - then we are \r
+pretty confident that the node is not behind the NAT. A node that is not in quarantine never comes back to the \r
+quarantine (unless it is completely kicked out from both tables and we lose all rnode information, and therefore start over).\r
+\r
+- Nodes in the main table are refreshed every 3 minutes (if nodes are in quarantine), and every 10 minutes \r
+(if nodes aren't in quarantine). Nodes in the replacement table are not refreshed (no matter whether they are in\r
+quarantine or not).\r
+\r
+- The nodes that are in one of the routing tables (main or replacement) are called rnodes. They store node-specific \r
+information such as: the number of queries to the node, the number of responses from the node, the number of timeouts and errors.\r
+\r
+- Nodes are added to the main routing table only after they have been checked for reachability; nodes are\r
+not added to the main routing table if they don't respond to our reachability check queries.\r
+\r
+- When a node in the main table gets a timeout, it goes to the replacement table. In fact, this node gets replaced with a better\r
+node from the replacement table. The following happens inside the replacement table in order to select the best node for the main table:\r
+       - All the nodes in the correct bucket of the replacement table are pinged - checked for reachability\r
+       - Pings to the NextShare (NS) nodes are delayed for 200ms (in order to give priority to NS nodes)\r
+       - The node that replies first (the fastest) to our query is chosen as the best node for the main table\r
+\r
+\r
+More details on the routing table management:\r
+\r
+- When a query is received from a node which is not in any of the routing tables, then this node is checked for reachability.\r
+If there is room in the main table, the node will be added only if it responded to our reachability check (ping query). \r
+Otherwise, the worst node in the replacement table will be replaced with this new-coming node. \r
+A node in the replacement table is considered the "worst node" when its accumulated number of timeouts exceeds 3.\r
+\r
+- When a response is received from a node that is already in the main routing table, then it is simply refreshed.\r
+Otherwise, if the response comes from a node that is not an rnode and if there is room in the replacement table, then the \r
+node is simply replaced with the worst node of the replacement table.\r
+\r
+- If there is a timeout from a node that is in the main routing table, then it is simply removed from the main table and \r
+put into the replacement table. In fact, the node that did timeout is put inside the replacement table in place of the worst node.\r
+\r
+- Regarding the worst node selection inside the replacement table, we emphasize that the NS nodes are favored to remain inside\r
+the table. When the replacement table is refreshed - in order to identify the worst node in the bucket - the first pings (reachability checks)\r
+are sent to the NS nodes, and only after a delay, they are sent to the rest of the nodes.\r
+\r
+\r
+Additional Mainline DHT extensions - nodes2 replies:\r
+\r
+- For IPv4 nodes, we use the standard 'compact node info' encoding, specified in the BitTorrent protocol. However,\r
+the protocol specification does not have support for IPv6 nodes. The DHT messages - the 'nodes' replies - don't support IPv6,\r
+because all the node contacts are encoded as 6-bytes but IPv6 nodes need 18-bytes. Therefore, in this protocol extension we \r
+use libtorrent - which implements a few extensions to MDHT - in order to make use of the 'nodes2' for IPv6 contact encoding.\r
+\r
+- According to the libtorrent specification, replies with the 'nodes2' key are contacts that are encoded as 20-bytes node ID and\r
+then a variable length encoded IP address (6 bytes in IPv4 case and 18 bytes in IPv6 case).\r
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/Makefile b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/Makefile
new file mode 100644 (file)
index 0000000..ca4f667
--- /dev/null
@@ -0,0 +1,5 @@
+# Run the kadtracker test suite: nose with doctest collection and
+# coverage reporting over the 'trunk' package.
+# NOTE(review): 'rm .coverage' prints an error on a clean tree; the
+# trailing ';' means the tests run anyway.  'rm -f' would be quieter.
+all:
+	rm .coverage; \
+	python2.5 /usr/bin/nosetests --with-doctest \
+	--with-coverage --cover-package=trunk #\
+#	--pdb --pdb-failures
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/README b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/README
new file mode 100644 (file)
index 0000000..37cd14c
--- /dev/null
@@ -0,0 +1,33 @@
+KadTracker 0.1.0
+
+INSTALLATION
+
+This package uses Python 2.5 standard library. No extra modules need to be
+installed.
+
+A Makefile is provided to run the tests. The tests require the nose test
+framework to work.
+
+API
+
+The API is located in kadtracker.py. This is the only module necessary to use the
+package.
+
+Users must ONLY use the methods provided in kadtracker.py.
+
+Users can use the Id and Node containers as they wish. These classes are located
+in identifier.py and node.py
+
+EXAMPLES
+
+Two examples are provided in the distribution:
+
+- server_dht.py
+
+Do the routing maintenance tasks plus a get_peers lookup to a random identifier
+every 10 minutes.
+
+- interactive_dht.py
+
+Do the routing maintenance task plus get_peers lookups asked by the user during
+the interactive session.
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/__init__.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/bencode.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/bencode.py
new file mode 100644 (file)
index 0000000..9e0eebb
--- /dev/null
@@ -0,0 +1,210 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import cStringIO
+import logging
+
+logger = logging.getLogger('dht')
+
+class LoggingException(Exception):
+    # Base class: every bencode error is written to the 'dht' logger
+    # at the moment it is constructed.
+    # NOTE(review): Exception.__init__ is never invoked here, so the
+    # exception carries no args -- confirm str(exc) emptiness is OK.
+
+    def __init__(self, msg):
+        logger.info('%s: %s' % (self.__class__, msg))
+                    
+
+class EncodeError(LoggingException):
+    """Raised by encoder when invalid input."""
+    
+class DecodeError(LoggingException):
+    """Raised by decoder when invalid bencode input."""
+    def __init__(self, msg, bencoded):
+        # the offending bencoded string is appended to the log message
+        LoggingException.__init__(self, '\nBencoded: '.join((msg, bencoded)))
+    
+class RecursionDepthError(DecodeError):
+    """Raised when the bencoded recursivity is too deep.
+
+    This check prevents from using too much recursivity when an
+    accidentally/maliciously constructed bencoded string looks like
+    'llllllllllllllllllllllllllllllllllll'.
+    
+    """
+
+
+def encode(data):
+    output = cStringIO.StringIO()
+    encode_f = _get_encode_f(data)
+    encode_f(data, output)
+    result = output.getvalue()
+    output.close()
+    return result
+        
+def decode(bencoded, max_depth=4):
+    if not bencoded:
+        raise DecodeError('Empty bencoded string', bencoded)
+    try:
+        decode_f = _get_decode_f(bencoded, 0)
+        data, next_pos, = decode_f(bencoded, 0, max_depth)
+    except (KeyError, IndexError, ValueError):
+        raise DecodeError('UNEXPECTED>>>>>>>>>>>>', bencoded)
+    else:
+        if next_pos != len(bencoded):
+            raise DecodeError('Extra characters after valid bencode.', bencoded)
+    return data
+
+
+def _encode_str(data, output):
+    """Encode a string object
+
+    The result format is:
+    <string length encoded in base ten ASCII>:<string data>
+
+    """
+    output.write('%d:%s' % (len(data), data))
+
+def _encode_int(data, output):
+    """Encode an integer (or long) object
+
+    The result format is:
+    i<integer encoded in base ten ASCII>e
+    
+    """
+    output.write('i%de' % data)
+
+def _encode_list(data, output):
+    """Encode a list object
+
+    The result format is:
+    l<bencoded element>...<bencoded element>e
+
+    """
+    output.write('l')
+    for item in data:
+        encode_f = _get_encode_f(item)
+        encode_f(item, output)
+    output.write('e')
+
+def _encode_dict(data, output):
+    """Encode a dict object
+
+    The result format is:
+    d<bencoded key><bencoded value>...<bencoded key><bencoded value>e 
+    Keys must be string and will be encoded in lexicographical order
+
+    """
+    output.write('d')
+    keys = data.keys()
+    keys.sort()
+    for key in keys:
+        if type(key) != str: # key must be a string)
+            raise EncodeError, 'Found a non-string key. Data: %r' % data
+        value = data[key]
+        _encode_fs[str](key, output)
+        encode_f = _get_encode_f(value)
+        encode_f(value, output)
+    output.write('e')
+
+    
+    
+
+def _decode_str(bencoded, pos, _):
+    """Decode a '<len>:<bytes>' string at POS; return (str, next_pos).
+
+    The third (max_depth) argument is ignored: strings cannot recurse.
+    """
+    str_len, str_begin = _get_int(bencoded, pos, ':')
+    str_end = str_begin + str_len
+    return (bencoded[str_begin:str_end], str_end)
+        
+def _decode_int(bencoded, pos, _):
+    """Decode an 'i<digits>e' integer at POS; return (int, next_pos).
+
+    The third (max_depth) argument is ignored: integers cannot recurse.
+    """
+    return  _get_int(bencoded, pos + 1, 'e') # +1 to skip 'i'
+
+def _decode_list(bencoded, pos, max_depth):
+    """Decode an 'l...e' list at POS; return (list, next_pos).
+
+    MAX_DEPTH bounds the nesting of containers (see RecursionDepthError).
+    """
+    if max_depth == 0:
+        raise RecursionDepthError('maximum recursion depth exceeded', bencoded)
+    
+    result = []
+    next_pos = pos + 1 # skip 'l'
+    while bencoded[next_pos] != 'e':
+        decode_f = _get_decode_f(bencoded, next_pos)
+        item, next_pos = decode_f(bencoded,
+                                  next_pos, max_depth - 1)
+        result.append(item)
+    return result, next_pos + 1 # correct for 'e'
+
+def _decode_dict(bencoded, pos, max_depth):
+    """
+    
+    """
+    if max_depth == 0:
+        raise RecursionDepthError, 'maximum recursion depth exceeded'
+    
+    result = {}
+    next_pos = pos + 1 # skip 'd'
+    while bencoded[next_pos] != 'e':
+        # Decode key
+        decode_f = _get_decode_f(bencoded, next_pos)
+        if decode_f != _decode_str:
+            raise DecodeError('Keys must be string. Found: <%s>' % (
+                    bencoded[next_pos]),
+                              bencoded)
+        key, next_pos = decode_f(bencoded,
+                                 next_pos, max_depth - 1)
+        # Decode value
+        decode_f = _get_decode_f(bencoded, next_pos)
+        value, next_pos = decode_f(bencoded,
+                                   next_pos, max_depth - 1)
+        result[key] = value
+    return result, next_pos + 1 # skip 'e'
+
+
+
+def _get_encode_f(value):
+    # Look up the encoder for VALUE's exact type; an unknown type is
+    # reported as EncodeError.  (Python 2.5 'except E, e' / 'raise E,
+    # msg' syntax is kept on purpose -- this package targets 2.5.)
+    try:
+        return _encode_fs[type(value)]
+    except (KeyError), e:
+        raise EncodeError, 'Invalid type: <%r>' % e
+    
+def _get_int(bencoded, pos, char):
+    try:
+        end = bencoded.index(char, pos)
+    except (ValueError):
+        raise DecodeError('Character %s not found.', bencoded)
+    try:
+        result = int(bencoded[pos:end])
+    except (ValueError), e:
+        raise DecodeError('Not an integer: %r' %e, bencoded)
+    return (result, end + 1) # +1 to skip final character
+
+def _get_decode_f(bencoded, pos):
+    # Dispatch on the character at POS: a digit starts a string, 'i'
+    # an integer, 'l' a list, 'd' a dict.  Anything else is invalid.
+    try:
+        return _decode_fs[bencoded[pos]]
+    except (KeyError), e:
+        raise DecodeError('Caracter in position %d raised %r' % (pos, e),
+                          bencoded)
+    
+
+# Dispatch tables: exact python type -> encoder function, and leading
+# bencode character -> decoder function.
+_encode_fs = {str : _encode_str,
+              int :  _encode_int,
+              long : _encode_int,
+              tuple : _encode_list,
+              list :  _encode_list,
+              dict : _encode_dict
+              }
+
+_decode_fs = {'i' : _decode_int,
+             'l' : _decode_list,
+             'd' : _decode_dict}
+# any decimal digit begins a '<len>:<bytes>' string
+for i in xrange(10):
+    _decode_fs[str(i)] = _decode_str
+
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/controller.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/controller.py
new file mode 100644 (file)
index 0000000..e26f878
--- /dev/null
@@ -0,0 +1,77 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import time
+
+import logging, logging_conf
+
+import identifier
+import message
+import token_manager
+import tracker
+from routing_manager import RoutingManager
+from minitwisted import ThreadedReactor
+from rpc_manager import RPCManager
+from querier import Querier
+from responder import Responder
+from message import QUERY, RESPONSE, ERROR, OutgoingGetPeersQuery
+from lookup_manager import LookupManager
+from node import Node
+
+logger = logging.getLogger('dht')
+
+class Controller:
+    """Top-level wiring for a DHT node.
+
+    Builds the reactor, RPC, routing, querier, responder and lookup
+    managers, connects their callbacks together, and starts the
+    bootstrap procedure.
+    """
+    
+    def __init__(self, dht_addr):
+        # dht_addr: the (ip, port) pair this node is reachable on
+        my_addr = dht_addr
+        my_id = identifier.RandomId()
+        my_node = Node(my_addr, my_id)
+        tracker_ = tracker.Tracker()
+        token_m = token_manager.TokenManager()
+
+        self.reactor = ThreadedReactor()
+        # my_addr[1]: the UDP port the RPC layer binds to
+        rpc_m = RPCManager(self.reactor, my_addr[1])
+        querier_ = Querier(rpc_m, my_id)
+        # bootstrap_nodes is the module-level tuple defined below
+        routing_m = RoutingManager(my_node, querier_,
+                                   bootstrap_nodes)
+        responder_ = Responder(my_id, routing_m,
+                              tracker_, token_m)
+
+        # feed all incoming/outgoing DHT events into the routing manager
+        responder_.set_on_query_received_callback(
+            routing_m.on_query_received)
+        querier_.set_on_response_received_callback(
+            routing_m.on_response_received)
+        querier_.set_on_error_received_callback(
+            routing_m.on_error_received)
+        querier_.set_on_timeout_callback(routing_m.on_timeout)
+        querier_.set_on_nodes_found_callback(routing_m.on_nodes_found)
+
+        routing_m.do_bootstrap()
+
+        rpc_m.add_msg_callback(QUERY,
+                               responder_.on_query_received)
+
+        self.lookup_m = LookupManager(my_id, querier_,
+                                      routing_m)
+        self._routing_m = routing_m
+        
+
+    def start(self):
+        # start the reactor thread; network activity begins here
+        self.reactor.start()
+
+    def stop(self):
+        #TODO2: stop each manager
+        self.reactor.stop()
+
+    def get_peers(self, info_hash, callback_f, bt_port=None):
+        # Start a get_peers lookup for info_hash; callback_f receives
+        # peers as they are found.  bt_port presumably announces our
+        # BitTorrent port -- confirm against LookupManager.get_peers.
+        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)
+
+    def print_routing_table_stats(self):
+        self._routing_m.print_stats()
+    
+# Well-known nodes used to join the DHT on startup.
+# NOTE(review): constructed with only an address -- Node's id argument
+# is presumably optional; confirm in node.py.
+bootstrap_nodes = (
+    
+    Node(('67.215.242.138', 6881)), #router.bittorrent.com
+    Node(('192.16.127.98', 7005)), #KTH node
+    )
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/floodbarrier.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/floodbarrier.py
new file mode 100644 (file)
index 0000000..0ac3167
--- /dev/null
@@ -0,0 +1,95 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+"""
+Floodbarrier is a protection mechanism which protects us from
+host processing too many messages from a single host.
+
+"""
+
+import time
+import collections
+import logging, logging_conf
+
+logger = logging.getLogger('dht')
+
+
# Default rate-limit parameters for FloodBarrier (overridable per instance).
CHECKING_PERIOD = 2 # seconds
MAX_PACKETS_PER_PERIOD = 20
BLOCKING_PERIOD = 100 # seconds
+
class HalfPeriodRegister(object):

    """Helper class. Not meant to be used outside this module.

    Counts, per IP address, how many packets arrived during one half
    of the checking period.
    """

    def __init__(self):
        # ip -> number of packets registered for that ip
        self.ip_dict = {}

    def get_num_packets(self, ip):
        """Return the number of packets registered for ip (0 when unseen)."""
        return self.ip_dict.get(ip, 0)

    def register_ip(self, ip):
        """Account one more packet coming from ip."""
        num_packets = self.ip_dict.get(ip, 0)
        self.ip_dict[ip] = num_packets + 1
+
class FloodBarrier(object):

    """
    Object which keeps track of packets received from different
    hosts. Default values are coded but users can choose their own.
    The main function is ip_blocked().

    """

    def __init__(self, checking_period=CHECKING_PERIOD,
                 max_packets_per_period=MAX_PACKETS_PER_PERIOD,
                 blocking_period=BLOCKING_PERIOD):
        # checking_period: sliding window (seconds) over which packets count.
        # max_packets_per_period: packets allowed per window before blocking.
        # blocking_period: how long (seconds) a flooding IP stays blocked.
        self.checking_period = checking_period
        self.max_packets_per_period = max_packets_per_period
        self.blocking_period = blocking_period

        # The window is approximated with two half-period registers; every
        # half period the oldest register is dropped and a fresh one opened.
        self.last_half_period_time = time.time()
        self.ip_registers = [HalfPeriodRegister(), HalfPeriodRegister()]
        # ip -> absolute time (seconds) when the block on that ip expires
        self.blocked_ips = {}

    def ip_blocked(self, ip):
        """
        Register that a packet has been received from the given IP and return
        whether the host is blocked and, hence, the packet should be dropped

        """
        current_time = time.time()
        # Use true division: checking_period may be an odd integer.
        half_period = self.checking_period / 2.
        if current_time > self.last_half_period_time + half_period:
            if current_time > self.last_half_period_time \
                    + self.checking_period:
                # A whole period (or more) without packets: both halves
                # are stale, start from scratch.
                self.ip_registers = [HalfPeriodRegister(),
                                     HalfPeriodRegister()]
            else:
                # Rotate: drop the oldest half, open a fresh one.
                self.ip_registers = [self.ip_registers[1],
                                     HalfPeriodRegister()]
            # BUGFIX: advance the rotation clock. The original assigned a
            # write-only attribute (self.half_period_timeout), so
            # last_half_period_time never moved and the registers were
            # rotated on *every* call once the first half period had
            # elapsed, effectively disabling the flood barrier.
            self.last_half_period_time = current_time
        self.ip_registers[1].register_ip(ip)
        num_packets = self.ip_registers[0].get_num_packets(ip) + \
            self.ip_registers[1].get_num_packets(ip)
        if num_packets > self.max_packets_per_period:
            logger.debug('Got %d packets: blocking %r...' % (
                    num_packets, ip))
            self.blocked_ips[ip] = current_time + self.blocking_period
            return True
        # Not enough packets to block ip in the current period. Now check
        # whether the ip is still blocked from an earlier flood.
        if ip in self.blocked_ips:
            logger.debug('Ip %r (%d) currently blocked' % (ip,
                                                            num_packets))
            if current_time > self.blocked_ips[ip]:
                logger.debug(
                    'Block for %r (%d) has expired: unblocking...' %
                    (ip, num_packets))
                # Blocking period already expired
                del self.blocked_ips[ip]
                return False
            # IP is currently blocked (block hasn't expired)
            return True
        # IP is not blocked
        return False
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/identifier.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/identifier.py
new file mode 100644 (file)
index 0000000..7842167
--- /dev/null
@@ -0,0 +1,237 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+"""
+This module provides the Id object and necessary tools.
+
+"""
+
+import sys
+import random
+
+import logging
+
+logger = logging.getLogger('dht')
+
+
BITS_PER_BYTE = 8
ID_SIZE_BYTES = 20  # identifiers are 20 bytes (160 bits) long
ID_SIZE_BITS = ID_SIZE_BYTES * BITS_PER_BYTE
+
+
+def _bin_to_hex(bin_str):
+    """Convert a binary string to a hex string."""
+    hex_list = ['%02x' % ord(c) for c in bin_str]
+    return ''.join(hex_list)
+
+def _hex_to_bin_byte(hex_byte):
+    #TODO2: Absolutely sure there is a library function for this
+    hex_down = '0123456789abcdef'
+    hex_up = '0123456789ABDCEF'
+    value = 0
+    for i in xrange(2):
+        value *= 16
+        try:
+            value += hex_down.index(hex_byte[i])
+        except ValueError:
+            try:
+                value += hex_up.index(hex_byte[i])
+            except ValueError:
+                raise IdError
+    return chr(value)
+    
def _hex_to_bin(hex_str):
    """Convert a hex string into its binary-string equivalent."""
    digit_pairs = zip(hex_str[::2], hex_str[1::2])
    return ''.join([_hex_to_bin_byte(pair) for pair in digit_pairs])
+
+
+def _byte_xor(byte1, byte2):
+    """Xor two characters as if they were bytes."""
+    return chr(ord(byte1) ^ ord(byte2))
+               
+def _first_different_byte(str1, str2):
+    """Return the position of the first different byte in the strings.
+    Raise IndexError when no difference was found (str1 == str2).
+    """
+    for i in range(len(str1)):
+        if str1[i] != str2[i]:
+            return i
+    raise IndexError
+
def _first_different_bit(byte1, byte2):
    """Return the position of the first different bit in the bytes.
    The bytes must not be equal.

    """
    assert byte1 != byte2
    xored = ord(byte1) ^ ord(byte2)
    # Scan from the most significant bit down to the first set bit.
    position = 0
    mask = 1 << (BITS_PER_BYTE - 1)
    while not xored & mask:
        mask >>= 1
        position += 1
    return position
+
class IdError(Exception):
    """Raised when a value cannot be used as a (hex or binary) identifier."""
    pass
+
class Id(object):

    """Convert a string to an Id object.

    The bin_id string's length must be ID_SIZE_BYTES bytes (characters).

    You can use both binary and hexadecimal strings. Example
    #>>> Id('\x00' * ID_SIZE_BYTES) == Id('0' * ID_SIZE_BYTES * 2)
    #True
    #>>> Id('\xff' * ID_SIZE_BYTES) == Id('f' * ID_SIZE_BYTES * 2)
    #True
    """

    def __init__(self, hex_or_bin_id):
        # Accept either a 20-byte binary string or a 40-char hex string;
        # anything else is rejected with IdError.
        if not isinstance(hex_or_bin_id, str):
            raise IdError
        if len(hex_or_bin_id) == ID_SIZE_BYTES:
            self._bin_id = hex_or_bin_id
        elif len(hex_or_bin_id) == ID_SIZE_BYTES*2:
            self._bin_id = _hex_to_bin(hex_or_bin_id)
        else:
            raise IdError

    def __hash__(self):
        # Ids are immutable; hash the underlying binary string so Ids
        # can be used as dict keys / set members.
        return self.bin_id.__hash__()

    @property
    def bin_id(self):
        """bin_id is read-only."""
        return self._bin_id

    def __eq__(self, other):
        # NOTE(review): comparing against an object without a bin_id
        # attribute raises AttributeError rather than returning False.
        return self.bin_id == other.bin_id

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return self.bin_id

    def __repr__(self):
        return '<Id: %s>' % _bin_to_hex(self.bin_id)

    def distance(self, other):
        """
        Do XOR distance between two Id objects and return it as Id
        object.

        """
        byte_list = [_byte_xor(a, b) for a, b in zip(self.bin_id,
                                                     other.bin_id)]
        return Id(''.join(byte_list))

    def log_distance(self, other):
        """Return log (base 2) of the XOR distance between two Id
        objects. Return -1 when the XOR distance is 0.

        That is, this function returns 'n' when the distance between
        the two objects is [2^n, 2^(n+1)).
        When the two identifiers are equal, the distance is 0. Therefore
        log_distance is -infinity. In this case, -1 is returned.
        Example:
        >>> z = Id(chr(0) * ID_SIZE_BYTES)

        >>> # distance = 0 [-inf, 1) -> log(0) = -infinity
        >>> z.log_distance(z) 
        -1
        >>> # distance = 1 [1, 2) -> log(1) = 0
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(1)))
        0
        >>> # distance = 2 [2, 4) -> log(2) = 1
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(2)))
        1
        >>> # distance = 3 [2, 4) -> log(3) = 
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(3)))
        1
        >>> # distance = 4 [4, 8) -> log(2^2) = 2
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(4)))
        2
        >>> # distance = 5 [4, 8) -> log(5) = 2
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(5)))
        2
        >>> # distance = 6  [4, 8) -> log(6) = 2
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(6)))
        2
        >>> # distance = 7  [4, 8) -> log(7) = 2
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(7)))
        2
        >>> # distance = 128 = 2^(8*0+7)  [128, 256) -> log(7^2) = 7
        >>> z.log_distance(Id(chr(0)*(ID_SIZE_BYTES-1)+chr(128)))
        7
        >>> # distance = 2^(8*18+8) = 2^148+8 -> log(1) = 152
        >>> z.log_distance(Id(chr(1)+chr(0)*(ID_SIZE_BYTES-1)))
        152
        >>> # distance = 2^(8*19+1) = 2^159 -> log(1) = 159
        >>> z.log_distance(Id(chr(128)+chr(0)*(ID_SIZE_BYTES-1)))
        159

        """
        try:
            byte_i = _first_different_byte(self.bin_id, other.bin_id)
        except IndexError:
            # _first_different_byte didn't find differences, thus the
            # distance is 0 and log_distance is -1
            return -1
        # Whole bytes below the first differing byte.
        unmatching_bytes = ID_SIZE_BYTES - byte_i - 1
        byte1 = self.bin_id[byte_i]
        byte2 = other.bin_id[byte_i]
        bit_i = _first_different_bit(byte1, byte2)
        # unmatching_bits (in byte: from least significant bit)
        unmatching_bits = BITS_PER_BYTE - bit_i - 1
        return unmatching_bytes * BITS_PER_BYTE + unmatching_bits

    def order_closest(self, id_list):
        """Return a list with the Id objects in 'id_list' ordered
        according to the distance to self. The closest id first.
        
        The original list is not modified.

        """
        # Selection sort on log distance: repeatedly pick the closest
        # remaining id. O(n^2), acceptable for the short lists used here.
        id_list_copy = id_list[:]
        max_distance = ID_SIZE_BITS + 1
        log_distance_list = [] 
        for element in id_list:
            log_distance_list.append(self.log_distance(element))

        result = []
        for _ in range(len(id_list)):
            lowest_index = None
            lowest_distance = max_distance
            for j in range(len(id_list_copy)):
                if log_distance_list[j] < lowest_distance:
                    lowest_index = j
                    lowest_distance = log_distance_list[j]
            result.append(id_list_copy[lowest_index])
            del log_distance_list[lowest_index]
            del id_list_copy[lowest_index]
        return result

    def generate_close_id(self, log_distance):
        """Return a new Id at exactly the given log distance from self
        (flips a single bit). Return self when log_distance < 0."""
        if log_distance < 0:
            return self
        byte_num, bit_num = divmod(log_distance, BITS_PER_BYTE)
        byte_index = len(self.bin_id) - byte_num - 1 # -1 correction
        id_byte = self.bin_id[byte_index]
        id_byte = chr(ord(id_byte) ^ (1 << bit_num))
        bin_id = self.bin_id[:byte_index] +\
            id_byte + self.bin_id[byte_index + 1:]
        return Id(bin_id)
+
+    
class RandomId(Id):

    """Create a random Id object."""

    def __init__(self):
        random_chars = [chr(random.randint(0, 255))
                        for _ in xrange(ID_SIZE_BYTES)]
        Id.__init__(self, ''.join(random_chars))
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/interactive_dht.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/interactive_dht.py
new file mode 100644 (file)
index 0000000..684dcb3
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import time
+import sys
+
+import logging, logging_conf
# Default logging configuration; logs_path is overwritten below with
# the value from argv[3] when the script is invoked properly.
logs_path = '.'
logs_level = logging.DEBUG # This generates HUGE (and useful) logs
#logs_level = logging.INFO # This generates some (useful) logs
#logs_level = logging.WARNING # This generates warning and error logs
+
+import identifier
+import kadtracker
+
def peers_found(peers):
    """Callback invoked by the DHT with each batch of discovered peers."""
    print 'Peers found:'
    for peer in peers:
        print peer
    print 'Type an info_hash (in hex digits): ',
+
def lookup_done():
    """Callback meant to fire when a lookup completes (re-prompts user).

    NOTE(review): defined but never registered anywhere in this script —
    possibly dead code; confirm against the lookup API.
    """
    print 'Lookup DONE'
    print 'Type an info_hash (in hex digits): ',
+
# Run only when invoked as "interactive_dht.py" with three arguments:
# dht_ip dht_port log_path.
# NOTE(review): comparing sys.argv[0] against the bare script name fails
# when launched via a path (e.g. ./interactive_dht.py or
# python dir/interactive_dht.py); consider os.path.basename(sys.argv[0]).
if len(sys.argv) == 4 and sys.argv[0] == 'interactive_dht.py':
    logging_conf.setup(logs_path, logs_level)
    RUN_DHT = True
    my_addr = (sys.argv[1], int(sys.argv[2])) #('192.16.125.242', 7000)
    logs_path = sys.argv[3]
    # NOTE(review): KadTracker calls logging_conf.setup again with the
    # argv[3] path, so two handlers end up attached (one for '.').
    dht = kadtracker.KadTracker(my_addr, logs_path)
else:
    RUN_DHT = False
    print 'usage: python interactive_dht.py dht_ip dht_port log_path'

# Simple REPL: read info_hashes in hex and launch get_peers lookups.
print 'Type "exit" to stop the DHT and exit'
while (RUN_DHT):
    print 'Type an info_hash (in hex digits): ',
    input = sys.stdin.readline()[:-1]  # strip newline; shadows builtin input
    if input == 'exit':
        dht.stop()
        break
    try:
        info_hash = identifier.Id(input)
    except (identifier.IdError):
        print 'Invalid input (%s)' % input
        continue
    print 'Getting peers for info_hash %r' % info_hash
    dht.get_peers(info_hash, peers_found)
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/kadtracker.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/kadtracker.py
new file mode 100644 (file)
index 0000000..4af2916
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+"""
+This module is the API for the whole package.
+
+You can use the KadTracker class and its methods to interact with
+the DHT.
+
+Find usage examples in server_dht.py and interactive_dht.py.
+
+If you want to generate logs. You will have to setup logging_conf
+BEFORE importing this module. See the examples above for details.
+
+"""
+
+import controller
+import logging, logging_conf
+
class KadTracker:
    """Facade for the whole kadtracker package.

    Creating an instance is all that is needed to set up the DHT.
    Parameters:
    - dht_addr: (IP address, port number) pair this node uses.
    - logs_path: directory where the log files will be written.

    """
    def __init__(self, dht_addr, logs_path):
        logging_conf.setup(logs_path, logging.DEBUG)
        self.controller = controller.Controller(dht_addr)
        self.controller.start()

    def stop(self):
        """Stop the DHT."""
        self.controller.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        """Start a get peers lookup and return a Lookup object.

        info_hash must be an identifier.Id object.

        callback_f takes a single argument: each time peers are
        discovered it is invoked with a list of peer addresses
        (<IPv4, port> pairs).

        When the optional bt_port is given, ANNOUNCE messages are sent
        using that port number.

        """
        return self.controller.get_peers(info_hash, callback_f, bt_port)

    def print_routing_table_stats(self):
        """Print statistics of this node's routing table (debug helper)."""
        self.controller.print_routing_table_stats()

    #TODO2: Future Work
    #TODO2: def add_bootstrap_node(self, node_addr, node_id=None):
    #TODO2: def lookup.back_off()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/ktracker_example.py.no b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/ktracker_example.py.no
new file mode 100644 (file)
index 0000000..8b3fc73
--- /dev/null
@@ -0,0 +1,47 @@
+'''
+This example shows how ktracker can be used as a library
+'''
+
+import time
+
+import ktracker_query
+import ktracker
+
+
# UDP port this node's DHT listens on.
my_dht_port = 1111

# Known node used to join an existing DHT (node id is optional).
bootstrap_node_addr = ('127.0.0.1', 2222)
bootstrap_node_id = '0' * 20

# Torrent to query for, and the BitTorrent address we would announce.
info_hash = 'z' * 20
my_bt_addr = ('127.0.0.1', my_dht_port)
+
+
def on_query_event(query):
    """Callback: print the query's current status and its peer list."""
    print '-' * 40
    print 'Query status:', query.status
    print query.peers
    print '-' * 40
+
+
+
+
# NOTE(review): this rebinds the module name 'ktracker' to the instance,
# making the module unreachable afterwards.
ktracker = ktracker.KTracker(my_dht_port)
ktracker.start()

# join an existing DHT via bootstraping node (node_id is optional)
ktracker.add_bootstrap_node(bootstrap_node_addr)

# create an announce_and_get_peers query
query = ktracker_query.AnnounceAndGetPeersQuery(info_hash, my_bt_addr)
ktracker.do_query(query, on_query_event)

# let's give some time for the first query to finish
time.sleep(60)

# create a get_peers query
query = ktracker_query.GetPeersQuery(info_hash)
ktracker.do_query(query, on_query_event)

# we are done
ktracker.stop()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/ktracker_query.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/ktracker_query.py
new file mode 100644 (file)
index 0000000..2179738
--- /dev/null
@@ -0,0 +1,28 @@
+
class GetPeersLookup(object):

    """Represent an ongoing get_peers lookup: its target info_hash, a
    shared 'status' property and the peer batches collected so far."""

    def __init__(self, info_hash):
        self._info_hash = info_hash
        # BUGFIX: these attributes were never initialized, so
        # get_status() and add_peers() raised AttributeError until
        # set_status() had been called (and add_peers() always did).
        self._status = None
        self._peers = []

    @property
    def info_hash(self):
        """Target info_hash of this lookup (read-only)."""
        return self._info_hash

    def get_status(self):
        # NOTE(review): the '#lock' markers suggest these accessors were
        # meant to be lock-protected; no lock exists yet.
        #lock
        return self._status
    def set_status(self, query_status):
        #lock
        self._status = query_status
    status = property(get_status, set_status)

    def add_peers(self, peer_list):
        '''
        Library users should not use this method.
        '''
        # NOTE(review): appends the whole batch as one element (list of
        # lists); use extend() if a flat peer list is intended.
        #lock
        self._peers.append(peer_list)
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/logging_conf.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/logging_conf.py
new file mode 100644 (file)
index 0000000..2f09c13
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import sys
+import os
+import logging
+
# Format used by the file handlers installed by setup()/testing_setup().
FORMAT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s - %(funcName)s()\n\
%(message)s\n'

# Route the root logger to /dev/null: the 'dht' logger gets its own
# file handlers via setup()/testing_setup() below.
devnullstream = open(os.devnull,"w")

logging.basicConfig(level=logging.CRITICAL,
                   format='%(asctime)s %(levelname)-8s %(message)s',
                   datefmt='%a, %d %b %Y %H:%M:%S',
                   stream=devnullstream)
+
def testing_setup(module_name):
    """Attach a test_logs/<module_name>.log handler to the 'dht' logger.

    The logger level is raised above CRITICAL, so logging is effectively
    disabled despite the handler being installed.
    """
    logger = logging.getLogger('dht')
    # Arno, 2010-06-11: Alt way of disabling logging from DHT instead of global
    logger.setLevel(logging.CRITICAL + 100)
    log_file = os.path.join('test_logs', str(module_name) + '.log')

    handler = logging.FileHandler(log_file, 'w')
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(FORMAT))
    logger.addHandler(handler)
+
def setup(logs_path, logs_level):
    """Send the 'dht' logger's output to <logs_path>/dht.log at logs_level."""
    logger = logging.getLogger('dht')
    logger.setLevel(logs_level)

    log_file = os.path.join(logs_path, 'dht.log')
    #print "Logging to", log_file
    handler = logging.FileHandler(log_file, 'w')
    handler.setLevel(logs_level)
    handler.setFormatter(logging.Formatter(FORMAT))
    logger.addHandler(handler)
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/lookup_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/lookup_manager.py
new file mode 100644 (file)
index 0000000..7d29f95
--- /dev/null
@@ -0,0 +1,260 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import sys
+import threading
+
+import logging
+
+import identifier as identifier
+import message as message
+
+
+logger = logging.getLogger('dht')
+
+
# Upper bound on in-flight queries per lookup; halved every time a
# response carrying peers arrives (see GetPeersLookup._on_response).
MAX_PARALLEL_QUERIES = 16

# Number of closest nodes the final announce is sent to.
ANNOUNCE_REDUNDANCY = 3
+
+class _QueuedNode(object):
+
+    def __init__(self, node_, log_distance):
+        self.node = node_
+        self.log_distance = log_distance
+        self.queried = False
+
class _LookupQueue(object):

    """Size-bounded queue of nodes ordered by log distance to target_id."""

    def __init__(self, target_id, queue_size):
        self.target_id = target_id
        self.queue_size = queue_size
        # Sentinel with an impossibly large log distance: add() relies
        # on it so the insertion loop below always hits 'break'.
        self.queue = [_QueuedNode(None, identifier.ID_SIZE_BITS+1)]
        # Queued_ips is used to prevent that many Ids are
        # claimed from a single IP address.
        self.queued_ips = {}

    def add(self, nodes):
        """Insert nodes sorted by distance, de-duplicated by IP, and
        truncate the queue to queue_size (sentinel included)."""
        for node_ in nodes:
            if node_.ip in self.queued_ips:
                continue # Already queued
            self.queued_ips[node_.ip] = None

            log_distance = self.target_id.log_distance(node_.id)
            # Find the insertion point; 'i' is used after the loop,
            # which is safe only because of the sentinel.
            for i, qnode in enumerate(self.queue):
                if log_distance < qnode.log_distance:
                    break
            self.queue = self.queue[:i] \
                + [_QueuedNode(node_, log_distance)] \
                + self.queue[i:self.queue_size-1]

    def pop_closest_node(self):
        """ Raise IndexError when empty queue"""
        for qnode in self.queue:
            if qnode.node and not qnode.queried:
                qnode.queried = True
                return qnode.node
        raise IndexError('No more nodes in the queue.')
+
+   
class GetPeersLookup(object):
    """DO NOT use underscored variables, they are thread-unsafe.
    Variables without leading underscore are thread-safe.

    All nodes in bootstrap_nodes MUST have ID.
    """

    def __init__(self, my_id, querier_, max_parallel_queries,
                 info_hash, callback_f, bootstrap_nodes,
                 bt_port=None):
        logger.debug('New lookup (info_hash: %r)' % info_hash)
        self._my_id = my_id
        self._querier = querier_
        self._max_parallel_queries = max_parallel_queries
        self._get_peers_msg = message.OutgoingGetPeersQuery(
            my_id, info_hash)
        self._callback_f = callback_f
        self._lookup_queue = _LookupQueue(info_hash,
                                          max_parallel_queries * 2)
        self._lookup_queue.add(bootstrap_nodes)
        self._num_parallel_queries = 0

        self._info_hash = info_hash
        self._bt_port = bt_port
        self._lock = threading.RLock()

        # (log_distance, node, token) of the closest responding nodes;
        # these receive the announce when the lookup finishes.
        self._announce_candidates = []
        self._num_responses_with_peers = 0
        self._is_done = False

    @property
    def is_done(self):
        """Whether the lookup has finished (thread-safe)."""
        #with self._lock:
        self._lock.acquire()
        try:
            is_done = self._is_done
        finally:
            self._lock.release()
        return is_done

    @property
    def num_parallel_queries(self):
        """Number of queries currently in flight (thread-safe)."""
        #with self._lock:
        self._lock.acquire()
        try:
            num_parallel_queries = self._num_parallel_queries
        finally:
            self._lock.release()
        return num_parallel_queries

    def start(self):
        """Kick off the lookup by querying the closest known nodes."""
        self._send_queries()

    def _on_response(self, response_msg, node_):
        # Querier callback: node_ answered our get_peers query.
        logger.debug('response from %r\n%r' % (node_,
                                                response_msg))
        #with self._lock:
        self._lock.acquire()
        try:
            self._num_parallel_queries -= 1
            try:
                peers = response_msg.peers
                logger.debug('PEERS\n%r' % peers)
                self._num_responses_with_peers += 1
                #TODO2: Halve queue size as well?
                # We've got some peers, let's back off a little
                self._max_parallel_queries = max(
                    self._max_parallel_queries / 2, 1)
                self._callback_f(peers)
            except (AttributeError):
                # Response carried no peers.
                pass
            nodes = []
            try:
                nodes.extend(response_msg.nodes)
            except (AttributeError):
                pass
            try:
                nodes.extend(response_msg.nodes2)
            except (AttributeError):
                pass
            logger.info('NODES: %r' % (nodes))
            # NOTE(review): response_msg.token is accessed unguarded; a
            # response without a token would raise AttributeError here.
            self._add_to_announce_candidates(node_,
                                             response_msg.token)
            self._lookup_queue.add(nodes)
            self._send_queries()
        finally:
            self._lock.release()

    def _on_timeout(self, node_):
        # Querier callback: node_ did not answer in time.
        logger.debug('TIMEOUT node: %r' % node_)
        #with self._lock:
        self._lock.acquire()
        try:
            self._num_parallel_queries -= 1
            self._send_queries()
        finally:
            self._lock.release()

    def _on_error(self, error_msg, node_):
        # Querier callback: node_ replied with an error message.
        logger.debug('ERROR node: %r' % node_)
        #with self._lock:
        self._lock.acquire()
        try:
            self._num_parallel_queries -= 1
            self._send_queries()
        finally:
            self._lock.release()

    def _send_queries(self):
        # Fill the parallelism budget with queries to the closest
        # not-yet-queried nodes. Safe to call while holding the lock
        # (RLock is re-entrant).
        #with self._lock:
        self._lock.acquire()
        try:
            while self._num_parallel_queries < self._max_parallel_queries:
                try:
                    node_ = self._lookup_queue.pop_closest_node()
                    logger.debug('popped node %r' % node_)
                except(IndexError):
                    logger.debug('no more candidate nodes!')
                    if not self._num_parallel_queries:
                        # Nothing in flight and nothing left to query:
                        # the lookup is complete.
                        logger.debug('Lookup DONE')
                        self._announce()
                    return
                if node_.id == self._my_id:
                    # Don't send to myself
                    continue
                self._num_parallel_queries += 1
                logger.debug('sending to: %r, parallelism: %d/%d' %
                    (node_,
                     self._num_parallel_queries,
                     self._max_parallel_queries))
                self._querier.send_query(self._get_peers_msg, node_,
                                         self._on_response,
                                         self._on_timeout,
                                         self._on_error)
        finally:
            self._lock.release()

    def _add_to_announce_candidates(self, node_, token):
        # Keep the ANNOUNCE_REDUNDANCY closest responding nodes, sorted
        # by log distance to the info_hash. A single insertion-sort pass
        # suffices: the rest of the list is already sorted.
        node_log_distance = node_.id.log_distance(self._info_hash)
        self._announce_candidates.append((node_log_distance,
                                          node_,
                                          token))
        for i in xrange(len(self._announce_candidates)-1, 0, -1):
            # BUGFIX: compare log distances (element [0]); the original
            # compared the Node objects themselves (element [1]),
            # producing an arbitrary candidate ordering.
            if self._announce_candidates[i][0] \
                    < self._announce_candidates[i-1][0]:
                tmp1, tmp2 = self._announce_candidates[i-1:i+1]
                self._announce_candidates[i-1:i+1] = tmp2, tmp1
            else:
                break
        self._announce_candidates = \
            self._announce_candidates[:ANNOUNCE_REDUNDANCY]

    def _do_nothing(self, *args, **kwargs):
        # Sink callback used for the fire-and-forget announce queries.
        #TODO2: generate logs
        pass

    def _announce(self):
        # Send announce_peer (with the tokens collected during the
        # lookup) to the closest candidates, if a BT port was given.
        self._is_done = True
        if not self._bt_port:
            return
        for (_, node_, token) in self._announce_candidates:
            logger.debug('announcing to %r' % node_)
            msg = message.OutgoingAnnouncePeerQuery(
                self._my_id, self._info_hash, self._bt_port, token)
            self._querier.send_query(msg, node_,
                                     self._do_nothing,
                                     self._do_nothing,
                                     self._do_nothing)

    def _get_announce_candidates(self):
        return [e[1] for e in self._announce_candidates]
+        
class LookupManager(object):

    """Factory for GetPeersLookup objects sharing this node's id,
    querier and routing manager."""

    def __init__(self, my_id, querier_, routing_m,
                 max_parallel_queries=MAX_PARALLEL_QUERIES):
        self.my_id = my_id
        self.querier = querier_
        self.routing_m = routing_m
        self.max_parallel_queries = max_parallel_queries

    def get_peers(self, info_hash, callback_f, bt_port=None):
        """Create and start a lookup seeded with the routing table's
        closest nodes to info_hash; return the lookup object."""
        bootstrap = self.routing_m.get_closest_rnodes(info_hash)
        lookup = GetPeersLookup(self.my_id, self.querier,
                                self.max_parallel_queries, info_hash,
                                callback_f, bootstrap, bt_port)
        lookup.start()
        return lookup

    def stop(self):
        """Stop the underlying querier."""
        self.querier.stop()
+
+
+#TODO2: During the lookup, routing_m gets nodes_found and sends find_node
+        # to them (in addition to the get_peers sent by lookup_m)
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/message.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/message.py
new file mode 100644 (file)
index 0000000..85ee830
--- /dev/null
@@ -0,0 +1,521 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+"""
+This module provides message classes.
+
+Outgoing messages are built from a few parameters. They are immutable and can be
+reused (TID is not part of the message).
+
+Incoming messages are built from bencoded data. They are immutable and must be
+sanitized before attempting to use message's attributes.
+
+"""
+
+import sys
+
+import logging
+
+import bencode
+from identifier import Id, ID_SIZE_BYTES, IdError
+from node import Node
+
+
+logger = logging.getLogger('dht')
+
+
+# Version prefix identifying NextShare clients ('NS' + three NULs);
+# only the first two bytes are checked (see _sanitize_common).
+NEXTSHARE = 'NS\0\0\0'
+
+# High level keys
+TYPE = 'y'     # Message's type
+ARGS = 'a'     # Query's arguments in a dictionary
+RESPONSE = 'r' # Reply dictionary
+ERROR = 'e'    # Error message string
+TID = 't'      # Transaction ID
+QUERY = 'q'    # Query command (only for queries)
+VERSION = 'v'  # Client's version
+
+# Valid values for key TYPE
+# (These deliberately re-bind QUERY/RESPONSE/ERROR to the same one-char
+# values used as top-level keys above; the bindings are interchangeable.)
+QUERY = 'q'    # Query
+RESPONSE = 'r' # Response
+ERROR = 'e'    # Error
+
+# Valid values for key QUERY
+PING = 'ping'
+FIND_NODE = 'find_node'
+GET_PEERS = 'get_peers'
+ANNOUNCE_PEER = 'announce_peer'
+
+# Valid keys for ARGS
+ID = 'id'         # Node's nodeID (all queries)
+TARGET = 'target' # Target's nodeID (find_node)
+INFO_HASH = 'info_hash' # Torrent's info_hash (get_peers and announce)
+PORT = 'port'     # BitTorrent port (announce)
+TOKEN = 'token'   # Token (announce)
+
+# Valid keys for RESPONSE
+# (ID and TOKEN are re-bound here with identical values; harmless.)
+ID = 'id'         # Node's nodeID (all replies)
+NODES = 'nodes'   # String of nodes in compact format (find_nodes and get_peers)
+NODES2 = 'nodes2' # Same as previous (with IPv6 support)
+TOKEN = 'token'   # Token (get_peers)
+VALUES = 'values' # List of peers in compact format (get_peers)
+
+# Valid values for ERROR
+GENERIC_E = [201, 'Generic Error']
+SERVER_E = [202, 'Server Error']
+PROTOCOL_E = [203, 'Protocol Error']
+UNKNOWN_E = [201, 'Method Unknown']
+
+BLANK = 'BLANK'
+BENCODED_BLANK = bencode.encode(BLANK)
+
+# Valid values for TID and VERSION
+#  binary string
+
+
+
+IP4_SIZE = 4 #bytes
+IP6_SIZE = 16 #bytes
+ADDR4_SIZE = IP4_SIZE + 2 # IPv4 address plus port
+ADDR6_SIZE = IP6_SIZE + 2 # IPv6 address plus port
+C_NODE_SIZE = ID_SIZE_BYTES + ADDR4_SIZE
+C_NODE2_SIZE = ID_SIZE_BYTES + ADDR6_SIZE
+
+IP6_PADDING = '\0' * 10 + '\xff\xff'
+
+
+class AddrError(Exception):
+    """Raised when a (compact) network address cannot be handled."""
+    pass
+
+#class IP6Addr(AddrError):
+#    pass
+# TODO2: deal with IPv6 address (we ignore them now)
+
+def bin_to_int(bin_str):
+    return ord(bin_str[0]) * 256 + ord(bin_str[1])
+
+def int_to_bin(i):
+    return chr(i/256) + chr(i%256)
+
+def bin_to_ip(bin_str):
+    if len(bin_str) == IP4_SIZE:
+        return '.'.join([str(ord(b)) for b in bin_str])
+    if len(bin_str) != IP6_SIZE:
+        raise MsgError, 'compact_ip: invalid size (%d)' % len(bin_str)
+    if not bin_str.startswith(IP6_PADDING):
+        raise AddrError, 'IPv4 and v6 should not be mixed!'
+    c_ip = bin_str[len(IP6_PADDING):]
+    return '.'.join([`ord(byte)` for byte in c_ip])
+
+def ip_to_bin(ip_str):
+    return ''.join([chr(int(b)) for b in ip_str.split('.')])
+
+def compact_addr(addr):
+    return ''.join((ip_to_bin(addr[0]), int_to_bin(addr[1])))
+
+def uncompact_addr(c_addr):
+    if c_addr[-2:] == '\0\0':
+        logger.warning('c_addr: %r > port is ZERO' % c_addr)
+        raise AddrError
+    return (bin_to_ip(c_addr[:-2]), bin_to_int(c_addr[-2:]))
+
+def _compact_peers(peers):
+    return [compact_addr(peer) for peer in peers]
+
+def _uncompact_peers(c_peers):
+    """Decode a list of compact peer addresses, dropping invalid ones.
+
+    Entries that fail to decode (e.g. port zero) are skipped rather
+    than aborting the whole list.
+    """
+    peers = []
+    for c_peer in c_peers:
+        try:
+            peers.append(uncompact_addr(c_peer))
+        except (AddrError):
+            pass
+    return peers
+
+def _compact_nodes(nodes):
+    """Concatenate nodes into the compact 'nodes' wire format (id+addr)."""
+    return ''.join([node.id.bin_id + compact_addr(node.addr) \
+                    for node in nodes])
+    
+def _uncompact_nodes(c_nodes):
+    if len(c_nodes) % C_NODE_SIZE != 0: 
+        raise MsgError, 'invalid size (%d) %s' % (len(c_nodes),
+                                                  c_nodes)
+    nodes = []
+    for begin in xrange(0, len(c_nodes), C_NODE_SIZE):
+        node_id = Id(c_nodes[begin:begin + ID_SIZE_BYTES])
+        try:
+            node_addr = uncompact_addr(
+                c_nodes[begin+ID_SIZE_BYTES:begin+C_NODE_SIZE])
+        except AddrError:
+            pass
+        else:
+            node = Node(node_addr, node_id)
+            nodes.append(node)
+    return nodes
+
+def _compact_nodes2(nodes):
+    """Compact nodes in 'nodes2' form: one string per node, with the
+    IPv4 address mapped into IPv6 (IP6_PADDING prefix)."""
+    return [node.id.bin_id + IP6_PADDING + compact_addr(node.addr) \
+            for node in nodes]
+    
+def _uncompact_nodes2(c_nodes):
+    """Parse a list of compact 'nodes2' entries into Node objects.
+
+    Genuine IPv6 addresses are not supported: they are logged and
+    skipped (uncompact_addr raises AddrError via bin_to_ip).
+    """
+    nodes = []
+    for c_node in c_nodes:
+        node_id = Id(c_node[:ID_SIZE_BYTES])
+        try:
+            node_addr = uncompact_addr(c_node[ID_SIZE_BYTES:]) 
+        except (AddrError):
+            logger.warning('IPv6 addr in nodes2: %s' % c_node)
+        else:
+            node = Node(node_addr, node_id)
+            nodes.append(node)
+    return nodes
+        
+
+def matching_tid(query_tid, response_tid):
+    return query_tid[0] == response_tid[0]
+
+
+
+# Prebuilt outgoing-message templates.  Each dict below is bencoded
+# once with BLANK placeholders; the result is split on BENCODED_BLANK
+# so OutgoingMsgBase.encode() only bencodes the per-message values and
+# joins them with these static chunks.
+MSG_DICTS = {}
+
+MSG_DICTS['og_ping_q'] = {VERSION: NEXTSHARE,
+                            TID: BLANK,
+                            TYPE: QUERY,
+                            QUERY: PING,
+                            ARGS: {ID: BLANK}
+                            }
+MSG_DICTS['og_find_node_q'] = {VERSION: NEXTSHARE,
+                                 TID: BLANK,
+                                 TYPE: QUERY,
+                                 QUERY: FIND_NODE,
+                                 ARGS: {ID: BLANK, TARGET: BLANK}
+                                 }
+MSG_DICTS['og_get_peers_q'] = {VERSION: NEXTSHARE,
+                                  TID: BLANK,
+                                  TYPE: QUERY,
+                                  QUERY: GET_PEERS,
+                                  ARGS: {ID: BLANK, INFO_HASH: BLANK}
+                                  }
+MSG_DICTS['og_announce_peer_q'] = {VERSION: NEXTSHARE,
+                                   TID: BLANK,
+                                   TYPE: QUERY,
+                                   QUERY: ANNOUNCE_PEER,
+                                   ARGS: {ID: BLANK, INFO_HASH: BLANK,
+                                          PORT: BLANK, TOKEN: BLANK}
+                                   }
+
+MSG_DICTS['og_ping_r'] = {VERSION: NEXTSHARE,
+                               TID: BLANK,
+                               TYPE: RESPONSE,
+                               RESPONSE: {ID: BLANK}
+                               }
+MSG_DICTS['og_find_node_r'] = {VERSION: NEXTSHARE,
+                               TID: BLANK,
+                               TYPE: RESPONSE,
+                               RESPONSE: {ID: BLANK, NODES2: BLANK}
+                                   }
+MSG_DICTS['og_get_peers_r_nodes'] = {VERSION: NEXTSHARE,
+                                      TID: BLANK,
+                                     TYPE: RESPONSE,
+                                     RESPONSE: {ID: BLANK, NODES2: BLANK,
+                                                TOKEN: BLANK}
+                                    }
+MSG_DICTS['og_get_peers_r_values'] = {VERSION: NEXTSHARE,
+                                      TID: BLANK,
+                                     TYPE: RESPONSE,
+                                     RESPONSE: {ID: BLANK, VALUES: BLANK,
+                                                TOKEN: BLANK}
+                                    }
+MSG_DICTS['og_announce_peer_r'] = {VERSION: NEXTSHARE,
+                                  TID: BLANK,
+                                  TYPE: RESPONSE,
+                                  RESPONSE: {ID: BLANK}
+                                  }
+MSG_DICTS['og_error'] = {VERSION: NEXTSHARE,
+                         TID: BLANK,
+                         TYPE: ERROR,
+                         ERROR: BLANK
+                         }
+# Split each bencoded template into its static chunks.  NB: the chunk
+# order follows bencode's (sorted-key) output, not dict literal order;
+# each Outgoing* class lists its _values in that same order.
+BENCODED_MSG_TEMPLATES = {}
+for msg_type, msg_dict in MSG_DICTS.iteritems():
+    bencoded_msg = bencode.encode(msg_dict)
+    BENCODED_MSG_TEMPLATES[msg_type] = bencoded_msg.split(BENCODED_BLANK)
+
+
+class MsgError(Exception):
+    """Raised whenever a message cannot be built, decoded or sanitized.
+
+    """
+
+
+class OutgoingMsgBase(object):
+    """Base class for outgoing messages. You shouldn't have instances of it.
+
+    Subclasses must set:
+      _bencoded_msg: list of static bencoded chunks (template split on
+          the BLANK placeholder, see BENCODED_MSG_TEMPLATES).
+      _values: per-message values to splice between the chunks; the
+          last slot is reserved for the TID, filled in by encode().
+    """
+    
+    def __str__(self):
+        return str(self._bencoded_msg) + str(self._values)
+
+    def __repr__(self):
+        return str(self.__class__) + str(self)
+
+    def encode(self, tid):
+        """Return the complete bencoded message with tid spliced in.
+
+        NOTE(review): this mutates _values[-1]; re-encoding with a new
+        tid works, but concurrent encodes of one instance would race --
+        confirm instances are only used from one thread.
+        """
+        self._values[-1] = tid
+        num_blank_slots  = len(self._bencoded_msg) -1
+        # Reserve space for prebencoded chunks and blank slots.
+        splitted_msg = [None] * (len(self._bencoded_msg) + num_blank_slots)
+        # Let's fill in every blank slot.
+        for i in range(num_blank_slots):
+            splitted_msg[2*i] = self._bencoded_msg[i] # prebencoded chunk
+            splitted_msg[2*i+1] = bencode.encode(self._values[i]) # value
+        splitted_msg[-1] = self._bencoded_msg[-1] # last prebencoded chunk
+        return ''.join(splitted_msg) # put all bencode in a single string
+      
+
+class OutgoingPingQuery(OutgoingMsgBase):
+    """Outgoing ping query (args: sender id only)."""
+    
+    def __init__(self, sender_id):
+        self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_ping_q']
+        self._values = [sender_id.bin_id,
+                        ''] #TID
+        self.query = PING
+
+        
+class OutgoingFindNodeQuery(OutgoingMsgBase):
+    """Outgoing find_node query (args: sender id, target id)."""
+
+    def __init__(self, sender_id, target_id):
+        self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_find_node_q']
+        self._values = [sender_id.bin_id,
+                        target_id.bin_id,
+                        ''] #TID
+        self.query = FIND_NODE
+
+
+class OutgoingGetPeersQuery(OutgoingMsgBase):
+    """Outgoing get_peers query (args: sender id, torrent info_hash)."""
+
+    def __init__(self, sender_id, info_hash):
+        self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_get_peers_q']
+        self._values = [sender_id.bin_id,
+                        info_hash.bin_id,
+                        ''] #TID
+        self.query = GET_PEERS
+
+
+class OutgoingAnnouncePeerQuery(OutgoingMsgBase):
+    """Outgoing announce_peer query.
+
+    token must be the one previously returned by the queried node in
+    its get_peers response.
+    """
+    
+    def __init__(self, sender_id, info_hash, port, token):
+        self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_announce_peer_q']
+        self._values = [sender_id.bin_id,
+                        info_hash.bin_id,
+                        port,
+                        token,
+                        ''] #TID
+        self.query = ANNOUNCE_PEER
+
+
+class OutgoingPingResponse(OutgoingMsgBase):
+    """Outgoing response to a ping query."""
+
+    def __init__(self, sender_id):
+        self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_ping_r']
+        self._values = [sender_id.bin_id,
+                        ''] #TID
+
+
+class OutgoingFindNodeResponse(OutgoingMsgBase):
+    """Outgoing response to find_node.
+
+    Only the 'nodes2' format is implemented; passing 'nodes' raises
+    MsgError.
+    """
+
+    def __init__(self, sender_id, nodes2=None, nodes=None):
+        if nodes is not None:
+            raise MsgError, 'not implemented'
+        if nodes2 is not None:
+            self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_find_node_r']
+            self._values = [sender_id.bin_id,
+                            _compact_nodes2(nodes2),
+                            ''] #TID
+        else:
+            raise MsgError, 'must have nodes OR nodes2'
+                          
+class OutgoingGetPeersResponse(OutgoingMsgBase):
+    """Outgoing response to get_peers: peers when known, else nodes2.
+
+    A non-empty 'peers' takes precedence over 'nodes2'.  The _values
+    ordering differs between the two branches because it must follow
+    each template's bencoded (sorted-key) chunk order.
+    """
+
+    def __init__(self, sender_id, token,
+                 nodes2=None, peers=None):
+        if peers:
+            self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_get_peers_r_values']
+            self._values = [sender_id.bin_id,
+                            token,
+                            _compact_peers(peers),
+                            ''] #TID
+            
+        elif nodes2:
+            self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_get_peers_r_nodes']
+            self._values = [sender_id.bin_id,
+                            _compact_nodes2(nodes2),
+                            token,
+                            ''] #TID
+        else:
+            raise MsgError, 'must have nodes OR peers'
+
+class OutgoingAnnouncePeerResponse(OutgoingMsgBase):
+    """Outgoing response to announce_peer."""
+    
+    def __init__(self, sender_id):
+        self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_announce_peer_r']
+        self._values = [sender_id.bin_id,
+                        ''] #TID
+
+class OutgoingErrorMsg(OutgoingMsgBase):
+
+    def __init__(self, error):
+        self._bencoded_msg = BENCODED_MSG_TEMPLATES['og_error']
+        self._values = [error,
+                        ''] #TID
+        return
+
+    
+class IncomingMsg(object):
+
+    def __init__(self, bencoded_msg):
+        try:
+            self._msg_dict = bencode.decode(bencoded_msg)
+        except (bencode.DecodeError):
+            logger.exception('invalid bencode')
+            raise MsgError, 'invalid bencode'
+        # Make sure the decoded data is a dict and has a TID key
+        try:
+            self.tid = self._msg_dict[TID]
+        except (TypeError):
+            raise MsgError, 'decoded data is not a dictionary'
+        except (KeyError): 
+            raise MsgError, 'key TID not found'
+        # Sanitize TID
+        if not (isinstance(self.tid, str) and self.tid):
+            raise MsgError, 'TID must be a non-empty binary string'
+
+        # Sanitize TYPE
+        try:
+            self.type = self._msg_dict[TYPE]
+        except (KeyError):
+            raise MsgError, 'key TYPE not found'
+
+        if not self.type in (QUERY, RESPONSE, ERROR):
+            raise MsgError, 'Unknown TYPE value'
+        if self.type == QUERY:
+            self._sanitize_query()
+        elif self.type == ERROR:
+            self._sanitize_error()
+        return
+
+    def __repr__(self):
+        return repr(self._msg_dict)
+
+
+    def _get_value(self, k, kk=None, optional=False):
+        try:
+            v = self._msg_dict[k]
+            if kk:
+                v = v[kk]
+        except (KeyError):
+            if optional:
+                return None
+            else:
+                raise MsgError, 'Non-optional key (%s:%s) not found' % (k, kk)
+        except (TypeError):
+            raise MsgError, 'Probably k (%r) is not a dictionary' % (k)
+        return v
+    
+    def _get_str(self, k, kk=None, optional=False):
+        v = self._get_value(k, kk, optional)
+        if v is None:
+            return None
+        if not isinstance(v, str):
+            raise MsgError, 'Value (%s:%s,%s) must be a string' % (k, kk, v)
+        return v
+
+    def _get_id(self, k, kk=None):
+        try:
+            v = self._get_value(k, kk)
+            v = Id(v)
+        except (IdError):
+            raise MsgError, 'Value (%s:%s,%s) must be a valid Id' % (k, kk, v)
+        return v
+
+    def _get_int(self, k, kk=None):
+        v = self._get_value(k, kk)
+        try:
+            v= int(v)
+        except (TypeError, ValueError):
+            raise MsgError, 'Value (%s:%s,%s) must be an int' % (k, kk, v)
+        return v
+    
+    def _sanitize_common(self):
+        # version (optional)
+        self.version = self._get_str(VERSION, optional=True)
+        self.ns_node = self.version \
+            and self.version.startswith(NEXTSHARE[:2])
+    
+    def _sanitize_query(self):
+        self._sanitize_common()
+        # sender_id
+        self.sender_id = self._get_id(ARGS, ID)
+        # query
+        self.query = self._get_str(QUERY)
+        if self.query in [GET_PEERS, ANNOUNCE_PEER]:
+            # info_hash
+            self.info_hash = self._get_id(ARGS, INFO_HASH)
+            if self.query == ANNOUNCE_PEER:
+                self.port = self._get_int(ARGS, PORT)
+                self.token = self._get_str(ARGS, TOKEN)
+        elif self.query == FIND_NODE:
+            # target
+            self.target = self._get_id(ARGS, TARGET)
+        return
+        
+    def sanitize_response(self, query):
+        self._sanitize_common()
+        # sender_id
+        self.sender_id = self._get_id(RESPONSE, ID)
+        if query in [FIND_NODE, GET_PEERS]:
+            # nodes
+            nodes_found = False
+            c_nodes = self._get_str(RESPONSE, NODES, optional=True)
+            if c_nodes:
+                self.nodes = _uncompact_nodes(c_nodes)
+                nodes_found = True
+            # nodes2
+            try:
+                self.nodes2 = _uncompact_nodes2(
+                    self._msg_dict[RESPONSE][NODES2])
+                if nodes_found:
+                    logger.info('Both nodes and nodes2 found')
+                nodes_found = True
+            except (KeyError):
+                pass
+        if query == FIND_NODE:
+            if not nodes_found:
+                logger.warning('No nodes in find_node response')
+                raise MsgError, 'No nodes in find_node response'
+        elif query == GET_PEERS:
+            # peers
+            try:
+                self.peers = _uncompact_peers(
+                    self._msg_dict[RESPONSE][VALUES])
+                if nodes_found:
+                    logger.debug(
+                        'Nodes and peers found in get_peers response')
+            except (KeyError):
+                if not nodes_found:
+                    logger.warning(
+                        'No nodes or peers found in get_peers response')
+                    raise (MsgError,
+                           'No nodes or peers found in get_peers response')
+            # token
+            self.token = self._get_str(RESPONSE, TOKEN)
+            
+    def _sanitize_error(self):
+        self._sanitize_common()
+        try:
+            self.error = [int(self._msg_dict[ERROR][0]),
+                          str(self._msg_dict[ERROR][1])]
+        except (KeyError, IndexError, ValueError, TypeError):
+            raise MsgError, 'Invalid error message'
+        if self.error not in [GENERIC_E, SERVER_E, PROTOCOL_E, UNKNOWN_E]:
+            logger.info('Unknown error: %s', self.error)
+            
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/minitwisted.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/minitwisted.py
new file mode 100644 (file)
index 0000000..1ad5819
--- /dev/null
@@ -0,0 +1,312 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+'''
+Minitwisted is inspired by the Twisted framework. Although, it is much
+simpler.
+- It can only handle one UDP connection per reactor.
+- Reactor runs in a thread
+- You can use call_later and call_now to run your code in thread-safe mode
+
+'''
+
+#from __future__ import with_statement
+
+import sys
+import socket
+import threading
+import time
+
+import logging
+
+from floodbarrier import FloodBarrier
+
+logger = logging.getLogger('dht')
+
+
+BUFFER_SIZE = 1024
+
+
+class Task(object):
+    
+    '''Simple container for a task '''
+
+    def __init__(self, delay, callback_fs, *args, **kwds):
+        '''
+        Create a task instance. Here is when the call time is calculated.
+
+        callback_fs may be a single callable or a list of callables;
+        all are called with *args/**kwds when the task fires.
+        '''
+        self.delay = delay
+        if callable(callback_fs):
+            # single callback
+            self.callback_fs = [callback_fs]
+        else:
+            self.callback_fs = callback_fs
+        self.args = args
+        self.kwds = kwds
+        self.call_time = time.time() + self.delay
+        self._cancelled = False
+
+    @property
+    def cancelled(self):
+        # True once cancel() has been called; fire_callbacks() then no-ops.
+        return self._cancelled
+    
+    def fire_callbacks(self):
+        """Fire the callbacks (if the task hasn't been cancelled).
+
+        Must be called at most once: the callback/argument references
+        are deleted afterwards (see below), so a second call raises
+        AttributeError.
+        """
+        if not self._cancelled:
+            for callback_f in self.callback_fs:
+                callback_f(*self.args, **self.kwds)
+        '''
+        Tasks usually have arguments which reference to the objects which
+        created the task. That is, they create a memory cycle. In order
+        to break the memoery cycle, those arguments are deleted.
+        '''
+        del self.callback_fs
+        del self.args
+        del self.kwds
+
+    def cancel(self):
+        """Cancel a task (callback won't be called when fired)"""
+        self._cancelled = True
+        
+
+class TaskManager(object):
+
+    """Manage tasks"""
+
+    def __init__(self):
+        # Maps delay-in-ms -> FIFO list of Tasks created with that delay.
+        # Each per-delay list is ordered by call_time (insertion order).
+        self.tasks = {}
+        # Cached task with the earliest call_time (None when empty).
+        self.next_task = None
+
+    def add(self, task):
+        """Add task to the TaskManager"""
+        
+        ms_delay = int(task.delay * 1000)
+        # bucket by integer milliseconds so equal delays share one list
+        self.tasks.setdefault(ms_delay, []).append(task)
+        if self.next_task is None or task.call_time < self.next_task.call_time:
+            self.next_task = task
+
+#    def __iter__(self):
+#        """Makes (along with next) this objcet iterable"""
+#        return self
+
+    def _get_next_task(self):
+        """Return the task which should be fired next"""
+        
+        # Only each list's head needs checking: per-delay lists are
+        # sorted by call_time (same delay, insertion order).
+        next_task = None
+        for _, task_list in self.tasks.items():
+            task = task_list[0]
+            if next_task is None:
+                next_task = task
+            if task.call_time < next_task.call_time:
+                next_task = task
+        return next_task
+                
+
+    def consume_task(self):
+        """
+        Return the task which should be fire next and removes it from
+        TaskManager.  Return None when there is no task ready to fire
+        (either no pending tasks, or the earliest is still in the
+        future).
+
+        """
+        current_time = time.time()
+        if self.next_task is None:
+            # no pending tasks
+            return None #raise StopIteration
+        if self.next_task.call_time > current_time:
+            # there are pending tasks but it's too soon to fire them
+            return None #raise StopIteration
+        # self.next_task is ready to be fired
+        task = self.next_task
+        # delete  consummed task and get next one (if any)
+        ms_delay = int(self.next_task.delay * 1000)
+        del self.tasks[ms_delay][0]
+        if not self.tasks[ms_delay]:
+            # delete list when it's empty
+            del self.tasks[ms_delay]
+        self.next_task = self._get_next_task()
+        #TODO2: make it yield
+        return task
+                            
+class ThreadedReactor(threading.Thread):
+
+    """
+    Object inspired in Twisted's reactor.
+    Run in its own thread.
+    It is an instance, not a nasty global
+    
+    Usage: call listen_udp() before start(); schedule work with
+    call_later()/call_now(); stop() to shut down.
+    """
+    def __init__(self, task_interval=0.1, floodbarrier_active=True):
+        threading.Thread.__init__(self)
+        self.setName("KADTracker"+self.getName())
+        # Daemon thread: does not keep the interpreter alive on exit.
+        self.setDaemon(True)
+        
+        self.stop_flag = False
+        # RLock guards tasks and stop_flag against the caller threads.
+        self._lock = threading.RLock()
+        self.task_interval = task_interval
+        self.floodbarrier_active = floodbarrier_active
+        self.tasks = TaskManager()
+        if self.floodbarrier_active:
+            self.floodbarrier = FloodBarrier()
+        
+    def run(self):
+        """Main loop activated by calling self.start()
+
+        Alternates between a (task_interval-bounded) blocking recvfrom
+        and firing the due tasks.
+        """
+        
+        last_task_run = time.time()
+        stop_flag = self.stop_flag
+        while not stop_flag:
+            timeout_raised = False
+            try:
+                data, addr = self.s.recvfrom(BUFFER_SIZE)
+            except (AttributeError):
+                # self.s does not exist: listen_udp() was never called.
+                logger.warning('udp_listen has not been called')
+                time.sleep(self.task_interval)
+                #TODO2: try using Event and wait
+                timeout_raised = True
+            except (socket.timeout):
+                timeout_raised = True
+            except (socket.error), e:
+                # NOTE(review): a persistent socket.error is logged but
+                # not rate-limited, so this loop could spin -- confirm
+                # acceptable for the deployment.
+                logger.critical(
+                    'Got socket.error when receiving (more info follows)')
+                logger.exception('See critical log above')
+            else:
+                ip_is_blocked = self.floodbarrier_active and \
+                                self.floodbarrier.ip_blocked(addr[0])
+                if ip_is_blocked:
+                    logger.warning('%s blocked' % `addr`)
+                else:
+                    self.datagram_received_f(data, addr)
+
+            # Fire due tasks at most every task_interval seconds (or
+            # right away after a receive timeout).
+            if timeout_raised or \
+                   time.time() - last_task_run > self.task_interval:
+                #with self._lock:
+                self._lock.acquire()
+                try:
+                    while True:
+                        task = self.tasks.consume_task()
+                        if task is None:
+                            break
+#                        logger.critical('TASK COUNT 2 %d' % sys.getrefcount(task))
+                        task.fire_callbacks()
+                    stop_flag = self.stop_flag
+                finally:
+                    self._lock.release()
+        logger.debug('Reactor stopped')
+            
+    def stop(self):
+        """Stop the thread. It cannot be resumed afterwards????"""
+        #with self._lock:
+        self._lock.acquire()
+        try:
+            self.stop_flag = True
+        finally:
+            self._lock.release()
+        # wait a little for the thread to end
+        time.sleep(self.task_interval)
+
+
+#     def stop_and_wait(self):
+#         """Stop the thread and wait a little (task_interval)."""
+
+#         self.stop()
+        # wait a little before ending the thread's life
+#        time.sleep(self.task_interval * 2)
+
+    def listen_udp(self, port, datagram_received_f):
+        """Listen on given port and call the given callback when data is
+        received.
+
+        Must be called before start(); the socket timeout doubles as
+        the task-scheduling tick (see run()).
+        """
+        self.datagram_received_f = datagram_received_f
+        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.s.settimeout(self.task_interval)
+        my_addr = ('', port)
+        self.s.bind(my_addr) 
+        
+
+    def call_later(self, delay, callback_fs, *args, **kwds):
+        """Call the given callback with given arguments in the future (delay
+        seconds).  Returns the Task, which the caller may cancel().
+
+        Thread-safe.
+        """
+        #with self._lock:
+        self._lock.acquire()
+        try:
+            task = Task(delay, callback_fs, *args, **kwds)
+#            logger.critical('TASK COUNT CREATION 2 %d' % sys.getrefcount(task))
+            self.tasks.add(task)
+#            logger.critical('TASK COUNT CREATION 3 %d' % sys.getrefcount(task))
+        finally:
+            self._lock.release()
+        return task
+            
+    def call_now(self, callback_f, *args, **kwds):
+        """Same as call_later with delay 0 seconds."""
+        return self.call_later(0, callback_f, *args, **kwds)
+        
+        
+    def sendto(self, data, addr):
+        """Send data to addr using the UDP port used by listen_udp.
+
+        Thread-safe.  Short writes and socket errors are logged, not
+        raised.
+        """
+        #with self._lock:
+        self._lock.acquire()
+        try:
+            try:
+                bytes_sent = self.s.sendto(data, addr)
+                if bytes_sent != len(data):
+                    logger.critical(
+                        'Just %d bytes sent out of %d (Data follows)' % (
+                            bytes_sent,
+                            len(data)))
+                    logger.critical('Data: %s' % data)
+            except (socket.error):
+                logger.critical(
+                    'Got socket.error when sending (more info follows)')
+                logger.critical('Sending data to %r\n%r' % (addr,
+                                                             data))
+                logger.exception('See critical log above')
+        finally:
+            self._lock.release()
+
+
+class ThreadedReactorSocketError(ThreadedReactor):
+    """Test double: a reactor whose socket operations always fail."""
+
+    def listen_udp(self, delay, callback_f, *args, **kwds):
+        self.s = _SocketMock()
+
+                
+class ThreadedReactorMock(object):
+    """Test double for ThreadedReactor: no thread, no real socket."""
+
+    def __init__(self, task_interval=0.1):
+        pass
+    
+    def start(self):
+        pass
+
+    stop = start
+#    stop_and_wait = stop
+
+    def listen_udp(self, port, data_received_f):
+        self.s = _SocketMock()
+        
+
+    def call_later(self, delay, callback_f, *args, **kwds):
+        # Create the Task but never schedule or fire it.
+        return Task(delay, callback_f, *args, **kwds)
+
+    def sendto(self, data, addr):
+        pass
+    
+
+
+    
+class _SocketMock(object):
+    """Socket stand-in whose operations fail, for error-path tests."""
+
+    def sendto(self, data, addr):
+        # Oversized payloads report a short write; everything else errors.
+        if len(data) > BUFFER_SIZE:
+            return BUFFER_SIZE
+        raise socket.error
+
+    def recvfrom(self, buffer_size):
+        raise socket.error
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/node.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/node.py
new file mode 100644 (file)
index 0000000..4b2e5c9
--- /dev/null
@@ -0,0 +1,165 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import time
+
+import utils
+import identifier
+
+class Node(object):
+    """A DHT contact: a network address plus (optionally) a node id.
+
+    The id may be unknown at creation time and assigned exactly once
+    later (write-once 'id' property).
+    """
+
+    def __init__(self, addr, node_id=None, ns_node=False):
+        self._addr = addr
+        self._id = node_id
+        # Whether this node runs the NextShare DHT flavour.
+        self.is_ns = ns_node
+        self._compact_addr = utils.compact_addr(addr)
+
+    def get_id(self):
+        return self._id
+    def set_id(self, node_id):
+        # Write-once: the id can only be set while still unknown.
+        if self._id is None:
+            self._id = node_id
+        else:
+            raise AttributeError, "Node's id is read-only"
+    id = property(get_id, set_id)
+
+    @property
+    def addr(self):
+        return self._addr
+
+    @property
+    def compact_addr(self):
+        return self._compact_addr
+
+    @property
+    def ip(self):
+        return self._addr[0]
+    
+    def __eq__(self, other):
+        try:
+            return self.addr == other.addr and self.id == other.id
+        except AttributeError: #self.id == None
+            # Nodes with unknown ids compare equal iff both ids are
+            # unknown and the addresses match.
+            return self.id is None and other.id is None \
+                   and self.addr == other.addr
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __repr__(self):
+        return '<node: %r %r>' % (self.addr, self.id)
+
+    def log_distance(self, other):
+        """Delegate to Id.log_distance on the two nodes' ids."""
+        return self.id.log_distance(other.id)
+
+    def compact(self):
+        """Return compact format"""
+        return self.id.bin_id + self.compact_addr
+
+    def get_rnode(self):
+        """Return a RoutingNode wrapping this node."""
+        return RoutingNode(self)
+    
+
+
+# Event labels recorded in RoutingNode._last_events.
+QUERY = 'query'
+REPLY = 'reply'
+TIMEOUT = 'timeout'
+
+LAST_RTT_W = 0.2 # The weight of the last RTT to calculate average
+
+MAX_NUM_TIMEOUT_STRIKES = 2
+QUARANTINE_PERIOD = 3 * 60 # 3 minutes
+
+
+class RoutingNode(Node):
+
+    def __init__(self, node_):
+        Node.__init__(self, node_.addr, node_.id, node_.is_ns)
+        self._rtt_avg = None
+        self._num_queries = 0
+        self._num_responses = 0
+        self._num_timeouts = 0
+        self._msgs_since_timeout = 0
+        self._last_events = []
+        self._max_last_events = 10
+        self.refresh_task = None
+        self._rank = 0
+        current_time = time.time()
+        self._creation_ts = current_time
+        self._last_action_ts = current_time
+        self.in_quarantine = True
+        
+    def __repr__(self):
+        return '<rnode: %r %r>' % (self.addr, self.id)
+
+    def get_rnode(self):
+        return self
+    
+    def on_query_received(self):
+        """Register a query from node.
+
+        You should call this method when receiving a query from this node.
+
+        """
+        self._last_action_ts = time.time()
+        self._msgs_since_timeout += 1
+        self._num_queries += 1
+        self._last_events.append((time.time(), QUERY))
+        self._last_events[:self._max_last_events]
+
+    def on_response_received(self, rtt=0):
+        """Register a reply from rnode.
+
+        You should call this method when receiving a response from this rnode.
+
+        """
+        current_time = time.time()
+        #self._reset_refresh_task()
+        if self.in_quarantine:
+            self.in_quarantine = \
+                self._last_action_ts < current_time - QUARANTINE_PERIOD
+                
+        self._last_action_ts = current_time
+        self._msgs_since_timeout += 1
+        try:
+            self._rtt_avg = \
+                self._rtt_avg * (1 - LAST_RTT_W) + rtt * LAST_RTT_W
+        except TypeError: # rtt_avg is None
+            self._rtt_avg = rtt
+        self._num_responses += 1
+        self._last_events.append((time.time(), REPLY))
+        self._last_events[:self._max_last_events]
+
+    def on_timeout(self):
+        """Register a timeout for this rnode.
+
+        You should call this method when getting a timeout for this node.
+
+        """
+        self._last_action_ts = time.time()
+        self._msgs_since_timeout = 0
+        self._num_timeouts += 1
+        self._last_events.append((time.time(), TIMEOUT))
+        self._last_events[:self._max_last_events]
+
+    def timeouts_in_a_row(self, consider_queries=True):
+        """Return number of timeouts in a row for this rnode."""
+        result = 0
+        for timestamp, event in reversed(self._last_events):
+            if event == TIMEOUT:
+                result += 1
+            elif event == REPLY or \
+                     (consider_queries and event == QUERY):
+                return result
+        return result # all timeouts (and queries), or empty list
+            
+#     def rank(self):
+#         if self._num_responses == 0:
+#             # No responses received, the node might be unreachable
+#             return 0
+#         if self.timeouts_in_a_row() > MAX_NUM_TIMEOUT_STRIKES:
+#             return 0
+#         return self._num_queries + self._num_responses + \
+#             -3 * self._num_timeouts
+
+    
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/querier.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/querier.py
new file mode 100644 (file)
index 0000000..5bf632d
--- /dev/null
@@ -0,0 +1,251 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import sys
+
+import logging
+
+import message
+import identifier
+
+logger = logging.getLogger('dht')
+
+
+TIMEOUT_DELAY = 3
+
+class Query(object):
+
+    def __init__(self, tid, query_type, node_,
+                 on_response_f, on_error_f, on_timeout_f,
+                 notify_routing_m_on_response_f,
+                 notify_routing_m_on_error_f,
+                 notify_routing_m_on_timeout_f,
+                 notify_routing_m_on_nodes_found_f):
+        #assert on_response_f
+        #assert on_error_f
+        #assert on_timeout_f
+        #assert notify_routing_m_on_response_f
+        #assert notify_routing_m_on_error_f
+        #assert notify_routing_m_on_timeout_f
+        #assert notify_routing_m_on_nodes_found_f
+        
+        self.tid = tid
+        self.query = query_type
+        self.node = node_
+        self.on_response_f = on_response_f
+        self.on_error_f = on_error_f
+        self.on_timeout_f = on_timeout_f
+        self.notify_routing_m_on_response_f = \
+            notify_routing_m_on_response_f
+        self.notify_routing_m_on_error_f = \
+            notify_routing_m_on_error_f
+        self.notify_routing_m_on_timeout_f = \
+            notify_routing_m_on_timeout_f
+        self.notify_routing_m_on_nodes_found_f = \
+            notify_routing_m_on_nodes_found_f
+        self.timeout_task = None
+
+    def on_response_received(self, response_msg):
+        try:
+            response_msg.sanitize_response(self.query)
+        except (message.MsgError):
+            logger.exception(
+                "We don't like dirty reponses: %r|nresponse ignored"
+                % response_msg)
+            return # Response ignored 
+        self.node.is_ns = response_msg.ns_node
+        if self.node.id:
+            if response_msg.sender_id != self.node.id:
+                return # Ignore response
+        else:
+            self.node.id = response_msg.sender_id
+        #TODO2: think whether late responses should be accepted
+        if self.timeout_task.cancelled:
+            logger.warning(
+                "Response recevived but it's too late!!\n%r, %r" %
+                (response_msg,
+                self.timeout_task))
+            return # Ignore response
+        self.timeout_task.cancel()
+        nodes = []
+        try:
+            nodes.extend(response_msg.nodes)
+        except (AttributeError):
+            pass
+        try:
+            nodes.extend(response_msg.nodes2)
+        except (AttributeError):
+            pass
+        # Notify routing manager (if nodes found).
+        # Do not notify when the query was a GET_PEERS because
+        # the lookup is in progress and the routing_m shouldn't
+        # generate extra traffic.
+        if self.query == message.FIND_NODE and \
+                nodes and self.notify_routing_m_on_nodes_found_f:
+            self.notify_routing_m_on_nodes_found_f(nodes)
+        # Notify routing manager (response)
+        self.node.is_ns = response_msg.ns_node
+        if self.notify_routing_m_on_response_f:
+            self.notify_routing_m_on_response_f(self.node)
+        # Do callback to whomever did the query
+        if self.on_response_f:
+            self.on_response_f(response_msg, self.node)
+        return True # the response was fine
+
+    def on_error_received(self, error_msg):
+        if self.on_error_f:
+            self.on_error_f(error_msg, self.node)
+        if self.notify_routing_m_on_error_f:
+            self.notify_routing_m_on_error_f(self.node)
+
+    def on_timeout(self):
+        # Careful here. Node might not have ID.
+        if self.on_timeout_f:
+            self.on_timeout_f(self.node)
+        if self.notify_routing_m_on_timeout_f:
+            self.notify_routing_m_on_timeout_f(self.node)
+
+    def matching_tid(self, response_tid):
+        return message.matching_tid(self.tid, response_tid)
+
+class Querier(object):
+
+    def __init__(self, rpc_m, my_id, default_timeout_delay=TIMEOUT_DELAY):
+        self.rpc_m = rpc_m
+        self.my_id = my_id
+        self.default_timeout_delay = default_timeout_delay
+        self.rpc_m.add_msg_callback(message.RESPONSE, self.on_response_received)
+        self.rpc_m.add_msg_callback(message.ERROR, self.on_error_received)
+        self.pending = {} # collections.defaultdict(list)
+        self._tid = [0, 0]
+        self.notify_routing_m_on_response = None
+        self.notify_routing_m_on_error = None
+        self.notify_routing_m_on_timeout = None
+        self.notify_routing_m_on_nodes_found = None
+
+    def _next_tid(self):
+        current_tid_str = ''.join([chr(c) for c in self._tid])
+        self._tid[0] = (self._tid[0] + 1) % 256
+        if self._tid[0] == 0:
+            self._tid[1] = (self._tid[1] + 1) % 256
+        return current_tid_str # raul: yield created trouble
+
+    def set_on_response_received_callback(self, callback_f):
+        self.notify_routing_m_on_response = callback_f
+
+    def set_on_error_received_callback(self, callback_f):
+        self.notify_routing_m_on_error = callback_f
+        
+    def set_on_timeout_callback(self, callback_f):
+        self.notify_routing_m_on_timeout = callback_f
+    
+    def set_on_nodes_found_callback(self, callback_f):
+        self.notify_routing_m_on_nodes_found = callback_f
+    
+    def send_query(self, msg, node_, on_response_f,
+                   on_timeout_f, on_error_f,
+                   timeout_delay=None):
+        timeout_delay = timeout_delay or self.default_timeout_delay
+        tid = self._next_tid()
+        logger.debug('sending to node: %r\n%r' % (node_, msg))
+        query = Query(tid, msg.query, node_,
+                      on_response_f, on_error_f,
+                      on_timeout_f,
+                      self.notify_routing_m_on_response,
+                      self.notify_routing_m_on_error,
+                      self.notify_routing_m_on_timeout,
+                      self.notify_routing_m_on_nodes_found) 
+        # if node is not in the dictionary, it will create an empty list
+        self.pending.setdefault(node_.addr, []).append(query)
+        bencoded_msg = msg.encode(tid)
+        query.timeout_task = self.rpc_m.get_timeout_task(node_.addr,
+                                                    timeout_delay,
+                                                    self.on_timeout)
+        self.rpc_m.send_msg_to(bencoded_msg, node_.addr)
+        return query
+
+    def send_query_later(self, delay, msg, node_, on_response_f,
+                         on_timeout_f, on_error_f,
+                         timeout_delay=None):
+        return self.rpc_m.call_later(delay, self.send_query,
+                                     msg, node_,
+                                     on_response_f,
+                                     on_timeout_f,
+                                     on_error_f,
+                                     timeout_delay)
+        
+    def on_response_received(self, response_msg, addr):
+        # TYPE and TID already sanitized by rpc_manager
+        logger.debug('response received: %s' % repr(response_msg))
+        try:
+            addr_query_list = self.pending[addr]
+        except (KeyError):
+            logger.warning('No pending queries for %s', addr)
+            return # Ignore response
+        # There are pending queries from node (let's find the right one (TID)
+        query_found = False
+        for query_index, query in enumerate(addr_query_list):
+            logger.debug('response node: %s, query:\n(%s, %s)' % (
+                `addr`,
+                `query.tid`,
+                `query.query`))
+            if query.matching_tid(response_msg.tid):
+                query_found = True
+                break
+        if not query_found:
+            logger.warning('No query for this response\n%s\nsource: %s' % (
+                response_msg, addr))
+            return # ignore response 
+        # This response matches query. Trigger query's callback
+        response_is_ok = query.on_response_received(response_msg)
+        if response_is_ok:
+            # Remove this query from pending
+            if len(addr_query_list) == 1:
+                # There is one item in the list. Remove the whole list.
+                del self.pending[addr]
+            else:
+                del addr_query_list[query_index]
+        else:
+            logger.warning('Bad response from %r\n%r' % (addr,
+                                                          response_msg))
+
+    def on_error_received(self, error_msg, addr):
+        logger.warning('Error message received:\n%s\nSource: %s',
+                        `error_msg`,
+                        `addr`)
+        # TODO2: find query (with TID)
+        # and fire query.on_error_received(error_msg)
+
+    def on_timeout(self, addr):
+        #try
+        addr_query_list = self.pending[addr]
+        #except (KeyError):
+        #    logger.warning('No pending queries for %s', addr)
+        #    return # Ignore response
+        # There are pending queries from node (oldest query)
+        query = addr_query_list.pop(0)
+        # Remove this query from pending
+        if not addr_query_list:
+            # The list is empty. Remove the whole list.
+            del self.pending[addr]
+        # Trigger query's on_timeout callback
+        query.on_timeout()
+
+        
+    def stop(self):
+        self.rpc_m.stop()
+
+
+class QuerierMock(Querier):
+
+    def __init__(self, my_id):
+        import minitwisted
+        import rpc_manager
+        import test_const as tc
+        reactor = minitwisted.ThreadedReactorMock()
+        rpc_m = rpc_manager.RPCManager(reactor, 1)
+        Querier.__init__(self, rpc_m, my_id)
+
+
+    
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/responder.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/responder.py
new file mode 100644 (file)
index 0000000..864171f
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import message
+import node
+import sys
+
+import logging
+
+logger = logging.getLogger('dht')
+
+
+class Responder(object ):
+    "docstring for Responder"
+    def __init__(self, my_id, routing_m, tracker, token_m):
+        self.my_id = my_id
+        self.routing_m = routing_m
+        self.tracker = tracker
+        self.token_m = token_m
+        self.query_handler = {message.PING: self._on_ping,
+                              message.FIND_NODE: self._on_find_node,
+                              message.GET_PEERS: self._on_get_peers,
+                              message.ANNOUNCE_PEER: self._on_announce_peer,
+                              }
+        self.notify_routing_m_on_query = None
+
+    def set_on_query_received_callback(self, callback_f):
+        self.notify_routing_m_on_query = callback_f
+        
+    def on_query_received(self, query_msg, addr):
+        logger.debug('query received\n%s\nSource: %s' % (`query_msg`,
+                                                          `addr`))
+        try:
+            handler = self.query_handler[query_msg.query]
+        except (KeyError, ValueError):
+            logger.exception('Invalid QUERY')
+            return # ignore query #TODO2: send error back?
+        response_msg = handler(query_msg)
+        self.notify_routing_m_on_query(node.Node(addr,
+                                                 query_msg.sender_id,
+                                                 query_msg.ns_node))
+        return response_msg
+
+    def _on_ping(self, query_msg):
+        return message.OutgoingPingResponse(self.my_id)
+
+    def _on_find_node(self, query_msg):
+        rnodes = self.routing_m.get_closest_rnodes(query_msg.target)
+        return message.OutgoingFindNodeResponse(self.my_id,
+                                                nodes2=rnodes)
+
+    def _on_get_peers(self, query_msg):
+        #get peers from the tracker (if any)
+        token = self.token_m.get()
+        peers = self.tracker.get(query_msg.info_hash)
+        if peers:
+            return message.OutgoingGetPeersResponse(self.my_id,
+                                                    token,
+                                                    peers=peers)
+        rnodes = self.routing_m.get_closest_rnodes(query_msg.info_hash)
+        return message.OutgoingGetPeersResponse(self.my_id,
+                                                token,
+                                                nodes2=rnodes)
+    def _on_announce_peer(self, query_msg):
+        return message.OutgoingAnnouncePeerResponse(self.my_id)
+        
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/routing_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/routing_manager.py
new file mode 100644 (file)
index 0000000..1d8cc4f
--- /dev/null
@@ -0,0 +1,307 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import random
+
+import logging
+
+import identifier as identifier
+import message as message
+from node import Node, RoutingNode
+from routing_table import RoutingTable, RnodeNotFound, BucketFullError
+
+
+logger = logging.getLogger('dht')
+
+
+#TODO2: Stop expelling nodes from tables when there are many consecutive
+# timeouts (and enter off-line mode)
+
+NUM_BUCKETS = identifier.ID_SIZE_BITS + 1
+"""
+We need (+1) to cover all the cases. See the following table:
+Index | Distance      | Comment
+0     | [2^0,2^1)     | All bits equal but the least significant bit
+1     | [2^1,2^2)     | All bits equal till the second least significant bit
+...
+158   | [2^159,2^160) | The most significant bit is equal the second is not
+159   | [2^159,2^160) | The most significant bit is different
+-1    | 0             | The bit strings are equal
+"""
+
+DEFAULT_NUM_NODES = 8
+NODES_PER_BUCKET = [] # 16, 32, 64, 128, 256]
+NODES_PER_BUCKET[:0] = [DEFAULT_NUM_NODES] \
+    * (NUM_BUCKETS - len(NODES_PER_BUCKET))
+
+REFRESH_PERIOD = 10 * 60 # 10 minutes
+QUARANTINE_PERIOD = 3 * 60 # 3 minutes
+
+MAX_NUM_TIMEOUTS = 3
+PING_DELAY_AFTER_TIMEOUT = 30 #seconds
+
+
+MIN_RNODES_BOOTSTRAP = 50
+NUM_NODES_PER_BOOTSTRAP_STEP = 10
+BOOTSTRAP_DELAY = 1
+
+BOOTSTRAP_MODE = 'bootstrap_node'
+NORMAL_MODE = 'normal_mode'
+
+MAX_CONCURRENT_REFRESH_MSGS = 20
+NO_PRIORITY = 0
+PRIORITY = 10
+
+REFRESH_DELAY_FOR_NON_NS = .200 #seconds
+
+class RoutingManager(object):
+    
+    def __init__(self, my_node, querier, bootstrap_nodes):
+        self.my_node = my_node
+        self.querier = querier
+        #Copy the bootstrap list
+        self.bootstrap_nodes = [n for n in bootstrap_nodes]
+        
+        self.main = RoutingTable(my_node, NODES_PER_BUCKET)
+        self.replacement = RoutingTable(my_node, NODES_PER_BUCKET)
+        self.ping_msg = message.OutgoingPingQuery(my_node.id)
+        self.find_node_msg = message.OutgoingFindNodeQuery(
+            my_node.id,
+            my_node.id)
+        self.mode = BOOTSTRAP_MODE
+        self.num_concurrent_refresh_msgs = 0
+        #This must be called by an external party: self.do_bootstrap()
+        #After initializing callbacks
+
+        # Add myself to the routing table
+        rnode = self.main.add(my_node)
+        self._reset_refresh_task(rnode)
+
+    def do_bootstrap(self):
+        if self.main.num_rnodes > MIN_RNODES_BOOTSTRAP:
+            # Enough nodes. Stop bootstrap.
+            return
+        for _ in xrange(NUM_NODES_PER_BOOTSTRAP_STEP):
+            if not self.bootstrap_nodes:
+                self.mode = NORMAL_MODE
+                return
+            index = random.randint(0,
+                                   len(self.bootstrap_nodes) - 1)
+            self.querier.send_query(self.find_node_msg,
+                                    self.bootstrap_nodes[index],
+                                    None,
+                                    None,
+                                    None)
+            del self.bootstrap_nodes[index]
+        #TODO2: Don't use querier's rpc_m
+        self.querier.rpc_m.call_later(BOOTSTRAP_DELAY,
+                                      self.do_bootstrap)
+    
+    def on_query_received(self, node_):
+        try:
+            rnode = self.main.get_rnode(node_)
+        except RnodeNotFound:
+            pass # node is not in the main table
+        else:
+            # node in routing table: inform rnode
+            rnode.on_query_received()
+            self._reset_refresh_task(rnode)
+            return
+        # Node is not in routing table
+        # Check reachability (if the bucket is not full)
+        if self.main.there_is_room(node_):
+            # there is room in the bucket: ping node to check reachability
+            self._refresh_now(node_)
+            return
+        # No room in the main routing table
+        # Add to replacement table (if the bucket is not full)
+        bucket = self.replacement.get_bucket(node_)
+        worst_rnode = self._worst_rnode(bucket.rnodes)
+        if worst_rnode \
+                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
+            self.replacement.remove(worst_rnode)
+            self.replacement.add(node_)
+
+            
+    def on_response_received(self, node_): #TODO2:, rtt=0):
+        try:
+            rnode = self.main.get_rnode(node_)
+        except (RnodeNotFound):
+            pass
+        else:
+            # node in routing table: refresh it
+            rnode.on_response_received()
+            self._reset_refresh_task(rnode)
+            return
+        # The node is not in main
+        try:
+            rnode = self.replacement.get_rnode(node_)
+        except (RnodeNotFound):
+            pass
+        else:
+            # node in replacement table
+            # let's see whether there is room in the main
+            rnode.on_response_received()
+            if self.main.there_is_room(node_):
+                rnode = self.main.add(rnode)
+                self._reset_refresh_task(rnode)
+                self.replacement.remove(rnode)
+            return
+        # The node is nowhere
+        # Add to replacement table (if the bucket is not full)
+        bucket = self.replacement.get_bucket(node_)
+        if self.main.there_is_room(node_):
+            if not bucket.rnodes:
+                # Replacement is empty
+                rnode = self.main.add(node_)
+                self._reset_refresh_task(rnode)
+                return
+        # The main bucket is full or the repl bucket is not empty
+        worst_rnode = self._worst_rnode(bucket.rnodes)
+        # Get the worst node in replacement bucket and see whether
+        # it's bad enough to be replaced by node_
+        if worst_rnode \
+                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
+            # This node is better candidate than worst_rnode
+            self.replacement.remove(worst_rnode)
+        try:
+            self.replacement.add(node_)
+        except (BucketFullError):
+            pass
+
+        
+    def on_error_received(self, node_):
+        pass
+    
+    def on_timeout(self, node_):
+        if node_ is self.my_node:
+            raise Exception, 'I got a timeout from myself!!!' 
+        if not node_.id:
+            return # This is a bootstrap node (just addr, no id)
+        try:
+            rnode = self.main.get_rnode(node_)
+        except RnodeNotFound:
+            pass
+        else:
+            # node in routing table: check whether it should be removed
+            rnode.on_timeout()
+            replacement_bucket = self.replacement.get_bucket(node_)
+            self._refresh_replacement_bucket(replacement_bucket)
+            self.main.remove(rnode)
+            try:
+                self.replacement.add(rnode)
+            except (BucketFullError):
+                worst_rnode = self._worst_rnode(replacement_bucket.rnodes)
+                if worst_rnode:
+                    # Replace worst node in replacement table
+                    self.replacement.remove(worst_rnode)
+                    self._refresh_replacement_bucket(replacement_bucket)
+                    # We don't want to ping the node which just did timeout
+                    self.replacement.add(rnode)
+        # Node is not in main table
+        try:
+            rnode = self.replacement.get_rnode(node_)
+        except RnodeNotFound:
+            pass # the node is not in any table. Nothing to do here.
+        else:
+            # Node in replacement table: just update rnode
+            rnode.on_timeout()
+            
+    def on_nodes_found(self, nodes):
+        #FIXME: this will send ping at exponential rate
+        #not good!!!!
+        logger.debug('nodes found: %r', nodes)
+        for node_ in nodes:
+            try:
+                rnode = self.main.get_rnode(node_)
+            except RnodeNotFound:
+                # Not in the main: ping it if there is room in main
+                if self.main.there_is_room(node_):
+                    logger.debug('pinging node found: %r', node_)
+                    self._refresh_now(node_, NO_PRIORITY)
+                    #TODO2: prefer NS
+
+    def get_closest_rnodes(self, target_id, num_nodes=DEFAULT_NUM_NODES):
+        return self.main.get_closest_rnodes(target_id, num_nodes)
+
+    def get_all_rnodes(self):
+        return (self.main.get_all_rnodes(),
+                self.replacement.get_all_rnodes())
+
+    def print_stats(self):
+        print '=== MAIN ==='
+        self.main.print_stats()
+        print '=== REPLACEMENT ==='
+        self.replacement.print_stats()
+        print '=== ==='
+
+    def _refresh_now(self, node_, priority=PRIORITY):
+        if priority == NO_PRIORITY and \
+                self.num_concurrent_refresh_msgs > MAX_CONCURRENT_REFRESH_MSGS:
+            return
+        self.num_concurrent_refresh_msgs += 1
+        return self.querier.send_query(self.find_node_msg,
+                                       node_,
+                                       self._refresh_now_callback,
+                                       self._refresh_now_callback,
+                                       self._refresh_now_callback)
+    
+    def _reset_refresh_task(self, rnode):
+        if rnode.refresh_task:
+            # Cancel the current refresh task
+            rnode.refresh_task.cancel()
+        if rnode.in_quarantine:
+            rnode.refresh_task = self._refresh_later(rnode,
+                                                     QUARANTINE_PERIOD)
+        else:
+            rnode.refresh_task = self._refresh_later(rnode)
+
+
+    def _refresh_later(self, rnode, delay=REFRESH_PERIOD):
+        return self.querier.send_query_later(delay,
+                                             self.find_node_msg,
+                                             rnode,
+                                             None,
+                                             None,
+                                             None)
+    def _do_nothing(self, *args, **kwargs):
+        pass
+
+    def _refresh_now_callback(self, *args, **kwargs):
+        self.num_concurrent_refresh_msgs -= 1
+
+
+    def _refresh_replacement_bucket(self, bucket):
+        for rnode in bucket.rnodes:
+            if rnode.is_ns:
+                # We give advantage to NS nodes
+                self._refresh_now(rnode)
+            else:
+                self._refresh_later(rnode, REFRESH_DELAY_FOR_NON_NS)
+    
+    def _worst_rnode(self, rnodes):
+        max_num_timeouts = -1
+        worst_rnode_so_far = None
+        for rnode in rnodes:
+            num_timeouots = rnode.timeouts_in_a_row()
+            if num_timeouots >= max_num_timeouts:
+                max_num_timeouts = num_timeouots
+                worst_rnode_so_far = rnode
+        return worst_rnode_so_far
+
+        
+                
+            
+class RoutingManagerMock(object):
+
+    def get_closest_rnodes(self, target_id):
+        import test_const as tc
+        if target_id == tc.INFO_HASH_ZERO:
+            return (tc.NODES_LD_IH[155][4], 
+                    tc.NODES_LD_IH[157][3],
+                    tc.NODES_LD_IH[158][1],
+                    tc.NODES_LD_IH[159][0],
+                    tc.NODES_LD_IH[159][2],)
+        else:
+            return tc.NODES
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/routing_table.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/routing_table.py
new file mode 100644 (file)
index 0000000..70cae23
--- /dev/null
@@ -0,0 +1,125 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import logging
+
+logger = logging.getLogger('dht')
+
+
+class BucketFullError(Exception):
+    pass
+class RnodeNotFound(IndexError):
+    pass
+    
+class Bucket(object):
+
+    def __init__(self, max_nodes):
+        self.max_nodes = max_nodes
+        self.rnodes = []
+
+    def __getitem__(self, node_):
+        try:
+            return self.rnodes[self._index(node_)]
+        except (KeyError):
+            raise RnodeNotFound
+
+    def add(self, rnode):
+        if len(self.rnodes) == self.max_nodes:
+            raise BucketFullError
+        self.rnodes.append(rnode)
+
+    def remove(self, node_):
+        del self.rnodes[self._index(node_)]
+        
+    def __repr__(self):
+        return '\n'.join([repr(rnode) for rnode in self.rnodes])
+
+    def __len__(self):
+        return len(self.rnodes)
+
+    def is_full(self):
+        return len(self.rnodes) == self.max_nodes
+
+    def _index(self, node_):
+        for i, rnode in enumerate(self.rnodes):
+            if rnode == node_:
+                return i
+        raise KeyError # not found
+
+NUM_BUCKETS = 160 + 1 # log_distance returns a number in range [-1,159]
+NUM_NODES = 8
+class RoutingTable(object):
+    '''
+    '''
+    def __init__(self, my_node, nodes_per_bucket):
+        assert len(nodes_per_bucket) == NUM_BUCKETS
+        self.my_node = my_node
+        self.buckets = [Bucket(num_nodes)
+                        for num_nodes in nodes_per_bucket]
+        self.num_rnodes = 0
+
+    def get_rnode(self, node_):
+        index = node_.log_distance(self.my_node)
+        return self.buckets[index][node_]
+                
+    def get_bucket(self, node_):
+        index = node_.log_distance(self.my_node)
+        return self.buckets[index]
+
+    def there_is_room(self, node_):
+        return not self.get_bucket(node_).is_full()
+
+    def add(self, node_):
+        rnode = node_.get_rnode()
+        index = node_.log_distance(self.my_node)
+        bucket = self.buckets[index].add(rnode)
+        self.num_rnodes += 1
+        return rnode
+
+    def remove(self, node_):
+        index = node_.log_distance(self.my_node)
+        bucket = self.buckets[index].remove(node_)
+        self.num_rnodes -= 1
+        
+    def get_closest_rnodes(self, id_, num_nodes=NUM_NODES):
+        # Myself
+        if id_ == self.my_node.id:
+            return [self.my_node]
+        # id_ is not myself
+        result = []
+        highest_index = id_.log_distance(self.my_node.id)
+        for i, bucket in enumerate(self.buckets[highest_index::-1]):
+            result.extend(bucket.rnodes[:num_nodes-len(result)])
+            #TODO2: get all nodes in the bucket and order
+            if len(result) == num_nodes:
+                break
+        if len(result) < num_nodes:
+            result.extend(self.buckets[-1].rnodes) # myself
+        return result 
+
+    def get_all_rnodes(self):
+        rnodes = []
+        for bucket in self.buckets:
+            rnodes.extend(bucket.rnodes)
+        return rnodes
+
+    def print_stats(self):
+        num_nodes = 0
+        for i, bucket in enumerate(self.buckets):
+            if len(bucket):
+                print i, len(bucket)
+                num_nodes += len(bucket)
+        print 'Total:', num_nodes
+    
+    def __repr__(self):
+        msg = ['==============RoutingTable============= BEGIN']
+        for i, bucket in enumerate(self.buckets):
+            msg.append('%d %r' % (i, bucket))
+        msg.append('==============RoutingTable============= END')
+        return '\n'.join(msg)
+
+    
+
+    
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/rpc_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/rpc_manager.py
new file mode 100644 (file)
index 0000000..f0fa951
--- /dev/null
@@ -0,0 +1,66 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+
+import logging
+
+import message
+
+logger = logging.getLogger('dht')
+
+
+class RPCManager(object):
+
+    def __init__(self, reactor, port):
+        self.reactor = reactor
+        self.reactor.listen_udp(port, self._on_datagram_received)
+        self.msg_callbacks_d = {}
+        self.timeout_callbacks = []
+
+    def get_timeout_task(self, addr, timeout_delay, timeout_callback):
+        timeout_callbacks = self.timeout_callbacks + [timeout_callback]
+        return self.reactor.call_later(timeout_delay,
+                                               timeout_callbacks, addr)
+    def send_msg_to(self, bencoded_msg, addr):
+        """This must be called right after get_timeout_task
+        (when timeout is needed).
+        """
+        self.reactor.sendto(bencoded_msg, addr)
+    
+    def call_later(self, delay, callback_fs, *args, **kwargs):
+        return self.reactor.call_later(delay, callback_fs, *args, **kwargs)
+    
+    def add_msg_callback(self, msg_type, callback_f):
+        self.msg_callbacks_d.setdefault(msg_type, []).append(callback_f)
+
+    def add_timeout_callback(self, callback_f):
+        self.timeout_callbacks.append(callback_f)
+                                
+    def stop(self):
+        self.reactor.stop()
+
+    def _on_datagram_received(self, data, addr):
+        # Sanitize bencode
+        try:
+            msg = message.IncomingMsg(data)
+        except (message.MsgError):
+            logger.info('MsgError when decoding\n%s\nsouce: %s' % (
+                data, addr))
+            return # ignore message
+        try:
+            # callback according to message's type
+            callback_fs = self.msg_callbacks_d[msg.type]
+        except (KeyError):
+            logger.info('Key TYPE has an invalid value\n%s\nsouce: %s' % (
+                data, addr))
+            return #ignore message
+        # Call the proper callback (selected according msg's TYPE)
+        response_msg = None
+        for callback_f in callback_fs:
+            # if there is a response we should keep it
+            response_msg = callback_f(msg, addr) or response_msg
+        if response_msg:
+            bencoded_response = response_msg.encode(msg.tid)
+            self.send_msg_to(bencoded_response, addr)
+    
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/run_dht_node_forever.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/run_dht_node_forever.py
new file mode 100644 (file)
index 0000000..55800d7
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import time
+import sys
+
+import logging, logging_conf
+
+import identifier
+import kadtracker
+
+def peers_found(peers):  # lookup callback: print each peer found, then re-show the interactive prompt
+    for peer in peers:
+        print peer
+    print 'Type "EXIT" to stop the DHT and exit'
+    print 'Type an info_hash (in hex digits):'
+
+def lookup_done():  # lookup callback: report an empty result, then re-show the interactive prompt
+    print 'No peers found'
+    print 'Type "EXIT" to stop the DHT and exit'
+    print 'Type an info_hash (in hex digits):'
+
+if len(sys.argv) == 5 and sys.argv[1] == 'interactive_dht':  # NOTE(review): usage below advertises 3 args (ip port path); this branch expects 4 plus the literal mode name — they can never agree
+    logging.critical('argv %r' % sys.argv)
+    assert 0  # NOTE(review): unconditional assert makes the rest of this branch unreachable (debug leftover?)
+    RUN_DHT = True
+    my_addr = (sys.argv[1], sys.argv[2]) #('192.16.125.242', 7000)  # NOTE(review): port left as a string; server_dht.py uses int(sys.argv[2]) — confirm which is expected by KadTracker
+    logs_path = sys.argv[3]
+    dht = kadtracker.KadTracker(my_addr, logs_path)
+else:
+    RUN_DHT = False
+    print 'usage: python interactive_dht ip port paht'
+    
+while (RUN_DHT):
+    input = sys.stdin.readline()[-1]  # NOTE(review): [-1] keeps only the LAST character (usually '\n'); [:-1] was almost certainly intended, so 'EXIT' can never match below
+    if input == 'EXIT':
+        dht.stop()
+        break
+    try:
+        info_hash = identifier.Id(hex_id)  # NOTE(review): hex_id is undefined here (NameError); presumably the line read above ('input') was meant
+    except (IdError):  # NOTE(review): IdError is not imported unqualified; should be identifier.IdError
+        print 'Invalid info_hash (%s)' % hex_id
+        continue
+    dht.get_peers(info_hash, do_nothing)  # NOTE(review): do_nothing is undefined; peers_found was probably intended
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/server_dht.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/server_dht.py
new file mode 100644 (file)
index 0000000..ae50021
--- /dev/null
@@ -0,0 +1,87 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import time
+import sys
+import pdb
+#import guppy
+
+import logging, logging_conf
+logs_path = '.'
+logs_level = logging.DEBUG # This generates HUGE (and useful) logs
+#logs_level = logging.INFO # This generates some (useful) logs
+#logs_level = logging.WARNING # This generates warning and error logs
+#logs_level = logging.CRITICAL
+
+import identifier
+import kadtracker
+
+
+#hp = guppy.hpy()
+
+def peers_found(peers):  # lookup callback: log arrival time only; detailed printing is disabled
+    print 'Peers found:', time.time()
+    return  # NOTE(review): early return makes the loop below dead code (debug leftover?)
+    for peer in peers:
+        print peer
+    print '-'*20
+
+def lookup_done():  # lookup callback: announce completion
+    print 'Lookup DONE'
+
+
+info_hashes = (  # ids to look up, cycling forever below: random ids interleaved with fixed 40-hex-digit info-hashes
+    identifier.RandomId(),
+    identifier.Id('28f2e5ea2bf87eae4bcd5e3fc9021844c01a4df9'),
+    identifier.RandomId(),
+    identifier.Id('dd5c25b4b8230e108fbf9d07f87a86c6b05c9b6d'),
+    identifier.RandomId(),
+    identifier.Id('bcbdb9c2e7b49c65c9057431b492cb7957c8a330'),
+    identifier.RandomId(),
+    identifier.Id('d93df7a507f3c9d2ebfbe49762a217ab318825bd'),
+    identifier.RandomId(),
+    identifier.Id('6807e5d151e2ac7ae92eabb76ddaf4237e4abb60'),
+    identifier.RandomId(),
+    identifier.Id('83c7b3b7d36da4df289670592be68f9dc7c7096e'),
+    identifier.RandomId(),
+    identifier.Id('9b16aecf952597f9bb051fecb7a0d8475d060fa0'),
+    identifier.RandomId(),
+    identifier.Id('24f2446365d3ef782ec16ad63aea1206df4b8d21'),
+    identifier.RandomId(),
+    identifier.Id('a91af3cde492e29530754591b862b1beecab10ff'),
+    identifier.RandomId(),
+    identifier.Id('3119baecadea3f31bed00de5e7e76db5cfea7ca1'),
+    )
+    
+if len(sys.argv) == 4 and sys.argv[0] == 'server_dht.py':  # NOTE(review): argv[0] comparison fails whenever the script is invoked with a path (e.g. ./dir/server_dht.py); compare os.path.basename instead
+    logging.critical('argv %r' % sys.argv)
+    RUN_DHT = True
+    my_addr = (sys.argv[1], int(sys.argv[2])) #('192.16.125.242', 7000)
+    logs_path = sys.argv[3]
+    print 'logs_path:', logs_path
+    logging_conf.setup(logs_path, logs_level)
+    dht = kadtracker.KadTracker(my_addr, logs_path)
+else:
+    RUN_DHT = False
+    print 'usage: python server_dht.py dht_ip dht_port path'
+    
+try:
+    print 'Type Control-C to exit.'
+    i = 0
+    while (RUN_DHT):
+        for info_hash in info_hashes:
+            #splitted_heap_str = str(hp.heap()).split()
+            #print i, splitted_heap_str[10]
+            dht.print_routing_table_stats()
+            time.sleep(2 * 60)  # wait two minutes between consecutive lookups
+            print 'Getting peers:', time.time()
+            dht.get_peers(info_hash, peers_found)
+            #time.sleep(1.5)
+            #dht.stop()
+            #pdb.set_trace()
+            i = i + 1
+except (KeyboardInterrupt):
+    dht.stop()
+    
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_bencode.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_bencode.py
new file mode 100644 (file)
index 0000000..df548fd
--- /dev/null
@@ -0,0 +1,140 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import assert_raises, raises
+
+import cStringIO
+import logging, logging_conf
+
+from bencode import *
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+test_data = [  # (python value, expected bencoding) pairs; used both to test encode() and, reversed, decode()
+    # strings
+    ('a', '1:a'),
+    ('1', '1:1'),
+    ('0123456789abcdef', '16:0123456789abcdef'),
+    ('A' * 100, '100:' + 'A' * 100),
+    ('{', '1:{'),
+    ('[', '1:['),
+    (chr(2), '1:' + chr(2)),
+    # integers
+    (0, 'i0e'),
+    (000, 'i0e'),
+    (1234567890, 'i1234567890e'),
+    (-1, 'i-1e'),
+    # lists
+    ([], 'le'),
+    ([[[[]]]], 'lllleeee'), # maximum recursion depth
+    ([1, 2, 3], 'li1ei2ei3ee'),
+    (['A', 'B', 'C'], 'l1:A1:B1:Ce'),
+    (['A', 2, 'C'], 'l1:Ai2e1:Ce'),
+    ([1, ['X'], 2, 'Z'], 'li1el1:Xei2e1:Ze'),
+    # dictionaries
+    ({}, 'de'),
+    ({'key': 'a'}, 'd3:key1:ae'),
+    ({'ZZZ': 12345}, 'd3:ZZZi12345ee'),
+    # ordered dictionaries
+    ({'a':{'A':1, 'C':2, 'B':3}, 'b':2, 'z':3, 'c':[]},
+     'd1:ad1:Ai1e1:Bi3e1:Ci2ee1:bi2e1:cle1:zi3ee'),
+    # mixed types
+    ({'A': [], 'B': {'B': [1], 'C': [], 'D':{}}, 'C': 9},
+     'd1:Ale1:Bd1:Bli1ee1:Cle1:Ddee1:Ci9ee'),
+    ]
+
+test_data_encode_error = [  # (bad input, expected exception) pairs for encode()
+    (False, EncodeError),
+    # Using no-string types in dict
+    ({1:1}, EncodeError),
+    ({None:1}, EncodeError),
+    ({(1,2):1}, EncodeError),
+    # There is no recursion limit when encoding
+    ]
+
+test_data_decode_error = [  # (bad bencode, expected exception) pairs for decode()
+    ('', DecodeError), # empty bencode
+    ('leEXTRA', DecodeError), # extra characters after bencode
+    ('xWHATEVER', DecodeError), # start with invalid character
+    ('dxe', DecodeError), # invalid special character
+    ('ixe', DecodeError), # invalid integer
+    ('li2e', DecodeError), # list end missing
+    ('li2eee', DecodeError), # extra end
+    ('d3:KEYe', DecodeError), # value missing
+    ('lllll', RecursionDepthError),
+    ('ddddd', DecodeError), # Notice that a dictionary is NOT a valid KEY.
+    ]
+
+
+def debug_print(test_num, input_, expected, output):  # log one test case's input/expected/actual at DEBUG level
+    logger.debug('''test_num: %d
+    input:    %s
+    expected: %s
+    output:   %s''' % (test_num, input_, expected, output))
+       
+
+class TestEncode():  # nose-style tests: encode() must produce the expected bencode, or raise the expected error
+
+    def setup(self):
+        pass
+
+    def test_encode(self):  # each (data, expected) pair in test_data must encode to exactly `expected`
+        for i, (data, expected) in enumerate(test_data):
+            bencoded = None
+            try:
+                bencoded = encode(data)
+            except(Exception), e:
+                debug_print(i, data, expected, e)
+                raise
+            if bencoded != expected:
+                debug_print(i, data, expected, bencoded)
+                assert False
+
+    def test_encode_error(self):  # each bad input must raise exactly the expected exception type
+        for i, (data, expected) in enumerate(test_data_encode_error):
+            logger.debug(
+                '>>>>>>>>>>>EXPECTED ERROR LOG: %r' % expected)
+            try:
+                encode(data)
+            except expected:
+                pass # Good. We got the expected exception.
+            except (Exception), e:
+                debug_print(i, data, expected, e)
+                raise # Fail. We got some other exception.
+            else:
+                debug_print(i, data, expected, 'NO EXCEPTION RAISED')
+                assert False # Fail. We got no exception at all.
+
+                
+class TestDecode:  # nose-style tests: decode() must invert the test_data pairs, or raise the expected error
+
+    def setup(self):
+        pass
+
+    def test_decode(self):  # each test_data pair, read in reverse, must decode back to the original value
+        for i, (expected, bencoded) in enumerate(test_data):
+            data = None
+            try:
+                data = decode(bencoded)
+            except (Exception), e:
+                debug_print(i, bencoded, expected, e)
+                raise
+            else:
+                if data != expected:
+                    debug_print(i, bencoded, expected, data)
+                    assert False
+
+    def test_decode_error(self):  # each malformed bencode must raise exactly the expected exception type
+        for i, (bencoded, expected) in enumerate(test_data_decode_error):
+            try:
+                decode(bencoded)
+            except expected:
+                pass
+            except (Exception), e:
+                debug_print(i, bencoded, expected, e)
+                raise
+            else:
+                debug_print(i, bencoded, expected, 'NO EXCEPTION RAISED')
+                assert False
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_const.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_const.py
new file mode 100644 (file)
index 0000000..78bebc8
--- /dev/null
@@ -0,0 +1,79 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+#from nose.tools import eq_, ok_
+'''
+import logging, logging_conf
+logs_path = 'test_logs'
+logs_level = logging.DEBUG
+logging_conf.setup(logs_path, logs_level)
+'''
+
+import identifier
+from identifier import Id, ID_SIZE_BITS, BITS_PER_BYTE
+import node
+
+
+
+TASK_INTERVAL = .01  # test-speed values (seconds); presumably shrunk from production settings — confirm against minitwisted defaults
+TIMEOUT_DELAY = .4
+
+CLIENT_ID = identifier.Id('\x41' * identifier.ID_SIZE_BYTES)  # fixed ids/addrs shared by the whole test suite
+CLIENT_ADDR = ('127.0.0.1', 6000)
+CLIENT_NODE = node.Node(CLIENT_ADDR, CLIENT_ID)
+BT_PORT = 7777
+
+SERVER_ID = identifier.Id('\x01' * identifier.ID_SIZE_BYTES)
+SERVER_ADDR = ('127.0.0.1', 6001)
+SERVER_NODE = node.Node(SERVER_ADDR, SERVER_ID)
+
+SERVER2_ID = identifier.Id('\x43' * identifier.ID_SIZE_BYTES)
+SERVER2_ADDR = ('127.0.0.2', 6002)
+SERVER2_NODE = node.Node(SERVER2_ADDR, SERVER2_ID)
+
+EXTERNAL_NODE_ADDR = ('127.0.0.1', 6881)  # node with no known id
+EXTERNAL_NODE = node.Node(EXTERNAL_NODE_ADDR)
+
+NO_ADDR = ('127.0.0.1', 1)  # port 1: nothing listens there, so this node never answers
+DEAD_NODE = node.Node(NO_ADDR)
+
+NODE_ID = identifier.Id('\x02' * identifier.ID_SIZE_BYTES)
+TARGET_ID = NODE_ID
+INFO_HASH = identifier.Id('\x60\xd5\xd8\x23\x28\xb4\x54\x75\x11\xfd\xea\xc9\xbf\x4d\x01\x12\xda\xa0\xce\x00')
+INFO_HASH_ZERO = identifier.Id('\x00' * identifier.ID_SIZE_BYTES)
+TID = 'a'  # transaction-id strings used in message tests
+TID2 = 'b'
+TOKEN = 'aa'
+
+NUM_NODES = 8
+NODE_IDS = [identifier.Id(chr(i) * identifier.ID_SIZE_BYTES) \
+            for i in xrange(NUM_NODES)]
+ADDRS = [('127.0.0.'+str(i), 7000 + i) for i in xrange(NUM_NODES)]
+NODES = [node.Node(addr, node_id) \
+             for addr, node_id in zip(ADDRS, NODE_IDS)]
+PEERS = ADDRS
+
+NODE2_IDS = [identifier.Id('\x01'+chr(i) * (identifier.ID_SIZE_BYTES-1)) \
+            for i in xrange(100, 100+NUM_NODES)]
+ADDRS2 = [('127.0.0.'+str(i), 7000 + i) \
+              for i in xrange(100, 100+NUM_NODES)]
+NODES2 = [node.Node(addr, node_id) \
+              for addr, node_id in zip(ADDRS2, NODE2_IDS)]
+PEERS2 = ADDRS2
+
+IPS = ['1.2.3.' + str(i) for i in xrange(NUM_NODES)]  # plain IP strings for floodbarrier tests
+
+#TODO2: make this faster
+num_nodes_per_ld = 20
+NODES_LD_IH = [[]] * BITS_PER_BYTE  # NOTE(review): all BITS_PER_BYTE slots alias the SAME empty list; harmless only while indices < BITS_PER_BYTE stay unused (the loop below starts at BITS_PER_BYTE)
+for ld in xrange(BITS_PER_BYTE, ID_SIZE_BITS):
+    NODES_LD_IH.append([])
+    common_id = INFO_HASH_ZERO.generate_close_id(ld)  # id at log-distance ld from INFO_HASH_ZERO
+    #eq_(common_id.log_distance(INFO_HASH_ZERO), ld)
+    for i in xrange(num_nodes_per_ld):
+        this_id = Id(common_id.bin_id[:-1] + chr(i))  # vary only the last byte to stay at distance ld
+        #eq_(this_id.log_distance(INFO_HASH_ZERO), ld)
+        NODES_LD_IH[ld].append(
+            node.Node(('127.0.0.' + str(i), i), this_id))
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_controller.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_controller.py
new file mode 100644 (file)
index 0000000..b2f8dc3
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import test_const as tc
+
+import controller
+
+
+
+class TestController:  # smoke test: a Controller can be constructed, started and stopped
+
+    def setup(self):
+        self.controller = controller.Controller(tc.CLIENT_ADDR)
+
+    def test_start_stop(self):
+        self.controller.start()
+        self.controller.stop()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_floodbarrier.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_floodbarrier.py
new file mode 100644 (file)
index 0000000..76ff17b
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import time
+import logging, logging_conf
+
+from test_const import *
+
+from floodbarrier import FloodBarrier
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+class TestFloodBarrier:  # timing-based test of FloodBarrier rate limiting (uses real sleeps; sensitive to slow machines)
+
+    def setup(self):
+        #logger.critical('************* BEGIN **************')
+        pass
+
+    def test(self):
+        fb = FloodBarrier(checking_period=.4,  # allow 4 packets per .4 s window; offenders blocked for 1 s
+                          max_packets_per_period=4,
+                          blocking_period=1)
+        for ip in IPS:
+            for _ in xrange(4):
+                assert not fb.ip_blocked(ip)
+        # Every ip is on the limit
+        assert fb.ip_blocked(IPS[0])
+        assert fb.ip_blocked(IPS[1])
+        # 0 and 3 blocked
+        time.sleep(.2)
+        # Half a period gone
+        assert fb.ip_blocked(IPS[0])
+        # IP 0 refreshes the blocking (extra .2 seconds)
+        time.sleep(.2)
+        # The initial floods are forgotten
+        # IP 0,1,3 are blocked
+        assert fb.ip_blocked(IPS[0])
+        # The blocking doesn't get refreshed now (.8 secs to expire)
+        assert fb.ip_blocked(IPS[1])
+        # The blocking doesn't get refreshed (.6 secs to expire)
+        assert not fb.ip_blocked(IPS[2])
+        time.sleep(.7)
+        # IP 0 is the only one still blocked (it got refreshed)
+        assert fb.ip_blocked(IPS[0])
+        assert not fb.ip_blocked(IPS[1])
+        assert not fb.ip_blocked(IPS[2])
+        assert not fb.ip_blocked(IPS[3])
+        time.sleep(.4)
+        for ip in IPS:
+            for _ in xrange(4):
+                assert not fb.ip_blocked(ip)
+        time.sleep(.4)
+        for ip in IPS:
+            for _ in xrange(4):
+                assert not fb.ip_blocked(ip)
+
+        
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_identifier.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_identifier.py
new file mode 100644 (file)
index 0000000..5191d7a
--- /dev/null
@@ -0,0 +1,199 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import random
+
+import logging, logging_conf
+
+from nose.tools import eq_, ok_, assert_raises, raises
+import test_const as tc
+
+import identifier
+from identifier import Id, RandomId, IdError
+from identifier import ID_SIZE_BYTES, ID_SIZE_BITS, BITS_PER_BYTE
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+BIN_ID0 = '\x00' * ID_SIZE_BYTES
+BIN_ID1 = '\x01' * ID_SIZE_BYTES
+BIN_ID2 = '\x02' * ID_SIZE_BYTES
+DIST0_1 = '\x01' * ID_SIZE_BYTES
+DIST1_2 = '\x03' * ID_SIZE_BYTES
+
+HEX_ID1 =  '01' * ID_SIZE_BYTES
+
+
+class TestId(object):  # unit tests for identifier.Id: construction, XOR distance, log-distance and ordering
+    
+    def test_create(self):  # accepts 20-byte binary or 40-hex-digit strings; rejects anything else with IdError
+        _ = Id(BIN_ID1)
+        _ = RandomId()
+        assert_raises(IdError, Id, 1)
+        assert_raises(IdError, Id, '1')
+        _ = Id('1' * 40) # Hexadecimal
+        assert_raises(IdError, Id, 'Z'*40)
+        eq_(Id('\x00'*20).bin_id, Id('0'*40).bin_id)
+        eq_(Id('\xff'*20), Id('f'*40))
+
+    def test_has_repr(self):
+        eq_(repr(Id(BIN_ID1)), '<Id: ' + '01' * ID_SIZE_BYTES + '>')
+        
+    def test_is_hashable(self):  # Id must be usable as a dict key
+        d = {Id(BIN_ID1): 1}
+        
+    def test_util(self):  # module-private helpers: bin->hex and single-byte XOR
+        assert identifier._bin_to_hex(BIN_ID1) == HEX_ID1
+        assert identifier._byte_xor('\0', '\1') == '\1'
+    
+    def test_first_different_byte(self):  # index of first differing byte; IndexError when strings are equal
+        str1 = '0' * ID_SIZE_BYTES
+        for i in range(ID_SIZE_BYTES):
+            str2 = '0' * i + '1' * (ID_SIZE_BYTES - i)
+            logger.debug('test_num: %d, _first_different_byte: %d' % (
+                i, identifier._first_different_byte(str1, str2)))
+            assert identifier._first_different_byte(str1, str2) == i
+        assert_raises(IndexError,
+                      identifier._first_different_byte, str1, str1)
+
+    def test_first_different_bit(self):  # bit index (0 = MSB) of first difference between two single bytes
+        assert identifier._first_different_bit('\0', '\x01') == 7
+        assert identifier._first_different_bit('\0', '\x02') == 6
+        assert identifier._first_different_bit('\0', '\x04') == 5
+        assert identifier._first_different_bit('\0', '\x09') == 4
+        assert identifier._first_different_bit('\0', '\x10') == 3
+        assert identifier._first_different_bit('\0', '\x23') == 2
+        assert identifier._first_different_bit('\0', '\x40') == 1
+        assert identifier._first_different_bit('\0', '\xa5') == 0
+        assert identifier._first_different_bit('\0', '\xff') == 0
+        assert_raises(AssertionError, identifier._first_different_bit,
+                      '\5', '\5')
+
+    def test_bin_id(self):
+        assert Id(BIN_ID1).bin_id == BIN_ID1
+
+    def test_equal(self):  # equality is by value, not by instance
+        id1 = Id(BIN_ID0)
+        assert id1 == id1 # same instance
+        assert id1 == Id(BIN_ID0) #different instance, same value
+        assert id1 != Id(BIN_ID1)
+
+
+    @raises(AttributeError)
+    def test_bin_id_read_only(self):  # bin_id is a read-only property
+        id1 = Id(BIN_ID1)
+        id1.bin_id = BIN_ID2
+
+    def test_str(self):  # str() yields the raw binary id
+        id1 = Id(BIN_ID1)
+        assert BIN_ID1 == '%s' % id1
+
+    def test_distance(self):  # XOR distance is symmetric
+        id1 = Id(BIN_ID1)
+        id2 = Id(BIN_ID2)
+        dist1_2 = Id(DIST1_2)
+        assert id1.distance(id2).bin_id == dist1_2.bin_id
+        assert id2.distance(id1).bin_id == dist1_2.bin_id 
+        #assert id1.distance(id1).bin_id == ZeroId().bin_id
+
+    def test_log_distance(self):  # log-distance = index of highest differing bit; -1 for equal ids
+        id0 = Id(BIN_ID0)
+        id1 = Id(BIN_ID1)
+        id2 = Id(BIN_ID2)
+        eq_(id0.log_distance(id0), -1)
+        eq_(id0.log_distance(id1), ID_SIZE_BITS - 8)
+        eq_(id0.log_distance(id2), ID_SIZE_BITS - 7)
+
+        id_log = (  # (id, expected log-distance from id0) pairs
+            (Id('\x00' + '\xff' * (ID_SIZE_BYTES - 1)),
+             BITS_PER_BYTE * (ID_SIZE_BYTES - 1) - 1),
+            
+            (Id('\x53' * ID_SIZE_BYTES),
+            BITS_PER_BYTE * ID_SIZE_BYTES - 2),
+            
+            (Id(BIN_ID0[:7] + '\xff' * (ID_SIZE_BYTES - 7)),
+             (ID_SIZE_BYTES - 7) * BITS_PER_BYTE - 1),
+            
+            (Id(BIN_ID0[:9] + '\x01' * (ID_SIZE_BYTES - 9)),
+             (ID_SIZE_BYTES - 10) * BITS_PER_BYTE),
+            
+            (Id(BIN_ID0[:-1] + '\x06'),
+             2),
+            )
+        id2_log = (  # (id_a, id_b, expected log-distance) triples
+            (Id('\x41' * ID_SIZE_BYTES),
+             Id('\x41' * ID_SIZE_BYTES),
+             -1),
+
+            (Id('\x41' * ID_SIZE_BYTES),
+             Id('\x01' * ID_SIZE_BYTES),
+             158),
+
+            (Id('\x41' * ID_SIZE_BYTES),
+             Id('\x43' * ID_SIZE_BYTES),
+             153),
+            )
+
+        for (id_, log_) in id_log:
+            logger.debug('log_distance: %d' % id0.log_distance(id_))
+            logger.debug('expected: %d' % log_)
+            eq_(id0.log_distance(id_), log_)
+        for id1, id2, expected in id2_log:
+            eq_(id1.log_distance(id2), expected)
+
+            z = Id('\0'*20)  # NOTE(review): these five identical checks re-run on every loop iteration — debug leftover?
+            eq_(z.log_distance(Id('\x00'*19+'\x00')), -1)
+            eq_(z.log_distance(Id('\x00'*19+'\x00')), -1)
+            eq_(z.log_distance(Id('\x00'*19+'\x00')), -1)
+            eq_(z.log_distance(Id('\x00'*19+'\x00')), -1)
+            eq_(z.log_distance(Id('\x00'*19+'\x00')), -1)
+
+
+
+    def test_order_closest(self):  # order_closest must sort by distance without mutating its input
+        id0 = Id(BIN_ID0)
+        ordered_list = [
+            Id('\x00' * ID_SIZE_BYTES),
+            Id(BIN_ID0[:-1] + '\x06'),
+            Id(BIN_ID0[:9] + '\x01' * (ID_SIZE_BYTES - 9)),
+            Id(BIN_ID0[:7] + '\xff' * (ID_SIZE_BYTES - 7)),
+            Id(BIN_ID0[:7] + '\xff' * (ID_SIZE_BYTES - 7)),
+            Id('\x00' + '\xff' * (ID_SIZE_BYTES - 1)),
+            Id('\x53' * ID_SIZE_BYTES),
+            Id('\xff' * ID_SIZE_BYTES),
+            ]
+        random_list = random.sample(ordered_list, len(ordered_list))
+
+        random_list_copy = random_list[:]
+
+        logger.debug('ordered list')
+        for e in ordered_list: logger.debug('%s' % e)
+        logger.debug('random order')
+        for e in random_list: logger.debug('%s' % e)
+
+        result_list = id0.order_closest(random_list)
+        logger.debug('order_closest result')
+        for e in result_list: logger.debug('%s' % e)
+        logger.debug('random order (it should not change)')
+        for e in random_list: logger.debug('%s' % e)
+
+        # make sure order_closest does not modify random_list
+        assert random_list == random_list_copy
+        
+        for i, ordered_id in enumerate(ordered_list):
+            logger.debug('%d, %s, %s' % (i, ordered_id, result_list[i]))
+            assert ordered_id.bin_id == result_list[i].bin_id
+            # Notice that 'assert ordered_id is result_id'
+            # do not work when two Id instances have the same bin_id
+
+    def test_generate_closest_id(self):  # generate_close_id(i) must land exactly at log-distance i (and -1 for itself)
+        id_ = RandomId()
+        for i in [-1] + range(ID_SIZE_BITS):
+            eq_(id_.log_distance(id_.generate_close_id(i)), i)
+
+            
+class TestRandomId:  # NOTE(review): this loop runs once at class-definition (import) time, not as a collected test method
+    for i in xrange(123):
+        assert RandomId() != RandomId()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_kadtracker.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_kadtracker.py
new file mode 100644 (file)
index 0000000..1462ca9
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import test_const as tc
+
+import kadtracker
+
+class TestKadTracker:  # smoke test: get_peers then stop on a fresh KadTracker instance
+
+    def _callback(self, *args, **kwargs):  # no-op lookup callback
+        return
+    
+    def setup(self):
+        self.dht = kadtracker.KadTracker(tc.CLIENT_ADDR, '.')  # logs go to the current directory
+
+    def test_all(self):
+        #self.dht.start()
+        self.dht.get_peers(tc.INFO_HASH, self._callback, tc.BT_PORT)
+        self.dht.stop()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_lookup_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_lookup_manager.py
new file mode 100644 (file)
index 0000000..d93fb02
--- /dev/null
@@ -0,0 +1,381 @@
+# Copyright (C) 2009 Flutra Osmani, Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import eq_, ok_, assert_raises
+import test_const as tc
+import logging, logging_conf
+
+import time
+
+import querier
+from routing_manager import RoutingManagerMock
+import lookup_manager
+import message
+from identifier import Id, ID_SIZE_BYTES
+from node import Node
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+class TestLookupQueue:  # tests for _LookupQueue: bounded queue (capacity 4) yielding nodes closest to the target first
+
+    def setup(self):
+        self.lookup = lookup_manager._LookupQueue(tc.INFO_HASH_ZERO, 4)
+
+    def test_add_pop1(self):  # only the 4 closest survive; duplicates and already-seen nodes are ignored
+        nodes = (tc.NODES_LD_IH[157][0],
+                 tc.NODES_LD_IH[158][1],
+                 tc.NODES_LD_IH[154][2],
+                 tc.NODES_LD_IH[159][3],
+                 tc.NODES_LD_IH[158][4],
+                 tc.NODES_LD_IH[152][5],)
+        self.lookup.add(nodes)
+        # Just the 4 closest nodes are added
+        #This second add doesn't affect (duplicates are ignored)
+        self.lookup.add(nodes)
+        eq_(self.lookup.pop_closest_node(), tc.NODES_LD_IH[152][5])
+        eq_(self.lookup.pop_closest_node(), tc.NODES_LD_IH[154][2])
+        eq_(self.lookup.pop_closest_node(), tc.NODES_LD_IH[157][0])
+        eq_(self.lookup.pop_closest_node(), tc.NODES_LD_IH[158][1])
+        # Now the queue is empty
+        assert_raises(IndexError, self.lookup.pop_closest_node)
+        self.lookup.add(nodes)
+        # The nodes added are ingnored
+        assert_raises(IndexError, self.lookup.pop_closest_node)
+
+
+    def _test_add_pop2(self):  # leading underscore: disabled (not collected as a test)
+        self.lookup.add(tc.NODES[3:6])
+        eq_(self.lookup.pop_closest_node(), tc.NODES[3])
+        eq_(self.lookup.pop_closest_node(), tc.NODES[4])
+        self.lookup.add(tc.NODES[2:3])
+        eq_(self.lookup.pop_closest_node(), tc.NODES[2])
+        eq_(self.lookup.pop_closest_node(), tc.NODES[5])
+        # Empty
+        assert_raises(IndexError, self.lookup.pop_closest_node)
+        # This add only affects 0,1,6,7
+        self.lookup.add(tc.NODES)
+        eq_(self.lookup.pop_closest_node(), tc.NODES[0])
+        eq_(self.lookup.pop_closest_node(), tc.NODES[1])
+        eq_(self.lookup.pop_closest_node(), tc.NODES[6])
+        eq_(self.lookup.pop_closest_node(), tc.NODES[7])
+
+
+class TestGetPeersLookup:
+
+    def _callback(self, peers):
+        self.got_peers = peers
+
+    def setup(self):
+        self.got_peers = None
+        querier_ = querier.QuerierMock(tc.CLIENT_ID)
+        bootstrap_nodes = RoutingManagerMock(
+            ).get_closest_rnodes(tc.INFO_HASH_ZERO)
+        self.lookup = lookup_manager.GetPeersLookup(tc.CLIENT_ID,
+                                                querier_,
+                                                2,
+                                                tc.INFO_HASH_ZERO,
+                                                self._callback,
+                                                bootstrap_nodes)
+
+    def test_n(self):
+        pass
+        
+    def _test_complete(self):
+        self.lookup.start()
+        """Start sends two parallel queries to the closest
+        bootstrap nodes (to the INFO_HASH)
+
+        """
+        # Ongoing queries to (sorted: oldest first):
+        # 155-4, 157-3, 
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 158-1, 159-0
+        # Notice 159-2 is kicked out from the queue
+        logger.critical("")
+        eq_(self.lookup.num_parallel_queries, 2)
+        nodes = [tc.NODES_LD_IH[157][5],
+                 tc.NODES_LD_IH[152][6],
+                 tc.NODES_LD_IH[158][7]]
+        self.lookup._on_response(*_gen_nodes_args(
+                tc.NODES_LD_IH[157][3],
+                nodes))
+        eq_(self.lookup._get_announce_candidates(),
+            [tc.NODES_LD_IH[157][3],
+             ])
+        # This response triggers a new query (to 152-6)
+        eq_(self.lookup.num_parallel_queries, 2)
+        # Ongoing queries to (sorted: oldest first):
+        # 155-4, 152-6
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 157-5, 158-1, 158-7, 159-0
+        self.lookup._on_timeout(tc.NODES_LD_IH[155][4])
+        eq_(self.lookup.num_parallel_queries, 2)
+        # This timeout triggers a new query (to 157-5)
+        eq_(self.lookup.num_parallel_queries, 2) 
+        # Ongoing queries to (sorted: oldest first):
+        # 155-4, 157-5 
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 158-1, 158-7, 159-0
+        self.lookup._on_timeout(tc.NODES_LD_IH[155][4])
+        # This timeout triggers a new query (to 158-1)
+        eq_(self.lookup.num_parallel_queries, 2) 
+        # Ongoing queries to (sorted: oldest first):
+        # 152-6, 158-1
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 158-7, 159-0
+        nodes = [tc.NODES_LD_IH[151][8],
+                 tc.NODES_LD_IH[150][9]]
+        self.lookup._on_response(*_gen_nodes_args(
+                tc.NODES_LD_IH[152][6],
+                nodes))
+        eq_(self.lookup._get_announce_candidates(),
+            [tc.NODES_LD_IH[152][6],
+             tc.NODES_LD_IH[157][3],
+             ])
+        # This response triggers a new query (to 150-9)
+        eq_(self.lookup.num_parallel_queries, 2) 
+        # Ongoing queries to (sorted: oldest first):
+        # 157-5, 150-9
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 151-8, 158-7, 159-0
+        nodes = [tc.NODES_LD_IH[151][10],
+                 tc.NODES_LD_IH[151][11],
+                 tc.NODES_LD_IH[156][12],
+                 tc.NODES_LD_IH[156][13],
+                 ]
+        self.lookup._on_response(*_gen_nodes_args(
+                tc.NODES_LD_IH[157][5],
+                nodes))
+        eq_(self.lookup._get_announce_candidates(),
+            [tc.NODES_LD_IH[152][6],
+             tc.NODES_LD_IH[157][3],
+             tc.NODES_LD_IH[157][5],
+                                               ])
+        # This response triggers a new query (to 151-8)
+        eq_(self.lookup.num_parallel_queries, 2) 
+        # Ongoing queries to (sorted: oldest first):
+        # 150-9, 151-8
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 151-10, 151-11, 156-12, 156-13
+        # Notice that the lookup queue size limit is 4, therefore
+        # 158-7 and 159-0 are removed from the queue
+        self.lookup._on_error(None, tc.NODES_LD_IH[151][8])
+        # This error triggers a new query (to 151-8)
+        eq_(self.lookup.num_parallel_queries, 2)
+        # Ongoing queries to (sorted: oldest first):
+        # 150-9, 151-10
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 151-11, 156-12, 156-13
+        self.lookup._on_timeout(tc.NODES_LD_IH[151][8])
+        # This timeout triggers a new query (to 151-11)
+        eq_(self.lookup.num_parallel_queries, 2)
+        # Ongoing queries to (sorted: oldest first):
+        # 151-10, 151-11
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 156-12, 156-13
+        nodes = [tc.NODES_LD_IH[144][14],
+                 tc.NODES_LD_IH[145][15],
+                 tc.NODES_LD_IH[145][16],
+                 tc.NODES_LD_IH[145][17],
+                 ]
+        self.lookup._on_response(*_gen_nodes_args(
+                tc.NODES_LD_IH[151][10],
+                nodes))
+        eq_(self.lookup._get_announce_candidates(), [tc.NODES_LD_IH[151][10],
+                                                     tc.NODES_LD_IH[152][6],
+                                                     tc.NODES_LD_IH[157][3],
+                                               ])
+        # This response triggers a new query (to 144-14)
+        eq_(self.lookup.num_parallel_queries, 2)
+        # Ongoing queries to (sorted: oldest first):
+        # 151-11, 144-14
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # Notice 156-13 is removed
+        # 145-15, 145-16, 145-17, 156-12
+        peers = [tc.NO_ADDR]
+        ok_(not self.got_peers)
+        self.lookup._on_response(*_gen_peers_args(
+                tc.NODES_LD_IH[144][14],
+                peers))
+        eq_(self.lookup._get_announce_candidates(), [tc.NODES_LD_IH[144][14],
+                                               tc.NODES_LD_IH[151][10],
+                                               tc.NODES_LD_IH[152][6],
+                                               ])
+        ok_(self.got_peers)
+        self.got_peers = False
+        # The response with peers halves parallelism to 1.
+        # No new query is  triggered.
+        eq_(self.lookup.num_parallel_queries, 1)
+        # Ongoing queries to (sorted: oldest first):
+        # 151-11
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 145-15, 145-16, 156-12
+        self.lookup._on_timeout(tc.NODES_LD_IH[151][11])
+        # This timeout triggers a new query (to 145-15)
+        eq_(self.lookup.num_parallel_queries, 1)
+        # Ongoing queries to (sorted: oldest first):
+        # 145-15
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 145-16, 145-17, 156-12
+        peers = [tc.NO_ADDR]
+        ok_(not self.got_peers)
+        self.lookup._on_response(*_gen_peers_args(
+                tc.NODES_LD_IH[145][15],
+                peers))
+        # This response triggers a new query (to 145-16)
+        # The parallelism is not halved (remains 1).
+        eq_(self.lookup.num_parallel_queries, 1)
+        # Ongoing queries to (sorted: oldest first):
+        # 145-16
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 145-17, 156-12
+        eq_(self.lookup._get_announce_candidates(), [tc.NODES_LD_IH[144][14],
+                                               tc.NODES_LD_IH[145][15],
+                                               tc.NODES_LD_IH[151][10],
+                                               ])
+        ok_(self.got_peers)
+        self.got_peers = False
+        self.lookup._on_timeout(tc.NODES_LD_IH[145][16])
+        # This timeout triggers a new query (to 145-17)
+        eq_(self.lookup.num_parallel_queries, 1)
+        # Ongoing queries to (sorted: oldest first):
+        # 145-17
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 156-12
+        self.lookup._on_timeout(tc.NODES_LD_IH[145][17])
+        # This timeout triggers a new query (to 156-12)
+        return
+        eq_(self.lookup.num_parallel_queries, 1)
+        # Ongoing queries to (sorted: oldest first):
+        # 156-12
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 
+        nodes = [tc.NODES_LD_IH[144][18],
+                 tc.NODES_LD_IH[145][19],
+                 ]
+        self.lookup._on_response(*_gen_nodes_args(
+                tc.NODES_LD_IH[156][12],
+                nodes))
+        eq_(self.lookup._get_announce_candidates(), [tc.NODES_LD_IH[144][14],
+                                               tc.NODES_LD_IH[145][15],
+                                               tc.NODES_LD_IH[151][10],
+                                               ])
+        # This response triggers a new query (to 144-18)
+        eq_(self.lookup.num_parallel_queries, 1)
+        # Ongoing queries to (sorted: oldest first):
+        # 144-18
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 145-19
+        peers = [tc.NO_ADDR]
+        ok_(not self.got_peers)
+        self.lookup._on_response(*_gen_peers_args(
+                tc.NODES_LD_IH[144][18],
+                peers))
+        eq_(self.lookup._get_announce_candidates(), [tc.NODES_LD_IH[144][14],
+                                               tc.NODES_LD_IH[144][18],
+                                               tc.NODES_LD_IH[145][15],
+                                               ])
+        ok_(self.got_peers)
+        self.got_peers = False
+        # This timeout triggers a new query (145-19)
+        eq_(self.lookup.num_parallel_queries, 0)
+        # Ongoing queries to (sorted: oldest first):
+        # 145-19
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        #
+        ok_(not self.lookup.is_done)
+        self.lookup._on_timeout(tc.NODES_LD_IH[145][19])
+        # THE END
+        eq_(self.lookup.num_parallel_queries, 0)
+        ok_(self.lookup.is_done)
+
+    def test_dont_query_myself(self):
+        logger.debug('test start')
+        self.lookup.start()
+        # Ongoing queries to (sorted: oldest first):
+        # 155-4, 157-3, 
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 158-1, 159-0
+        # Notice 159-2 is kicked out from the queue
+        eq_(self.lookup.num_parallel_queries, 2)
+        nodes = [Node(tc.CLIENT_ADDR, self.lookup._my_id)]
+        self.lookup._on_response(*_gen_nodes_args(
+                tc.NODES_LD_IH[157][3],
+                nodes))
+        eq_(self.lookup._get_announce_candidates(),
+            [tc.NODES_LD_IH[157][3],
+             ])
+        # This response triggers a new query to 158-1 (ignoring myself)
+        eq_(self.lookup.num_parallel_queries, 2)
+        # Ongoing queries to (sorted: oldest first):
+        # 155-4, 158-1
+        # Queued nodes to query (sorted by log_distance to info_hash):
+        # 159-0
+        self.lookup._on_timeout(tc.NODES_LD_IH[155][4])
+        # This timeout triggers a new query (to 159-0)
+        eq_(self.lookup.num_parallel_queries, 2) 
+        self.lookup._on_timeout(tc.NODES_LD_IH[158][1])
+        # No more nodes to send queries to
+        eq_(self.lookup.num_parallel_queries, 1)
+        ok_(not self.lookup.is_done)
+        self.lookup._on_timeout(tc.NODES_LD_IH[159][0]) 
+        # No more nodes to send queries to
+        eq_(self.lookup.num_parallel_queries, 0)
+        ok_(self.lookup.is_done)
+
+        
+class TestLookupManager:
+
+    def _on_got_peers(self, peers):
+        self.got_peers = peers
+    
+    
+    def setup(self):
+        self.got_peers = None
+        querier_ = querier.QuerierMock(tc.CLIENT_ID)
+        routing_m = RoutingManagerMock()
+        self.bootstrap_nodes = routing_m.get_closest_rnodes(
+            tc.INFO_HASH_ZERO)
+        self.lm = lookup_manager.LookupManager(tc.CLIENT_ID,
+                                               querier_,
+                                               routing_m,
+                                               2)
+        self.lookup = self.lm.get_peers(tc.INFO_HASH, self._on_got_peers,
+                                   tc.BT_PORT)
+
+    def test_all_nodes_timeout(self):
+        for node_ in self.bootstrap_nodes:
+            self.lookup._on_timeout(node_)
+        ok_(self.lookup.is_done)
+
+    def test_peers(self):
+        self.lookup._on_response(*_gen_peers_args(
+                self.bootstrap_nodes[0],
+                [tc.NO_ADDR]))
+        for node_ in self.bootstrap_nodes[1:]:
+            self.lookup._on_timeout(node_)
+        ok_(self.lookup.is_done)
+    def teardown(self):
+        self.lm.stop()
+        
+def _gen_nodes_args(node_, nodes):
+    out_msg = message.OutgoingGetPeersResponse(
+        node_.id,
+        tc.TOKEN,
+        nodes2=nodes).encode(tc.TID)
+    in_msg = message.IncomingMsg(out_msg)
+    in_msg.sanitize_response(message.GET_PEERS)
+    return in_msg, node_
+
+def _gen_peers_args(node_, peers):
+    out_msg = message.OutgoingGetPeersResponse(
+        node_.id,
+        tc.TOKEN,
+        peers=peers).encode(tc.TID)
+    in_msg = message.IncomingMsg(out_msg)
+    in_msg.sanitize_response(message.GET_PEERS)
+    return in_msg, node_
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_message.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_message.py
new file mode 100644 (file)
index 0000000..d30b5c1
--- /dev/null
@@ -0,0 +1,446 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import *
+
+import node
+import logging, logging_conf
+
+import test_const as tc
+import message
+from message import *
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+class TestMsg:
+
+    def setup(self):
+        pass
+
+    def test_tools(self):
+        bin_strs = ['23', '\1\5', 'a\3']
+        for bs in bin_strs:
+            i = bin_to_int(bs)
+            bs2 = int_to_bin(i)
+            logger.debug('bs: %s, bin_to_int(bs): %d, bs2: %s' % (bs,
+                                                                   i, bs2))
+            assert bs == bs2
+
+        ips = ['127.0.0.1', '222.222.222.222', '1.2.3.4']
+        ports = [12345, 99, 54321] 
+        for addr in zip(ips, ports):
+            c_addr = compact_addr(addr)
+            addr2 = uncompact_addr(c_addr)
+            assert addr == addr2
+
+            c_peers = message._compact_peers(tc.PEERS)
+            peers = message._uncompact_peers(c_peers)
+            for p1, p2 in zip(tc.PEERS, peers):
+                assert p1[0] == p2[0]
+                assert p1[0] == p2[0]
+            
+            c_nodes = message._compact_nodes(tc.NODES)
+            nodes = message._uncompact_nodes(c_nodes)
+            for n1, n2 in zip(tc.NODES, nodes):
+                assert n1 == n2
+
+        bin_ipv6s = ['\x00' * 10 + '\xff\xff' + '\1\2\3\4',
+                     '\x22' * 16,
+                     ]
+        assert bin_to_ip(bin_ipv6s[0]) == '1.2.3.4'
+        assert_raises(AddrError, bin_to_ip, bin_ipv6s[1])
+
+
+        PORT = 7777
+        BIN_PORT = int_to_bin(PORT)
+        c_nodes2 = [tc.CLIENT_ID.bin_id + ip + BIN_PORT for ip in bin_ipv6s]
+        nodes2 = [node.Node(('1.2.3.4', PORT), tc.CLIENT_ID)]
+        logger.debug(message._uncompact_nodes2(c_nodes2))
+        assert message._uncompact_nodes2(c_nodes2) == nodes2 
+        logger.warning(
+            "**IGNORE WARNING LOG** This exception was raised by a test")
+       
+
+    def test_tools_error(self):
+        c_nodes = message._compact_nodes(tc.NODES)
+        # Compact nodes is one byte short
+        assert_raises(MsgError, message._uncompact_nodes, c_nodes[:-1])
+        # IP size is weird
+        assert_raises(MsgError, bin_to_ip, '123')
+        # Port is 0 (
+        eq_(message._uncompact_nodes(c_nodes), tc.NODES)
+        n = tc.NODES[0]
+        tc.NODES[0] = node.Node((n.addr[0], 0), n.id)
+        c_nodes = message._compact_nodes(tc.NODES)
+        eq_(message._uncompact_nodes(c_nodes), tc.NODES[1:])
+        c_nodes2 = message._compact_nodes2(tc.NODES)
+        eq_(message._uncompact_nodes2(c_nodes2), tc.NODES[1:])
+        tc.NODES[0] = n
+        
+    def test_matching_tid(self):
+        # It _only_ matches the first byte)
+        ok_(matching_tid('aaa', 'aaa'))
+        ok_(matching_tid('axa', 'a1a'))
+        ok_(matching_tid('aQWEREWTWETWTWETWETEWT', 'a'))
+        ok_(not matching_tid('a', 'b'))
+        ok_(not matching_tid('aZZ', 'bZZ'))
+        
+    def test_ping(self):
+        #client
+        outgoing_query = OutgoingPingQuery(tc.CLIENT_ID)
+        data = outgoing_query.encode(tc.TID) # query_manager would do it
+        #server
+        incoming_query = IncomingMsg(data)
+        assert incoming_query.type is QUERY
+        outgoing_response = OutgoingPingResponse(tc.SERVER_ID)
+        data = outgoing_response.encode(incoming_query.tid)
+        #client
+        incoming_response = IncomingMsg(data)
+        assert incoming_response.type is RESPONSE
+        incoming_response.sanitize_response(outgoing_query.query)
+
+    def _test_ping_error(self):
+        outgoing_query = OutgoingPingQuery(tc.CLIENT_ID)
+        #outgoing_query.my_id = CLIENT_ID
+        #outgoing_query.tid = tc.TID
+        # TID and ARGS ID are None
+        assert_raises(MsgError, outgoing_query.encode)
+        logger.error(
+            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
+
+        outgoing_query = OutgoingPingQuery()
+        outgoing_query.my_id = tc.CLIENT_ID
+        #outgoing_query.tid = tc.TID
+        assert_raises(MsgError, outgoing_query.encode)
+        logger.error(
+            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
+
+        outgoing_query = OutgoingPingQuery()
+        #outgoing_query.my_id = tc.CLIENT_ID
+        outgoing_query.tid = tc.TID
+        assert_raises(MsgError, outgoing_query.encode)
+        logger.error(
+            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
+        
+        outgoing_query = OutgoingPingQuery()
+        assert_raises(MsgError, outgoing_query.__setattr__, 'my_id', '')
+        logger.error(
+            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
+                
+        outgoing_query = OutgoingPingQuery()
+        outgoing_query.my_id = tc.CLIENT_ID
+        outgoing_query.tid = 567
+        data = outgoing_query.encode()
+        assert_raises(MsgError, decode, data)
+        logger.error(
+            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
+
+        outgoing_query = OutgoingPingQuery()
+        outgoing_query.my_id = tc.CLIENT_ID
+        outgoing_query.tid = tc.TID
+        data = outgoing_query.encode()
+        data += 'this string ruins the bencoded msg'
+        assert_raises(MsgError, decode, data)
+        logger.error(
+            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
+
+
+
+        
+        outgoing_response = OutgoingPingResponse(tc.TID, tc.SERVER_ID)
+        outgoing_response.tid = None
+        assert_raises(MsgError, outgoing_response.encode)
+        logger.error(
+            "**IGNORE ERROR LOGS** This exception was raised by a test")
+
+            
+    def test_find_node(self):
+        #client
+        outgoing_query = OutgoingFindNodeQuery(tc.CLIENT_ID, tc.NODE_ID)
+        data = outgoing_query.encode(tc.TID)
+        #server
+        incoming_query = IncomingMsg(data)
+        assert incoming_query.type is QUERY
+        outgoing_response = OutgoingFindNodeResponse(tc.SERVER_ID,
+                                                     tc.NODES)
+        data = outgoing_response.encode(incoming_query.tid)
+        #client
+        incoming_response = IncomingMsg(data)
+        eq_(incoming_response.type, RESPONSE)
+        incoming_response.sanitize_response(outgoing_query.query)
+        for n1, n2 in zip(tc.NODES, incoming_response.nodes2):
+            eq_(n1, n2)
+
+
+    def test_find_node_error(self):
+        assert_raises(MsgError, OutgoingFindNodeResponse,
+                      tc.CLIENT_ID, nodes=tc.NODES)
+        assert_raises(MsgError, OutgoingFindNodeResponse,
+                      tc.CLIENT_ID)
+
+        
+    def test_get_peers_nodes(self):
+        #client
+        outgoing_query = OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)
+        data = outgoing_query.encode(tc.TID)
+        #server
+        incoming_query = IncomingMsg(data)
+        assert incoming_query.type is QUERY
+        outgoing_response = OutgoingGetPeersResponse(tc.SERVER_ID,
+                                                     tc.TOKEN,
+                                                     nodes2=tc.NODES)
+        data = outgoing_response.encode(incoming_query.tid)
+        #client
+        incoming_response = IncomingMsg(data)
+        assert incoming_response.type is RESPONSE
+        incoming_response.sanitize_response(outgoing_query.query)
+        for n1, n2 in zip(tc.NODES, incoming_response.nodes2):
+            assert n1 == n2
+
+    def test_get_peers_nodes_error(self):
+        assert_raises(MsgError, OutgoingGetPeersResponse,
+                      tc.CLIENT_ID, tc.TOKEN)
+                        
+    def test_get_peers_peers(self):
+        #client
+        outgoing_query = OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)
+        data = outgoing_query.encode(tc.TID)
+        #server
+        incoming_query = IncomingMsg(data)
+        assert incoming_query.type is QUERY
+        outgoing_response = OutgoingGetPeersResponse(tc.SERVER_ID,
+                                                     tc.TOKEN,
+                                                     peers=tc.PEERS)
+        data = outgoing_response.encode(incoming_query.tid)
+        #client
+        incoming_response = IncomingMsg(data)
+        assert incoming_response.type is RESPONSE
+        incoming_response.sanitize_response(outgoing_query.query)
+        for p1, p2 in zip(tc.PEERS, incoming_response.peers):
+            assert p1[0] == p2[0]
+            assert p1[1] == p2[1]
+
+    def test_get_peers_peers_error(self):
+        assert 1
+
+    def test_announce_peer(self):
+        #client
+        outgoing_query = OutgoingAnnouncePeerQuery(tc.CLIENT_ID,
+                                                   tc.INFO_HASH,
+                                                   tc.BT_PORT,
+                                                   tc.TOKEN)
+        outgoing_query.tid = tc.TID
+        data = outgoing_query.encode(tc.TID)
+        #server
+        incoming_query = IncomingMsg(data)
+        assert incoming_query.type is QUERY
+        outgoing_response = OutgoingAnnouncePeerResponse(tc.SERVER_ID)
+        data = outgoing_response.encode(incoming_query.tid)
+        #client
+        incoming_response = IncomingMsg(data)
+        assert incoming_response.type is RESPONSE
+        incoming_response.sanitize_response(outgoing_query.query)
+
+    def test_announce_peer_error(self):
+        assert 1
+
+    def _test_error(self):
+        outgoing_error_msg = OutgoingErrorMsg(tc.TID, GENERIC_E)
+        data = outgoing_error_msg.encode()
+        tid, msg_type, msg_dict = decode(data)
+        incoming_error_msg = IncomingErrorMsg(msg_dict)
+        logger.debug(incoming_error_msg.error)
+        assert incoming_error_msg.error == GENERIC_E
+
+
+def value_is_string(msg_d, k, valid_values=None):
+    v = msg_d[k]
+    ok_(isinstance(v, str))
+    
+        
+
+class TestIncomingMsg:
+
+    def setup(self):
+        b_ping = OutgoingPingQuery(tc.CLIENT_ID).encode(tc.TID)
+        self.msg_d = IncomingMsg(b_ping)._msg_dict
+
+    def test_bad_bencode(self):
+        assert_raises(MsgError, IncomingMsg, 'z')
+        assert_raises(MsgError, IncomingMsg, '1:aa')
+        assert_raises(MsgError, IncomingMsg, 'd')
+
+    def test_not_a_dict(self):
+        msgs = ([], 'a', 1)
+        for msg in msgs:               
+            assert_raises(MsgError, IncomingMsg, bencode.encode(msg))
+
+    def test_tid_error(self):
+        # no TID
+        del self.msg_d[TID] 
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        # invalid TID
+        self.msg_d[TID] = 1
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        self.msg_d[TID] = []
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        self.msg_d[TID] = {}
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        
+    def test_type_error(self):
+        # no TYPE
+        del self.msg_d[TYPE] 
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        # invalid TYPE
+        self.msg_d[TYPE] = 1
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        self.msg_d[TYPE] = []
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        self.msg_d[TYPE] = {}
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+        # unknown TYPE
+        self.msg_d[TYPE] = 'z'
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.msg_d))
+
+    def test_version_not_present(self):
+        del self.msg_d[VERSION]
+        IncomingMsg(bencode.encode(self.msg_d))
+
+    def test_unknown_error(self):
+        error_code = (999, "some weird error string")
+        b_err = OutgoingErrorMsg(error_code).encode(tc.TID)
+        
+        logger.info(
+            "TEST LOGGING ** IGNORE EXPECTED INFO ** Unknown error: %r",
+            error_code)
+        _ = IncomingMsg(b_err)
+
+
+        
+b_ping_q = OutgoingPingQuery(tc.CLIENT_ID).encode(tc.TID)
+b_fn_q = OutgoingFindNodeQuery(tc.CLIENT_ID, tc.NODE_ID).encode(tc.TID)
+b_gp_q = OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH).encode(tc.TID)
+b_ap_q = OutgoingAnnouncePeerQuery(tc.CLIENT_ID, tc.INFO_HASH,
+                                 tc.BT_PORT,tc.TOKEN).encode(tc.TID)
+
+class TestSanitizeQueryError:
+
+    def setup(self):
+        self.ping_d = IncomingMsg(b_ping_q)._msg_dict
+        self.fn_d = IncomingMsg(b_fn_q)._msg_dict
+        self.gp_d = IncomingMsg(b_gp_q)._msg_dict
+        self.ap_d = IncomingMsg(b_ap_q)._msg_dict
+
+    def test_weird_msg(self):
+        self.ping_d[ARGS] = []
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        self.ping_d[ARGS] = 1
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        self.ping_d[ARGS] = 'ZZZZ'
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        
+        
+        
+    def test_sender_id(self):
+        # no sender_id
+        del self.ping_d[ARGS][ID]
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        # bad ID
+        self.ping_d[ARGS][ID] = 'a'
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        self.ping_d[ARGS][ID] = 1
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        self.ping_d[ARGS][ID] = []
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        self.ping_d[ARGS][ID] = {}
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+
+    def test_query(self): 
+        # no QUERY
+        del self.ping_d[QUERY]
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        # bad QUERY
+        self.ping_d[QUERY] = 1
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        self.ping_d[QUERY] = []
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        self.ping_d[QUERY] = {}
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ping_d))
+        # unknown QUERY is not an error at this point
+        # responder will process it and send an errror msg if necesary
+        self.ping_d[QUERY] = 'a'
+        IncomingMsg(bencode.encode(self.ping_d))
+
+    def test_announce(self):
+        # Port must be integer
+        self.ap_d[ARGS][PORT] = 'a'
+        assert_raises(MsgError, IncomingMsg, bencode.encode(self.ap_d))
+
+        
+b_ping_r = OutgoingPingResponse(tc.CLIENT_ID).encode(tc.TID)
+b_fn2_r = OutgoingFindNodeResponse(tc.CLIENT_ID, nodes2=tc.NODES).encode(tc.TID)
+b_gp_r = OutgoingGetPeersResponse(tc.CLIENT_ID, token=tc.TOKEN,
+                                peers=tc.PEERS).encode(tc.TID)
+b_ap_r = OutgoingAnnouncePeerResponse(tc.CLIENT_ID).encode(tc.TID)
+
+class TestSanitizeResponseError:
+
+    def setup(self):
+        self.ping_r = IncomingMsg(b_ping_r)
+        self.fn2_r = IncomingMsg(b_fn2_r)
+        self.gp_r = IncomingMsg(b_gp_r)
+        self.ap_r = IncomingMsg(b_ap_r)
+
+    def test_nodes_not_implemented(self):
+        assert_raises(MsgError, OutgoingFindNodeResponse, tc.CLIENT_ID,
+                                        nodes=tc.NODES)
+    def test_sanitize(self):
+        self.ping_r.sanitize_response(PING)
+
+        del self.fn2_r._msg_dict[RESPONSE][NODES2]
+        # No NODES and no NODES2
+        assert_raises(MsgError, self.fn2_r.sanitize_response, FIND_NODE)
+        self.fn2_r._msg_dict[RESPONSE][NODES] = \
+            message._compact_nodes(tc.NODES)
+        # Just NODES
+        self.fn2_r.sanitize_response(FIND_NODE)
+        self.fn2_r._msg_dict[RESPONSE][NODES2] = \
+            message._compact_nodes2(tc.NODES)
+        # Both NODES and NODES2
+        self.fn2_r.sanitize_response(FIND_NODE)
+
+        # Both NODES and PEERS in response
+        self.gp_r._msg_dict[RESPONSE][NODES] = \
+            message._compact_nodes(tc.NODES)
+        self.gp_r.sanitize_response(GET_PEERS)
+        # No NODES and no PEERS
+        del self.gp_r._msg_dict[RESPONSE][NODES]
+        del self.gp_r._msg_dict[RESPONSE][VALUES]
+        assert_raises(MsgError, self.gp_r.sanitize_response, GET_PEERS)
+        
+        
+class TestSanitizeErrorError:
+
+    def test(self):
+        msg_out = OutgoingErrorMsg(1).encode(tc.TID)
+        assert_raises(MsgError, IncomingMsg, msg_out)
+        # Unknown error doesn't raise MsgError
+        msg_out = OutgoingErrorMsg((1,1)).encode(tc.TID)
+        _ = IncomingMsg(msg_out)
+    
+
+
+        
+class TestPrinting:
+    
+    def test_printing(self):
+        out_msg = OutgoingPingQuery(tc.CLIENT_ID)
+        in_msg = IncomingMsg(out_msg.encode(tc.TID))
+        str(out_msg)
+        repr(out_msg)
+        repr(in_msg)
+    
+                  
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_minitwisted.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_minitwisted.py
new file mode 100644 (file)
index 0000000..92cc8a2
--- /dev/null
@@ -0,0 +1,320 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from __future__ import with_statement
+import threading
+import time
+
+import logging, logging_conf
+
+from nose.tools import eq_, ok_, assert_raises
+import test_const as tc
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+from minitwisted import Task, TaskManager, \
+     ThreadedReactor, ThreadedReactorMock, \
+     ThreadedReactorSocketError
+
+
+ADDRS= (tc.CLIENT_ADDR, tc.SERVER_ADDR)
+DATA = 'testing...'
+
+
+class TestTaskManager:
+    
+    def callback_f(self, callback_id):
+        self.callback_order.append(callback_id)
+        
+    def setup(self):
+        # Order in which callbacks have been fired
+        self.callback_order = []
+        self.task_m = TaskManager()
+
+    def test_simple(self):
+        for i in xrange(5):
+            self.task_m.add(Task(.01, self.callback_f, i))
+        while True:
+            task = self.task_m.consume_task()
+            if task is None:
+                break
+            task.fire_callback()
+        logger.debug('%s' % self.callback_order)
+        assert self.callback_order == []
+        time.sleep(.01)
+        while True:
+            task = self.task_m.consume_task()
+            if task is None:
+                break
+            task.fire_callbacks() 
+        assert self.callback_order == range(5)
+
+    def test_cancel(self):
+        for i in xrange(5):
+            self.task_m.add(Task(.1, self.callback_f, i))
+        c_task = Task(.1, self.callback_f, 5)
+        self.task_m.add(c_task)
+        for i in xrange(6,10):
+            self.task_m.add(Task(.1, self.callback_f, i))
+        while True:
+            task = self.task_m.consume_task()
+            if task is None:
+                break
+            task.fire_callback()
+        logger.debug('%s' % self.callback_order)
+        assert self.callback_order == []
+        ok_(not c_task.cancelled)
+        c_task.cancel()
+        ok_(c_task.cancelled)
+        
+        time.sleep(.1)
+        while True:
+            task = self.task_m.consume_task()
+            if task is None:
+                break
+            task.fire_callbacks()
+        logger.debug('%s' % self.callback_order)
+        assert self.callback_order == [0,1,2,3,4,  6,7,8,9]
+        # task 5 was cancelled        
+
+    def test_different_delay(self):
+#         NOTICE: this test might fail if your configuration
+#         (interpreter/processor) is too slow
+        
+        task_delays = (1, 1, 1, .5, 1, 1, 2, 1, 1, 1,
+                       1, 1.5, 1, 1, 1, 1, .3)
+                       
+        expected_list = ([],
+                         ['a', 16, 3, 'b'], #9 is cancelled
+                         ['a', 0, 1, 2, 4, 5, 7, 8, 10, 12, 13, 15, 'c', 'b'],
+                         ['a', 11, 'c', 'b'],
+                         ['a', 6, 'c', 'b'],
+            )
+        tasks = [Task(delay, self.callback_f, i) \
+                 for i, delay in enumerate(task_delays)]
+        for task in tasks:
+            self.task_m.add(task)
+
+        for i, expected in enumerate(expected_list):
+            while True:
+                task = self.task_m.consume_task()
+                if task is None:
+                    break
+                task.fire_callbacks()
+            logger.debug('#: %d, result: %s, expected: %s' % (i,
+                                              self.callback_order, expected))
+            assert self.callback_order == expected
+            self.callback_order = []
+            self.task_m.add(Task(0, self.callback_f, 'a'))
+            self.task_m.add(Task(.5, self.callback_f, 'b'))
+            self.task_m.add(Task(1, self.callback_f, 'c'))
+            time.sleep(.5)
+            tasks[9].cancel() # too late (already fired) 
+            tasks[14].cancel() # should be cancelled
+
+    def _callback1(self, arg1, arg2):
+        if arg1 == 1 and arg2 == 2:
+            self.callback_order.append(1)
+    def _callback2(self, arg1, arg2):
+        if arg1 == 1 and arg2 == 2:
+            self.callback_order.append(2)
+    
+    def test_callback_list(self):
+        self.task_m.add(Task(tc.TASK_INTERVAL/2,
+                              [self._callback1, self._callback2],
+                              1, 2))
+        ok_(self.task_m.consume_task() is None)
+        eq_(self.callback_order, [])
+        time.sleep(tc.TASK_INTERVAL)
+        self.task_m.consume_task().fire_callbacks()
+        eq_(self.callback_order, [1,2])
+
+class TestMinitwisted:
+
+    def on_datagram_received(self, data, addr):
+        with self.lock:
+            self.datagrams_received.append((data, addr))
+
+    def callback_f(self, callback_id):
+        with self.lock:
+            self.callback_order.append(callback_id)
+            
+    def setup(self):
+        self.lock = threading.Lock()
+        self.datagrams_received = []
+        self.callback_order = []
+        self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
+        self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
+        self.client_r.listen_udp(tc.CLIENT_ADDR[1], self.on_datagram_received)
+        self.server_r.listen_udp(tc.SERVER_ADDR[1], self.on_datagram_received)
+        self.client_r.start()
+        self.server_r.start()
+
+    def test_listen_upd(self):
+        r = ThreadedReactor()
+        r.start()
+        logger.warning(''.join(
+            ('TESTING LOGS ** IGNORE EXPECTED WARNING ** ',
+             '(udp_listen has not been called)')))
+        self.client_r.sendto(DATA, tc.SERVER_ADDR)
+        while 1: #waiting for data
+            with self.lock:
+                if self.datagrams_received:
+                    break
+            time.sleep(tc.TASK_INTERVAL)
+        with self.lock:
+            first_datagram = self.datagrams_received.pop(0)
+            logger.debug('first_datagram: %s, %s' % (
+                    first_datagram,
+                    (DATA, tc.CLIENT_ADDR)))
+            assert first_datagram, (DATA, tc.CLIENT_ADDR)
+        r.stop()
+            
+    def test_network_callback(self):
+        self.client_r.sendto(DATA, tc.SERVER_ADDR)
+        time.sleep(tc.TASK_INTERVAL)
+        with self.lock:
+            first_datagram = self.datagrams_received.pop(0)
+            logger.debug('first_datagram: %s, %s' % (
+                    first_datagram,
+                    (DATA, tc.CLIENT_ADDR)))
+            assert first_datagram, (DATA, tc.CLIENT_ADDR)
+
+    def test_block_flood(self):
+        from floodbarrier import MAX_PACKETS_PER_PERIOD as FLOOD_LIMIT
+        for _ in xrange(FLOOD_LIMIT):
+            self.client_r.sendto(DATA, tc.SERVER_ADDR)
+        for _ in xrange(10):
+            self.client_r.sendto(DATA, tc.SERVER_ADDR)
+            logger.warning(
+                "TESTING LOGS ** IGNORE EXPECTED WARNING **")
+        time.sleep(tc.TASK_INTERVAL)
+        with self.lock:
+            logger.debug('datagram processed: %d/%d' % (
+                              len(self.datagrams_received),
+                              FLOOD_LIMIT))
+            assert len(self.datagrams_received) <= FLOOD_LIMIT
+
+    def test_call_later(self):
+        self.client_r.call_later(.13, self.callback_f, 1)
+        self.client_r.call_later(.11, self.callback_f, 2)
+        self.client_r.call_later(.01, self.callback_f, 3)
+        task4 = self.client_r.call_later(.01, self.callback_f, 4)
+        task4.cancel()
+        time.sleep(.03)
+        with self.lock:
+            logger.debug('callback_order: %s' % self.callback_order)
+            assert self.callback_order == [3]
+            self.callback_order = []
+        self.client_r.call_now(self.callback_f, 5)
+        time.sleep(.03)
+        with self.lock:
+            logger.debug('callback_order: %s' % self.callback_order)
+            assert self.callback_order == [5]
+            self.callback_order = []
+        task6 = self.client_r.call_later(.03, self.callback_f, 6)
+        task6.cancel()
+        time.sleep(.1)
+        with self.lock:
+            logger.debug('callback_order: %s' % self.callback_order)
+            assert self.callback_order == [2, 1]
+
+    def test_network_and_delayed(self):
+        self.client_r.call_later(.2, self.callback_f, 0)
+        self.client_r.call_now(self.callback_f, 1)
+        task2 = self.client_r.call_later(.2, self.callback_f, 2)
+        with self.lock:
+            eq_(self.callback_order, [])
+        time.sleep(.1)
+
+        with self.lock:
+            logger.debug('callback_order: %s' % self.callback_order)
+            assert self.callback_order == [1]
+            self.callback_order = []
+            assert not self.datagrams_received
+        self.server_r.sendto(DATA, tc.CLIENT_ADDR)
+        time.sleep(.02) # wait for network interruption
+        with self.lock:
+            logger.debug('callback_order: %s' % self.callback_order)
+            assert self.callback_order == []
+            logger.debug('callback_order: %s' % self.callback_order)
+            assert self.datagrams_received.pop(0) == (DATA, tc.SERVER_ADDR)
+            task2.cancel() #inside critical region??
+        time.sleep(.1) # wait for task 0 (task 2 should be cancelled)
+        with self.lock:
+            assert self.callback_order == [0]
+            assert not self.datagrams_received
+
+    def test_sendto_socket_error(self): 
+        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
+        self.client_r.sendto('z', (tc.NO_ADDR[0], 0))
+
+    def teardown(self):
+        self.client_r.stop()
+        self.server_r.stop()
+
+class TestSocketErrors:
+
+    def _callback(self, *args, **kwargs):
+        self.callback_fired = True
+    
+    def setup(self):
+        self.callback_fired = False
+        self.r = ThreadedReactorSocketError()
+        self.r.listen_udp(tc.CLIENT_ADDR[1], lambda x,y:None)
+
+    def test_sendto(self):
+        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
+        self.r.sendto('z', tc.NO_ADDR)
+
+    def test_recvfrom(self):
+        self.r.start()
+        r2 = ThreadedReactor()
+        r2.listen_udp(tc.SERVER_ADDR[1], lambda x,y:None)
+        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
+        r2.sendto('z', tc.CLIENT_ADDR)
+        # self.r will call recvfrom (which raises socket.error)
+        time.sleep(tc.TASK_INTERVAL)
+        ok_(not self.callback_fired)
+        self.r.stop()
+
+    def test_sendto_too_large_data_string(self):
+        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
+        self.r.sendto('z'*12345, tc.NO_ADDR)
+            
+
+
+        
+class TestMockThreadedReactor:
+
+    def setup(self):
+        pass
+
+    def _callback(self, *args):
+        pass
+
+    def test_mock_threaded_reactor(self):
+        '''
+        Just making sure that the interface is the same
+
+        '''
+        r = ThreadedReactor(task_interval=.1)
+        rm = ThreadedReactorMock(task_interval=.1)
+
+        r.listen_udp(tc.CLIENT_ADDR[1], lambda x,y:None)
+        rm.listen_udp(tc.CLIENT_ADDR[1], lambda x,y:None)
+
+        r.start()
+        rm.start()
+
+        r.sendto(DATA, tc.CLIENT_ADDR)
+        rm.sendto(DATA, tc.CLIENT_ADDR)
+        
+        r.call_later(.1, self._callback)
+        rm.call_later(.1, self._callback)
+#        time.sleep(.002)
+        r.stop()
+        rm.stop()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_node.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_node.py
new file mode 100644 (file)
index 0000000..525088a
--- /dev/null
@@ -0,0 +1,130 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import ok_, eq_, raises, assert_raises
+import test_const as tc
+
+import logging, logging_conf
+
+import utils
+from identifier import Id, ID_SIZE_BYTES
+from node import Node, RoutingNode
+from node import LAST_RTT_W
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+
+bin_id1 = '1' * ID_SIZE_BYTES
+bin_id2 = '2' * ID_SIZE_BYTES
+id1 = Id(bin_id1)
+id2 = Id(bin_id2)
+addr1 = ('127.0.0.1', 1111)
+addr2 = ('127.0.0.1', 2222)
+
+
+class TestNode:
+
+    def setup(self):
+        pass
+    
+    def test_node(self):
+        node1 = Node(addr1, id1)
+        node2 = Node(addr2, id2)
+        node1b = Node(addr1, None)
+        node1ip = Node(('127.0.0.2', 1111), id1)
+        node1port = Node(addr2, id1)
+        node1id = Node(addr1, id2)
+
+        assert str(node1) == '<node: %r %r>' % (addr1, id1)
+        #<node: ('127.0.0.1', 1111) 0x1313131313131313131313131313131313131313>
+
+        assert node1.id == id1
+        assert node1.id != id2
+        assert node1.addr == addr1
+        assert node1.addr != addr2
+        assert node1 == node1
+
+        assert node1 != node1b
+        node1b.id = id1
+        assert node1 == node1b
+
+        assert node1 != node2
+        assert node1 != node1ip
+        assert node1 != node1port
+        assert node1 != node1id
+
+    def test_compact_addr(self):
+        eq_(tc.CLIENT_NODE.compact_addr,
+            utils.compact_addr(tc.CLIENT_ADDR))
+
+    def test_log_distance(self):
+        eq_(tc.CLIENT_NODE.log_distance(tc.SERVER_NODE),
+            tc.CLIENT_ID.log_distance(tc.SERVER_ID))
+
+    def test_compact(self):
+        eq_(tc.CLIENT_NODE.compact(),
+            tc.CLIENT_ID.bin_id + utils.compact_addr(tc.CLIENT_ADDR))
+        
+    def test_get_rnode(self):
+        eq_(tc.CLIENT_NODE.get_rnode(),
+            RoutingNode(tc.CLIENT_NODE))
+        
+    @raises(AttributeError)
+    def test_node_exceptions(self):
+        Node(addr1, id1).id = id2
+
+        
+
+class TestRoutingNode:
+
+    def setup(self):
+        self.rnode1 = RoutingNode(Node(addr1, id1))
+        self.rnode2 = RoutingNode(Node(addr2, id2))
+
+    def test_rnode(self):
+        RTT1 = 1
+        RTT2 = 2
+        assert self.rnode1.timeouts_in_a_row() == 0
+        self.rnode1.on_timeout()
+        self.rnode1.on_timeout()
+        self.rnode1.on_timeout()
+        assert self.rnode1.timeouts_in_a_row() == 3
+        assert self.rnode1.timeouts_in_a_row(False) == 3
+        self.rnode1.on_query_received()
+        assert self.rnode1.timeouts_in_a_row() == 0
+        eq_(self.rnode1.timeouts_in_a_row(False), 3)
+        self.rnode1.on_response_received(1)
+        assert self.rnode1.timeouts_in_a_row() == 0
+        assert self.rnode1.timeouts_in_a_row(False) == 0
+        assert self.rnode1._num_queries == 1
+        assert self.rnode1._num_responses == 1
+        assert self.rnode1._num_timeouts == 3        
+        self.rnode1.on_response_received(RTT1)
+        self.rnode1.on_response_received(RTT1)
+        self.rnode1.on_response_received(RTT1)
+        self.rnode1.on_response_received(RTT1)
+        self.rnode1.on_response_received(RTT1)
+        self.rnode1.on_response_received(RTT1)
+        self.rnode1.on_response_received(RTT1)
+        self.rnode1.on_response_received(RTT2)
+        eq_(self.rnode1._rtt_avg,
+            RTT1 * (1 - LAST_RTT_W) + RTT2 * LAST_RTT_W)
+        self.rnode1.on_timeout()
+        self.rnode1.on_timeout()
+        
+    def _test_rank(self):
+        eq_(self.rnode1.rank(), 0)
+        self.rnode1.on_query_received()
+        eq_(self.rnode1.rank(), 0)
+        self.rnode1.on_response_received()
+        eq_(self.rnode1.rank(), 1)
+
+    def test_repr(self):
+        _ = repr(RoutingNode(tc.CLIENT_NODE))
+
+    def test_get_rnode(self):
+        eq_(self.rnode1.get_rnode(),
+            self.rnode1)
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_querier.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_querier.py
new file mode 100644 (file)
index 0000000..f5e83d5
--- /dev/null
@@ -0,0 +1,415 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import ok_, eq_
+
+import sys
+import time
+import logging, logging_conf
+
+import node
+import identifier
+import message
+import minitwisted
+import rpc_manager
+import test_const as tc
+
+import querier
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+RUN_CPU_INTENSIVE_TESTS = False
+RUN_NETWORK_TESTS = False # Requires a running external DHT node
+
+class TestQuery:
+
+    def setup(self):
+        self.ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
+        ping_r_out = message.OutgoingPingResponse(tc.SERVER_ID)
+        self.ping_r_in = message.IncomingMsg(ping_r_out.encode(tc.TID))
+        fn_r_out = message.OutgoingFindNodeResponse(tc.SERVER_ID,
+                                                    nodes2=tc.NODES)
+        self.fn_r_in = message.IncomingMsg(fn_r_out.encode(tc.TID))
+
+        self.got_response = False
+        self.got_error = False
+        self.got_timeout = False
+        
+        self.got_routing_response = False
+        self.got_routing_error = False
+        self.got_routing_timeout = False
+        self.got_routing_nodes_found = False
+
+        self.query = querier.Query(tc.TID, self.ping_msg.query, tc.SERVER_NODE,
+                                   self.on_response,
+                                   self.on_error,
+                                   self.on_timeout,
+                                   self.on_routing_response,
+                                   self.on_routing_error,
+                                   self.on_routing_timeout,
+                                   self.on_routing_nodes_found)
+        self.query.timeout_task = minitwisted.Task(1, self.on_timeout,
+                                                   tc.SERVER_NODE) 
+        
+    def on_response(self, response_msg, addr):
+        self.got_response = True
+
+    def on_error(self, error_msg, addr):
+        self.got_error = True
+
+    def on_timeout(self, addr):
+        self.got_timeout = True
+
+    def on_routing_response(self, node_):
+        self.got_routing_response = True
+
+    def on_routing_error(self, node_):
+        self.got_routing_error = True
+
+    def on_routing_timeout(self, node_):
+        self.got_routing_timeout = True
+
+    def on_routing_nodes_found(self, node_):
+        self.got_routing_nodes_found = True
+
+
+    def test_fire_callback_on_response(self):
+        # the server creates the response
+        pong_msg = message.OutgoingPingResponse(tc.SERVER_ID)
+        pong_data = pong_msg.encode(tc.TID)
+        # rpc_m decodes the response received
+        pong_msg = message.IncomingMsg(pong_data)
+        # querier notifies of the message (callback)
+        self.query.on_response_received(pong_msg)
+        ok_(self.got_response)
+        ok_(not self.got_error)
+        ok_(not self.got_timeout)
+
+    def test_fire_callback_on_error(self):
+        # the server creates the response
+        error_msg = message.OutgoingErrorMsg(message.GENERIC_E)
+        error_data = error_msg.encode(tc.TID)
+        # rpc_m decodes the response received
+        error_msg = message.IncomingMsg(error_data)
+        # querier notifies of the message (callback)
+        self.query.on_error_received(error_msg)
+        assert not self.got_response and self.got_error
+
+    def test_on_timeout(self):
+        ok_(not self.got_timeout)
+        ok_(not self.got_routing_timeout)
+        self.query.on_timeout()
+        ok_(self.got_timeout)
+        ok_(self.got_routing_timeout)
+        
+    def test_fire_callback_on_timeout(self):
+        self.query.timeout_task.fire_callbacks()
+        self.query.timeout_task.cancel()
+        assert not self.got_response and not self.got_error \
+               and self.got_timeout
+        
+    def test_fire_callback_on_late_response(self):
+        self.query.timeout_task.fire_callbacks()
+        self.query.timeout_task.cancel()
+        # the server creates the response
+        pong_msg = message.OutgoingPingResponse(tc.SERVER_ID)
+        pong_data = pong_msg.encode(tc.TID)
+        # rpc_m decodes the response received
+        pong_msg = message.IncomingMsg(pong_data)
+        # querier notifies of the message (but it's too late)
+        self.query.on_response_received(pong_msg)
+        logger.warning(
+            "**IGNORE WARNING LOG**")
+        assert not self.got_response and not self.got_error \
+               and self.got_timeout
+        
+    def test_invalid_response_received(self):
+        # Response received is invalid
+        self.ping_r_in._msg_dict[message.RESPONSE] = 'zz'
+        ok_(not self.got_response) 
+        logger.warning(
+            "**IGNORE WARNING LOG**")
+        self.query.on_response_received(self.ping_r_in)
+        ok_(not self.got_response)
+
+    def test_response_contains_nodes(self):
+        # Trick query to accept find node response
+        self.query.query = message.FIND_NODE
+        ok_(not self.got_response)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+        self.query.on_response_received(self.fn_r_in)
+        ok_(self.got_response)
+        ok_(self.got_routing_response)
+        ok_(self.got_routing_nodes_found)
+
+    def test_different_node_id(self):
+        # We are expecting response from SERVER_NODE
+        # Here we test if the response contains an ID
+        # different to SERVER_ID
+        self.query.node = node.Node(tc.SERVER_ADDR,
+                                    tc.CLIENT_ID)
+        ok_(not self.got_response)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+        self.query.on_response_received(self.fn_r_in)
+        ok_(not self.got_response)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+
+    def teardown(self):
+        pass
+
+class TestQuerier:
+
+    def setup(self):
+        if RUN_NETWORK_TESTS:
+            time.sleep(1) # Reduce test interdependence
+        self.got_response = False
+        self.got_timeout = False
+        self.got_error = False
+        self.found_nodes = False
+
+        self.got_routing_response = False
+        self.got_routing_error = False
+        self.got_routing_timeout = False
+        self.got_routing_nodes_found = False
+
+        self.querier_mock = querier.QuerierMock(tc.CLIENT_ID)
+
+        self.r = minitwisted.ThreadedReactor(task_interval=.01)
+        self.rpc_m = rpc_manager.RPCManager(self.r,
+                                            tc.CLIENT_ADDR[1])
+        self.querier = querier.Querier(self.rpc_m,
+                                            tc.CLIENT_NODE)
+        self.querier_routing = querier.Querier(self.rpc_m,
+                                               tc.CLIENT_NODE)
+        self.querier_routing.set_on_response_received_callback(
+            self.on_routing_response)
+        self.querier_routing.set_on_error_received_callback(
+            self.on_routing_error)
+        self.querier_routing.set_on_timeout_callback(
+            self.on_routing_timeout)
+        self.querier_routing.set_on_nodes_found_callback(
+            self.on_routing_nodes_found)
+  
+        self.r.start()
+
+
+        
+    def on_response(self, response_msg, node_):
+        self.got_response = True
+
+    def on_timeout(self, node_):
+        self.got_timeout = True
+
+    def on_error(self, error_msg, node_):
+        self.got_error = True
+
+    def on_routing_response(self, node_):
+        self.got_routing_response = True
+
+    def on_routing_error(self, node_):
+        self.got_routing_error = True
+
+    def on_routing_timeout(self, node_):
+        self.got_routing_timeout = True
+
+    def on_routing_nodes_found(self, node_):
+        self.got_routing_nodes_found = True
+
+
+    def test_generate_tids(self):
+        num_tids = 1000
+        if RUN_CPU_INTENSIVE_TESTS:
+            num_tids =  pow(2, 16) + 2 #CPU intensive
+        for i in xrange(num_tids):
+            eq_(self.querier._next_tid(),
+                chr(i%256)+chr((i/256)%256))
+
+        
+        
+    def send_query_and_get_response(self, querier_, later_delay=0):
+        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
+        msg = message.OutgoingFindNodeQuery(tc.CLIENT_ID,
+                                                 tc.CLIENT_ID)
+        if later_delay:
+            task = querier_.send_query_later(later_delay,
+                                             msg,
+                                             tc.EXTERNAL_NODE,
+                                             self.on_response,
+                                             self.on_timeout,
+                                             self.on_error,
+                                             tc.TIMEOUT_DELAY)
+            # This second query is just to have two elements
+            # in the querier_.pending[tc.EXTERNAL_ADDR] list
+            task = querier_.send_query_later(later_delay,
+                                             msg,
+                                             tc.EXTERNAL_NODE,
+                                             self.on_response,
+                                             self.on_timeout,
+                                             self.on_error,
+                                             tc.TIMEOUT_DELAY)
+        else:
+            node_ = (querier_ == self.querier_mock) and tc.SERVER_NODE
+            query = querier_.send_query(ping_msg, node_ or tc.EXTERNAL_NODE,
+                                        self.on_response,
+                                        self.on_timeout, self.on_error,
+                                        timeout_delay=tc.TIMEOUT_DELAY)
+        # querier creates TID
+        msg_tid = '\0\0'
+        if querier_ is self.querier_mock:
+            # server gets query
+            # the server creates the response
+            pong_msg = message.OutgoingPingResponse(tc.SERVER_ID)
+            pong_msg_data = pong_msg.encode(msg_tid)
+            # the client gets the response
+            # rpc_m decodes msg and calls callback
+            pong_msg = message.IncomingMsg(pong_msg_data)
+            querier_.on_response_received(pong_msg, tc.SERVER_ADDR)
+        if later_delay:
+            ok_(not self.got_response)
+            ok_(not self.got_timeout)
+            time.sleep(later_delay*2)
+        time.sleep(tc.TIMEOUT_DELAY+.1)
+        ### It crashed (timeout_task.cancelled??????)
+
+
+        #TODO2: move the 'real' tests to integration
+        
+        ###############################################
+        ### A DHT node must be running on peer_addr ###
+        ###############################################
+        ok_(self.got_response)
+        ok_(not self.got_timeout)
+        ###############################################
+        ###############################################
+
+    def send_query_and_get_error(self, querier):
+
+
+        ping_msg = message.OutgoingPingQuery()
+        query = querier.send_query(ping_msg, tc.EXTERNAL_NODE,
+                                   self.on_response,
+                                   self.on_timeout, self.on_error,
+                                   timeout_delay=tc.TIMEOUT_DELAY)
+        if querier is self.querier_mock:
+            # the server creates the response
+            error_msg = message.OutgoingErrorMsg(ping_msg.tid,
+                                                 message.GENERIC_E)
+            error_data = error_msg.encode()
+            # rpc_m decodes the response received
+            _, _, error_msg_dict = message.decode(error_data)
+            # rpc_m notifies of the message (callback)
+            querier.on_error_received(error_msg_dict, tc.EXTERNAL_NODE)
+        time.sleep(tc.TIMEOUT_DELAY + .1)
+        
+        ### It crashed (timeout_task.cancelled??????)
+
+
+        #TODO2: move the 'real' tests to integration
+        
+        ###############################################
+        ### A DHT node must be running on peer_addr ###
+        ###############################################
+        ########## assert self.got_response and not self.got_timeout
+        ###############################################
+        ###############################################
+
+
+
+    def send_query_and_get_timeout(self, querier):
+        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
+        query = querier.send_query(ping_msg, tc.DEAD_NODE,
+                                   self.on_response,
+                                   self.on_timeout, self.on_error,
+                                   timeout_delay=tc.TIMEOUT_DELAY)
+        if querier is self.querier_mock:
+            query.timeout_task.fire_callbacks()
+        time.sleep(tc.TIMEOUT_DELAY + .1)
+        assert not self.got_response and self.got_timeout
+
+    def test_send_query_mock(self):
+        self.send_query_and_get_response(self.querier_mock)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+        ok_(not self.got_routing_timeout)
+
+    def test_send_query(self):
+        if RUN_NETWORK_TESTS:
+            self.send_query_and_get_response(self.querier)
+            ok_(not self.got_routing_response)
+            ok_(not self.got_routing_nodes_found)
+            ok_(not self.got_routing_timeout)
+
+    def test_send_query_routing(self):
+        if RUN_NETWORK_TESTS:
+            self.send_query_and_get_response(self.querier_routing)
+            ok_(self.got_routing_response)
+            ok_(not self.got_routing_nodes_found)
+            ok_(not self.got_routing_timeout)
+
+    def test_send_query_timeout_mock(self):
+        self.send_query_and_get_timeout(self.querier_mock)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+        ok_(not self.got_routing_timeout)
+
+    def test_send_query_timeout(self):
+        self.send_query_and_get_timeout(self.querier)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+        ok_(not self.got_routing_timeout)
+
+    def test_send_query_timeout_routing(self):
+        self.send_query_and_get_timeout(self.querier_routing)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+        ok_(self.got_routing_timeout)
+
+    def test_send_query_later(self):
+        if RUN_NETWORK_TESTS:
+            self.send_query_and_get_response(self.querier_routing, .001)
+            ok_(self.got_routing_response)
+            ok_(self.got_routing_nodes_found)
+            ok_(not self.got_routing_timeout)
+
+    def test_unsolicited_response(self):
+        # We have a pending response from NO_ADDR (TID \0\0)
+        # but we get a response with different TID
+        # (callbacks passed in Querier.send_query order: response, timeout, error)
+        # client
+        ping = message.OutgoingPingQuery(tc.CLIENT_ID)
+        self.querier.send_query(ping,
+                                tc.SERVER_NODE,
+                                self.on_response,
+                                self.on_timeout,
+                                self.on_error,
+                                tc.TIMEOUT_DELAY)
+        ok_(not self.got_response)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+        # server
+        pong = message.OutgoingPingResponse(tc.SERVER_ID)
+        pong_in = message.IncomingMsg(pong.encode(tc.TID))
+        # client
+        self.querier.on_response_received(pong_in,
+                                               tc.SERVER_ADDR)
+        ok_(not self.got_response)
+        ok_(not self.got_routing_response)
+        ok_(not self.got_routing_nodes_found)
+
+    def test_error(self):
+        msg = message.OutgoingErrorMsg(message.GENERIC_E)
+        self.querier.on_error_received(msg, tc.SERVER_ADDR)
+
+
+    def teardown(self):
+        self.querier_mock.stop()
+        self.querier.stop()
+        self.querier_routing.stop()
+        
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_responder.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_responder.py
new file mode 100644 (file)
index 0000000..fa7df0e
--- /dev/null
@@ -0,0 +1,153 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import *
+
+import  message
+
+import logging, logging_conf
+import test_const as tc
+
+import routing_manager
+import token_manager
+import tracker
+import responder
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+class TestResponder:
+
+    def _notify_routing_m(self, node):
+        self.notification_callback_done = True
+
+    def setup(self):
+        routing_m = routing_manager.RoutingManagerMock()
+        self.tracker = tracker.Tracker()
+        self.token_m = token_manager.TokenManager()
+        self.responder = responder.Responder(tc.SERVER_ID, routing_m,
+                                             self.tracker, self.token_m)
+        self.notification_callback_done = False
+        self.responder.set_on_query_received_callback(self._notify_routing_m)
+
+    def test_return_response_for_ping(self):
+        # client side
+        query_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
+        # rpc_manager.sendto() encodes
+        query_data = query_msg.encode(tc.TID)
+        # server side
+        # rpc_manager.datagram_received() decodes
+        query_msg = message.IncomingMsg(query_data)
+        assert not self.notification_callback_done
+        response_msg = self.responder.on_query_received(query_msg,
+                                                         tc.CLIENT_ADDR)
+        response_data = response_msg.encode(query_msg.tid)
+
+        assert self.notification_callback_done
+        expected_msg = message.OutgoingPingResponse(tc.SERVER_ID)
+        expected_data = expected_msg.encode(tc.TID)
+        eq_(response_data, expected_data)
+    
+    def test_return_response_for_find_node(self):
+        # client side
+        query_msg = message.OutgoingFindNodeQuery(tc.CLIENT_ID,
+                                                  tc.TARGET_ID)
+        # querier encodes
+        query_data = query_msg.encode(tc.TID)
+        # server side
+        # rpc_manager.datagram_received() decodes
+        query_msg = message.IncomingMsg(query_data)
+        # rpc calls responder
+        assert not self.notification_callback_done
+        response_msg = self.responder.on_query_received(query_msg,
+                                                        tc.CLIENT_ADDR)
+        response_data = response_msg.encode(query_msg.tid)
+        assert self.notification_callback_done
+        expected_msg = message.OutgoingFindNodeResponse(tc.SERVER_ID,
+                                                        tc.NODES)
+        expected_data = expected_msg.encode(tc.TID)
+        eq_(response_data, expected_data)
+    
+    def test_return_response_for_get_peers_when_peers(self):
+        # server's tracker has peers
+        for peer in tc.PEERS:
+            self.tracker.put(tc.INFO_HASH, peer)
+
+        # client side
+        query_msg = message.OutgoingGetPeersQuery(tc.CLIENT_ID,
+                                                  tc.INFO_HASH) 
+        # querier encodes
+        query_data = query_msg.encode(tc.TID)
+        # server side
+        # rpc_manager.datagram_received() decodes
+        query_msg = message.IncomingMsg(query_data)
+        # rpc calls responder
+        assert not self.notification_callback_done
+        response_msg = self.responder.on_query_received(query_msg,
+                                                        tc.CLIENT_ADDR)
+        response_data = response_msg.encode(query_msg.tid)
+        assert self.notification_callback_done
+        expected_msg = message.OutgoingGetPeersResponse(tc.SERVER_ID,
+                                                        self.token_m.get(),
+                                                        peers=tc.PEERS)
+        expected_data = expected_msg.encode(tc.TID)
+        eq_(response_data, expected_data)
+    
+    def test_return_response_for_get_peers_when_no_peers(self):
+        # client side
+        query_msg = message.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.NODE_ID)
+        # rpc_manager.sendto() encodes
+        query_data = query_msg.encode(tc.TID) 
+        # server side
+        # rpc_manager.datagram_received() decodes
+        query_msg = message.IncomingMsg(query_data)
+        assert not self.notification_callback_done
+        response_msg = self.responder.on_query_received(query_msg,
+                                                        tc.CLIENT_ADDR)
+        response_data = response_msg.encode(query_msg.tid)
+        assert self.notification_callback_done
+        expected_msg = message.OutgoingGetPeersResponse(tc.SERVER_ID,
+                                                        self.token_m.get(),
+                                                        nodes2=tc.NODES)
+        expected_data = expected_msg.encode(query_msg.tid)
+        eq_(response_data, expected_data)
+    
+    def test_return_response_for_announce_peer_with_valid_tocken(self):
+        # client side
+        query_msg = message.OutgoingAnnouncePeerQuery(tc.CLIENT_ID,
+                                                      tc.INFO_HASH,
+                                                      tc.CLIENT_ADDR[1],
+                                                      self.token_m.get())
+        # querier.send_query() encodes
+        query_data = query_msg.encode(tc.TID)
+        # server side
+        # rpc_manager.datagram_received() decodes and calls responder (callback)
+        query_msg = message.IncomingMsg(query_data)
+        assert not self.notification_callback_done
+        response_msg = self.responder.on_query_received(query_msg,
+                                                         tc.CLIENT_ADDR)
+        response_data = response_msg.encode(query_msg.tid)
+        assert self.notification_callback_done
+        # responder returns to querier
+        expected_msg = message.OutgoingAnnouncePeerResponse(tc.SERVER_ID)
+        expected_data = expected_msg.encode(tc.TID)
+        assert response_data == expected_data
+
+    def test_errors(self):
+        # client side
+        query_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
+        # querier.send_query() encodes
+        query_data = query_msg.encode(tc.TID)
+        # server side
+        # rpc_manager.datagram_received() decodes and calls responder (callback)
+        query_msg = message.IncomingMsg(query_data)
+        ## 'zzzzzzzz' is not a valid QUERY
+        query_msg.query = 'zzzzzzzz'
+        assert not self.notification_callback_done
+        ok_(self.responder.on_query_received(query_msg,
+                                             tc.CLIENT_ADDR) is None)
+
+
+    
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_routing_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_routing_manager.py
new file mode 100644 (file)
index 0000000..cb14477
--- /dev/null
@@ -0,0 +1,212 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import ok_, eq_, assert_raises
+
+import test_const as tc
+
+import minitwisted
+import rpc_manager
+import querier
+import message
+
+from routing_manager import RoutingManager, RoutingManagerMock
+
+import logging, logging_conf
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+class TestRoutingManager:
+
+    def setup(self):
+        for n in tc.NODES + [tc.SERVER_NODE, tc.SERVER2_NODE]:
+            n.is_ns = False
+        for n in tc.NODES2 + [tc.CLIENT_NODE]:
+            n.is_ns = True
+        
+        self.querier = querier.QuerierMock(tc.CLIENT_ID)
+        self.routing_m = RoutingManager(tc.CLIENT_NODE, self.querier,
+                                        tc.NODES)
+
+    def exercise_mock(self, mode):
+        # If this happens, we want to know
+        assert_raises(Exception, self.routing_m.on_timeout, tc.CLIENT_NODE)
+
+        # node is nowhere (timeout is ignored)
+        self.routing_m.on_timeout(tc.SERVER_NODE)
+
+        # main: CLIENT_NODE, replacement: empty
+        eq_(self.routing_m.get_closest_rnodes(tc.SERVER_ID),
+            [tc.CLIENT_NODE])
+
+        self.routing_m.on_response_received(tc.SERVER_NODE)
+        # main: client_node, server_node, replacement: empty
+
+        # this should reset refresh task
+        self.routing_m.on_response_received(tc.SERVER_NODE)
+
+        eq_(self.routing_m.get_closest_rnodes(tc.SERVER_ID),
+            [tc.SERVER_NODE, tc.CLIENT_NODE])
+
+        self.routing_m.on_timeout(tc.SERVER_NODE)
+        # main: client_node, replacement: server_node
+        eq_(self.routing_m.get_closest_rnodes(tc.SERVER_ID),
+            [tc.CLIENT_NODE])
+
+        self.routing_m.on_response_received(tc.SERVER2_NODE)
+        # main: client_node, server_node, replacement: server2_node(q)
+        eq_(self.routing_m.get_closest_rnodes(tc.SERVER_ID),
+            [tc.SERVER2_NODE, tc.CLIENT_NODE])
+
+        self.routing_m.on_response_received(tc.SERVER_NODE)
+        # main: client_node, server_node, replacement: server2_node(q)
+        eq_(self.routing_m.get_closest_rnodes(tc.SERVER_ID),
+            [tc.SERVER_NODE, tc.SERVER2_NODE, tc.CLIENT_NODE])
+        eq_(self.routing_m.get_closest_rnodes(tc.SERVER2_ID),
+            [tc.SERVER2_NODE, tc.CLIENT_NODE])
+        eq_(self.routing_m.get_closest_rnodes(tc.CLIENT_ID),
+            [tc.CLIENT_NODE])
+        for n in tc.NODES:
+            self.routing_m.on_response_received(n)
+        """
+        Main Routing Table
+        # -1
+        client
+        # 154
+        server2
+        # 159
+        server nodes[0:7]
+        """
+        eq_(self.routing_m.get_closest_rnodes(tc.CLIENT_ID),
+            [tc.CLIENT_NODE])
+        for n in tc.NODES:
+            eq_(self.routing_m.get_closest_rnodes(n.id),
+                [tc.SERVER_NODE] + tc.NODES[:7])
+        # bucket full (NODES[7] is in the replacement bucket)
+            
+        self.routing_m.on_query_received(tc.NODES[7])
+        eq_(self.routing_m.get_closest_rnodes(n.id),
+            [tc.SERVER_NODE] + tc.NODES[:7])
+        
+        # nodes[0] is kicked out from main
+        # all nodes in replacement are refreshed (pinged)
+        self.routing_m.on_timeout(tc.NODES[0])
+        eq_(self.routing_m.get_closest_rnodes(tc.NODES[0].id),
+            [tc.SERVER_NODE] + tc.NODES[1:7] + [tc.SERVER2_NODE])
+
+        # nodes[7] is refreshed
+        self.routing_m.on_query_received(tc.NODES[7])
+        # nodes[7] still in replacement (queries don't cause movements)
+        eq_(self.routing_m.get_closest_rnodes(tc.NODES[0].id),
+            [tc.SERVER_NODE] + tc.NODES[1:7] + [tc.SERVER2_NODE])
+
+        # nodes[7] is moved to the main table (response to refresh ping)
+        self.routing_m.on_response_received(tc.NODES[7])
+        eq_(self.routing_m.get_closest_rnodes(tc.NODES[0].id),
+            [tc.SERVER_NODE] + tc.NODES[1:8])
+
+        # nodes[7] is refreshed (no change to the tables)
+        self.routing_m.on_query_received(tc.NODES[7])
+        eq_(self.routing_m.get_closest_rnodes(tc.NODES[0].id),
+            [tc.SERVER_NODE] + tc.NODES[1:8])
+
+        # nodes[7] is in main and get response
+        self.routing_m.on_response_received(tc.NODES[7])
+        
+
+        # nodes[0] gets strike 2, 3 and 4 (timeouts)
+        self.routing_m.on_timeout(tc.NODES[0])
+        self.routing_m.on_timeout(tc.NODES[0])
+        self.routing_m.on_timeout(tc.NODES[0])
+        # and can be expelled from the replacement table
+        # nodes2[:] send responses
+
+        #TODO2: rnode(nodes[0]) reports 5 timeouts
+        eq_(self.routing_m.replacement.get_rnode(
+                tc.NODES[0]).timeouts_in_a_row(), 5)
+            
+        if mode is message.QUERY:
+            for n in tc.NODES2:
+                self.routing_m.on_query_received(n)
+        elif mode is message.RESPONSE:
+            for n in tc.NODES2:
+                self.routing_m.on_response_received(n)
+        # nodes[0] comes back but the repl bucket is full
+        self.routing_m.on_response_received(tc.NODES[0])
+        # nodes[0] sends error (routing manager ignores it)
+        self.routing_m.on_error_received(tc.NODES[0])
+
+        # timeout from node without id (ignored)
+                # nodes[0] comes back but the repl bucket is full
+        self.routing_m.on_timeout(tc.EXTERNAL_NODE)
+
+        # nodes found (but no room in main
+        self.routing_m.on_nodes_found(tc.NODES)
+        
+        # nodes[1] (in main) timeout and repl bucket is full
+        # find worst node in repl (nodes[7]) and replace it
+        # all nodes in repl bucket get refreshed (not nodes[1]
+        self.routing_m.on_timeout(tc.NODES[1])
+        eq_(self.routing_m.get_closest_rnodes(tc.NODES[0].id),
+            [tc.SERVER_NODE] + tc.NODES[2:8] +[tc.SERVER2_NODE])
+
+        # nodes found (there is room now)
+        # nodes2[0:1] get refreshed (pinged
+        self.routing_m.on_nodes_found(tc.NODES2)
+        # nodes2[0] replies (and is added to main)
+        self.routing_m.on_response_received(tc.NODES2[0])
+        eq_(self.routing_m.get_closest_rnodes(tc.NODES2[0].id),
+            [tc.SERVER_NODE] + tc.NODES[2:8] +tc.NODES2[0:1])
+
+
+        if mode == message.QUERY:
+            expected_main = [tc.SERVER2_NODE] + \
+                [tc.SERVER_NODE] + tc.NODES[2:8] + tc.NODES2[0:1] + \
+                [tc.CLIENT_NODE]
+            
+            expected_replacement = tc.NODES[0:2]
+            
+        elif mode == message.RESPONSE:
+            expected_main = [tc.SERVER2_NODE] + \
+                [tc.SERVER_NODE] + tc.NODES[2:8] + tc.NODES2[0:1] + \
+                [tc.CLIENT_NODE]
+            
+            expected_replacement = tc.NODES2[1:7] + tc.NODES[1:2]
+            
+        all_main, all_replacement = self.routing_m.get_all_rnodes()
+
+        for n, expected in zip(all_main, expected_main):
+            eq_(n, expected)
+        for n, expected in zip(all_replacement, expected_replacement):
+            eq_(n, expected)
+        eq_(len(all_main), len(expected_main))
+        eq_(len(all_replacement), len(expected_replacement))
+            
+
+    def test_query(self):
+        # Run the full scenario driving the mock with QUERY messages.
+        self.exercise_mock(message.QUERY)
+    def test_response(self):
+        # Run the full scenario driving the mock with RESPONSE messages.
+        self.exercise_mock(message.RESPONSE)
+
+            
+        
+        
+    def test_bootstrap(self):
+        # Trigger bootstrap, then feed the querier a find_node response
+        # as if NODES[0] replied with a single contact (NODES2[0]).
+        self.routing_m.do_bootstrap()
+        fn_r = message.OutgoingFindNodeResponse(tc.NODES[0].id,
+                                                tc.NODES2[0:1])
+        # Encode and re-decode so the querier sees a wire-format message.
+        fn_r = message.IncomingMsg(fn_r.encode('\0\0'))
+        self.querier.on_response_received(fn_r, tc.NODES[0].addr)
+
+    def test_routing_m_mock(self):
+        # Just testing interface
+        # The mock is expected to return tc.NODES for any target.
+        rm = RoutingManagerMock()
+        eq_(rm.get_closest_rnodes(tc.TARGET_ID), tc.NODES)
+
+
+    def test_complete_coverage(self):
+        # Call trivial private callbacks so coverage tools see them run.
+        self.routing_m._do_nothing()
+        self.routing_m._refresh_now_callback()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_routing_table.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_routing_table.py
new file mode 100644 (file)
index 0000000..443065a
--- /dev/null
@@ -0,0 +1,77 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import logging, logging_conf
+
+from nose.tools import eq_, ok_, assert_raises
+
+import test_const as tc
+
+import node
+
+from routing_table import *
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+class TestRoutingTable:
+    """Tests for RoutingTable: lookups, capacity checks, add/remove."""
+
+    def setup(self):
+        # A table with 161 buckets, each with room for 2 rnodes.
+        nodes_per_bucket = [2] * 161
+        self.rt = RoutingTable(tc.CLIENT_NODE,
+                               nodes_per_bucket)
+
+    def test_basics(self):
+        # Fresh table: bucket exists but is empty; lookups must fail.
+        eq_(self.rt.get_bucket(tc.SERVER_NODE).rnodes, [])
+        ok_(self.rt.there_is_room(tc.SERVER_NODE))
+        assert_raises(RnodeNotFound, self.rt.get_rnode, tc.SERVER_NODE)
+        ok_(not self.rt.get_bucket(tc.SERVER_NODE).is_full())
+        eq_(self.rt.num_rnodes, 0)
+        eq_(self.rt.get_all_rnodes(), [])
+
+        # One node added: retrievable, bucket still has a free slot.
+        self.rt.add(tc.SERVER_NODE)
+        ok_(self.rt.there_is_room(tc.SERVER_NODE))
+        eq_(self.rt.get_bucket(tc.SERVER_NODE).rnodes, [tc.SERVER_NODE])
+        eq_(self.rt.get_rnode(tc.SERVER_NODE), tc.SERVER_NODE)
+        ok_(not self.rt.get_bucket(tc.SERVER_NODE).is_full())
+        eq_(self.rt.num_rnodes, 1)
+        eq_(self.rt.get_all_rnodes(), [tc.SERVER_NODE])
+        
+        # Let's add a node to the same bucket
+        # (a close id lands in the same bucket as SERVER_NODE).
+        new_node = node.Node(tc.SERVER_NODE.addr,
+                             tc.SERVER_NODE.id.generate_close_id(1))
+        self.rt.add(new_node)
+        # full bucket
+        ok_(not self.rt.there_is_room(tc.SERVER_NODE))
+        eq_(self.rt.get_bucket(new_node).rnodes, [tc.SERVER_NODE, new_node])
+        eq_(self.rt.get_rnode(new_node), new_node)
+        ok_(self.rt.get_bucket(tc.SERVER_NODE).is_full())
+        eq_(self.rt.num_rnodes, 2)
+        eq_(self.rt.get_all_rnodes(), [tc.SERVER_NODE, new_node])
+
+        # Closest-node queries honour the optional maximum argument.
+        eq_(self.rt.get_closest_rnodes(tc.SERVER_ID, 1),
+            [tc.SERVER_NODE])
+        eq_(self.rt.get_closest_rnodes(tc.SERVER_ID),
+            [tc.SERVER_NODE, new_node])
+
+        # Adding to a full bucket must raise.
+        assert_raises(BucketFullError, self.rt.add, new_node)
+        
+        self.rt.remove(new_node)
+        # there is one slot in the bucket
+        ok_(self.rt.there_is_room(tc.SERVER_NODE))
+        assert_raises(RnodeNotFound, self.rt.get_rnode, new_node)
+        eq_(self.rt.get_bucket(tc.SERVER_NODE).rnodes, [tc.SERVER_NODE])
+        eq_(self.rt.get_rnode(tc.SERVER_NODE), tc.SERVER_NODE)
+        ok_(not self.rt.get_bucket(tc.SERVER_NODE).is_full())
+        eq_(self.rt.num_rnodes, 1)
+        eq_(self.rt.get_all_rnodes(), [tc.SERVER_NODE])
+
+                             
+        eq_(self.rt.get_closest_rnodes(tc.SERVER_ID), [tc.SERVER_NODE])
+        
+    def test_complete_coverage(self):
+        # Exercise __str__/__repr__ so they are covered and don't raise.
+        str(self.rt.get_bucket(tc.SERVER_NODE))
+        repr(self.rt)
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_rpc_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_rpc_manager.py
new file mode 100644 (file)
index 0000000..75dc733
--- /dev/null
@@ -0,0 +1,131 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import ok_, eq_, assert_raises
+
+import logging, logging_conf
+
+import minitwisted
+import message
+import test_const as tc
+
+import rpc_manager
+
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+#FIXME: more tests!!!!
+
+class TestRPCManager:
+    """End-to-end tests for RPCManager.
+
+    Two managers (client self.c, server self.s) share one reactor and
+    datagrams are injected directly via _on_datagram_received; the
+    boolean got_* flags record which callbacks fired.
+    """
+
+    # --- callbacks under test: each just records that it was called ---
+
+    def on_query_received(self, msg, addr):
+        self.got_query = True
+        # Returning a message makes rpc_manager send it back automatically.
+        return message.OutgoingPingResponse(tc.SERVER_ID)
+
+    def on_response_received(self, msg, addr):
+        self.got_response = True
+
+    def on_routing_response_received(self, msg, addr):
+        self.got_routing_response = True
+
+    def on_error_received(self, msg, addr):
+        self.got_error = True
+
+    def on_timeout(self, addr):
+        self.got_timeout = True
+
+    def on_routing_timeout(self, addr):
+        self.got_routing_timeout = True
+
+    def setup(self):
+        # One reactor shared by a client and a server RPC manager,
+        # each bound to its own port.
+        self.reactor = minitwisted.ThreadedReactor()
+        self.c = rpc_manager.RPCManager(self.reactor,
+                                        tc.CLIENT_ADDR[1])
+        self.s = rpc_manager.RPCManager(self.reactor,
+                                        tc.SERVER_ADDR[1])
+
+        # Reset all callback flags before every test.
+        self.got_query = False
+        self.got_response = False
+        self.got_routing_response = False
+        self.got_error = False
+        self.got_timeout = False
+        self.got_routing_timeout = False
+        
+    def test_querier_responder(self):
+        # client
+        # setup
+        # Two callbacks registered for RESPONSE: both must fire.
+        self.c.add_msg_callback(message.RESPONSE,
+                                self.on_response_received)
+        self.c.add_msg_callback(message.RESPONSE,
+                                self.on_routing_response_received)
+        self.c.add_msg_callback(message.ERROR,
+                                self.on_error_received)
+        self.c.add_timeout_callback(self.on_routing_timeout)
+
+        # server
+        # setup
+        self.s.add_msg_callback(message.QUERY,
+                                self.on_query_received)
+        
+        # client creates and sends query
+        t_task = self.c.get_timeout_task(tc.SERVER_ADDR,
+                                         tc.TIMEOUT_DELAY,
+                                         self.on_timeout)
+        msg = message.OutgoingPingQuery(tc.CLIENT_ID)
+        msg_data = msg.encode(tc.TID)
+        self.c.send_msg_to(msg_data, tc.SERVER_ADDR)
+        # client sets up timeout
+
+        # server receives query, creates response and sends it back
+        self.s._on_datagram_received(msg_data, tc.CLIENT_ADDR)
+        # rpc_manager would send the message back automatically
+        ok_(self.got_query); self.got_query = False
+        msg = message.OutgoingPingResponse(tc.SERVER_ID)
+        msg_data = msg.encode(tc.TID)
+        self.s.send_msg_to(msg_data, tc.CLIENT_ADDR)
+
+        # client gets response
+        self.c._on_datagram_received(msg_data, tc.SERVER_ADDR)
+        ok_(self.got_response); self.got_response = False
+        ok_(self.got_routing_response)
+        self.got_routing_response = False
+        
+        # client gets error
+        msg_data = message.OutgoingErrorMsg(message.GENERIC_E
+                                            ).encode(tc.TID)
+        self.c._on_datagram_received(msg_data, tc.SERVER_ADDR)
+        ok_(self.got_error); self.got_error = False
+
+        # client gets timeout
+        # (fire the timeout task manually instead of waiting for it)
+        t_task.fire_callbacks()
+        ok_(self.got_timeout); self.got_timeout = False
+        ok_(self.got_routing_timeout)
+        self.got_routing_timeout = False
+        
+        # server gets invalid message
+        # (undecodable datagrams must not trigger any callback)
+        self.s._on_datagram_received('zzz', tc.CLIENT_ADDR)
+        ok_(not self.got_query)
+        ok_(not self.got_response)
+        ok_(not self.got_routing_response)
+
+
+    def test_call_later(self):
+        # call_later schedules a task; firing it runs the callback.
+        t = self.c.call_later(tc.TIMEOUT_DELAY,
+                              self.on_timeout,
+                              1234)
+        t.fire_callbacks()
+        ok_(self.got_timeout)
+
+    def test_no_callback_for_type(self):
+        # A QUERY arriving where no QUERY callback is registered
+        # must be dropped without firing anything.
+        msg = message.OutgoingPingQuery(tc.CLIENT_ID)
+        msg_data = msg.encode(tc.TID)
+        self.s._on_datagram_received(msg_data,
+                                     tc.CLIENT_ADDR)
+        ok_(not self.got_query)
+        ok_(not self.got_response)
+        ok_(not self.got_routing_response)
+        
+    def teardown(self):
+        # Stop both managers so the reactor thread can shut down.
+        self.c.stop()
+        self.s.stop()
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_token_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_token_manager.py
new file mode 100644 (file)
index 0000000..f597e26
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) 2009 Raul Jimenez, Flutra Osmani
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import *
+
+import token_manager
+
+
+
+token_str = '123'
+invalid_token_str = ''
+
+class TestTokenManager:
+    """TokenManager hands out a token and validates it on the way back."""
+
+    def setup(self):
+        self.token_m = token_manager.TokenManager()
+
+    def test_get_token(self):
+        # The stub manager returns the fixed token '123'.
+        eq_(self.token_m.get(), token_str)
+
+    def test_check_token(self):
+        # Only the issued token is accepted; anything else is rejected.
+        ok_(self.token_m.check(token_str))
+        ok_(not self.token_m.check(invalid_token_str))
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_tracker.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_tracker.py
new file mode 100644 (file)
index 0000000..59038cb
--- /dev/null
@@ -0,0 +1,107 @@
+# Copyright (C) 2009 Raul Jimenez, Flutra Osmani
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import *
+
+import time
+
+import tracker
+import minitwisted
+
+import logging, logging_conf
+
+logging_conf.testing_setup(__name__)
+logger = logging.getLogger('dht')
+
+
+keys = ('1','2')
+peers = (('1.2.3.4', 1234), ('2.3.4.5', 2222))
+
+class TestTracker(object):
+    """Tests for tracker.Tracker with a 10ms validity period and a
+    cleanup sweep every 5 puts.
+
+    NOTE(review): these assertions depend on real wall-clock sleeps and
+    on the tracker's incremental cleanup behaviour (at most one expired
+    entry removed per cleanup call), so they may be flaky on slow or
+    heavily loaded machines.
+    """
+
+    def setup(self):
+        # validity_period=.01s, cleanup every 5 puts.
+        self.t = tracker.Tracker(.01, 5)
+
+    def test_put(self):
+        # put() on a fresh key must not raise.
+        self.t.put(keys[0], peers[0])
+
+    def test_get_empty_key(self):
+        # Unknown keys yield an empty peer list, not an error.
+        eq_(self.t.get(keys[0]), [])
+
+    def test_get_nonempty_key(self):
+        self.t.put(keys[0], peers[0])
+        eq_(self.t.get(keys[0]), [peers[0]])
+        
+    def test_get_expired_value(self):
+        # After sleeping past the validity period the entry is expired.
+        self.t.put(keys[0], peers[0])
+        time.sleep(.015)
+        eq_(self.t.get(keys[0]), [])
+
+    def test_many_puts_and_gets(self):
+        # Comments give the approximate elapsed time in seconds.
+        #0
+        self.t.put(keys[0], peers[0])
+        time.sleep(.02)
+        #.02
+        self.t.put(keys[0], peers[0])
+        time.sleep(.02)
+        #.04
+        self.t.put(keys[0], peers[1])
+        eq_(self.t.get(keys[0]), [peers[0], peers[1]])
+        time.sleep(.07)
+        #.11
+        self.t.put(keys[0], peers[0])
+        eq_(self.t.get(keys[0]), [peers[1], peers[0]])
+        time.sleep(.02)
+        #.13
+        eq_(self.t.get(keys[0]), [peers[0]])
+
+    def test_hundred_puts(self):
+        # test > 5 puts
+        # Every 5th put triggers a cleanup sweep over all keys;
+        # debug_view() exposes the raw dict so we can count keys.
+        eq_(len(self.t.debug_view()), 0)
+        time.sleep(0)
+        eq_(len(self.t.debug_view()), 0)
+        self.t.put(1,1)
+        eq_(len(self.t.debug_view()), 1)
+        time.sleep(.006)
+        eq_(len(self.t.debug_view()), 1)
+        self.t.put(2,2)
+        eq_(len(self.t.debug_view()), 2)
+        time.sleep(.004)
+        eq_(len(self.t.debug_view()), 2)
+        self.t.put(3,3)
+        eq_(len(self.t.debug_view()), 3)
+        time.sleep(.0)
+        eq_(len(self.t.debug_view()), 3)
+        self.t.put(4,4)
+        eq_(len(self.t.debug_view()), 4)
+        time.sleep(.0)
+        eq_(len(self.t.debug_view()), 4)
+        self.t.put(5,5)
+        # cleaning... 1 out
+        eq_(len(self.t.debug_view()), 4)
+        time.sleep(.0)
+        eq_(len(self.t.debug_view()), 4)
+        self.t.put(6,6)
+        eq_(len(self.t.debug_view()), 5)
+        time.sleep(.00)
+        eq_(len(self.t.debug_view()), 5)
+        self.t.put(7,7)
+        eq_(len(self.t.debug_view()), 6)
+        time.sleep(.01)
+        eq_(len(self.t.debug_view()), 6)
+        self.t.put(8,8)
+        eq_(len(self.t.debug_view()), 7)
+        time.sleep(.00)
+        eq_(len(self.t.debug_view()), 7)
+        self.t.put(9,9)
+        eq_(len(self.t.debug_view()), 8)
+        time.sleep(.00)
+        eq_(len(self.t.debug_view()), 8)
+        self.t.put(0,0)
+        # cleaning ... 2,3,4,5,6,7 out
+        eq_(len(self.t.debug_view()), 3)
+
+            
+    def teardown(self):
+        pass
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_utils.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/test_utils.py
new file mode 100644 (file)
index 0000000..9551c68
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+from nose.tools import *
+
+import utils
+
+
+class TestUtils:
+
+    def test_compact_addr(self):
+        cases = ((('1.2.3.4', 255), (1,2,3,4,0,255)),
+                 (('199.2.3.4', 256), (199,2,3,4,1,0)),
+                 )
+        for case in cases:
+            expected = ''.join([chr(i) for i in case[1]])
+            eq_(utils.compact_addr(case[0]), expected)
+                
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/token_manager.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/token_manager.py
new file mode 100644 (file)
index 0000000..d5f9649
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) 2009 Raul Jimenez, Flutra Osmani
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+
+
+class TokenManager(object):
+    """Issues and validates tokens for announce-style requests.
+
+    NOTE(review): this is a placeholder implementation -- a single
+    hard-coded token ('123') is issued to everybody and never rotated.
+    A real deployment should derive per-node, expiring tokens; the
+    current unit tests depend on the fixed value, so confirm intent
+    before changing it.
+    """
+    def __init__(self):
+        self.current_token = '123'
+        
+    def get(self):
+        # Return the token to hand out to remote nodes.
+        return self.current_token
+
+    def check(self, token):
+        # A token is valid iff it matches the one we currently issue.
+        return token == self.current_token
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/tracker.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/tracker.py
new file mode 100644 (file)
index 0000000..7693d91
--- /dev/null
@@ -0,0 +1,64 @@
+# Copyright (C) 2009 Raul Jimenez, Flutra Osmani
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import time
+
+VALIDITY_PERIOD = 30 * 60 #30 minutes
+CLEANUP_COUNTER = 100
+
+class Tracker(object):
+    """In-memory store mapping a key (info_hash) to a list of
+    (timestamp, peer) pairs, oldest first.  Entries older than
+    validity_period are expired; a sweep over all keys runs every
+    cleanup_counter puts.
+    """
+
+    def __init__(self, validity_period=VALIDITY_PERIOD,
+                 cleanup_counter=CLEANUP_COUNTER):
+        # key -> list of (timestamp, peer), in append (oldest-first) order.
+        self.tracker_dict = {}
+        self.validity_period = validity_period
+        self.cleanup_counter = cleanup_counter
+        self.put_counter = 0
+
+        
+    def _cleanup_list(self, ts_peers):
+        '''
+        Clean up the list as side effect.
+        '''
+        # NOTE(review): the 'break' means at most ONE expired entry is
+        # removed per call.  The unit tests appear to rely on this
+        # incremental behaviour, so confirm intent before "fixing" it
+        # to drop every expired entry.
+        oldest_valid_ts = time.time() - self.validity_period
+        for i in range(len(ts_peers)):
+            if ts_peers[i][0] < oldest_valid_ts:
+                del ts_peers[i]
+                break
+        
+    
+    def put(self, k, peer):
+        #Clean up every n puts
+        self.put_counter += 1
+        if self.put_counter == self.cleanup_counter:
+            self.put_counter = 0
+            # .keys() builds a list in Python 2, so deleting keys
+            # inside the loop is safe.
+            for k_ in self.tracker_dict.keys():
+                ts_peers = self.tracker_dict[k_]
+                self._cleanup_list(ts_peers)
+                if not ts_peers: #empty list. Delete key
+                    del self.tracker_dict[k_]
+        
+        ts_peers = self.tracker_dict.setdefault(k,[])
+        if ts_peers:
+            # let's see whether the peer is already there
+            # NOTE(review): ts_peers[i] is a (timestamp, peer) tuple, so
+            # comparing it to 'peer' never matches and duplicates are
+            # never removed.  Probably meant ts_peers[i][1] == peer, but
+            # the current tests depend on the existing behaviour; verify
+            # before changing.
+            for i in range(len(ts_peers)):
+                if ts_peers[i] == peer:
+                    del ts_peers[i]
+                    break
+        ts_peers.append((time.time(), peer))
+
+    def get(self, k):
+        # Return the (possibly empty) list of peers stored under k,
+        # expiring at most one stale entry as a side effect.
+        ts_peers = self.tracker_dict.get(k, [])
+        self._cleanup_list(ts_peers)
+        return [ts_peer[1] for ts_peer in ts_peers]
+                               
+    def debug_view(self):
+        # Expose the raw dict so tests can inspect/count keys.
+        return self.tracker_dict
+            
+
+class TrackerMock(object):
+    """Stand-in tracker that returns a fixed peer list for any key."""
+
+    def get(self, k):
+        # Imported lazily so production code never pulls in test constants.
+        import test_const
+        return test_const.PEERS
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/utils.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/utils.py
new file mode 100644 (file)
index 0000000..f4551e4
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) 2009 Raul Jimenez
+# Released under GNU LGPL 2.1
+# See LICENSE.txt for more information
+
+import socket
+
+
+class AddrError(Exception):
+    # Raised when a (compact) address cannot be parsed.
+    pass
+
+class IP6Addr(AddrError):
+    # Placeholder for IPv6 addresses, which are not supported yet.
+    pass
+#TODO2: IPv6 support
+
+
+#TODO2: move binary functions from identifier
+
+def compact_port(port):
+    return ''.join(
+        [chr(port_byte_int) for port_byte_int in divmod(port, 256)])
+
+def uncompact_port(c_port_net):
+    return ord(bin_str[0]) * 256 + ord(bin_str[1])
+
+def compact_addr(addr):
+    return socket.inet_aton(addr[0]) + compact_port(addr[1])
+
+def uncompact_addr(c_addr):
+    try:
+        return (socket.inet_ntoa(c_addr[:-2],
+                                 uncompact_port(c_addr[-2:])))
+    except (socket.error):
+        raise AddrError
+
+compact_peer = compact_addr
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/version.txt b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/kadtracker/version.txt
new file mode 100644 (file)
index 0000000..eb56657
--- /dev/null
@@ -0,0 +1,11 @@
+Path: .
+URL: https://ttuki.vtt.fi/svn/p2p-next/KTH/kadtracker/branches/20100208-from_trunk_r882-kadtracker-1.0.1_integration
+Repository Root: https://ttuki.vtt.fi/svn/p2p-next
+Repository UUID: e16421f0-f15b-0410-abcd-98678b794739
+Revision: 899
+Node Kind: directory
+Schedule: normal
+Last Changed Author: raul
+Last Changed Rev: 898
+Last Changed Date: 2010-02-10 12:11:31 +0100 (Wed, 10 Feb 2010)
+
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/mainlineDHT.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/mainlineDHT.py
new file mode 100644 (file)
index 0000000..81e94fd
--- /dev/null
@@ -0,0 +1,41 @@
+# written by Fabian van der Werf, Arno Bakker
+# Modified by Raul Jimenez to integrate KTH DHT
+# see LICENSE.txt for license information
+
+import sys
+from traceback import print_exc
+
+dht_imported = False
+
+if sys.version.split()[0] >= '2.5':
+    try:
+        from BaseLib.Core.DecentralizedTracking.kadtracker.kadtracker import KadTracker
+        dht_imported = True
+    except (ImportError), e:
+        print_exc()
+
+DEBUG = False
+
+dht = None
+
+def init(*args, **kws):
+    """Create the module-wide KadTracker instance, if the import
+    succeeded and it is not already running.  All arguments are
+    forwarded to KadTracker unchanged.
+    """
+    global dht
+    global dht_imported
+    if DEBUG:
+        print >>sys.stderr,'dht: DHT initialization', dht_imported
+    if dht_imported and dht is None:
+        dht = KadTracker(*args, **kws)
+        if DEBUG:
+            print >>sys.stderr,'dht: DHT running'
+
+def control():
+    # Developer hook: drop into the interactive debugger.
+    import pdb
+    pdb.set_trace()
+
+def deinit():
+    global dht
+    if dht is not None:
+        try:
+            dht.stop()
+        except:
+            pass
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/mainlineDHTChecker.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/mainlineDHTChecker.py
new file mode 100644 (file)
index 0000000..18b8998
--- /dev/null
@@ -0,0 +1,66 @@
+# written by Arno Bakker, Yuan Yuan
+# Modified by Raul Jimenez to integrate KTH DHT
+# see LICENSE.txt for license information
+
+import sys
+from threading import currentThread
+from BaseLib.Core.CacheDB.CacheDBHandler import TorrentDBHandler
+
+DEBUG = False
+
+class mainlineDHTChecker:
+    """Singleton that checks torrent liveness via the mainline DHT:
+    a get_peers lookup that returns peers marks the torrent 'good'
+    in the torrent database.
+    """
+    __single = None
+
+    def __init__(self):
+
+        if DEBUG:
+            print >>sys.stderr,'mainlineDHTChecker: initialization'
+        if mainlineDHTChecker.__single:
+            raise RuntimeError, "mainlineDHTChecker is Singleton"
+        mainlineDHTChecker.__single = self
+        
+        # DHT instance; attached later via register() once the DHT is up.
+        self.dht = None
+        self.torrent_db = TorrentDBHandler.getInstance()
+
+    def getInstance(*args, **kw):
+        # Singleton accessor; constructs the instance on first use.
+        if mainlineDHTChecker.__single is None:
+            mainlineDHTChecker(*args, **kw)
+        return mainlineDHTChecker.__single
+    getInstance = staticmethod(getInstance)
+
+    def register(self,dht):
+        # Attach the running DHT so lookup() can use it.
+        self.dht = dht
+        
+    def lookup(self,infohash):
+        """Start an asynchronous get_peers lookup for infohash; results
+        arrive in got_peers_callback on the network thread."""
+        if DEBUG:
+            print >>sys.stderr,"mainlineDHTChecker: Lookup",`infohash`
+
+        if self.dht is not None:
+            # Imported lazily: only needed (and importable) with DHT support.
+            from BaseLib.Core.DecentralizedTracking.kadtracker.identifier import Id, IdError
+            try:
+                infohash_id = Id(infohash)
+                func = lambda p:self.got_peers_callback(infohash,p)
+                self.dht.get_peers(infohash_id,func)
+            except (IdError):
+                # NOTE(review): message text looks copied from
+                # Rerequester._dht_rerequest and is misleading here.
+                print >>sys.stderr,"Rerequester: _dht_rerequest: self.info_hash is not a valid identifier"
+                return
+        elif DEBUG:
+            print >>sys.stderr,"mainlineDHTChecker: No lookup, no DHT support loaded"
+
+        
+    def got_peers_callback(self,infohash,peers):
+        """ Called by network thread """
+        if DEBUG:
+            print >>sys.stderr,"mainlineDHTChecker: Got",len(peers),"peers for torrent",`infohash`,currentThread().getName()
+            
+        # Any non-empty peer list counts as proof the torrent is alive.
+        alive = len(peers) > 0
+        if alive:
+            # Arno, 2010-02-19: this can be called frequently with the new DHT,
+            # so first check before doing commit.
+            #
+            torrent = self.torrent_db.getTorrent(infohash) # ,keys=('torrent_id','status_id') don't work, st*pid code
+            if torrent['status'] != "good":
+                status = "good"
+                kw = {'status': status}
+                self.torrent_db.updateTorrent(infohash, commit=True, **kw)
+    
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/repex.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/repex.py
new file mode 100644 (file)
index 0000000..9dbdca3
--- /dev/null
@@ -0,0 +1,1116 @@
+# Written by Raynor Vliegendhart
+# see LICENSE.txt for license information
+import sys
+import os
+from time import time as ts_now
+from random import shuffle
+from traceback import print_exc,print_stack
+from threading import RLock,Condition,Event,Thread,currentThread
+from binascii import b2a_hex
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.osutils import *
+from BaseLib.Core.DecentralizedTracking.ut_pex import check_ut_pex_peerlist
+
# Module-wide switches; flip only for local debugging.
DEBUG = False
REPEX_DISABLE_BOOTSTRAP = False

# TODO: Move constants to simpledefs or make it configurable?
REPEX_SWARMCACHE_SIZE = 4      # Number of peers per SwarmCache table
REPEX_STORED_PEX_SIZE = 5      # Number of PEX addresses per peer per SwarmCache
REPEX_PEX_MINSIZE = 1          # minimum number of peers in PEX message before considered a good peer
                               # TODO: Currently set at 1, but what if a swarm consists of 1 user?
REPEX_INTERVAL = 20*60         # Refresh SwarmCache every 20 minutes.
REPEX_MIN_INTERVAL = 5*60      # Minimum time between attempts to prevent starvation in cases like continuous failures.
REPEX_PEX_MSG_MAX_PEERS = 200  # only consider the first 200 peers (Opera10 sends its *whole* neighborhood set)
REPEX_LISTEN_TIME = 50         # listen max. 50 seconds for PEX message 
REPEX_INITIAL_SOCKETS = 4      # number of sockets used initially
REPEX_MAX_SOCKETS = 8          # max number of sockets when all initial peers are checked or after the first failure has occured
REPEX_SCAN_INTERVAL = 1*60     # Scan for stopped Downloads every minute.


# Testing values (left here intentionally for quick manual testing)
# REPEX_INTERVAL = 10
# REPEX_SCAN_INTERVAL = 30
# REPEX_MIN_INTERVAL = 60
# REPEX_DISABLE_BOOTSTRAP = True
+
class RePEXerInterface:
    """
    Describes the RePEXer interface required by the SingleDownload and
    the download engine classes. All methods are callbacks invoked on the
    network thread; this base class deliberately implements them as no-ops.
    """
    
    def repex_ready(self, infohash, connecter, encoder, rerequester):
        """
        Called by network thread. SingleDownload calls this method when 
        everything is set up.
        @param infohash Infohash of download.
        @param connecter Connecter (Connecter.py from the download engine).
        @param encoder Encoder (Encrypter.py from the download engine).
        @param rerequester Rerequester (Rerequester.py from the download engine)
        """
    def repex_aborted(self, infohash, dlstatus=None):
        """
        Called by network thread. SingleDownload calls this method when 
        the download is stopped or restarted, interrupting the RePEX mode.
        @param infohash Infohash of download.
        @param dlstatus Status of the download when the RePEX mode was
        interrupted, or None if unknown.
        """
    def rerequester_peers(self, peers):
        """
        Called by network thread. Rerequester (accessible via Encoder) 
        schedules this method call when peers have arrived.
        @param peers [(dns,id)] or None in case of error.
        """
    def connection_timeout(self, connection):
        """
        Called by network thread. Encoder calls this when a connection
        was not established within Encrypter's autoclose timeout.
        @param connection Encrypter.Connection
        """
    def connection_closed(self, connection):
        """
        Called by network thread. Encoder or Connecter calls this when 
        a connection was closed, either locally or remotely. It is also 
        called right after a timeout. The RePEXer should maintain state 
        on connections it has started.
        @param connection Encrypter.Connection or Connecter.Connection
        """

    def connection_made(self, connection, ext_support):
        """
        Called by network thread. Connecter calls this when a connection
        was established.
        @param connection Connecter.Connection
        @param ext_support Flag indicating whether the connection supports
        the extension protocol.
        """
    
    def got_extend_handshake(self, connection, version=None):
        """
        Called by network thread. Connecter calls this when an extended
        handshake is received. Use connection's supports_extend_msg(msg_name)
        method to figure out whether a message is supported.
        @param connection Connecter.Connection
        @param version Version string or None if not available.
        """

    def got_ut_pex(self, connection, d):
        """
        Called by network thread. Connecter calls this when a PEX message is 
        received.
        @param connection Connecter.Connection
        @param d The PEX dictionary containing 'added' and 'added.f'
        """
+
def c2infohash_dns(connection):
    """
    Utility function to retrieve the infohash and dns of an Encrypter or
    Connecter Connection.

    Note, if the returned dns is None, it is an incoming connection.
    """
    # Both connection flavours expose .connecter.infohash.
    infohash = connection.connecter.infohash
    # A Connecter.Connection (recognizable by its got_ut_pex attribute)
    # wraps an Encrypter.Connection; unwrap it before reading dns.
    # (Reading get_ip()/get_port() instead would be buggy: get_port may
    # return -1.)
    if hasattr(connection, 'got_ut_pex'):
        dns = connection.connection.dns
    else:
        dns = connection.dns
    return infohash, dns
+
def swarmcache_ts(swarmcache):
    """
    Computes the timestamp of a SwarmCache or None if SwarmCache is empty.

    The greatest 'last_seen' value over all entries is currently taken as
    *the* timestamp of the cache. TODO: is this ok?
    """
    if not swarmcache:
        return None
    return max(entry['last_seen'] for entry in swarmcache.values())
+
+class RePEXer(RePEXerInterface):
+    """
+    A RePEXer is associated with a single SingleDownload. While the interface 
+    is set up in a way that allows a RePEXer to be associated with multiple 
+    SingleDownloads, it is easier to maintain state when one RePEXer is created
+    per Download instance.  
+    """
+    # (Actually, the interface does not quite work that way... when the 
+    # rerequester delivers peers, the RePEXer cannot tell for which 
+    # Download they are meant)
+    
+    _observers = []
+    lock = RLock() # needed to atomically update observers list
+        
    @classmethod
    def attach_observer(cls, observer):
        """
        Attaches an observer to observe all RePEXer instances.
        
        @param observer RePEXerStatusCallback.
        """
        # Guard the class-wide observer list against concurrent mutation.
        cls.lock.acquire()   
        try:
            cls._observers.append(observer)
        finally:
            cls.lock.release()
+    
    @classmethod
    def detach_observer(cls, observer):
        """
        Detaches a previously attached observer.
        
        @param observer RePEXerStatusCallback.
        @raise ValueError if the observer was never attached (list.remove).
        """
        cls.lock.acquire()   
        try:
            cls._observers.remove(observer)
        finally:
            cls.lock.release()
+    
    def __init__(self, infohash, swarmcache):
        """
        Constructs a RePEXer object, associated with a download's infohash. 
        
        @param infohash Infohash of download.
        @param swarmcache Previous SwarmCache to check, which is a dict 
        mapping dns to a dict with at least 'last_seen' and 'pex' keys.
        """
        # Note: internally in this class we'll use the name 'peertable',
        # but the outside world calls it more appropiately the SwarmCache.
        self.infohash = infohash
        # Download-engine objects; filled in later by repex_ready().
        self.connecter = None
        self.encoder = None
        self.rerequest = None
        
        self.starting_peertable = swarmcache
        self.final_peertable = None
        self.to_pex = []  # queue of (ip,port) addresses still to contact
        self.active_sockets = 0
        self.max_sockets = REPEX_INITIAL_SOCKETS
        self.attempted = set()  # addresses already tried, to avoid duplicates
        self.live_peers = {} # The pex-capable and useful peers.
        
        # The following sets are usable in a debugging/logging context
        self.bt_connectable = set() # sent BT handshake
        self.bt_ext = set() # supported ext
        self.bt_pex = set() # supported ut_pex
        
        self.dns2version = {} # additional data
        
        self.onlinecount = 0 # number of initial peers found online
        self.shufflecount = 0 # number of peers in peertable unconnectable or useless
        # sum of these two must become len(peertable) since we prefer the initial peertable
        
        self.datacost_bandwidth_keys = ['no_pex_support', 'no_pex_msg', 'pex', 'other']
        self.datacost_counter_keys = ['connection_attempts','connections_made','bootstrap_peers','pex_connections']
        self.datacost = {}
        self.datacost['no_pex_support'] = (0,0) # down,up
        self.datacost['no_pex_msg'] = (0,0) # down,up
        self.datacost['pex'] = (0,0) # down,up
        self.datacost['other'] = (0,0) # down,up
        self.datacost['connection_attempts'] = 0 # number of times connect() successfully created a connection 
        self.datacost['connections_made'] = 0 # number of times connection_made() was called
        self.datacost['bootstrap_peers'] = 0 # total number of peers given to rerequester_peers()
        self.datacost['pex_connections'] = 0 # total number of connections that sent a PEX reply
        
        self.requesting_tracker = False # needed to interact with Rerequester in case of failure
        self.bootstrap_counter = 0 # how often did we call bootstrap()?
        
        self.is_closing = False # flag so that we only call close_all once
        self.done = False # flag so we know when we're done or are aborted
        self.aborted = False # flag so we know the exact done-reason
        self.ready = False # flag so we know whether repex_ready has been called
        self.ready_ts = -1 # for logging purposes, store the time repex_ready event was triggered
        self.end_ts = -1 # for logging purposes, store the time done or aborted was sent
        
        # Added robustness, check whether received SwarmCache is not None
        if self.starting_peertable is None:
            print >>sys.stderr, 'RePEXer: __init__: swarmcache was None, defaulting to {}'
            self.starting_peertable = {}
+    
+    #
+    # RePEXerInterface
+    #
    def repex_ready(self, infohash, connecter, encoder, rerequester):
        """
        Entry point of a repex run: store the download-engine objects and
        start contacting the previous SwarmCache peers plus the PEX
        addresses stored with them.
        """
        if infohash != self.infohash:
            print >>sys.stderr, "RePEXer: repex_ready: wrong infohash:", b2a_hex(infohash)
            return
        if self.done:
            print >>sys.stderr, "RePEXer: repex_ready: already done"
            return
        if DEBUG:
            print >>sys.stderr, "RePEXer: repex_ready:", b2a_hex(infohash)
        self.ready = True
        self.ready_ts = ts_now()
        self.connecter = connecter
        self.encoder = encoder
        self.rerequest = rerequester
        
        # Fill connect queue
        self.to_pex = self.starting_peertable.keys()
        self.max_sockets = REPEX_INITIAL_SOCKETS
        
        # We'll also extend the queue with all peers from the pex messages
        # TODO: investigate whether a more sophisticated queueing scheme is more appropiate
        # For example, only fill the queue when countering a failure
        for dns in self.starting_peertable:
            self.to_pex.extend([pexdns for pexdns,flags in self.starting_peertable[dns].get('pex',[])])
        self.connect_queue()
+        
    def repex_aborted(self, infohash, dlstatus):
        """
        Abort this repex run (download stopped/restarted). Sets the
        done/aborted flags and notifies all observers; idempotent once done.
        """
        if self.done:
            return
        if infohash != self.infohash:
            print >>sys.stderr, "RePEXer: repex_aborted: wrong infohash:", b2a_hex(infohash)
            return
        if DEBUG:
            if dlstatus is None:
                status_string = str(None)
            else:
                status_string = dlstatus_strings[dlstatus]
            print >>sys.stderr, "RePEXer: repex_aborted:", b2a_hex(infohash),status_string
        self.done = True
        self.aborted = True
        self.end_ts = ts_now()
        for observer in self._observers:
            observer.repex_aborted(self, dlstatus)
        # Note that we do not need to close active connections
        #  1) If repex_aborted is called because the download was stopped, 
        #     the connections are closed automatically.
        #  2) If repex_aborted is called because the download was restarted,
        #     open connections are actually useful. 
+        
    def rerequester_peers(self, peers):
        """
        Bootstrap response from the tracker (via Rerequester): append the
        returned addresses to the connect queue and continue.
        @param peers [(dns,id)] or None in case of error.
        """
        self.requesting_tracker = False
        # numpeers == -1 encodes "tracker request failed" in the debug output.
        if peers is not None:
            numpeers = len(peers)
        else:
            numpeers = -1
        if DEBUG:
            print >>sys.stderr, "RePEXer: rerequester_peers: received %s peers" % numpeers
        if numpeers > 0:
            self.to_pex.extend([dns for dns,id in peers])
            self.datacost['bootstrap_peers'] += numpeers
        self.connect_queue()
+        
    def connection_timeout(self, connection):
        """
        Only logs the timeout; all bookkeeping happens in
        connection_closed(), which is called right after a timeout
        (see RePEXerInterface.connection_closed).
        """
        infohash, dns = c2infohash_dns(connection)
        if infohash != self.infohash or dns is None:
            return
        if DEBUG:
            print >>sys.stderr, "RePEXer: connection_timeout: %s:%s" % dns
    def connection_closed(self, connection):
        """
        Called for every closed connection (including after a timeout).
        Accounts the connection's bandwidth under a datacost category,
        updates online/shuffle counts for initial peers, possibly raises
        the socket budget, and keeps working the connect queue.
        @param connection Encrypter.Connection or Connecter.Connection
        """
        self.active_sockets -= 1
        if self.active_sockets < 0:
            self.active_sockets = 0
        infohash, dns = c2infohash_dns(connection)
        c = None # Connecter.Connection
        if hasattr(connection, 'got_ut_pex'):
            c = connection
            connection = c.connection # Encrypter.Connection
        if infohash != self.infohash or dns is None:
            return
        if DEBUG:
            print >>sys.stderr, "RePEXer: connection_closed: %s:%s" % dns
        
        singlesocket = connection.connection
        
        # Update costs and determine success
        # success == True iff the peer sent at least one PEX message.
        success = False
        costtype = 'other'
        if c is not None:
            if c.pex_received > 0:
                costtype = 'pex'
                success = True
            elif not c.supports_extend_msg('ut_pex'):
                costtype = 'no_pex_support'
            elif c.pex_received == 0:
                costtype = 'no_pex_msg'
            
        if costtype:
            d, u = self.datacost[costtype]
            d += singlesocket.data_received
            u += singlesocket.data_sent
            self.datacost[costtype] = (d,u)
        
        # If the peer was in our starting peertable, update online/shuffle count
        if dns in self.starting_peertable:
            if success:
                self.onlinecount += 1
                # NOTE(review): relies on got_ut_pex() having stored dns in
                # live_peers (it should have, given pex_received > 0) --
                # otherwise this raises KeyError; verify.
                self.live_peers[dns]['prev'] = True
            else:
                self.shufflecount += 1
                #self.to_pex.extend([pexdns for pexdns,flags in self.starting_peertable[dns]['pex']])
                # TODO: see repex_ready for now
        
        # Boost on failure of initial peer or when all initial peers are checked
        if (dns in self.starting_peertable and not success) or self.initial_peers_checked():
            self.max_sockets = REPEX_MAX_SOCKETS 
        
        # always try to connect
        self.connect_queue()
+    
    def connection_made(self, connection, ext_support):
        """
        A BT connection was established. Peers without extension-protocol
        support are useless for PEX and closed immediately; for the rest an
        auto-close timer of REPEX_LISTEN_TIME seconds is scheduled.
        @param connection Connecter.Connection
        @param ext_support Whether the peer supports the extension protocol.
        """
        infohash, dns = c2infohash_dns(connection)
        if infohash != self.infohash or dns is None:
            return
        if DEBUG:
            print >>sys.stderr, "RePEXer: connection_made: %s:%s ext_support = %s" % (dns + (ext_support,))
        self.datacost['connections_made'] += 1
        self.bt_connectable.add(dns)
        if ext_support:
            self.bt_ext.add(dns)
            # Start timer on Encryption.Connection
            def auto_close(connection = connection.connection, dns=dns):
                if not connection.closed:
                    if DEBUG:
                        print >>sys.stderr, "RePEXer: auto_close: %s:%s" % dns
                    try:
                        # only in rare circumstances
                        # (like playing around in the REPL which is running in a diff. thread)
                        # an Assertion is thrown.
                        connection.close()
                    except AssertionError, e:
                        if DEBUG:
                            print >>sys.stderr, "RePEXer: auto_close:", `e`
                        # close() failed, so the closed-callback won't fire;
                        # perform the bookkeeping ourselves.
                        self.connection_closed(connection)
            self.connecter.sched(auto_close, REPEX_LISTEN_TIME)
        else:
            connection.close()
+    
    def got_extend_handshake(self, connection, version=None):
        """
        Extended handshake received: keep the connection only if the peer
        advertises ut_pex support; record the client version either way.
        @param connection Connecter.Connection
        @param version Version string or None if not available.
        """
        infohash, dns = c2infohash_dns(connection)
        ut_pex_support = connection.supports_extend_msg('ut_pex')
        if infohash != self.infohash or dns is None:
            return
        if DEBUG:
            print >>sys.stderr, "RePEXer: got_extend_handshake: %s:%s version = %s ut_pex_support = %s" % (dns + (`version`,ut_pex_support ))
        if ut_pex_support:
            self.bt_pex.add(dns)
        else:
            connection.close()
        # Stored even when the connection was just closed; used later by
        # got_ut_pex() when building live_peers entries.
        self.dns2version[dns] = version
+    
+    def got_ut_pex(self, connection, d):
+        infohash, dns = c2infohash_dns(connection)
+        is_tribler_peer = connection.is_tribler_peer()
+        added = check_ut_pex_peerlist(d,'added')[:REPEX_PEX_MSG_MAX_PEERS]
+        addedf = map(ord, d.get('addedf',[]))[:REPEX_PEX_MSG_MAX_PEERS]
+        addedf.extend( [0]*(len(added)-len(addedf)) )
+        IS_SEED = 2
+        IS_SAME = 4
+        if infohash != self.infohash or dns is None:
+            return
+        if DEBUG:
+            print >>sys.stderr, "RePEXer: got_ut_pex: %s:%s pex_size = %s" % (dns + (len(added),))
+        
+        # Remove bad IPs like 0.x.x.x (often received from Transmission peers)
+        for i in range(len(added)-1,-1,-1):
+            if added[i][0].startswith('0.'):
+                added.pop(i)
+                addedf.pop(i)
+        
+        # only store peer when sufficiently connected
+        if len(added) >= REPEX_PEX_MINSIZE:
+            # Clear flag IS_SAME if it was not a Tribler peer
+            if not is_tribler_peer:
+                addedf = [flag & ~IS_SAME for flag in addedf]
+                    
+            # sample PEX message and
+            picks = range(len(added))
+            shuffle(picks)
+            pex_peers = [(added[i],addedf[i]) for i in picks[:REPEX_STORED_PEX_SIZE]]
+            self.live_peers[dns] = {'last_seen' : ts_now(),
+                                    'pex' : pex_peers,
+                                    'version' : self.dns2version[dns]}
+            # Should we do the following? Might lower the load on the tracker even more?
+            # self.to_pex.extend(zip(*pex_peers)[0])
+            # Possible danger: too much crawling, wasting resources?
+            
+            # TODO: Might be more sophisticated to sampling of PEX msg at the end?
+            # (allows us to get more diversity and perhaps also security?)
+        
+        self.datacost['pex_connections'] += 1
+        
+        # Closing time
+        connection.close()
+    
+    #
+    # Status methods
+    #
+    def initial_peers_checked(self):
+        return len(self.starting_peertable) == (self.onlinecount + self.shufflecount)
+    
+    #
+    # Connect and bootstrap methods
+    #
    def connect(self, dns, id=0):
        """
        Start an outgoing connection to dns unless it was tried before.
        On immediate failure the socket/attempt counters are rolled back
        and an initial peer is counted as shuffled out.
        @param dns (ip, port) tuple.
        @param id Peer id passed to the encoder (0 = unknown).
        """
        if dns in self.attempted:
            return
        if DEBUG:
            print >>sys.stderr, "RePEXer: connecting: %s:%s" % dns
        self.active_sockets += 1
        self.datacost['connection_attempts'] += 1
        self.attempted.add(dns)
        if not self.encoder.start_connection(dns, id, forcenew = True):
            print >>sys.stderr, "RePEXer: connecting failed: %s:%s" % dns
            self.active_sockets -= 1
            self.datacost['connection_attempts'] -= 1
            if dns in self.starting_peertable:
                self.shufflecount += 1
+    
+    def next_peer_from_queue(self):
+        # Only return a peer if we can connect
+        if self.can_connect() and self.to_pex:
+            return self.to_pex.pop(0)
+        else:
+            return None
+    
    def can_connect(self):
        # True while we are below the current socket budget (raised from
        # REPEX_INITIAL_SOCKETS to REPEX_MAX_SOCKETS by connection_closed()).
        return self.active_sockets < self.max_sockets
+    
    def connect_queue(self):
        """
        Central scheduler step: finish (close all and send_done) when enough
        live peers were found, otherwise open connections up to the socket
        budget, and bootstrap via the tracker when everything else failed.
        Re-entered from repex_ready, connection_closed and rerequester_peers.
        """
        if DEBUG:
            print >>sys.stderr, "RePEXer: connect_queue: active_sockets: %s" % self.active_sockets
        
        # We get here from repex_ready, connection_closed or from rerequester_peers.
        # First we check whether we can connect, whether we're done, or whether we are closing.
        if self.done or self.is_closing or not self.can_connect():
            return
        # when we have found sufficient live peers and at least the initial peers are checked,
        # we are done and close the remaining connections:
        if self.initial_peers_checked() and len(self.live_peers) >= REPEX_SWARMCACHE_SIZE:
            # close_all() will result in generate several connection_closed events.
            # To prevent reentry of this function, we'll set a flag we check at function entry.
            self.is_closing = True
            self.encoder.close_all()
            # NOTE(review): the assert makes the following 'if' redundant when
            # assertions are enabled; the 'if' is the production safety net.
            assert self.active_sockets == 0
            if self.active_sockets == 0:
                self.send_done()
            return
        
        # Connect to peers in the queue
        peer = self.next_peer_from_queue()
        while peer is not None:
            self.connect(peer)
            peer = self.next_peer_from_queue()
        
        # if we didn't connect at all and we have checked all initial peers, we are forced to bootstrap
        if self.active_sockets == 0 and self.initial_peers_checked():
            if self.bootstrap_counter == 0:
                self.bootstrap()
            elif not self.requesting_tracker:
                # we have contacted the tracker before and that
                # didn't give us any new live peers, so we are
                # forced to give up
                self.send_done()
        
        if DEBUG:
            print >>sys.stderr, "RePEXer: connect_queue: active_sockets: %s" % self.active_sockets
+            
    def bootstrap(self):
        """
        Last resort: ask the tracker for fresh peers. Failure (disabled,
        no Rerequester, empty tracker list, or announce failure) is reported
        back through rerequester_peers(None).
        """
        if DEBUG:
            print >>sys.stderr, "RePEXer: bootstrap"
        self.bootstrap_counter += 1
        if REPEX_DISABLE_BOOTSTRAP or self.rerequest is None:
            self.rerequester_peers(None)
            return
        
        # In the future, bootstrap needs to try 2-Hop TorrentSmell first...
        # Or, Rerequester needs to modified to incorporate 2-Hop TorrentSmell.
        if self.rerequest.trackerlist in [ [], [[]] ]:
            # no trackers?
            self.rerequester_peers(None)
            return
            
        self.requesting_tracker = True
        def tracker_callback(self=self):
            if self.requesting_tracker:
                # in case of failure, call  rerequester_peers with None
                # (on success, rerequester_peers already cleared the flag)
                self.requesting_tracker = False
                self.rerequester_peers(None)
        self.rerequest.announce(callback=tracker_callback)
+    
+    #
+    # Get SwarmCache
+    #
+    def get_swarmcache(self):
+        """
+        Returns the updated SwarmCache and its timestamp when done (self.done), 
+        otherwise the old SwarmCache and its timestamp. The timestamp is 
+        None when the SwarmCache is empty.
+        
+        @return A dict mapping dns to a dict with at least 'last_seen'
+        and 'pex' keys. If it contains a 'prev'=True key-value pair, the peer
+        was known to be in the SwarmCache's predecessor.
+        """
+        if self.done:
+            swarmcache = self.final_peertable
+        else:
+            swarmcache = self.starting_peertable
+        ts = swarmcache_ts(swarmcache)
+        return swarmcache, ts
+    
+    #
+    # When done (or partially in case of peer shortage)
+    #
    def send_done(self):
        """
        Finalize the repex run: trim live_peers down to REPEX_SWARMCACHE_SIZE
        (preferring peers that were already in the previous SwarmCache),
        record why peers were shuffled out, and notify all observers.
        """
        self.done = True
        self.end_ts = ts_now()
        
        # Construct the new SwarmCache by removing excess peers
        swarmcache = dict(self.live_peers)
        to_delete = max(len(swarmcache) - REPEX_SWARMCACHE_SIZE, 0)
        deleted = 0
        # NB: Python 2 dict.keys() returns a list copy, which makes deleting
        # from swarmcache while iterating safe here (would break on Python 3).
        for dns in swarmcache.keys():
            if deleted == to_delete:
                break
            if dns not in self.starting_peertable:
                del swarmcache[dns]
                deleted += 1
        
        # TODO: Should we change the shuffle algorithm such that we 
        # prefer to replace an offline peer with one of the peers
        # in its PEX message?
        
        # create shufflepeers dict, allowing us to deduce why a peer was shuffled out
        shufflepeers = {}
        for dns in self.starting_peertable:
            if dns not in swarmcache:
                shufflepeers[dns] = (dns in self.bt_connectable, dns in self.bt_pex, self.starting_peertable[dns].get('last_seen',0))
        
        self.final_peertable = swarmcache
        for observer in self._observers:
            if DEBUG:
                print >>sys.stderr, "RePEXer: send_done: calling repex_done on", `observer`
            try:
                observer.repex_done(self,
                                    swarmcache,
                                    self.shufflecount,
                                    shufflepeers,
                                    self.bootstrap_counter,
                                    self.datacost)
            except:
                # one faulty observer must not prevent notifying the others
                print_exc()
+    
+    #
+    # Informal string representation of a RePEXer
+    #
    def __str__(self):
        """Informal multi-line summary of this repexer's state, table delta
        (A: added / D: dropped peers) and datacost -- for logging only."""
        if self.done and self.aborted:
            status = 'ABORTED'
        elif self.done:
            status = 'DONE'
        elif self.ready:
            status = 'REPEXING'
        else:
            status = 'WAITING'
        infohash = '[%s]' % b2a_hex(self.infohash)
        summary = ''
        table = ''
        datacost = ''
        # Detailed sections only make sense for a successfully completed run.
        if self.done and not self.aborted:
            infohash = '\n    ' + infohash
            swarmcache = self.final_peertable
            summary = '\n    table size/shuffle/bootstrap %s/%s/%s' % (len(swarmcache), self.shufflecount, self.bootstrap_counter)
            prev_peers = set(self.starting_peertable.keys())
            cur_peers = set(swarmcache.keys())
            
            for dns in sorted(set.symmetric_difference(prev_peers,cur_peers)):
                if dns in cur_peers:
                    table += '\n        A: %s:%s' % dns
                else:
                    table += '\n        D: %s:%s - BT/PEX %s/%s' % (dns + (dns in self.bt_connectable, dns in self.bt_pex))
            table += '\n'
            datacost = '    datacost:\n        %s(%s)/%s BT(PEX) connections made, received %s bootstrap peers\n'
            datacost %= (self.datacost['connections_made'],self.datacost['pex_connections'],
                         self.datacost['connection_attempts'],self.datacost['bootstrap_peers'])
            for k in self.datacost_bandwidth_keys:
                v = self.datacost[k]
                datacost += '          %s: %s bytes down / %s bytes up\n' % (k.ljust(16), str(v[0]).rjust(6), str(v[1]).rjust(6))
        
        return '<RePEXer(%s)%s%s%s%s>' % (status,infohash,summary,table,datacost)
+
class RePEXerStatusCallback:
    """
    Describes the interface required by RePEXer for status callbacks.
    Implementations are registered via RePEXer.attach_observer(); both
    callbacks run on the network thread, so keep them short.
    """
    def repex_aborted(self, repexer, dlstatus=None):
        """
        Called by network thread. RePEXer calls this method when the
        repex task is aborted. It is the propagation of the similarly 
        named method in RePEXerInterface.
        @param repexer RePEXer
        @param dlstatus Status of the download when the RePEX mode was
        interrupted, or None when unknown.
        """

    def repex_done(self, repexer, swarmcache, shufflecount, shufflepeers, bootstrapcount, datacost):
        """
        Called by network thread. RePEXer calls this method when it is done
        repexing. 
        @param repexer RePEXer
        @param swarmcache A dict mapping dns to a dict with 'last_seen' and 
        'pex' keys. The 'pex' key contains a list of (dns,flags) tuples.
        @param shufflecount The number of peers in the old SwarmCache that
        were not responding with a PEX message.
        @param shufflepeers A dict mapping a shuffle peer's dns to a triple,
        indicating (a) whether it sent a BT handshake, (b) whether it supported
        ut_pex, and (c) the last time the peer was seen. 
        @param bootstrapcount The number of times bootstrapping was needed.
        @param datacost A dict with keys 'no_pex_support', 'no_pex_msg', 
        'pex' and 'other', containing (download,upload) byte tuples, and
        keys 'connection_attempts', 'connections_made', 'bootstrap_peers',
        containing simple counters.
        """
+
+# TODO: move this class to a module in Policies
+class RePEXScheduler(RePEXerStatusCallback):
+    """
+    The RePEXScheduler periodically requests a list of DownloadStates from
+    the Session and repexes the stopped downloads in a round robin fashion.
+    """
+    __single = None    # used for multithreaded singletons pattern
+    lock = RLock()
+    
    @classmethod
    def getInstance(cls, *args, **kw):
        # Singleton pattern with double-checking to ensure that it can only create one object
        # (fast unlocked read, then re-check under the class lock).
        if cls.__single is None:
            cls.lock.acquire()   
            try:
                if cls.__single is None:
                    cls.__single = cls(*args, **kw)
            finally:
                cls.lock.release()
        return cls.__single
+    
    def __init__(self):
        # always use getInstance() to create this object
        # ARNOCOMMENT: why isn't the lock used on this read?!
        if self.__single != None:
            raise RuntimeError, "RePEXScheduler is singleton"
        from BaseLib.Core.Session import Session # Circular import fix
        self.session = Session.get_instance()
        # NB: this instance-level lock shadows the class-level lock used by
        # getInstance(); all methods below use the instance lock.
        self.lock = RLock()
        self.active = False
        self.current_repex = None # infohash
        self.downloads = {} # infohash -> Download; in order to stop Downloads that are done repexing
        self.last_attempts = {} # infohash -> ts; in order to prevent starvation when a certain download
                                #                 keeps producing empty SwarmCaches
+        
+    
    def start(self):
        """ Starts the RePEX scheduler: registers as observer of all
        RePEXers and schedules network_scan() via the session's
        download-states callback. Idempotent while active. """
        if DEBUG:
            print >>sys.stderr, "RePEXScheduler: start"
        self.lock.acquire()
        try:
            if self.active:
                return
            self.active = True
            self.session.set_download_states_callback(self.network_scan)
            RePEXer.attach_observer(self)
        finally:
            self.lock.release()
+    
    def stop(self):
        """ Stops the RePEX scheduler: detaches from RePEXer and schedules
        network_stop_repex() to cancel any repexing in progress.
        Idempotent while inactive. """
        if DEBUG:
            print >>sys.stderr, "RePEXScheduler: stop"
        self.lock.acquire()
        try:
            if not self.active:
                return
            RePEXer.detach_observer(self)
            self.active = False
            self.session.set_download_states_callback(self.network_stop_repex)
        finally:
            self.lock.release()
+        
+    def network_scan(self, dslist):
+        """
+        Called by network thread. Scans the DownloadStates for a stopped,
+        finished Download whose SwarmCache is the most outdated and starts
+        a RePEX phase for it.
+        @param dslist List of DownloadStates
+        @return (delay, getpeerlist) pair for set_download_states_callback:
+        (REPEX_SCAN_INTERVAL, False) to be called again later, or
+        (-1, False) when inactive, already repexing, or a RePEX phase was
+        just started.
+        """
+        # TODO: only repex last X Downloads instead of all.
+        if DEBUG:
+            print >>sys.stderr, "RePEXScheduler: network_scan: %s DownloadStates" % len(dslist)
+        self.lock.acquire()
+        # Exceptions raised while holding the lock are stored and re-raised
+        # below, after the lock has been released.
+        exception = None
+        try:
+            try:
+                if not self.active or self.current_repex is not None:
+                    return -1, False
+                
+                now = ts_now()
+                # Select the stopped & finished Download with the oldest
+                # SwarmCache, skipping those attempted too recently
+                # (starvation guard via self.last_attempts).
+                found_infohash = None
+                found_download = None
+                found_age = -1
+                for ds in dslist:
+                    download = ds.get_download()
+                    infohash = download.tdef.get_infohash()
+                    debug_msg = None
+                    if DEBUG:
+                        print >>sys.stderr, "RePEXScheduler: network_scan: checking", `download.tdef.get_name_as_unicode()`
+                    if ds.get_status() == DLSTATUS_STOPPED and ds.get_progress()==1.0:
+                        # TODO: only repex finished downloads or also prematurely stopped ones?
+                        age = now - (swarmcache_ts(ds.get_swarmcache()) or 0)
+                        last_attempt_ago = now - self.last_attempts.get(infohash, 0)
+                        
+                        if last_attempt_ago < REPEX_MIN_INTERVAL:
+                            debug_msg = "...too soon to try again, last attempt was %ss ago" % last_attempt_ago
+                        elif age < REPEX_INTERVAL:
+                            debug_msg = "...SwarmCache too fresh: %s seconds" % age
+                        else:
+                            if age >= REPEX_INTERVAL:
+                                debug_msg = "...suitable for RePEX!"
+                                if age > found_age:
+                                    found_download = download
+                                    found_infohash = infohash
+                                    found_age = age
+                    else:
+                        debug_msg = "...not repexable: %s %s%%" % (dlstatus_strings[ds.get_status()], ds.get_progress()*100)
+                    if DEBUG:
+                        print >>sys.stderr, "RePEXScheduler: network_scan:", debug_msg
+                
+                if found_download is None:
+                    if DEBUG:
+                        print >>sys.stderr, "RePEXScheduler: network_scan: nothing found yet"
+                    return REPEX_SCAN_INTERVAL, False
+                else:
+                    if DEBUG:
+                        print >>sys.stderr, "RePEXScheduler: network_scan: found %s, starting RePEX phase." % `found_download.tdef.get_name_as_unicode()`
+                    # Restart the chosen Download in repexing mode; further
+                    # scans are suppressed until repex_aborted/repex_done
+                    # clears current_repex.
+                    self.current_repex = found_infohash
+                    self.downloads[found_infohash] = found_download
+                    found_download.set_mode(DLMODE_NORMAL)
+                    found_download.restart(initialdlstatus=DLSTATUS_REPEXING)
+                    return -1, False
+            except Exception, e:
+                exception = e
+        finally:
+            self.lock.release()
+        if exception is not None:
+            # [E0702, RePEXScheduler.network_scan] Raising NoneType
+            # while only classes, instances or string are allowed
+            # pylint: disable-msg=E0702
+            raise exception
+    
+    def network_stop_repex(self, dslist):
+        """Called by network thread. Stops every Download that is still in
+        the DLSTATUS_REPEXING state (used when the scheduler is stopped).
+        @param dslist List of DownloadStates
+        @return (-1, False) so this callback is not rescheduled."""
+        if DEBUG:
+            print >>sys.stderr, "RePEXScheduler: network_stop_repex:"
+        for d in [ds.get_download() for ds in dslist if ds.get_status() == DLSTATUS_REPEXING]:
+            if DEBUG:
+                print >>sys.stderr, "\t...",`d.tdef.get_name_as_unicode()`
+            d.stop()
+        return -1, False
+        
+    #
+    # RePEXerStatusCallback interface (called by network thread)
+    #
+    def repex_aborted(self, repexer, dlstatus=None):
+        """ RePEXer callback: the current RePEX attempt was aborted.
+        Records the attempt time (so the same swarm is not retried within
+        REPEX_MIN_INTERVAL), clears current_repex and resumes scanning.
+        @param repexer The RePEXer reporting the abort.
+        @param dlstatus Optional DLSTATUS_* value at abort time, or None.
+        """
+        if DEBUG:
+            if dlstatus is None:
+                status_string = str(None)
+            else:
+                status_string = dlstatus_strings[dlstatus]
+            print >>sys.stderr, "RePEXScheduler: repex_aborted:", b2a_hex(repexer.infohash), status_string
+        self.current_repex = None
+        self.last_attempts[repexer.infohash] = ts_now() 
+        self.session.set_download_states_callback(self.network_scan)
+
+    def repex_done(self, repexer, swarmcache, shufflecount, shufflepeers, bootstrapcount, datacost):
+        """ RePEXer callback: a RePEX phase finished successfully.
+        Records the attempt time, stops the repexed Download and resumes
+        scanning for the next candidate.
+        """
+        if DEBUG:
+            print >>sys.stderr, 'RePEXScheduler: repex_done: %s\n\ttable size/shuffle/bootstrap %s/%s/%s' % (
+                                b2a_hex(repexer.infohash), len(swarmcache), shufflecount, bootstrapcount)
+        self.current_repex = None
+        self.last_attempts[repexer.infohash] = ts_now()
+        self.downloads[repexer.infohash].stop()
+        self.session.set_download_states_callback(self.network_scan)
+
+#
+# Classes for logging/measurement purposes
+#
+
+class RePEXLogger(RePEXerStatusCallback):
+    """
+    For measurement: This class' sole purpose is to log all repex done 
+    messages.
+    """
+    __single = None    # used for multithreaded singletons pattern
+    lock = RLock()     # class-level lock, shared by getInstance() and instances
+    
+    @classmethod
+    def getInstance(cls, *args, **kw):
+        # Singleton pattern with double-checking to ensure that it can only create one object
+        if cls.__single is None:
+            cls.lock.acquire()   
+            try:
+                if cls.__single is None:
+                    cls.__single = cls(*args, **kw)
+            finally:
+                cls.lock.release()
+        return cls.__single
+    
+    def __init__(self):
+        # always use getInstance() to create this object
+        # ARNOCOMMENT: why isn't the lock used on this read?!
+        if self.__single != None:
+            raise RuntimeError, "RePEXLogger is singleton"
+        self.repexlog = RePEXLogDB.getInstance()
+        self.active = False    # True between start() and stop()
+    
+    def start(self):
+        """ Starts the RePEX logger.
+        Thread-safe and idempotent; attaches this logger as RePEXer observer.
+        """
+        if DEBUG:
+            print >>sys.stderr, "RePEXLogger: start"
+        self.lock.acquire()
+        try:
+            if self.active:
+                return
+            self.active = True
+            RePEXer.attach_observer(self)
+        finally:
+            self.lock.release()
+    
+    def stop(self):
+        """ Stops the RePEX logger.
+        Thread-safe and idempotent; detaches the RePEXer observer.
+        """
+        if DEBUG:
+            print >>sys.stderr, "RePEXLogger: stop"
+        self.lock.acquire()
+        try:
+            if not self.active:
+                return
+            RePEXer.detach_observer(self)
+            self.active = False
+        finally:
+            self.lock.release()
+    
+    #
+    # RePEXerStatusCallback interface
+    #
+    def repex_aborted(self, repexer, dlstatus=None):
+        """ RePEXer callback: attempt aborted; only logged, nothing stored. """
+        if dlstatus is None:
+            status_string = str(None)
+        else:
+            status_string = dlstatus_strings[dlstatus]
+        if DEBUG:
+            print >>sys.stderr, "RePEXLogger: repex_aborted:", b2a_hex(repexer.infohash), status_string
+    
+    def repex_done(self, repexer, swarmcache, shufflecount, shufflepeers, bootstrapcount, datacost):
+        """ RePEXer callback: store the resulting SwarmCache and statistics
+        in the RePEXLogDB (committed immediately). """
+        if DEBUG:
+            print >>sys.stderr, 'RePEXLogger: repex_done: %s' % repexer
+        self.repexlog.storeSwarmCache(repexer.infohash, swarmcache,
+                                      (shufflecount,shufflepeers,bootstrapcount,datacost),
+                                      timestamp=repexer.ready_ts, endtimestamp=repexer.end_ts,
+                                      commit=True)
+
+class RePEXLogDB:
+    """
+    For measurements, stores the intermediate RePEX results.
+    
+    Persisted as a pickled (version, history) pair in PEERDB_FILE in the
+    session's state dir; history is a bounded list of
+    (infohash, swarmcache, stats, timestamp, endtimestamp) tuples.
+    """
+    __single = None    # used for multithreaded singletons pattern
+    lock = RLock()
+    PEERDB_FILE = 'repexlog.pickle'
+    PEERDB_VERSION = '0.7'
+    MAX_HISTORY = 20480 # let's say 1K per SwarmCache, 20480 would be max 20 MB...
+    
+    @classmethod
+    def getInstance(cls, *args, **kw):
+        # Singleton pattern with double-checking to ensure that it can only create one object
+        # NOTE: the first call must pass the session argument required by __init__.
+        if cls.__single is None:
+            cls.lock.acquire()   
+            try:
+                if cls.__single is None:
+                    cls.__single = cls(*args, **kw)
+            finally:
+                cls.lock.release()
+        return cls.__single
+    
+    def __init__(self,session):
+        # always use getInstance() to create this object
+        # ARNOCOMMENT: why isn't the lock used on this read?!
+        if self.__single != None:
+            raise RuntimeError, "RePEXLogDB is singleton"
+        #SQLiteCacheDBBase.__init__(self, *args, **kargs)
+        
+        state_dir = session.get_state_dir()
+        self.db = os.path.join(state_dir, self.PEERDB_FILE)
+        if not os.path.exists(self.db):
+            # no database file yet: start with an empty history
+            self.version = self.PEERDB_VERSION
+            self.history = []
+        else:
+            # database file holds a pickled (version, history) pair
+            import cPickle as pickle
+            f = open(self.db,'rb')
+            tuple = pickle.load(f)    # NB: local name shadows the builtin 'tuple'
+            self.version, self.history = tuple
+            f.close()
+    
+    def commit(self):
+        """
+        Commits the last changes to file.
+        """
+        self.lock.acquire()   
+        try:
+            import cPickle as pickle
+            f = open(self.db,'wb')
+            pickle.dump((self.version, self.history), f)
+            f.close()
+        finally:
+            self.lock.release()
+        
+    def storeSwarmCache(self, infohash, swarmcache, stats = None, timestamp=-1, endtimestamp=-1, commit=False):
+        """
+        Stores the SwarmCache for a given infohash. Does not automatically
+        commit the changes to file.
+        @param infohash SHA1 hash of the swarm.
+        @param swarmcache A dict mapping dns to a dict with at least 
+        'last_seen' and 'pex' keys.
+        @param stats (shufflecount, shufflepeers, bootstrapcount, datacost) 
+        quadruple or None.
+        @param timestamp Optional timestamp, by default -1. Empty SwarmCaches 
+        don't contain any time information at all, so it's useful to explicitly
+        specify the time when the SwarmCache was created.
+        @param endtimestamp Optional end-of-RePEX timestamp, by default -1.
+        @param commit Flag to commit automatically.
+        """
+        if DEBUG:
+            print >>sys.stderr, 'RePEXLogDB: storeSwarmCache: DEBUG:\n\t%s\n\t%s\n\t%s' % (
+                            #b2a_hex(infohash), swarmcache, stats) # verbose
+                            b2a_hex(infohash), '', '') # less cluttered
+        self.lock.acquire()
+        try:
+            self.history.append((infohash,swarmcache,stats,timestamp,endtimestamp))
+            # Bound the history: keep only the newest MAX_HISTORY entries.
+            if len(self.history) > self.MAX_HISTORY:
+                del self.history[:-self.MAX_HISTORY]
+            if commit:
+                self.commit()
+        finally:
+            self.lock.release()
+    
+    def getHistoryAndCleanup(self):
+        """
+        For measurement purposes, gets the history of all stored SwarmCaches
+        (infohash, swarmcache, stats). This method clears the history and 
+        commits the empty history to file.
+        """
+        self.lock.acquire()
+        try:
+            res = self.history
+            self.history = []
+            self.commit()
+            return res
+        finally:
+            self.lock.release()
+    
+
+#
+# Manual testing class
+#
+
+class RePEXerTester(RePEXerStatusCallback):
+    """
+    Manual testing class for in the Python REPL.
+    
+    Usage:
+    
+    >>> from BaseLib.Core.TorrentDef import TorrentDef
+    >>> from BaseLib.Core.DownloadConfig import *
+    >>> from BaseLib.Core.DecentralizedTracking.repex import *
+    >>> tdef = TorrentDef.load('foo.torrent')
+    >>> dscfg = DownloadStartupConfig()
+    >>> dscfg.set_dest_dir('/tmp')
+    >>> r = RePEXerTester() 
+    >>> d = r.stopped_download(tdef,dscfg)
+    >>> sys.stdout=sys.stderr # optionally
+    >>> r.test_repex(d)
+    ...
+    >>> r.test_repex(d)
+    ...
+    >>> r.test_repex(d, swarmcache={('xxx.xxx.xxx.xxx',zzz) : {'last_seen':0, 'pex': []}})
+    ...
+    >>> r.test_repex(d, use_peerdb=True)
+    ...
+    
+    r.repexers[Download] and r.swarmcaches[Download] contain a list of created 
+    repexers and the SwarmCaches they have returned. 
+    """
+    def __init__(self):
+        from BaseLib.Core.Session import Session # Circular import fix
+        self.session = Session.get_instance()
+        self.peerdb = RePEXLogDB.getInstance()
+        self.downloads = {} # infohash -> Download 
+        self.swarmcaches = {} # Download -> [SwarmCache]
+        self.repexers = {} # Download -> [repexer]
+        # register as global observer
+        RePEXer.attach_observer(self)
+    
+    def stopped_download(self, tdef, dcfg):
+        """
+        For testing purposes, creates a stopped download given a TorrentDef 
+        and config.
+        @param tdef  A finalized TorrentDef.
+        @param dcfg DownloadStartupConfig or None, in which case 
+        a new DownloadStartupConfig() is created with its default settings
+        and the result becomes the runtime config of this Download.
+        @return Download
+        """
+        d = self.session.start_download(tdef,dcfg)
+        d.stop()
+        self.downloads[d.tdef.get_infohash()] = d
+        return d
+    
+    def test_repex(self, download, swarmcache=None):
+        """
+        Performs a RePEX on a stopped Download.
+        @param download A stopped Download
+        @param swarmcache Initial SwarmCache to use. If None, the latest
+        SwarmCache in the Download's pstate will be used.
+        """
+        download.stop()
+        self.downloads[download.tdef.get_infohash()] = download
+        if swarmcache is not None:
+            # Hacking into pstate must happen after network_stop!
+            def hack_into_pstate(d=download,swarmcache=swarmcache):
+                d.pstate_for_restart.setdefault('dlstate',{})['swarmcache'] = swarmcache
+            self.session.lm.rawserver.add_task(hack_into_pstate,0.0)
+        
+        download.set_mode(DLMODE_NORMAL)
+        download.restart(initialdlstatus=DLSTATUS_REPEXING)
+    
+    #
+    # RePEXerStatusCallback interface
+    #
+    def repex_aborted(self, repexer, dlstatus=None):
+        """ RePEXer callback: record the aborted attempt; the SwarmCache
+        list gets a None entry for this repexer. """
+        if dlstatus is None:
+            status_string = str(None)
+        else:
+            status_string = dlstatus_strings[dlstatus]
+        print >>sys.stderr, "RePEXerTester: repex_aborted:", `repexer`,status_string
+        download = self.downloads[repexer.infohash]
+        self.repexers.setdefault(download,[]).append(repexer)
+        self.swarmcaches.setdefault(download,[]).append(None)
+    
+    def repex_done(self, repexer, swarmcache, shufflecount, shufflepeers, bootstrapcount, datacost):
+        """ RePEXer callback: record the repexer and its SwarmCache, and
+        log the result to the RePEXLogDB (committed immediately). """
+        download = self.downloads[repexer.infohash]
+        print >>sys.stderr, 'RePEXerTester: repex_done: %s' % repexer
+        self.repexers.setdefault(download,[]).append(repexer)
+        self.swarmcaches.setdefault(download,[]).append(swarmcache)
+        
+        # Always log to RePEXLogDB
+        self.peerdb.storeSwarmCache(repexer.infohash, swarmcache,
+                                    (shufflecount,shufflepeers,bootstrapcount,datacost),
+                                    timestamp=repexer.ready_ts, endtimestamp=repexer.end_ts,
+                                    commit=True)
diff --git a/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/ut_pex.py b/instrumentation/next-share/BaseLib/Core/DecentralizedTracking/ut_pex.py
new file mode 100644 (file)
index 0000000..23f9814
--- /dev/null
@@ -0,0 +1,169 @@
+# Written by Arno Bakker, Bram Cohen
+# see LICENSE.txt for license information
+
+__fool_epydoc = 481
+"""
+uTorrent Peer Exchange (PEX) Support:
+-------------------------------------
+As documented in
+    https://trac.transmissionbt.com/browser/trunk/extras/extended-messaging.txt
+    BitTorrent-5.0.8/BitTorrent/Connector.py
+    (link no longer available) http://transmission.m0k.org/trac/browser/trunk/misc/utorrent.txt 
+    
+The PEX message payload is a bencoded dict with three keys:
+ 'added': the set of peers met since the last PEX
+ 'added.f': a flag for every peer, apparently with the following values:
+    \x00: unknown, assuming default
+    \x01: Prefers encryption (as suggested by LH-ABC-3.2.0/BitTorrent/BT1/Connector.py)
+    \x02: Is seeder (as suggested by BitTorrent-5.0.8/BitTorrent/Connector.py)
+  OR-ing them together is allowed as I've seen \x03 values.
+ 'dropped': the set of peers dropped since last PEX
+
+03/09/09 Boudewijn: Added a 'is same kind of peer as me' bit to the
+'added.f' value. When a Tribler peer sends this bit as True this means
+'is also a Tribler peer'.
+    \x04: is same kind of peer
+
+The mechanism is insecure because there is no way to know if the peer addresses
+are really of some peers that are running BitTorrent, or just DoS victims.
+For peer addresses that come from trackers we at least know that the peer host
+ran BitTorrent and was downloading this swarm (assuming the tracker is trustworthy).
+
+"""
+import sys
+from types import DictType,StringType
+from BaseLib.Core.BitTornado.BT1.track import compact_peer_info
+from BaseLib.Core.BitTornado.bencode import bencode
+
+EXTEND_MSG_UTORRENT_PEX_ID = chr(1) # Can be any value, the name 'ut_pex' is standardized
+EXTEND_MSG_UTORRENT_PEX = 'ut_pex' # note case sensitive
+
+DEBUG = False
+
+def create_ut_pex(addedconns,droppedconns,thisconn):
+    """ Create the bencoded payload of a ut_pex message.
+    @param addedconns Connections to report in 'added' (thisconn is skipped).
+    @param droppedconns Connections to report in 'dropped'.
+    @param thisconn The connection this message will be sent over; it is
+    excluded from 'added' and from the 'added.f' flags.
+    @return Bencoded dict with 'added', 'added.f' and 'dropped' keys.
+    """
+    #print >>sys.stderr,"ut_pex: create_ut_pex:",addedconns,droppedconns,thisconn
+    d = {}
+    compactedpeerstr = compact_connections(addedconns,thisconn)
+    d['added'] = compactedpeerstr
+    flags = ''
+    for i in range(len(addedconns)):
+        conn = addedconns[i]
+        if conn == thisconn:
+            continue
+        # Flag bits, see module docstring:
+        # 1 = prefers encryption, 2 = is seeder, 4 = same kind of peer (Tribler)
+        flag = 0
+        if conn.get_extend_encryption():
+            flag |= 1
+        if conn.download is not None and conn.download.peer_is_complete():
+            flag |= 2
+        if conn.is_tribler_peer():
+            flag |= 4
+            
+        #print >>sys.stderr,"ut_pex: create_ut_pex: add flag",`flag`
+        flags += chr(flag)
+    d['added.f'] = flags
+    compactedpeerstr = compact_connections(droppedconns)
+    d['dropped'] = compactedpeerstr
+    return bencode(d)
+
+def check_ut_pex(d):
+    """ Validate a decoded ut_pex message.
+    @param d The bdecoded message payload; must be a dict.
+    @return Triple (same_apeers, apeers, dpeers): peers flagged in 'added.f'
+    as the same kind of client (bit 4), the remaining added peers, and the
+    dropped peers. Each peer is an (ip, port) pair.
+    Raises ValueError when the message is malformed.
+    """
+    if type(d) != DictType:
+        raise ValueError('ut_pex: not a dict')
+
+    # 'same' peers are peers that indicate (with a bit) that the peer
+    # in apeers is the same client type as itself. So if the sender of
+    # the pex message is a Tribler peer the same_apeers will also be
+    # tribler peers
+    same_apeers = []
+
+    apeers = check_ut_pex_peerlist(d,'added')
+    dpeers = check_ut_pex_peerlist(d,'dropped')
+    if 'added.f' in d:
+        addedf = d['added.f']
+        if type(addedf) != StringType:
+            raise ValueError('ut_pex: added.f: not string')
+        if len(addedf) != len(apeers) and not len(addedf) == 0:
+            # KTorrent sends an empty added.f, be nice
+            raise ValueError('ut_pex: added.f: more flags than peers')
+
+        # we need all flags to be integers
+        addedf = map(ord, addedf)
+
+        # filter out all 'same' peers. the loop runs in reverse order
+        # so the indexes don't change as we pop them from the apeers
+        # list
+        for i in range(min(len(apeers),len(addedf))-1,-1,-1):
+            if addedf[i] & 4:
+                same_apeers.append(apeers.pop(i))
+
+                # for completeness we should also pop the item from
+                # addedf even though we don't use it anymore
+                addedf.pop(i)
+                
+    # Arno, 2008-09-12: Be liberal in what we receive
+    ##else:
+        ##raise ValueError('ut_pex: added.f: missing')
+    
+    if DEBUG:
+        print >>sys.stderr,"ut_pex: Got",apeers
+    
+    return (same_apeers,apeers,dpeers)
+    
+def check_ut_pex_peerlist(d,name):
+    """ Validate and decode the compact peer list stored under d[name].
+    @param d The ut_pex message dict.
+    @param name Key to check ('added' or 'dropped').
+    @return List of (ip, port) pairs; [] when the key is absent.
+    Raises ValueError when the value is not a string, not a multiple of
+    6 bytes, or contains a localhost address.
+    """
+    if name not in d:
+        # Arno, 2008-09-12: Be liberal in what we receive, some clients
+        # leave out 'dropped' key
+        ##raise ValueError('ut_pex:'+name+': missing')
+        return []
+    peerlist = d[name]
+    if type(peerlist) != StringType:
+        raise ValueError('ut_pex:'+name+': not string')
+    if len(peerlist) % 6 != 0:
+        raise ValueError('ut_pex:'+name+': not multiple of 6 bytes')
+    peers = decompact_connections(peerlist)
+    for ip,port in peers:
+        if ip == '127.0.0.1':
+            raise ValueError('ut_pex:'+name+': address is localhost')
+    return peers
+    
+def ut_pex_get_conns_diff(currconns,prevconns):
+    """ Compute the connection delta between two PEX rounds.
+    @param currconns Current list of connections.
+    @param prevconns Previous list of connections.
+    @return (addedconns, droppedconns): connections present only in
+    currconns, and connections present only in prevconns.
+    """
+    addedconns = []
+    droppedconns = []
+    for conn in currconns:
+        if not (conn in prevconns):
+            # new conn
+            addedconns.append(conn)
+    for conn in prevconns:
+        if not (conn in currconns):
+            # old conn, was dropped
+            droppedconns.append(conn)
+    return (addedconns,droppedconns)
+
+
+def compact_connections(conns,thisconn=None):
+    """ Compact a list of connections into the 6-bytes-per-peer string
+    form. See BitTornado/BT1/track.py.
+    @param conns Connections to compact.
+    @param thisconn Optional connection to exclude from the result.
+    @return Concatenated compact peer string.
+    Raises ValueError when a connection's listen port is unknown.
+    """
+    compactpeers = []
+    for conn in conns:
+        if conn == thisconn:
+            continue
+        ip = conn.get_ip()
+        port = conn.get_extend_listenport()
+        if port is None:
+            raise ValueError("ut_pex: compact: listen port unknown?!")
+        else:
+            compactpeer = compact_peer_info(ip,port)
+            compactpeers.append(compactpeer)
+        
+    # Create compact representation of peers
+    compactpeerstr = ''.join(compactpeers)
+    return compactpeerstr
+
+
+def decompact_connections(p):
+    """ Inverse of compact_connections: parse 6-byte entries (4 bytes IP,
+    2 bytes big-endian port). See BitTornado/BT1/Rerequester.py.
+    @param p Compact peer string; length must be a multiple of 6.
+    @return List of (ip, port) pairs, ip as dotted-quad string.
+    """
+    peers = []
+    for x in xrange(0, len(p), 6):
+        ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
+        port = (ord(p[x+4]) << 8) | ord(p[x+5])
+        peers.append((ip, port))
+    return peers
+
diff --git a/instrumentation/next-share/BaseLib/Core/Download.py b/instrumentation/next-share/BaseLib/Core/Download.py
new file mode 100644 (file)
index 0000000..19cdcdd
--- /dev/null
@@ -0,0 +1,174 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+""" The representation of a running BT download/upload. """
+
+import sys
+from traceback import print_exc,print_stack
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.defaults import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.Base import *
+from BaseLib.Core.APIImplementation.DownloadRuntimeConfig import DownloadRuntimeConfig
+from BaseLib.Core.APIImplementation.DownloadImpl import DownloadImpl
+from BaseLib.Core.APIImplementation.miscutils import *
+from BaseLib.Core.osutils import *
+
+
+class Download(DownloadRuntimeConfig,DownloadImpl):
+    """
+    Representation of a running BT download/upload.
+    
+    A Download implements the DownloadConfigInterface which can be used to
+    change download parameters at runtime (for selected parameters).
+    
+    cf. libtorrent torrent_handle
+    """
+    
+    #
+    # Internal methods
+    #
+    def __init__(self,session,tdef):
+        """ Internal constructor
+        @param session Session
+        @param tdef TorrentDef 
+        """
+        DownloadImpl.__init__(self,session,tdef)
+    #
+    # Public methods
+    #
+    def get_def(self):
+        """
+        Return the read-only torrent definition (TorrentDef) for this Download.
+        @return A TorrentDef object.
+        """
+        return DownloadImpl.get_def(self)
+
+    
+    def set_state_callback(self,usercallback,getpeerlist=False):
+        """ 
+        Set a callback for retrieving the state of the download. This callback
+        will be called immediately with a DownloadState object as first parameter.
+        The callback method must return a tuple (when,getpeerlist) where "when" 
+        indicates whether the callback should be called again and represents a
+        number of seconds from now. If "when" <= 0.0 the callback will not be
+        called again. "getpeerlist" is a boolean that indicates whether the 
+        DownloadState passed to the callback on the next invocation should
+        contain info about the set of current peers.
+        
+        The callback will be called by a popup thread which can be used
+        indefinitely (within reason) by the higher level code.
+                
+        @param usercallback Function that accepts DownloadState as parameter and 
+        returns a (float,boolean) tuple.
+        """
+        DownloadImpl.set_state_callback(self,usercallback,getpeerlist=getpeerlist)
+        
+
+    def stop(self):
+        """ Stops the Download, i.e. closes all connections to other peers. """
+        # Called by any thread 
+        DownloadImpl.stop(self)
+        
+    def restart(self,initialdlstatus=None):
+        """
+        Restarts the stopped Download.
+        
+        @param initialdlstatus An optional parameter to restart the Download in 
+        a specific state.
+        """
+        # Called by any thread
+        DownloadImpl.restart(self, initialdlstatus)
+        
+    #
+    # Config parameters that only exists at runtime 
+    #
+    def set_max_desired_speed(self,direct,speed):
+        """ Sets the maximum desired upload/download speed for this Download. 
+        @param direct The direction (UPLOAD/DOWNLOAD) 
+        @param speed The speed in KB/s.
+        """
+        DownloadImpl.set_max_desired_speed(self,direct,speed)
+
+    def get_max_desired_speed(self,direct):
+        """ Returns the maximum desired upload/download speed for this Download.
+        @param direct The direction (UPLOAD/DOWNLOAD)
+        @return The previously set speed in KB/s 
+        """
+        return DownloadImpl.get_max_desired_speed(self,direct)
+    
+    def get_dest_files(self, exts = None):
+        """ Returns the filenames on disk to which this Download saves
+        @param exts Optional list of filename extensions to filter on.
+        @return A list of (filename-in-torrent, disk filename) tuples.
+        """
+        return DownloadImpl.get_dest_files(self, exts)
+        
+    #
+    # Cooperative download
+    #
+    def ask_coopdl_helpers(self,permidlist):
+        """ Ask the specified list of peers to help speed up this download
+        @param permidlist List of permids of peers to ask.
+        Raises OperationNotPossibleWhenStoppedException when the Download
+        is stopped.
+        """
+        # called by any thread
+        self.dllock.acquire()
+        try:
+            # ARNOCOMMENT: WE NEED PERMID+IP FOR COOP DL. How to access DB? Can't
+            # do it on main thread, can't do it on network thread.
+            
+            peerreclist = self.session.lm.peer_db.getPeers(permidlist, ['permid','ip','port'])
+            
+            if self.sd is not None:
+                # The lambda re-checks self.sd on the network thread, since
+                # the Download may be stopped before the task runs.
+                ask_coopdl_helpers_lambda = lambda:self.sd is not None and self.sd.ask_coopdl_helpers(peerreclist)
+                self.session.lm.rawserver.add_task(ask_coopdl_helpers_lambda,0)
+            else:
+                raise OperationNotPossibleWhenStoppedException()
+        finally:
+            self.dllock.release()
+        
+    # To retrieve the list of current helpers, see DownloadState
+    
+    def stop_coopdl_helpers(self,permidlist):
+        """ Ask the specified list of peers to stop helping speed up this 
+        download 
+        @param permidlist List of permids of peers to ask.
+        Raises OperationNotPossibleWhenStoppedException when the Download
+        is stopped.
+        """
+        # called by any thread
+        self.dllock.acquire()
+        try:
+            # ARNOCOMMENT: WE NEED PERMID+IP FOR COOP DL. How to access DB? Can't
+            # do it on main thread, can't do it on network thread.
+            peerreclist = self.session.lm.peer_db.getPeers(permidlist, ['permid','ip','port'])
+                       
+            if self.sd is not None:
+                # Same deferred-execution pattern as ask_coopdl_helpers().
+                stop_coopdl_helpers_lambda = lambda:self.sd is not None and self.sd.stop_coopdl_helpers(peerreclist)
+                self.session.lm.rawserver.add_task(stop_coopdl_helpers_lambda,0)
+            else:
+                raise OperationNotPossibleWhenStoppedException()
+        finally:
+            self.dllock.release()
+    
+# SelectiveSeeding_
+    def set_seeding_policy(self,smanager):
+        """ Assign the seeding policy to use for this Download.
+        @param smanager An instance of Tribler.Policies.SeedingManager 
+        Raises OperationNotPossibleWhenStoppedException when the Download
+        is stopped.
+        """
+        self.dllock.acquire()
+        try:
+            if self.sd is not None:
+                # Deferred to the network thread; re-checks self.sd there.
+                set_seeding_smanager_lambda = lambda:self.sd is not None and self.sd.get_bt1download().choker.set_seeding_manager(smanager)
+                self.session.lm.rawserver.add_task(set_seeding_smanager_lambda,0)
+            else:
+                raise OperationNotPossibleWhenStoppedException()
+        finally:
+            self.dllock.release()
+# _SelectiveSeeding
+
+    def get_peer_id(self):
+        """ Return the BitTorrent peer ID used by this Download, or None, when
+        the download is STOPPED.
+        @return 20-byte peer ID. 
+        """
+        self.dllock.acquire()
+        try:
+            if self.sd is not None:
+                return self.sd.peerid
+            else:
+                return None
+        finally:
+            self.dllock.release()
diff --git a/instrumentation/next-share/BaseLib/Core/DownloadConfig.py b/instrumentation/next-share/BaseLib/Core/DownloadConfig.py
new file mode 100644 (file)
index 0000000..8151991
--- /dev/null
@@ -0,0 +1,885 @@
+# Written by Arno Bakker, George Milescu 
+# see LICENSE.txt for license information
+""" Controls how a TorrentDef is downloaded (rate, where on disk, etc.) """
+
+#
+# WARNING: When extending this class:
+#
+# 1. Add a JavaDoc description for each method you add.
+# 2. Also add the methods to APIImplementation/DownloadRuntimeConfig.py  
+# 3. Document your changes in API.py
+#
+#
+
+import sys
+import os
+#import time
+import copy
+import pickle
+from types import StringType
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.defaults import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.Base import *
+from BaseLib.Core.APIImplementation.miscutils import *
+
+from BaseLib.Core.osutils import getfreespace, get_desktop_dir
+
+
+class DownloadConfigInterface:
+    """
+    (key,value) pair config of per-torrent runtime parameters,
+    e.g. destdir, file-allocation policy, etc. Also options to advocate
+    torrent, e.g. register in DHT, advertise via Buddycast.
+    
+    Use DownloadStartupConfig to manipulate download configs before download 
+    startup time. This is just a parent class.
+     
+    cf. libtorrent torrent_handle
+    """
+    def __init__(self,dlconfig=None):
+        """ Create a download config.
+        @param dlconfig If not None, wrap the given (key,value) dict
+        (copy-constructor use). NOTE(review): the dict is aliased, not
+        copied -- this object and the caller share and mutate the same
+        dict; presumably intentional so runtime configs track live state,
+        confirm against APIImplementation/DownloadRuntimeConfig.py.
+        If None, start from the built-in defaults with 'saveas' pointing
+        at the default destination directory.
+        """
+        
+        if dlconfig is not None: # copy constructor
+            self.dlconfig = dlconfig
+            return
+        
+        self.dlconfig = {}
+        
+        # Define the built-in default here
+        self.dlconfig.update(dldefaults)
+
+        self.dlconfig['saveas'] = get_default_dest_dir()
+
+
+    def set_dest_dir(self,path):
+        """ Sets the directory where to save this Download.
+        @param path A path of a directory.
+        """
+        self.dlconfig['saveas'] = path
+
+    def get_dest_dir(self):
+        """ Gets the directory where to save this Download.
+        """
+        return self.dlconfig['saveas']
+
+    def set_video_event_callback(self,usercallback,dlmode=DLMODE_VOD):
+        """ Download the torrent in Video-On-Demand mode or as live stream.
+        When a playback event occurs, the usercallback function will be 
+        called, with the following list of arguments:
+        <pre>
+            Download,event,params
+        </pre>
+        In which event is a string, and params a dictionary. The following
+        events are supported:
+        <pre>
+        VODEVENT_START:
+            The params dictionary will contain the fields
+        
+                mimetype,stream,filename,length,bitrate
+        
+            If the filename is set, the video can be read from there. If not,
+            the video can be read from the stream, which is a file-like object 
+            supporting the read(),seek(), and close() operations. The MIME type
+            of the video is given by "mimetype", the length of the stream in
+            bytes by "length" which may be None if the length is unknown (e.g.
+            when live streaming). bitrate is either the bitrate as specified
+            in the TorrentDef, or if that was lacking an dynamic estimate 
+            calculated using the videoanalyser (e.g. ffmpeg), see
+            SessionConfig.set_video_analyser_path()
+        
+            To fetch a specific file from a multi-file torrent, use the 
+            set_selected_files() method. This method sets the mode to DLMODE_VOD 
+
+        VODEVENT_PAUSE:
+            The download engine would like video playback to be paused as the
+            data is not coming in fast enough / the data due is not available
+            yet.
+            
+            The params dictionary contains the fields
+            
+                autoresume
+                
+            "autoresume" indicates whether or not the Core will generate
+            a VODEVENT_RESUME when it is ready again, or that this is left
+            to the core user.
+                    
+        VODEVENT_RESUME:
+            The download engine would like video playback to resume.
+        </pre>
+        The usercallback should ignore events it does not support.
+  
+        The usercallback will be called by a popup thread which can be used
+        indefinitely (within reason) by the higher level code.
+        
+        @param usercallback  A function with the above signature.
+        @param dlmode        The download mode to start in (_VOD or _SVC)
+        """
+        self.dlconfig['mode'] = dlmode
+        self.dlconfig['vod_usercallback'] = usercallback        
+
+
+    def set_video_events(self,events=[]):
+        """ Sets which events will be supported with the usercallback set
+        by set_video_event_callback. Supporting the VODEVENT_START event is 
+        mandatory, and can therefore be omitted from the list.
+
+        @param events        A list of supported VODEVENT_* events.
+        """
+
+        # create a copy to avoid loosing the info
+        self.dlconfig['vod_userevents'] = events[:]
+
+    def set_video_source(self,videosource,authconfig=None,restartstatefilename=None):
+        """ Provides the live video source for this torrent from an external
+        source.
+        
+        @param videosource  A file-like object providing the live video stream
+        (i.e., supports read() and close())
+        @param authconfig The key information for source authentication of
+        packets. See LiveSourceAuthConfig and TorrentDef.create_live_torrent()
+        @param restartstatefilename A filename to read/write state needed for a
+        graceful restart of the source.
+        """
+        self.dlconfig['video_source'] = videosource
+        if authconfig is None:
+            from BaseLib.Core.LiveSourceAuthConfig import LiveSourceAuthConfig
+
+            authconfig = LiveSourceAuthConfig(LIVE_AUTHMETHOD_NONE)
+        self.dlconfig['video_source_authconfig'] = authconfig
+        self.dlconfig['video_source_restartstatefilename'] = restartstatefilename
+
+    def set_video_ratelimit(self,ratelimit):
+        """ Sets a limit on the speed at which the video stream is to be read.
+        Useful when creating a live stream from file or any other faster-than-live
+        data stream.
+        
+        @param ratelimit    The maximum speed at which to read from the stream (bps)
+        """
+        self.dlconfig['video_ratelimit'] = ratelimit
+
+    def set_mode(self,mode):
+        """ Sets the mode of this download. 
+        @param mode DLMODE_NORMAL/DLMODE_VOD """
+        self.dlconfig['mode'] = mode 
+
+    def set_live_aux_seeders(self,seeders):
+        """ Sets a number of live seeders, auxiliary servers that
+        get high priority at the source server to distribute its content
+        to others.
+        @param seeders A list of [IP address,port] lists.
+        """
+        self.dlconfig['live_aux_seeders'] = seeders
+
+    def get_mode(self):
+        """ Returns the mode of this download. 
+        @return DLMODE_NORMAL/DLMODE_VOD """
+        return self.dlconfig['mode']
+
+    def get_video_event_callback(self):
+        """ Returns the function that was passed to set_video_event_callback().
+        @return A function.
+        """
+        return self.dlconfig['vod_usercallback']
+
+    def get_video_events(self):
+        """ Returns the function that was passed to set_video_events().
+        @return A list of events.
+        """
+        return self.dlconfig['vod_userevents']
+
+    def get_video_source(self):
+        """ Returns the object that was passed to set_video_source().
+        @return A file-like object.
+        """
+        return self.dlconfig['video_source']
+
+    def get_video_ratelimit(self):
+        """ Returns the speed at which the video stream is read (bps).
+        @return An integer.
+        """
+        return self.dlconfig['video_ratelimit']
+
+    def get_live_aux_seeders(self):
+        """ Returns the aux. live seeders set. 
+        @return A list of [IP address,port] lists. """
+        return self.dlconfig['live_aux_seeders']
+
+
+    def set_selected_files(self,files):
+        """ Select which files in the torrent to download. The filenames must 
+        be the names as they appear in the torrent def. Trivially, when the 
+        torrent contains a file 'sjaak.avi' the files parameter must 
+        be 'sjaak.avi'. When the torrent contains multiple files and is named 
+        'filecollection', the files parameter must be 
+            os.path.join('filecollection','sjaak.avi')  
+        
+        @param files Can be a single filename or a list of filenames (e.g. 
+        ['harry.avi','sjaak.avi']). 
+        """
+        # TODO: can't check if files exists, don't have tdef here.... bugger
+        if type(files) == StringType: # convenience
+            files = [files] 
+            
+        if self.dlconfig['mode'] == DLMODE_VOD and len(files) > 1:
+            raise ValueError("In Video-On-Demand mode only 1 file can be selected for download")
+        
+        # Ric: added svc case
+        elif self.dlconfig['mode'] == DLMODE_SVC and len(files) < 2:
+            raise ValueError("In SVC Video-On-Demand mode at least 2 files have to be selected for download")
+                
+        
+        self.dlconfig['selected_files'] = files
+        
+
+    def get_selected_files(self):
+        """ Returns the list of files selected for download.
+        @return A list of strings. """
+        return self.dlconfig['selected_files']
+
+    
+
+    #
+    # Common download performance parameters
+    #
+    def set_max_speed(self,direct,speed):
+        """ Sets the maximum upload or download speed for this Download. 
+        @param direct The direction (UPLOAD/DOWNLOAD) 
+        @param speed The speed in KB/s.
+        """
+        if direct == UPLOAD:
+            self.dlconfig['max_upload_rate'] = speed
+        else:
+            self.dlconfig['max_download_rate'] = speed
+
+    def get_max_speed(self,direct):
+        """ Returns the configured maximum speed.
+        Returns the speed in KB/s. """
+        if direct == UPLOAD:
+            return self.dlconfig['max_upload_rate']
+        else:
+            return self.dlconfig['max_download_rate']
+
+    def set_max_conns_to_initiate(self,nconns):
+        """ Sets the maximum number of connections to initiate for this 
+        Download.
+        @param nconns A number of connections. 
+        """
+        self.dlconfig['max_initiate'] = nconns
+
+    def get_max_conns_to_initiate(self):
+        """ Returns the configured maximum number of connections to initiate.
+        @return A number of connections.
+        """
+        return self.dlconfig['max_initiate']
+
+    def set_max_conns(self,nconns):
+        """ Sets the maximum number of connections to connections for this 
+        Download.
+        @param nconns A number of connections.
+        """
+        self.dlconfig['max_connections'] = nconns
+
+    def get_max_conns(self):
+        """ Returns the configured maximum number of connections.
+        @return A number of connections. 
+        """
+        return self.dlconfig['max_connections']
+
+    #
+    # ProxyService_ parameters
+    #
+    def get_coopdl_role(self):
+        """ Returns the role which the download plays in a cooperative download,
+        <pre>
+        - COOPDL_ROLE_COORDINATOR: other peers help this download
+        - COOPDL_ROLE_HELPER: this download helps another peer download faster.
+        </pre>
+        The default is coordinator, and it is set to helper by the
+        set_coopdl_coordinator_permid() method. 
+        """
+        return self.dlconfig['coopdl_role']
+
+    def set_coopdl_coordinator_permid(self,permid):
+        """ Calling this method makes this download a helper in a cooperative
+        download, helping the peer identified by the specified permid. This peer
+        acts as coordinator, telling this download which parts of the content
+        to download. 
+        @param permid A PermID.
+        """
+        self.dlconfig['coopdl_role'] = COOPDL_ROLE_HELPER
+        self.dlconfig['coopdl_coordinator_permid'] = permid
+
+    def get_coopdl_coordinator_permid(self):
+        """ Returns the configured coordinator permid.
+        @return A PermID
+        """
+        return self.dlconfig['coopdl_coordinator_permid'] 
+
+    # See DownloadRuntime config for adding, removing and getting list of
+    # helping peers.
+
+    def set_proxy_mode(self,value):
+        """ Set the proxymode for current download
+        .
+        @param value: the proxyservice mode: PROXY_MODE_OFF, PROXY_MODE_PRIVATE or PROXY_MODE_SPEED
+        """
+        if value == PROXY_MODE_OFF or value == PROXY_MODE_PRIVATE or value == PROXY_MODE_SPEED:
+            self.dlconfig['proxy_mode'] = value
+        else:
+            # If the method is called with an incorrect value, turn off the ProxyMode for this download
+            self.dlconfig['proxy_mode'] = PROXY_MODE_OFF
+
+    def get_proxy_mode(self):
+        """ Returns the proxymode of the client.
+        @return: one of the possible three values: PROXY_MODE_OFF, PROXY_MODE_PRIVATE, PROXY_MODE_SPEED
+        """
+        return self.dlconfig['proxy_mode']
+    
+    def set_no_helpers(self,value):
+        """ Set the maximum number of helpers used for a download.
+        @param value: a positive integer number
+        """
+        if value >= 0:
+            self.dlconfig['max_helpers'] = value
+        else:
+            self.dlconfig['max_helpers'] = 0
+
+    def get_no_helpers(self):
+        """ Returns the maximum number of helpers used for a download. 
+        @return: a positive integer number
+        """
+        return self.dlconfig['max_helpers']
+    #
+    # _ProxyService
+    #
+        
+
+    #
+    # Advanced download parameters
+    # 
+    def set_max_uploads(self,value):
+        """ Set the maximum number of uploads to allow at once. 
+        @param value A number.
+        """
+        self.dlconfig['max_uploads'] = value
+
+    def get_max_uploads(self):
+        """ Returns the maximum number of uploads.
+        @return A number. """
+        return self.dlconfig['max_uploads']
+
+    def set_keepalive_interval(self,value):
+        """ Set the number of seconds to pause between sending keepalives.
+        @param value An interval """
+        self.dlconfig['keepalive_interval'] = value
+
+    def get_keepalive_interval(self):
+        """ Returns the keepalive interval.
+        @return A number of seconds. """
+        return self.dlconfig['keepalive_interval']
+
+    def set_download_slice_size(self,value):
+        """ Set how many bytes to query for per request. 
+        @param value A number of bytes. 
+        """
+        self.dlconfig['download_slice_size'] = value
+
+    def get_download_slice_size(self):
+        """ Returns the number of bytes to query per request.
+        @return A number of bytes. """
+        return self.dlconfig['download_slice_size']
+
+    def set_upload_unit_size(self,value):
+        """ When limiting upload rate, how many bytes to send at a time.
+        @param value A number of bytes. """
+        self.dlconfig['upload_unit_size'] = value
+
+    def get_upload_unit_size(self):
+        """ Returns the set upload unit size.
+        @returns A number of bytes.
+        """
+        return self.dlconfig['upload_unit_size']
+
+    def set_request_backlog(self,value):
+        """ Maximum number of requests to keep in a single pipe at once. 
+        @param value A number of requests. 
+        """
+        self.dlconfig['request_backlog'] = value
+
+    def get_request_backlog(self):
+        """ Returns the request backlog.
+        @return A number of requests.
+        """
+        return self.dlconfig['request_backlog']
+
+    def set_max_message_length(self,value):
+        """ Maximum message-length prefix to accept over the wire - larger 
+        values get the connection dropped. 
+        @param value A number of bytes. 
+        """
+        self.dlconfig['max_message_length'] = value
+
+    def get_max_message_length(self):
+        """ Returns the maximum message length that is accepted.
+        @return A number of bytes.
+        """
+        return self.dlconfig['max_message_length']
+
+    def set_max_slice_length(self,value):
+        """ Maximum length slice to send to peers, larger requests are ignored.
+        @param value A number of bytes. 
+        """
+        self.dlconfig['max_slice_length'] = value
+
+    def get_max_slice_length(self):
+        """ Returns the maximum slice length that is accepted. 
+        @return A number of bytes.
+        """
+        return self.dlconfig['max_slice_length']
+
+    def set_max_rate_period(self,value):
+        """ Maximum amount of time to guess the current rate estimate. 
+        @param value A number of seconds. """
+        self.dlconfig['max_rate_period'] = value
+
+    def get_max_rate_period(self):
+        """ Returns the maximum rate period. 
+        @return A number of seconds.
+        """
+        return self.dlconfig['max_rate_period']
+
+    def set_upload_rate_fudge(self,value):
+        """ Time equivalent of writing to kernel-level TCP buffer, for rate 
+        adjustment.
+        @param value A number of seconds.
+        """
+        self.dlconfig['upload_rate_fudge'] = value
+
+    def get_upload_rate_fudge(self):
+        """ Returns the upload rate fudge.
+        @return A number of seconds.
+        """
+        return self.dlconfig['upload_rate_fudge']
+
+    def set_tcp_ack_fudge(self,value):
+        """ How much TCP ACK download overhead to add to upload rate 
+        calculations. I.e. when a message is received we add X percent
+        of this message to our upload rate to account for TCP ACKs that
+        were sent during the reception process. (0 = disabled)
+        @param value A percentage 
+        """
+        self.dlconfig['tcp_ack_fudge'] = value
+
+    def get_tcp_ack_fudge(self):
+        """ Returns the TCP ACK fudge.
+        @return A percentage.
+        """
+        return self.dlconfig['tcp_ack_fudge']
+
+    def set_rerequest_interval(self,value):
+        """ Time to wait between requesting more peers from tracker.
+        @param value An interval in seconds. 
+        """
+        self.dlconfig['rerequest_interval'] = value
+
+    def get_rerequest_interval(self):
+        """ Returns the tracker re-request interval.
+        @return A number of seconds.
+        """
+        return self.dlconfig['rerequest_interval']
+
+    def set_min_peers(self,value):
+        """ Minimum number of peers to not do rerequesting.
+        @param value A number of peers.
+         """
+        self.dlconfig['min_peers'] = value
+
+    def get_min_peers(self):
+        """ Returns the minimum number of peers.
+        @return A number of peers.
+        """
+        return self.dlconfig['min_peers']
+
+    def set_http_timeout(self,value):
+        """ Number of seconds to wait before assuming that a HTTP connection 
+        has timed out.
+        @param value A number of seconds.
+         """
+        self.dlconfig['http_timeout'] = value
+
+    def get_http_timeout(self):
+        """ Returns the HTTP timeout.
+        @return A number of seconds.
+        """
+        return self.dlconfig['http_timeout']
+
+    def set_check_hashes(self,value):
+        """ Whether to check the integrity of the data on disk using the 
+        hashes from the torrent definition.
+        @param value Boolean 
+        """
+        self.dlconfig['check_hashes'] = value
+
+    def get_check_hashes(self):
+        """ Returns whether to check hashes.
+        @return Boolean. """
+        return self.dlconfig['check_hashes']
+
+    def set_alloc_type(self,value):
+        """ Set disk-allocation type:
+        <pre>
+        * DISKALLOC_NORMAL:  Allocates space as data is received
+        * DISKALLOC_BACKGROUND: Also adds space in the background
+        * DISKALLOC_PREALLOCATE: Reserves space up front (slow)
+        * DISKALLOC_SPARSE: Is only for filesystems that support it by default 
+          (UNIX)
+        </pre>
+        @param value A DISKALLOC_* policy. 
+        """
+        self.dlconfig['alloc_type'] = value
+
+    def get_alloc_type(self):
+        """ Returns the disk-allocation policy. 
+        @return DISKALLOC_*
+        """
+        return self.dlconfig['alloc_type']
+
+    def set_alloc_rate(self,value):
+        """ Set the rate to allocate space at using background 
+        allocation (DISKALLOC_BACKGROUND).
+        
+        @param value A rate in MB/s.
+        """
+        self.dlconfig['alloc_rate'] = value
+
+    def get_alloc_rate(self):
+        """ Returns the background disk-allocation rate.
+        @return A number of megabytes per second.
+        """ 
+        return self.dlconfig['alloc_rate']
+
+    def set_buffer_reads(self,value):
+        """ Whether to buffer disk reads.
+        @param value Boolean
+        """
+        self.dlconfig['buffer_reads'] = value
+
+    def get_buffer_reads(self):
+        """ Returns whether to buffer reads.
+        @return Boolean. """
+        return self.dlconfig['buffer_reads']
+
+    def set_write_buffer_size(self,value):
+        """ The maximum amount of space to use for buffering disk writes 
+        (0 = disabled).
+        @param value A buffer size in megabytes.
+        """
+        self.dlconfig['write_buffer_size'] = value
+
+    def get_write_buffer_size(self):
+        """ Returns the write buffer size.
+        @return A number of megabytes.
+        """
+        return self.dlconfig['write_buffer_size']
+
+    def set_breakup_seed_bitfield(self,value):
+        """ Whether to send an incomplete BITFIELD and then fills with HAVE
+        messages, in order to get around intellectually-challenged Internet
+        Service Provider manipulation. 
+        @param value Boolean 
+        """
+        self.dlconfig['breakup_seed_bitfield'] = value
+
+    def get_breakup_seed_bitfield(self):
+        """ Returns whether to send an incomplete BITFIELD message.
+        @return Boolean. """
+        return self.dlconfig['breakup_seed_bitfield']
+
+    def set_snub_time(self,value):
+        """ Seconds to wait for data to come in over a connection before 
+        assuming it's semi-permanently choked.
+        @param value  A number of seconds. 
+        """
+        self.dlconfig['snub_time'] = value
+
+    def get_snub_time(self):
+        """ Returns the snub time.
+        @return A number of seconds.
+        """
+        return self.dlconfig['snub_time']
+
+    def set_rarest_first_cutoff(self,value):
+        """ Number of downloads at which to switch from random to rarest first.
+        @param value A number of downloads.
+        """
+        self.dlconfig['rarest_first_cutoff'] = value
+
+    def get_rarest_first_cutoff(self):
+        """ Returns the rarest first cutoff.
+        @return A number of downloads. 
+        """
+        return self.dlconfig['rarest_first_cutoff']
+
+    def set_rarest_first_priority_cutoff(self,value):
+        """ The number of peers which need to have a piece before other 
+        partials take priority over rarest first policy.
+        @param value A number of peers.
+        """
+        self.dlconfig['rarest_first_priority_cutoff'] = value
+
+    def get_rarest_first_priority_cutoff(self):
+        """ Returns the rarest-first priority cutoff.
+        @return A number of peers. """
+        return self.dlconfig['rarest_first_priority_cutoff']
+
+    def set_min_uploads(self,value):
+        """ The number of uploads to fill out to with extra optimistic unchokes.
+        @param value A number of uploads.
+        """
+        self.dlconfig['min_uploads'] = value
+
+    def get_min_uploads(self):
+        """ Returns the minimum number of uploads. 
+        @return A number of uploads. """
+        return self.dlconfig['min_uploads']
+
+    def set_max_files_open(self,value):
+        """ The maximum number of files to keep open at a time, 0 means no 
+        limit. 
+        @param value A number of files.
+        """
+        self.dlconfig['max_files_open'] = value
+
+    def get_max_files_open(self):
+        """ Returns the maximum number of open files. 
+        @return A number of files. """
+        return self.dlconfig['max_files_open']
+
+    def set_round_robin_period(self,value):
+        """ The number of seconds between the client's switching upload targets.
+        @param value A number of seconds.
+        """
+        self.dlconfig['round_robin_period'] = value
+
+    def get_round_robin_period(self):
+        """ Returns the round-robin period.
+        @return A number of seconds. """
+        return self.dlconfig['round_robin_period']
+
+    def set_super_seeder(self,value):
+        """ whether to use special upload-efficiency-maximizing routines (only
+        for dedicated seeds).
+        @param value Boolean
+        """
+        self.dlconfig['super_seeder'] = value
+
+    def get_super_seeder(self):
+        """ Returns whether super seeding is enabled.
+        @return Boolean. """
+        return self.dlconfig['super_seeder']
+
+    def set_security(self,value):
+        """ Whether to enable extra security features intended to prevent abuse,
+        such as checking for multiple connections from the same IP address.
+        @param value Boolean
+        """
+        self.dlconfig['security'] = value
+
+    def get_security(self):
+        """ Returns the security setting.
+        @return Boolean. """
+        return self.dlconfig['security']
+
+    def set_auto_kick(self,value):
+        """ Whether to automatically kick/ban peers that send bad data.
+        @param value Boolean
+        """
+        self.dlconfig['auto_kick'] = value
+
+    def get_auto_kick(self):
+        """ Returns whether autokick is enabled.
+        @return Boolean. """
+        return self.dlconfig['auto_kick']
+
+    def set_double_check_writes(self,value):
+        """ Whether to double-check data being written to the disk for errors 
+        (may increase CPU load).
+        @param value Boolean
+        """
+        self.dlconfig['double_check'] = value
+
+    def get_double_check_writes(self):
+        """ Returns whether double-checking on writes is enabled. """
+        return self.dlconfig['double_check']
+
+    def set_triple_check_writes(self,value):
+        """ Whether to thoroughly check data being written to the disk (may 
+        slow disk access).
+        @param value Boolean """
+        self.dlconfig['triple_check'] = value
+
+    def get_triple_check_writes(self):
+        """ Returns whether triple-checking on writes is enabled. """
+        return self.dlconfig['triple_check']
+
+    def set_lock_files(self,value):
+        """ Whether to lock files the Download is working with. 
+        @param value Boolean """
+        self.dlconfig['lock_files'] = value
+
+    def get_lock_files(self):
+        """ Returns whether locking of files is enabled. """
+        return self.dlconfig['lock_files']
+
+    def set_lock_while_reading(self,value):
+        """ Whether to lock access to files being read.
+        @param value Boolean
+        """
+        self.dlconfig['lock_while_reading'] = value
+
+    def get_lock_while_reading(self):
+        """ Returns whether locking of files for reading is enabled.
+        @return Boolean. """
+        return self.dlconfig['lock_while_reading']
+
+    def set_auto_flush(self,value):
+        """ Minutes between automatic flushes to disk (0 = disabled).
+        @param value A number of minutes.
+        """
+        self.dlconfig['auto_flush'] = value
+
+    def get_auto_flush(self):
+        """ Returns the auto flush interval.
+        @return A number of minutes. """
+        return self.dlconfig['auto_flush']
+
+    def set_exclude_ips(self,value):
+        """ Set a list of IP addresses to be excluded.
+        @param value A list of IP addresses in dotted notation.
+        """
+        self.dlconfig['exclude_ips'] = value
+
+    def get_exclude_ips(self):
+        """ Returns the list of excluded IP addresses. 
+        @return A list of strings. """
+        return self.dlconfig['exclude_ips']
+
+    def set_ut_pex_max_addrs_from_peer(self,value):
+        """ Maximum number of addresses to accept from peer via the uTorrent 
+        Peer Exchange extension (0 = disable PEX)
+        @param value A number of IP addresses.
+        """
+        self.dlconfig['ut_pex_max_addrs_from_peer'] = value
+
+    def get_ut_pex_max_addrs_from_peer(self):
+        """ Returns the maximum number of IP addresses to accept from a peer 
+        via ut_pex. 
+        @return A number of addresses.
+        """
+        return self.dlconfig['ut_pex_max_addrs_from_peer']
+
+    def set_poa(self, poa):
+        if poa:
+            from base64 import encodestring
+            self.dlconfig['poa'] = encodestring(poa.serialize()).replace("\n","")
+            import sys
+            print >> sys.stderr,"POA is set:",self.dlconfig['poa']
+        
+    def get_poa(self):
+        """ Return the stored Proof-of-Access, deserialized, or None when
+        no 'poa' key was ever set.
+        @return A ClosedSwarm.POA instance, or None.
+        @raise Exception When the 'poa' key exists but holds a falsy value.
+        """
+        if 'poa' in self.dlconfig:
+            if not self.dlconfig['poa']:
+                raise Exception("No POA specified")
+            # Imports are deferred so configs without a POA never pull in
+            # the ClosedSwarm machinery.
+            from BaseLib.Core.ClosedSwarm import ClosedSwarm
+            from base64 import decodestring
+            # NOTE(review): unconditional debug trace to stderr; consider
+            # guarding with a DEBUG flag.
+            print >> sys.stderr,"get_poa:",self.dlconfig['poa']
+            poa = ClosedSwarm.POA.deserialize(decodestring(self.dlconfig['poa']))
+            return poa
+        return None
+        
+    
+    def set_same_nat_try_internal(self,value):
+        """ Whether to try to detect if a peer is behind the same NAT as
+        this Session and then establish a connection over the internal
+        network
+        @param value Boolean
+        """
+        self.dlconfig['same_nat_try_internal'] = value
+
+    def get_same_nat_try_internal(self):
+        """ Returns whether same NAT detection is enabled.
+        @return Boolean """
+        return self.dlconfig['same_nat_try_internal']
+    
+    def set_unchoke_bias_for_internal(self,value):
+        """ Amount to add to unchoke score for peers on the internal network.
+        @param value A number
+        """
+        self.dlconfig['unchoke_bias_for_internal'] = value
+
+    def get_unchoke_bias_for_internal(self):
+        """ Returns the bias for peers on the internal network.
+        @return A number
+        """
+        return self.dlconfig['unchoke_bias_for_internal']
+    
+    
+class DownloadStartupConfig(DownloadConfigInterface,Serializable,Copyable):
+    """
+    (key,value) pair config of per-torrent runtime parameters,
+    e.g. destdir, file-allocation policy, etc. Also options to advocate
+    torrent, e.g. register in DHT, advertise via Buddycast.
+    
+    cf. libtorrent torrent_handle
+    """
+    def __init__(self,dlconfig=None):
+        """ Normal constructor for DownloadStartupConfig (copy constructor 
+        used internally) """
+        DownloadConfigInterface.__init__(self,dlconfig)
+    #
+    # Class method
+    #
+    def load(filename):
+        """
+        Load a saved DownloadStartupConfig from disk.
+        
+        @param filename  An absolute Unicode filename
+        @return DownloadStartupConfig object
+        """
+        # Class method, no locking required
+        f = open(filename,"rb")
+        dlconfig = pickle.load(f)
+        dscfg = DownloadStartupConfig(dlconfig)
+        f.close()
+        return dscfg
+    load = staticmethod(load)
+
+    def save(self,filename):
+        """ Save the DownloadStartupConfig to disk.
+        @param filename  An absolute Unicode filename
+        """
+        # Called by any thread
+        f = open(filename,"wb")
+        pickle.dump(self.dlconfig,f)
+        f.close()
+
+    #
+    # Copyable interface
+    # 
+    def copy(self):
+        config = copy.copy(self.dlconfig)
+        return DownloadStartupConfig(config)
+
+
+def get_default_dest_dir():
+    """ Returns the default dir to save content to.
+    <pre> 
+    * For Win32/MacOS: Desktop\TriblerDownloads
+    * For UNIX: 
+        If Desktop exists: Desktop/TriblerDownloads
+        else: Home/TriblerDownloads
+    </pre>
+    @return A unicode path ending in 'TriblerDownloads'.
+    """
+    # get_desktop_dir picks the platform-appropriate base directory;
+    # os.path.join applies the native separator.
+    uhome = get_desktop_dir()
+    return os.path.join(uhome,u'TriblerDownloads')
+    
diff --git a/instrumentation/next-share/BaseLib/Core/DownloadState.py b/instrumentation/next-share/BaseLib/Core/DownloadState.py
new file mode 100644 (file)
index 0000000..990ff10
--- /dev/null
@@ -0,0 +1,386 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+""" Contains a snapshot of the state of the Download at a specific point in time. """
+import time
+
+import sys
+from traceback import print_exc,print_stack
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.defaults import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.Base import *
+from BaseLib.Core.DecentralizedTracking.repex import REPEX_SWARMCACHE_SIZE
+
+DEBUG = False
+
+class DownloadState(Serializable):
+    """
+    Contains a snapshot of the state of the Download at a specific
+    point in time. Using a snapshot instead of providing live data and 
+    protecting access via locking should be faster.
+    
+    cf. libtorrent torrent_status
+    """
+    def __init__(self,download,status,error,progress,stats=None,filepieceranges=None,logmsgs=None,coopdl_helpers=[],coopdl_coordinator=None,peerid=None,videoinfo=None,swarmcache=None):
+        """ Internal constructor.
+        @param download The Download this state belongs to.
+        @param status The status of the Download (DLSTATUS_*)
+        @param error The Exception that stopped the Download, or None.
+        @param progress The general progress of the Download.
+        @param stats The BT engine statistics for the Download.
+        @param filepieceranges The range of pieces that we are interested in. 
+        The get_pieces_complete() returns only completeness information about 
+        this range. This is used for playing a video in a multi-torrent file.
+        @param logmsgs A list of messages from the BT engine which may be of 
+        interest to the user; see get_log_messages().
+        @param coopdl_helpers List of PermIDs of peers currently helping.
+        @param coopdl_coordinator PermID of the coordinator being helped.
+        @param swarmcache The latest SwarmCache known by the Download (RePEX).
+        """
+        # Raynor Vliegendhart, TODO: documentation of DownloadState seems incomplete?
+        # RePEX: @param swarmcache The latest SwarmCache known by Download. This
+        #        cache will be used when the download is not running.
+        # RePEX TODO: instead of being passed the latest SwarmCache, DownloadState could
+        # also query it from Download? Perhaps add get_swarmcache to Download(Impl)?
+        # NOTE(review): the peerid and videoinfo parameters are accepted but
+        # never stored or used in this constructor.
+        # NOTE(review): coopdl_helpers uses a mutable default ([]); it is only
+        # read in this class, so the shared-default pitfall does not bite here.
+        
+        self.download = download
+        self.filepieceranges = filepieceranges # NEED CONC CONTROL IF selected_files RUNTIME SETABLE
+        self.logmsgs = logmsgs
+        self.coopdl_helpers = coopdl_helpers
+        self.coopdl_coordinator = coopdl_coordinator
+        
+        # RePEX: stored swarmcache from Download and store current time
+        if swarmcache is not None:
+            self.swarmcache = dict(swarmcache)
+        else:
+            self.swarmcache = None
+        self.time = time.time()
+        
+        if stats is None:
+            # No info available yet from download engine
+            self.error = error # readonly access
+            self.progress = progress
+            if self.error is not None:
+                self.status = DLSTATUS_STOPPED_ON_ERROR
+            else:
+                self.status = status
+            self.stats = None
+        elif error is not None:
+            self.error = error # readonly access
+            self.progress = 0.0 # really want old progress
+            self.status = DLSTATUS_STOPPED_ON_ERROR
+            self.stats = None
+        elif status is not None and status != DLSTATUS_REPEXING:
+            # For HASHCHECKING and WAITING4HASHCHECK
+            self.error = error
+            self.status = status
+            if self.status == DLSTATUS_WAITING4HASHCHECK:
+                self.progress = 0.0
+            else:
+                self.progress = stats['frac']
+            self.stats = None
+        else:
+            # Copy info from stats
+            self.error = None
+            self.progress = stats['frac']
+            if stats['frac'] == 1.0:
+                self.status = DLSTATUS_SEEDING
+            else:
+                self.status = DLSTATUS_DOWNLOADING
+            #print >>sys.stderr,"STATS IS",stats
+            
+            # Safe to store the stats dict. The stats dict is created per
+            # invocation of the BT1Download returned statsfunc and contains no
+            # pointers.
+            #
+            self.stats = stats
+            
+            # for pieces complete
+            statsobj = self.stats['stats']
+            if self.filepieceranges is None:
+                self.haveslice = statsobj.have # is copy of network engine list
+            else:
+                # Show only pieces complete for the selected ranges of files
+                totalpieces =0
+                for t,tl,f in self.filepieceranges:
+                    diff = tl-t
+                    totalpieces += diff
+                    
+                #print >>sys.stderr,"DownloadState: get_pieces_complete",totalpieces
+                
+                haveslice = [False] * totalpieces
+                haveall = True
+                index = 0
+                for t,tl,f in self.filepieceranges:
+                    for piece in range(t,tl):
+                        haveslice[index] = statsobj.have[piece]
+                        if haveall and haveslice[index] == False:
+                            haveall = False
+                        index += 1 
+                self.haveslice = haveslice
+                if haveall and len(self.filepieceranges) > 0:
+                    # we have all pieces of the selected files
+                    self.status = DLSTATUS_SEEDING
+                    self.progress = 1.0
+            
+            # RePEX: REPEXING status overrides SEEDING/DOWNLOADING status.
+            if status is not None and status == DLSTATUS_REPEXING:
+                self.status = DLSTATUS_REPEXING
+            
+
+    def get_download(self):
+        """ Returns the Download object of which this is the state """
+        return self.download
+    
+    def get_progress(self):
+        """ The general progress of the Download as a percentage. When status is 
+         * DLSTATUS_HASHCHECKING it is the percentage of already downloaded 
+           content checked for integrity.
+         * DLSTATUS_DOWNLOADING/SEEDING it is the percentage downloaded.
+        @return Progress as a float (0..1).
+        """
+        return self.progress
+        
+    def get_status(self):
+        """ Returns the status of the torrent.
+        @return DLSTATUS_* """
+        return self.status
+
+    def get_error(self):
+        """ Returns the Exception that caused the download to be moved to 
+        DLSTATUS_STOPPED_ON_ERROR status.
+        @return Exception
+        """
+        return self.error
+
+    #
+    # Details
+    # 
+    def get_current_speed(self,direct):
+        """
+        Returns the current up or download speed.
+        @param direct UPLOAD or DOWNLOAD.
+        @return The speed in KB/s, as float.
+        """
+        if self.stats is None:
+            return 0.0
+        if direct == UPLOAD:
+            return self.stats['up']/1024.0
+        else:
+            return self.stats['down']/1024.0
+
+    def get_total_transferred(self,direct):
+        """
+        Returns the total amount of up or downloaded bytes.
+        @param direct UPLOAD or DOWNLOAD.
+        @return The amount in bytes.
+        """
+        if self.stats is None:
+            return 0L
+        # self.stats:          BitTornado.BT1.DownloaderFeedback.py (return from gather method)
+        # self.stats["stats"]: BitTornado.BT1.Statistics.py (Statistics_Response instance)
+        if direct == UPLOAD:
+            return self.stats['stats'].upTotal
+        else:
+            return self.stats['stats'].downTotal
+    
+    def get_eta(self):
+        """
+        Returns the estimated time to finish of download.
+        @return The engine's 'time' stat (presumably seconds - TODO confirm
+        against BitTornado's DownloaderFeedback); 0.0 when no stats available.
+        """
+        if self.stats is None:
+            return 0.0
+        else:
+            return self.stats['time']
+        
+    def get_num_peers(self):
+        """ 
+        Returns the download's number of active connections. This is used
+        to see if there is any progress when non-fatal errors have occured
+        (e.g. tracker timeout).
+        @return An integer.
+        """
+        if self.stats is None:
+            return 0
+
+        # Determine if we need statsobj to be requested, same as for spew
+        statsobj = self.stats['stats']
+        return statsobj.numSeeds+statsobj.numPeers
+        
+    def get_num_seeds_peers(self):
+        """
+        Returns the number of seeds and the number of non-seed peers.
+        This function works only if the Download.set_state_callback() / 
+        Session.set_download_states_callback() was called with the getpeerlist 
+        parameter set to True, otherwise returns (None,None)  
+        @return A tuple (num seeds, num peers)
+        """
+        if self.stats is None or self.stats['spew'] is None:
+            return (None,None)
+        
+        total = len(self.stats['spew'])
+        seeds = len([i for i in self.stats['spew'] if i['completed'] == 1.0])
+        return seeds, total-seeds
+    
+    def get_pieces_complete(self):
+        """ Returns a list of booleans indicating whether we have completely
+        received that piece of the content. The list of pieces for which 
+        we provide this info depends on which files were selected for download
+        using DownloadStartupConfig.set_selected_files().
+        @return A list of booleans
+        """
+        if self.stats is None:
+            return []
+        else:
+            return self.haveslice
+
+    def get_vod_prebuffering_progress(self):
+        """ Returns the percentage of prebuffering for Video-On-Demand already 
+        completed.
+        @return A float (0..1) """
+        if self.stats is None:
+            # A stopped, fully-downloaded torrent counts as fully prebuffered.
+            if self.status == DLSTATUS_STOPPED and self.progress == 1.0:
+                return 1.0
+            else:
+                return 0.0
+        else:
+            return self.stats['vod_prebuf_frac']
+    
+    def is_vod(self):
+        """ Returns if this download is currently in vod mode 
+        
+        @return A Boolean"""
+        if self.stats is None:
+            return False
+        else:
+            return self.stats['vod']
+    
+    def get_vod_playable(self):
+        """ Returns whether or not the Download started in Video-On-Demand
+        mode has sufficient prebuffer and download speed to be played out
+        to the user. 
+        @return Boolean.
+        """
+        if self.stats is None:
+            return False
+        else:
+            return self.stats['vod_playable']
+
+    def get_vod_playable_after(self):
+        """ Returns the estimated time until the Download started in Video-On-Demand
+        mode can be started to play out to the user. 
+        @return A number of seconds.
+        """
+        if self.stats is None:
+            # No engine stats yet: effectively "never" (2**31 seconds).
+            return float(2 ** 31)
+        else:
+            return self.stats['vod_playable_after']
+        
+    def get_vod_stats(self):
+        """ Returns a dictionary of collected VOD statistics. The keys contained are:
+        <pre>
+        'played' = number of pieces played. With seeking this may be more than npieces
+        'late' = number of pieces arrived after they were due
+        'dropped' = number of pieces lost
+        'stall' = estimation of time the player stalled, waiting for pieces (seconds)
+        'pos' = playback position, as an absolute piece number
+        'prebuf' = amount of prebuffering time that was needed (seconds,
+                   set when playback starts)
+        'firstpiece' = starting absolute piece number of selected file
+        'npieces' = number of pieces in selected file
+        </pre>, or no keys if no VOD is in progress.
+        @return Dict.
+        """
+        if self.stats is None:
+            return {}
+        else:
+            return self.stats['vod_stats']
+
+
+
+    def get_log_messages(self):
+        """ Returns the last 10 logged non-fatal error messages.
+        @return A list of (time,msg) tuples. Time is Python time() format. """
+        if self.logmsgs is None:
+            return []
+        else:
+            return self.logmsgs
+
+    def get_peerlist(self):
+        """ Returns a list of dictionaries, one for each connected peer
+        containing the statistics for that peer. In particular, the
+        dictionary contains the keys:
+        <pre>
+        'id' = PeerID or 'http seed'
+        'ip' = IP address as string or URL of httpseed
+        'optimistic' = True/False
+        'direction' = 'L'/'R' (outgoing/incoming)
+        'uprate' = Upload rate in KB/s
+        'uinterested' = Upload Interested: True/False
+        'uchoked' = Upload Choked: True/False
+        'downrate' = Download rate in KB/s
+        'dinterested' = Download interested: True/False
+        'dchoked' = Download choked: True/False
+        'snubbed' = Download snubbed: True/False
+        'utotal' = Total uploaded from peer in KB
+        'dtotal' = Total downloaded from peer in KB
+        'completed' = Fraction of download completed by peer (0-1.0) 
+        'speed' = The peer's current total download speed (estimated)
+        </pre>
+        """
+        if self.stats is None or 'spew' not in self.stats:
+            return []
+        else:
+            return self.stats['spew']
+
+
+    def get_coopdl_helpers(self):
+        """ Returns the peers currently helping.
+        @return A list of PermIDs.
+        """
+        if self.coopdl_helpers is None:
+            return []
+        else:
+            return self.coopdl_helpers 
+
+    def get_coopdl_coordinator(self):
+        """ Returns the permid of the coordinator when helping that peer
+        in a cooperative download
+        @return A PermID.
+        """
+        return self.coopdl_coordinator
+
+    #
+    # RePEX: get swarmcache
+    #
+    def get_swarmcache(self):
+        """
+        Gets the SwarmCache of the Download. If the Download was RePEXing,
+        the latest SwarmCache is returned. If the Download was running 
+        normally, a sample of the peerlist is merged with the last
+        known SwarmCache. If the Download was stopped, the last known
+        SwarmCache is returned.
+        
+        @return The latest SwarmCache for this Download, which is a dict 
+        mapping dns to a dict with at least 'last_seen' and 'pex' keys.
+        """
+        swarmcache = {}
+        if self.status == DLSTATUS_REPEXING and self.swarmcache is not None:
+            # the swarmcache given at construction comes from RePEXer 
+            swarmcache = self.swarmcache
+        elif self.status in [DLSTATUS_DOWNLOADING, DLSTATUS_SEEDING]:
+            # get local PEX peers from peerlist and fill swarmcache
+            # (only outgoing connections from which we received PEX)
+            peerlist = [p for p in self.get_peerlist() if p['direction']=='L' and p.get('pex_received',0)][:REPEX_SWARMCACHE_SIZE]
+            swarmcache = {}
+            for peer in peerlist:
+                dns = (peer['ip'], peer['port'])
+                swarmcache[dns] = {'last_seen':self.time,'pex':[]}
+            # fill remainder with peers from old swarmcache
+            if self.swarmcache is not None:
+                for dns in self.swarmcache.keys()[:REPEX_SWARMCACHE_SIZE-len(swarmcache)]:
+                    swarmcache[dns] = self.swarmcache[dns]
+            
+            # TODO: move peerlist sampling to a different module?
+            # TODO: perform swarmcache computation only once?
+        elif self.swarmcache is not None:
+            # In all other cases, use the old swarmcache
+            swarmcache = self.swarmcache
+            # TODO: rearrange if statement to merge 1st and 3rd case?
+            
+        return swarmcache
+        
diff --git a/instrumentation/next-share/BaseLib/Core/LiveSourceAuthConfig.py b/instrumentation/next-share/BaseLib/Core/LiveSourceAuthConfig.py
new file mode 100644 (file)
index 0000000..161cd17
--- /dev/null
@@ -0,0 +1,125 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+from BaseLib.Core.simpledefs import *
+import BaseLib.Core.Overlay.permid as permidmod 
+from BaseLib.Core.Utilities.Crypto import RSA_keypair_to_pub_key_in_der 
+from M2Crypto import RSA
+
+
+class LiveSourceAuthConfig:
+    """ Base class for configuring authentication methods for data from the
+    source in live streaming.
+    """
+    def __init__(self,authmethod):
+        # @param authmethod One of the LIVE_AUTHMETHOD_* constants
+        # (see subclasses below).
+        self.authmethod = authmethod
+        
+    def get_method(self):
+        """ Returns the configured authentication method (LIVE_AUTHMETHOD_*). """
+        return self.authmethod
+    
+    
+class ECDSALiveSourceAuthConfig(LiveSourceAuthConfig):
+    """ Class for configuring the ECDSA authentication method for data from the
+    source in live streaming. The ECDSA method adds a ECDSA signature to each
+    piece that is generated.
+    """
+    def __init__(self,keypair=None):
+        """ Constructor for LIVE_AUTHMETHOD_ECDSA authentication of the 
+        live source. If no keypair is specified, one is generated.
+        
+        @param keypair  (Optional) An M2Crypto.EC keypair.
+        """
+        LiveSourceAuthConfig.__init__(self,LIVE_AUTHMETHOD_ECDSA)
+        if keypair is None:
+            self.keypair = permidmod.generate_keypair()
+        else:
+            self.keypair = keypair
+
+    def get_pubkey(self):
+        """ Returns the public key as a DER-encoded str. """
+        return str(self.keypair.pub().get_der())
+    
+    def get_keypair(self):
+        """ Returns the M2Crypto.EC keypair. """
+        return self.keypair
+    
+    #
+    # Class method
+    #
+    def load(filename):
+        """
+        Load a saved ECDSALiveSourceAuthConfig from disk.
+        
+        @param filename  An absolute Unicode filename
+        @return ECDSALiveSourceAuthConfig object
+        """
+        keypair = permidmod.read_keypair(filename)
+        return ECDSALiveSourceAuthConfig(keypair)
+    load = staticmethod(load)
+
+    def save(self,filename):
+        """ Save the ECDSALiveSourceAuthConfig to disk.
+        @param filename  An absolute Unicode filename
+        """
+        permidmod.save_keypair(self.keypair,filename)
+    
+    
+class RSALiveSourceAuthConfig(LiveSourceAuthConfig):
+    """ Class for configuring the RSA authentication method for data from the
+    source in live streaming. The RSA method adds a RSA signature to each
+    piece that is generated.
+    """
+    def __init__(self,keypair=None):
+        """ Constructor for LIVE_AUTHMETHOD_RSA authentication of the 
+        live source. If no keypair is specified, one is generated
+        via rsa_generate_keypair() below.
+        
+        @param keypair  (Optional) An M2Crypto.RSA keypair.
+        """
+        LiveSourceAuthConfig.__init__(self,LIVE_AUTHMETHOD_RSA)
+        if keypair is None:
+            self.keypair = rsa_generate_keypair()
+        else:
+            self.keypair = keypair
+
+    def get_pubkey(self):
+        """ Returns the public key in DER encoding. """
+        return RSA_keypair_to_pub_key_in_der(self.keypair)
+    
+    def get_keypair(self):
+        """ Returns the M2Crypto.RSA keypair. """
+        return self.keypair
+    
+    #
+    # Class method
+    #
+    def load(filename):
+        """
+        Load a saved RSALiveSourceAuthConfig from disk.
+        
+        @param filename  An absolute Unicode filename
+        @return RSALiveSourceAuthConfig object
+        """
+        keypair = rsa_read_keypair(filename)
+        return RSALiveSourceAuthConfig(keypair)
+    load = staticmethod(load)
+
+    def save(self,filename):
+        """ Save the RSALiveSourceAuthConfig to disk.
+        @param filename  An absolute Unicode filename
+        """
+        rsa_write_keypair(self.keypair,filename)
+    
+    
+def rsa_generate_keypair():
+    """ Create keypair using default params, use __init__(keypair) parameter
+    if you want to use custom params.
+    @return An M2Crypto.RSA keypair.
+    """
+    # Choose fast exponent e. See Handbook of applied cryptography $8.2.2(ii)
+    # And small keysize, attackers have duration of broadcast to reverse 
+    # engineer key. 
+    # NOTE(review): 768-bit RSA with e=3 is weak by modern standards; the
+    # small size is deliberate per the comment above (key only needs to hold
+    # for the duration of the broadcast), but worth revisiting.
+    e = 3
+    keysize = 768
+    return RSA.gen_key(keysize,e)
+    
+def rsa_read_keypair(filename):
+    # Load an RSA keypair from file via M2Crypto.RSA.load_key.
+    return RSA.load_key(filename)
+
+def rsa_write_keypair(keypair,filename):
+    # cipher=None: the private key is written UNENCRYPTED to disk.
+    keypair.save_key(filename,cipher=None)
diff --git a/instrumentation/next-share/BaseLib/Core/Merkle/__init__.py b/instrumentation/next-share/BaseLib/Core/Merkle/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/Merkle/merkle.py b/instrumentation/next-share/BaseLib/Core/Merkle/merkle.py
new file mode 100644 (file)
index 0000000..167d6bc
--- /dev/null
@@ -0,0 +1,272 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+""" 
+Reference Implementation of Merkle hash torrent extension, as now 
+standardized in http://www.bittorrent.org/beps/bep_0030.html (yay!)
+"""
+
+from math import log,pow,floor
+from BaseLib.Core.Utilities.Crypto import sha
+import sys
+
+DEBUG = False
+
+# External classes
+
+class MerkleTree:
+    """ Merkle hash tree over the pieces of a torrent (BEP 30), stored as a
+    flat array in heap layout (parent of node i is (i-1)//2); the leaves at
+    offsets 2^height-1 .. hold the piece hashes.
+    """
+    
+    def __init__(self,piece_size,total_length,root_hash=None,hashes=None):
+        """
+        Create a Merkle hash tree
+
+        When creating a .torrent:
+            root_hash is None and hashes is not None
+        When creating an initial seeder:
+            root_hash is None and hashes is not None
+            (root_hash is None to allow comparison with the calculated
+             root hash and the one in the .torrent)
+        When creating a downloader:
+            root_hash is not None and hashes is None
+        """
+        self.npieces = len2npieces(piece_size,total_length)
+        self.treeheight = get_tree_height(self.npieces)
+        self.tree = create_tree(self.treeheight)
+        if hashes is None:
+            self.root_hash = root_hash
+        else:
+            fill_tree(self.tree,self.treeheight,self.npieces,hashes)
+            # root_hash is None during .torrent generation
+            if root_hash is None:
+                self.root_hash = self.tree[0]
+            else:
+                # Supplying both hashes and a root_hash is a usage error.
+                raise AssertionError, "merkle: if hashes not None, root_hash must be"
+
+    def get_root_hash(self):
+        return self.root_hash
+
+    def compare_root_hashes(self,other):
+        return self.root_hash == other
+
+    def get_hashes_for_piece(self,index):
+        # Returns [offset,hash] pairs proving piece `index` against the root.
+        return get_hashes_for_piece(self.tree,self.treeheight,index)
+
+    def check_hashes(self,hashlist):
+        # Verify a received hash path against our known root hash.
+        return check_tree_path(self.root_hash,self.treeheight,hashlist)
+
+    def update_hash_admin(self,hashlist,piece_hashes):
+        # Record a verified hash path into the tree and piece-hash list.
+        update_hash_admin(hashlist,self.tree,self.treeheight,piece_hashes)
+
+    def get_piece_hashes(self):
+        """
+        Get the pieces' hashes from the bottom of the hash tree. Used during
+        a graceful restart of a client that already downloaded stuff.
+        """
+        return get_piece_hashes(self.tree,self.treeheight,self.npieces)
+
+def create_fake_hashes(info):
+    # One 20-byte all-zero placeholder hash per piece of the torrent
+    # described by the .torrent `info` dict.
+    total_length = calc_total_length(info)
+    npieces = len2npieces(info['piece length'],total_length)
+    return ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * npieces
+
+
+# Internal functions
+# Design choice: all algoritmics have been returned into stateless functions,
+# i.e. they operate on the input parameters only. This to keep them extremely
+# clear.
+
+def len2npieces(piece_size,total_length):
+    # Ceiling division: number of pieces needed to cover total_length bytes
+    # (Python 2 int '/' truncates; add one piece if there is a remainder).
+    npieces = total_length / piece_size
+    if piece_size*npieces < total_length:
+        npieces += 1
+    return npieces
+
+
+def calc_total_length(info):
+    # Merkle: Calculate total length from .torrent info
+    # Single-file torrent: length is given directly.
+    if info.has_key('length'):
+        return info['length']
+    # multi-file torrent
+    files = info['files']
+    total_length = 0
+    for i in range(0,len(files)):
+        total_length += files[i]['length']
+    return total_length
+
+
+def get_tree_height(npieces):
+    # Smallest height h such that a tree of height h has >= npieces leaves,
+    # i.e. ceil(log2(npieces)).
+    if DEBUG:
+        print >> sys.stderr,"merkle: number of pieces is",npieces
+    height = log(npieces,2)
+    if height - floor(height) > 0.0:
+        height = int(height)+1
+    else:
+        height = int(height)
+    if DEBUG:
+        print >> sys.stderr,"merkle: tree height is",height
+    return height
+
+def create_tree(height):
+    # Create tree that has enough leaves to hold all hashes
+    # Flat array of 2^(height+1)-1 nodes, each a 20-byte zero placeholder.
+    treesize = int(pow(2,height+1)-1) # subtract unused tail
+    if DEBUG:
+        print >> sys.stderr,"merkle: treesize",treesize
+    tree = ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * treesize
+    return tree
+
+def fill_tree(tree,height,npieces,hashes):
+    # Populate the leaves with the piece hashes and compute all interior
+    # node hashes bottom-up; returns the (mutated) tree.
+    # 1. Fill bottom of tree with hashes
+    startoffset = int(pow(2,height)-1)
+    if DEBUG:
+        print >> sys.stderr,"merkle: bottom of tree starts at",startoffset
+    for offset in range(startoffset,startoffset+npieces):
+        #print >> sys.stderr,"merkle: copying",offset
+        #print >> sys.stderr,"merkle: hashes[",offset-startoffset,"]=",str(hashes[offset-startoffset])
+        tree[offset] = hashes[offset-startoffset]
+    # 2. Note that unused leaves are NOT filled. It may be a good idea to fill
+    # them as hashing 0 values may create a security problem. However, the
+    # filler values would have to be known to any initial seeder, otherwise it 
+    # will not be able build the same hash tree as the other initial seeders.
+    # Assume anyone should be able to autonomously become a seeder, the filler 
+    # must be public info. I don't know whether having public info as filler 
+    # instead of 0s is any safer, cryptographically speaking. Hence, we stick 
+    # with 0 for the moment
+
+    # 3. Calculate higher level hashes from leaves
+    # Each parent is the hash of the concatenation of its two children,
+    # left child first.
+    for level in range(height,0,-1):
+        if DEBUG:
+            print >> sys.stderr,"merkle: calculating level",level
+        for offset in range(int(pow(2,level)-1),int(pow(2,level+1)-2),2):
+            #print >> sys.stderr,"merkle: data offset",offset
+            [ parentstartoffset, parentoffset ] = get_parent_offset(offset,level)
+            #print >> sys.stderr,"merkle: parent offset",parentoffset                
+            data = tree[offset]+tree[offset+1]
+            digester = sha()
+            digester.update(data)
+            digest = digester.digest()
+            tree[parentoffset] = digest
+    #for offset in range(0,treesize-1):
+    #        print offset,"HASH",str(tree[offset])
+    return tree
+
+
+def get_hashes_for_piece(tree,height,index):
+    # Build the hash path for piece `index`: its own leaf hash, its sibling,
+    # then the sibling of each ancestor (the "uncles") up to the root.
+    # Returns a list of [offset,hash] pairs.
+    startoffset = int(pow(2,height)-1)
+    myoffset = startoffset+index
+    if DEBUG:
+        print >> sys.stderr,"merkle: myoffset",myoffset
+    # 1. Add piece's own hash
+    hashlist = [ [myoffset,tree[myoffset]] ]
+    # 2. Add hash of piece's sibling, left or right
+    # (even offsets are right children, so the sibling is to the left)
+    if myoffset % 2 == 0:
+        siblingoffset = myoffset-1
+    else:
+        siblingoffset = myoffset+1
+    if DEBUG:
+        print >> sys.stderr,"merkle: siblingoffset",siblingoffset
+    if siblingoffset != -1:
+        hashlist.append([siblingoffset,tree[siblingoffset]])
+    # 3. Add hashes of uncles
+    uncleoffset = myoffset
+    for level in range(height,0,-1):
+        uncleoffset = get_uncle_offset(uncleoffset,level)
+        if DEBUG:
+            print >> sys.stderr,"merkle: uncleoffset",uncleoffset
+        hashlist.append( [uncleoffset,tree[uncleoffset]] )
+    return hashlist
+
+
+def check_tree_path(root_hash,height,hashlist):
+    """
+    The hashes should be in the right order in the hashlist, otherwise
+    the peer will be kicked. The hashlist parameter is assumed to be
+    of the right type, and contain values of the right type as well.
+    The exact values should be checked for validity here.
+
+    Recomputes the root from the [offset,hash] path (leaf, sibling, then
+    uncles) and compares it to root_hash; offsets outside [0,maxoffset]
+    make the check fail immediately.
+    """
+    maxoffset = int(pow(2,height+1)-2)
+    mystartoffset = int(pow(2,height)-1)
+    i=0
+    a = hashlist[i]
+    if a[0] < 0 or a[0] > maxoffset:
+        return False
+    i += 1
+    b = hashlist[i]
+    if b[0] < 0 or b[0] > maxoffset:
+        return False
+    i += 1
+    myindex = a[0]-mystartoffset
+    sibindex = b[0]-mystartoffset
+    for level in range(height,0,-1):
+        if DEBUG:
+            print >> sys.stderr,"merkle: checking level",level
+        # a becomes the computed parent; b is the next uncle from the list.
+        a = check_fork(a,b,level)
+        b = hashlist[i]
+        if b[0] < 0 or b[0] > maxoffset:
+            return False
+        i += 1
+    if DEBUG:
+        print >> sys.stderr,"merkle: ROOT HASH",`str(root_hash)`,"==",`str(a[1])`
+    if a[1] == root_hash:
+        return True
+    else:
+        return False
+
+def update_hash_admin(hashlist,tree,height,hashes):
+    # Store a (verified) hash path into the tree; the first two entries are
+    # leaf hashes and are also recorded in the piece-hash list `hashes`.
+    mystartoffset = int(pow(2,height)-1)
+    for i in range(0,len(hashlist)):
+        if i < 2:
+            # me and sibling real hashes of piece data, save them
+            index = hashlist[i][0]-mystartoffset
+            # ignore siblings that are just tree filler
+            if index < len(hashes):
+                if DEBUG:
+                    print >> sys.stderr,"merkle: update_hash_admin: saving hash of",index
+                hashes[index] = hashlist[i][1]
+        # put all hashes in tree, such that we incrementally learn it 
+        # and can pass them on to others
+        tree[hashlist[i][0]] = hashlist[i][1]
+
+
+def check_fork(a,b,level):
+    # Hash two sibling [offset,hash] pairs into their parent: concatenate
+    # the child hashes in left-to-right offset order, SHA it, and return
+    # [parentoffset,digest].
+    myoffset = a[0]
+    siblingoffset = b[0]
+    if myoffset > siblingoffset:
+        data = b[1]+a[1]
+        if DEBUG:
+            print >> sys.stderr,"merkle: combining",siblingoffset,myoffset
+    else:
+        data = a[1]+b[1]
+        if DEBUG:
+            print >> sys.stderr,"merkle: combining",myoffset,siblingoffset
+    digester = sha()
+    digester.update(data)
+    digest = digester.digest()
+    [parentstartoffset, parentoffset ] = get_parent_offset(myoffset,level-1)
+    return [parentoffset,digest]
+
+def get_parent_offset(myoffset,level):
+    # Parent of a node at level+1 lives at level; with per-level base
+    # offsets 2^level-1 this is equivalent to the heap rule (i-1)//2.
+    parentstartoffset = int(pow(2,level)-1)
+    mystartoffset = int(pow(2,level+1)-1)
+    parentoffset = parentstartoffset + (myoffset-mystartoffset)/2
+    return [parentstartoffset, parentoffset]
+
+
+def get_uncle_offset(myoffset,level):
+    # Sibling of the node's parent ("uncle"); at level 1 the parent is the
+    # root, which has no sibling, hence the 0 sentinel.
+    if level == 1:
+        return 0
+    [parentstartoffset,parentoffset ] = get_parent_offset(myoffset,level-1)
+    if DEBUG:
+        print >> sys.stderr,"merkle: parent offset",parentoffset        
+    parentindex = parentoffset-parentstartoffset
+    if parentoffset % 2 == 0:
+        uncleoffset = parentoffset-1
+    else:
+        uncleoffset = parentoffset+1
+    return uncleoffset
+
+def get_piece_hashes(tree,height,npieces):
+    # Copy the npieces leaf hashes out of the tree's bottom level.
+    startoffset = int(pow(2,height)-1)
+    hashes = ['\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' ] * npieces
+    for offset in range(startoffset,startoffset+npieces):
+        hashes[offset-startoffset] = tree[offset]
+    return hashes
+
diff --git a/instrumentation/next-share/BaseLib/Core/Multicast/Multicast.py b/instrumentation/next-share/BaseLib/Core/Multicast/Multicast.py
new file mode 100644 (file)
index 0000000..d585ac4
--- /dev/null
@@ -0,0 +1,692 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+import socket
+import threading
+import struct
+import select
+import string
+import sys
+import time
+import random # for ping
+from traceback import print_exc
+
+import base64 # Must encode permid
+
+
+from BaseLib.Core.BuddyCast.buddycast import BuddyCastFactory
+
+
+DEBUG = False
+
+class MyLogger:
+
+    """
+    Dummy logger due to code re-use and no use of logger in Tribler
+    
+    """
+    enabled = DEBUG  # class-level switch: every method below is a no-op unless DEBUG is set
+    
+    def debug(self, message):
+        if self.enabled:
+            print >> sys.stderr, "pdisc: DEBUG:", message
+    
+    def info(self, message):
+        if self.enabled:
+            print >> sys.stderr, "pdisc: INFO:", message
+
+    def warning(self, message):
+        if self.enabled:
+            print >> sys.stderr, "pdisc: WARNING:", message
+
+    def error(self, message):
+        if self.enabled:
+            print >> sys.stderr, "pdisc: ERROR:", message
+
+    def fatal(self, message):
+        if self.enabled:
+            print >> sys.stderr, "pdisc: FATAL:", message
+
+    def exception(self, message):
+        if self.enabled:
+            print >> sys.stderr, "pdisc: EXCEPTION:", message
+            import traceback
+            traceback.print_exc()  # also dump the active traceback, not just the message
+            
+class Multicast:
+
+    """
+    This class allows nodes to communicate on a local network
+    using IP multicast
+    
+    """
+
+    def __init__(self, config, overlay_bridge, myport, myselversion, peerdb,
+                 logger=None, capabilities=None):
+        """
+        Initialize the multicast channel.  Parameters:
+          - multicast_ipv4_enabled
+          - multicast_ipv6_enabled
+          - multicast_port
+          - multicast_announce - True if the node should announce itself
+          - permid - The ID of the node
+          - multicast_ipv4_address
+          - multicast_ipv6_address
+          
+        If both ipv4_enabled and ipv6_enabled is false, the channel
+        will not do anything.
+
+        Other parameters:
+        logger - Send logs (debug/info/warning/error/exceptions) to a logger
+        capabilities - Announce a set of capabilities for this node.  Should
+                       be a list
+        
+        """
+        self.myport = myport
+        self.myselversion = myselversion
+        self.overlay_bridge = overlay_bridge
+        self.peer_db = peerdb
+        
+        if logger:
+            self.log = logger
+        else:
+            self.log = MyLogger()
+
+        self.config = config
+        self.capabilities = capabilities
+
+        self.enabled = False
+        self.announceHandlers = []
+        self.on_node_announce = None
+        self.incoming_pongs = {}
+
+        self.interfaces = []
+
+        self.address_family = socket.AF_INET
+        if self.config['multicast_ipv6_enabled']:
+            if not socket.has_ipv6:
+                self.log.warning("Missing IPv6 support")
+            else:
+                self.address_family = socket.AF_INET6
+        
+        self.sock = socket.socket(self.address_family,
+                                  socket.SOCK_DGRAM)
+        
+        self.sock.setsockopt(socket.SOL_SOCKET,
+                             socket.SO_REUSEADDR, 1)
+                
+        for res in socket.getaddrinfo(None,
+                                      self.config['multicast_port'],
+                                      self.address_family,
+                                      socket.SOCK_DGRAM, 0,
+                                      socket.AI_PASSIVE):
+            
+            af, socktype, proto, canonname, sa = res
+                
+            try:
+                self.sock.bind(sa)
+            except:
+                self.log.exception("Error binding")
+
+        try:
+            if self.config['multicast_ipv6_enabled']:
+                self.interfaces = self._joinMulticast(self.config['multicast_ipv6_address'],
+                                                      self.config['multicast_port'],
+                                                      self.sock)
+                self.enabled = True
+        except:
+            self.log.exception("Exception during IPv6 multicast join")
+
+        try:
+            if self.config['multicast_ipv4_enabled']:
+                self._joinMulticast(self.config['multicast_ipv4_address'],
+                                    self.config['multicast_port'],
+                                    self.sock)
+                self.enabled = True
+        except:
+            self.log.exception("Exception during IPv4 multicast join")
+
+        
+    def _getCapabilities(self, elements):
+        """
+        Return a list of capabilities from a list of elements - internal function
+        """
+        capabilities = []
+        for elem in elements:
+            if elem.startswith("c:"):
+                capabilities.append(elem[2:])
+        return capabilities
+
+    def getSocket(self):
+        return self.sock
+
+    def _joinMulticast(self, addr, port, sock):
+        """
+        Join a multicast channel - internal function
+        """
+        import struct
+        
+        for res in socket.getaddrinfo(addr,
+                                      port,
+                                      socket.AF_UNSPEC,
+                                      socket.SOCK_DGRAM):
+            
+            af, socktype, proto, canonname, sa = res
+            
+            break
+
+        if af == socket.AF_INET6:
+            # Smurf, must manually reconstruct "::"???
+            # Count the number of colons in the address
+            num_colons = addr.count(":")
+            
+            new_colons = ":"
+            
+            # Replace double colon with the appropriate number (7)
+            for i in range(num_colons, 8):
+                new_colons = "%s0:" % new_colons
+                
+            addr = addr.replace("::", new_colons)
+                
+            addr_pack = ''
+        
+            for l in addr.split(":"):
+                word = int(l,16)
+                addr_pack = addr_pack + struct.pack('!H', word)
+
+            # Now we try to join the first 32 interfaces
+            # Not too nice, but it is absolutely portable :-)
+            interfaces = []
+            for i in range (1, 32):
+                try:
+                    mreq = addr_pack + struct.pack('l', i)
+                
+                    # We're ready, at last
+                    sock.setsockopt(socket.IPPROTO_IPV6,
+                                    socket.IPV6_JOIN_GROUP,
+                                    mreq)
+                    ok = True
+                    self.log.debug("Joined IPv6 multicast on interface %d"%i)
+
+                    # We return the interface indexes that worked
+                    interfaces.append(i)
+                except Exception,e:
+                    pass
+
+            if len(interfaces) == 0:
+                self.log.fatal("Could not join on any interface")
+                raise Exception("Could not join multicast on any interface")
+
+            return interfaces
+
+        if af == socket.AF_INET:
+            
+            addr_pack = ''
+            grpaddr = 0
+            bytes = map(int, string.split(addr, "."))
+            for byte in bytes:
+                grpaddr = (grpaddr << 8) | byte
+                
+            # Construct struct mreq from grpaddr and ifaddr
+            ifaddr = socket.INADDR_ANY
+            mreq = struct.pack('ll',
+                               socket.htonl(grpaddr),
+                               socket.htonl(ifaddr))
+            
+            # Add group membership
+            try:
+                self.sock.setsockopt(socket.IPPROTO_IP,
+                                     socket.IP_ADD_MEMBERSHIP,
+                                     mreq)
+            except Exception,e:
+                self.log.exception("Exception joining IPv4 multicast")
+                
+            return []
+
+
+    def data_came_in(self, addr, data):
+        """
+        Callback function for arriving data.  This is non-blocking
+        and will return immediately after queuing the operation for
+        later processing. Called by NetworkThread
+        """
+        # Must queue this for actual processing, we're not allowed
+        # to block here
+        process_data_func = lambda:self._data_came_in_callback(addr, data)
+        self.overlay_bridge.add_task(process_data_func, 0)
+        
+        
+    def _data_came_in_callback(self, addr, data):
+        """
+        Handler function for when data arrives
+        """
+        
+        self.log.debug("Got a message from %s"%str(addr))
+        # Look at message
+        try:
+            elements = data.split("\n")
+
+            if elements[0] == "NODE_DISCOVER":
+                if len(elements) < 3:
+                    raise Exception("Too few elements")
+
+                # Only reply if I'm announcing
+                if not self.config["multicast_announce"]:
+                    self.log.debug("Not announcing myself")
+                    return
+
+                remotePermID = elements[2]
+                self.log.debug("Got node discovery from %s"%remotePermID)
+                # TODO: Do we reply to any node?
+
+                # Reply with information about me
+                permid_64 = base64.b64encode(self.config['permid']).replace("\n","")
+                msg = "NODE_ANNOUNCE\n%s"%permid_64
+
+                # Add capabilities
+                if self.capabilities:
+                    for capability in self.capabilities:
+                        msg += "\nc:%s"%capability
+                try:
+                    self.sock.sendto(msg, addr)
+                except Exception,e:
+                    self.log.error("Could not send announce message to %s: %s"%(str(addr), e))
+                    return
+                
+            elif elements[0] == "ANNOUNCE":
+                self.handleAnnounce(addr, elements)
+            elif elements[0] == "NODE_ANNOUNCE":
+                # Some node announced itself - handle callbacks if
+                # the app wants it
+                if self.on_node_announce:
+                    try:
+                        self.on_node_announce(elements[1], addr,
+                                              self._getCapabilities(elements))
+                    except Exception,e:
+                        self.log.exception("Exception handling node announce")
+            elif elements[0] == "PING":
+                permid = base64.b64decode(elements[1])
+                if permid == self.config["permid"]:
+                    # I should reply
+                    msg = "PONG\n%s\n%s"%(elements[1], elements[2])
+                    self._sendMulticast(msg)
+            elif elements[0] == "PONG":
+                nonce = int(elements[2])
+                if self.outstanding_pings.has_key(nonce):
+                    self.incoming_pongs[nonce] = time.time()
+            else:
+                self.log.warning("Got bad discovery message from %s"%str(addr))
+        except Exception,e:
+            self.log.exception("Illegal message '%s' from '%s'"%(data, addr[0]))
+
+               
+    def _send(self, addr, msg):
+        """
+        Send a message - internal function
+        """
+        
+        for res in socket.getaddrinfo(addr, self.config['multicast_port'],
+                                      socket.AF_UNSPEC,
+                                      socket.SOCK_DGRAM):
+            
+            af, socktype, proto, canonname, sa = res
+        try:
+            sock =  socket.socket(af, socktype)
+            sock.sendto(msg, sa)
+        except Exception,e:
+            self.log.warning("Error sending '%s...' to %s: %s"%(msg[:8], str(sa), e))
+
+        return sock
+
+    def discoverNodes(self, timeout=3.0, requiredCapabilities=None):
+        """
+        Try to find nodes on the local network and return them in a list
+        of touples on the form
+        (permid, addr, capabilities)
+
+        Capabilities can be an empty list
+
+        if requiredCapabilities is specified, only nodes matching one
+        or more of these will be returned
+        
+        """
+
+        # Create NODE_DISCOVER message
+        msg = "NODE_DISCOVER\nTr_OVERLAYSWARM node\npermid:%s"%\
+              base64.b64encode(self.config['permid']).replace("\n","")
+
+        # First send the discovery message
+        addrList = []
+        sockList = []
+        if self.config['multicast_ipv4_enabled']:
+            sockList.append(self._send(self.config['multicast_ipv4_address'], msg))
+            
+        if self.config['multicast_ipv6_enabled']:
+            for iface in self.interfaces:
+                sockList.append(self._send("%s%%%s"%(self.config['multicast_ipv6_address'], iface), msg))
+            
+        nodeList = []
+        endAt = time.time() + timeout
+        while time.time() < endAt:
+
+            # Wait for answers (these are unicast)
+            SelectList = sockList[:]
+
+            (InList, OutList, ErrList) = select.select(SelectList, [], [], 1.0)
+
+            if len(ErrList) < 0:
+                self.log.warning("Select gives error...")
+
+            while len(InList) > 0:
+
+                sock2 = InList.pop(0)
+
+                try:
+                    (data, addr) = sock2.recvfrom(1450)
+                except socket.error, e:
+                    self.log.warning("Exception receiving: %s"%e)
+                    continue
+                except Exception,e:
+                    print_exc()
+                    self.log.warning("Unknown exception receiving")
+                    continue
+
+                try:
+                    elements = data.split("\n")
+                    if len(elements) < 2:
+                        self.log.warning("Bad message from %s: %s"%(addr, data))
+                        continue
+
+                    if elements[0] != "NODE_ANNOUNCE":
+                        self.log.warning("Unknown message from %s: %s"%(addr, data))
+                        continue
+
+                    permid = base64.b64decode(elements[1])
+                    self.log.info("Discovered node %s at (%s)"%(permid, str(addr)))
+                    capabilities = self._getCapabilities(elements)
+                    if requiredCapabilities:
+                        ok = False
+                        for rc in requiredCapabilities:
+                            if rc in capabilities:
+                                ok = True
+                                break
+                        if not ok:
+                            continue
+                    nodeList.append((permid, addr, capabilities))
+                except Exception,e:
+                    self.log.warning("Could not understand message: %s"%e)
+                        
+        return nodeList
+
+    def sendNodeAnnounce(self):
+
+        """
+        Send a node announcement message on multicast
+
+        """
+
+        msg = "NODE_ANNOUNCE\n%s"%\
+              base64.b64encode(self.config['permid']).replace("\n","")
+
+        if self.capabilities:
+            for capability in self.capabilities:
+                msg += "\nc:%s"%capability
+        try:
+            self._sendMulticast(msg)
+        except:
+            self.log.error("Could not send announce message")
+
+
+    def setNodeAnnounceHandler(self, handler):
+
+        """
+        Add a handler function for multicast node announce messages
+
+        Will get a parameters (permid, address, capabilities)
+        
+        """
+        self.on_node_announce = handler
+        
+    def addAnnounceHandler(self, handler):
+
+        """
+        Add an announcement handler for announcement messages (not
+
+        node discovery)
+
+        The callback function will get parameters:
+           (permid, remote_address, parameter_list)
+
+        """
+        self.announceHandlers.append(handler)
+
+    def removeAnnouncehandler(self, handler):
+
+        """
+        Remove an announce handler (if present)
+        
+        """
+        try:
+            self.announceHandlers.remove(handler)
+        except:
+            #handler not in list, ignore
+            pass
+        
+    def handleAnnounce(self, addr, elements):
+
+        """
+        Process an announcement and call any callback handlers
+        
+        """
+
+        if elements[0] != "ANNOUNCE":
+            raise Exception("Announce handler called on non-announce: %s"%\
+                            elements[0])
+
+        # Announce should be in the form:
+        # ANNOUNCE
+        # base64 encoded permid
+        # numElements
+        # element1
+        # element2
+        # ...
+        if len(elements) < 3:
+            raise Exception("Bad announce, too few elements in message")
+
+        try:
+            permid = base64.b64decode(elements[1])
+            numElements = int(elements[2])
+        except:
+            raise Exception("Bad announce message")
+
+        if len(elements) < 3 + numElements:
+            raise Exception("Incomplete announce message")
+        
+        _list = elements[3:3+numElements]
+        
+        # Loop over list to longs if numbers
+        list = []
+        for elem in _list:
+            if elem.isdigit():
+                list.append(long(elem))
+            else:
+                list.append(elem)
+
+        if len(self.announceHandlers) == 0:
+            self.log.warning("Got node-announce, but I'm missing announce handlers")
+            
+        # Handle the message
+        for handler in self.announceHandlers:
+            try:
+                self.log.debug("Calling callback handler")
+                handler(permid, addr, list)
+            except:
+                self.log.exception("Could not activate announce handler callback '%s'"%handler)
+        
+
+    def handleOVERLAYSWARMAnnounce(self, permid, addr, params):
+        """ Callback function to handle multicast node announcements
+
+        This one will trigger an overlay connection and then initiate a buddycast
+        exchange
+        """
+        # todo: when the port or selversion change this will NOT be
+        # updated in the database. Solution: change the whole
+        # flag_peer_as_local_to_db into check_and_update_peer_in_db
+        # and let it check for the existence and current value of
+        # is_local, port, and selversion. (at no additional queries I
+        # might add)
+
+        self.log.debug("Got Tr_OVERLAYSWARM announce!")
+        port, selversion = params
+
+        if permid == self.config["permid"]:
+            self.log.debug("Discovered myself")
+            # Discovered myself, which is not interesting
+            return
+
+        if self.flag_peer_as_local_to_db(permid, True):
+            self.log.debug("node flagged as local")
+            # Updated ok
+            return
+
+        # We could not update - this is a new node!
+        try:
+            try:
+                self.log.debug("Adding peer at %s to database"%addr[0])
+                self.add_peer_to_db(permid, (addr[0], port), selversion)
+            except Exception,e:
+                print >> sys.stderr, "pdisc: Could not add node:",e
+
+            try:
+                self.flag_peer_as_local_to_db(permid, True)
+                self.log.debug("node flagged as local")
+            except Exception,e:
+                print >> sys.stderr, "pdisc: Could not flag node as local:",e
+
+            # Now trigger a buddycast exchange
+            bc_core = BuddyCastFactory.getInstance().buddycast_core
+            if bc_core:
+                self.log.debug("Triggering buddycast")
+                bc_core.startBuddyCast(permid)
+        finally:
+                # Also announce myself so that the remote node can see me!
+                params = [self.myport, self.myselversion]
+                self.log.debug("Sending announce myself")
+                try:
+                    self.sendAnnounce(params)
+                except:
+                    self.log.exception("Sending announcement")
+        
+    def sendAnnounce(self, list):
+
+        """
+        Send an announce on local multicast, if enabled
+        
+        """
+
+        if not self.enabled:
+            return
+
+        # Create ANNOUNCE message
+        msg = "ANNOUNCE\n%s\n%d\n"%\
+              (base64.b64encode(self.config['permid']).replace("\n",""), len(list))
+
+        for elem in list:
+            msg += "%s\n"%elem
+
+        self._sendMulticast(msg)
+
+    def _sendMulticast(self, msg):
+
+        """
+        Send a message buffer on the multicast channels
+        
+        """
+        
+        if self.config['multicast_ipv4_enabled']:
+            self._send(self.config['multicast_ipv4_address'], msg)
+        if self.config['multicast_ipv6_enabled']:
+            for iface in self.interfaces:
+                self._send("%s%%%s"%(self.config['multicast_ipv6_address'], iface), msg)
+
+        
+
+    def ping(self, permid, numPings=3):
+        """
+        Ping a node and return (avg time, min, max) or (None, None, None) if no answer
+        Only one node can be pinged at the time - else this function will not work!
+        """
+
+        self.outstanding_pings = {}
+        self.incoming_pongs = {}
+        
+        # Send a PING via multicast and wait for a multicast response.
+        # Using multicast for both just in case it is different from
+        # unicast
+
+        for i in range(0, numPings):
+            nonce = random.randint(0, 2147483647)
+            msg = "PING\n%s\n%s"%(base64.b64encode(permid).replace("\n",""), nonce)
+            self.outstanding_pings[nonce] = time.time()
+            self._sendMulticast(msg)
+            time.sleep(0.250)
+            
+        # Now we gather the results
+        time.sleep(0.5)
+
+        if len(self.incoming_pongs) == 0:
+            return (None, None, None)
+        
+        max = 0
+        min = 2147483647
+        total = 0
+        num = 0
+        for nonce in self.outstanding_pings.keys():
+            if self.incoming_pongs.has_key(nonce):
+                diff = self.incoming_pongs[nonce] - self.outstanding_pings[nonce]
+                if diff > max:
+                    max = diff
+                if diff < min:
+                    min = diff
+                total += diff
+                num += 1
+
+        avg = total/num
+
+        self.outstanding_pings = {}
+        self.incoming_pongs = {}
+        return (avg, min, max)
+
+    def add_peer_to_db(self,permid,dns,selversion):    
+        # todo: should is_local be set to True?
+        now = int(time.time())
+        peer_data = {'permid':permid, 'ip':dns[0], 'port':dns[1], 'oversion':selversion, 'last_seen':now, 'last_connected':now}
+        self.peer_db.addPeer(permid, peer_data, update_dns=True, update_connected=True, commit=True)
+
+    def flag_peer_as_local_to_db(self, permid, is_local):
+        """
+        Sets the is_local flag for PERMID to IS_LOCAL if and only if
+        PERMID exists in the database, in this case it returns
+        True. Otherwise it returns False.
+        """
+        peer = self.peer_db.getPeer(permid, ('is_local',))
+        
+        print >>sys.stderr,"pdisc: flag_peer_as_local returns",peer
+        
+        if not peer is None:
+            # Arno, 2010-02-09: Somehow return value is not std.
+            if isinstance(peer,list):
+                flag = peer[0]
+            else:
+                flag = peer
+            if not flag == is_local:
+                self.peer_db.setPeerLocalFlag(permid, is_local)
+            return True
+        return False
+            
+        # if is_local:
+        #     pass
+            ##print >>sys.stderr,"pdisc: Flagging a peer as local"
+        # return self.peer_db.setPeerLocalFlag(permid, is_local)
+
diff --git a/instrumentation/next-share/BaseLib/Core/Multicast/__init__.py b/instrumentation/next-share/BaseLib/Core/Multicast/__init__.py
new file mode 100644 (file)
index 0000000..9c0a205
--- /dev/null
@@ -0,0 +1,35 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+"""
+A local multicast discovery and communication
+
+Simple usage example:
+
+For config, please view the Multicast documentation.
+
+channel = multicast.Multicast(config)
+for (id, address, capabilities) in channel.discoverNodes():
+   print "Found node",id,"at",address,"with capabilities:",capabilities
+
+# Sending and handling announcements:
+def on_announce(id, addr, list):
+    print "Got an announcement from node",id,"at",addr,":",list
+    
+channel = multicast.Multicast(config)
+channel.addAnnounceHandler(on_announce)
+channel.sendAnnounce(['element1', 'element2', 'element3'])
+
+# Handle multicast node announcements directly, with capabilities too
+def on_node_announce(addr, id, capabilities):
+    print "Got a node announcement from",id,"at",addr,"with capabilities:",capabilities
+
+myCapabilities = ["CAN_PRINT", "CAN_FAIL"]
+channel = multicast.Multicast(config, capabilities=myCapabilities)
+channel.setNodeAnnounceHandler(on_node_announce)
+
+For more examples, take a look at the unit tests (MulticastTest)
+
+"""
+
+from Multicast import *
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/ConnectionCheck.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/ConnectionCheck.py
new file mode 100644 (file)
index 0000000..5c78a33
--- /dev/null
@@ -0,0 +1,153 @@
+import sys
+from time import sleep
+import thread
+import random
+from BaseLib.Core.NATFirewall.NatCheck import GetNATType
+from BaseLib.Core.NATFirewall.TimeoutCheck import GetTimeout
+
+DEBUG = False
+
+class ConnectionCheck:
+
+    __single = None
+
+    def __init__(self, session):
+        if ConnectionCheck.__single:
+            raise RuntimeError, "ConnectionCheck is singleton"
+        ConnectionCheck.__single = self
+        self._lock = thread.allocate_lock()
+        self._running = False
+        self.session = session
+        self.permid = self.session.get_permid()
+        self.nat_type = None
+        self.nat_timeout = 0
+        self._nat_callbacks = [] # list with callback functions that want to know the nat_type
+        self.natcheck_reply_callbacks = [] # list with callback functions that want to send a natcheck_reply message
+
+    @staticmethod
+    def getInstance(*args, **kw):
+        if ConnectionCheck.__single is None:
+            ConnectionCheck(*args, **kw)
+        return ConnectionCheck.__single
+
+    def try_start(self, reply_callback = None):
+
+        if reply_callback: self.natcheck_reply_callbacks.append(reply_callback)
+
+        if DEBUG:
+            if self._running:
+                print >>sys.stderr, "natcheckmsghandler: the thread is already running"
+            else:
+                print >>sys.stderr, "natcheckmsghandler: starting the thread"
+            
+        if not self._running:
+            thread.start_new_thread(self.run, ())
+
+            while True:
+                sleep(0)
+                if self._running:
+                    break
+
+    def run(self):
+        self._lock.acquire()
+        self._running = True
+        self._lock.release()
+
+        try:
+            self.nat_discovery()
+
+        finally:
+            self._lock.acquire()
+            self._running = False
+            self._lock.release()
+
+    def timeout_check(self, pingback):
+        """
+        Find out NAT timeout
+        """
+        return GetTimeout(pingback)
+
+    def natcheck(self, in_port, server1, server2):
+        """
+        Find out NAT type and public address and port
+        """        
+        nat_type, ex_ip, ex_port, in_ip = GetNATType(in_port, server1, server2)
+        if DEBUG: print >> sys.stderr, "NATCheck:", "NAT Type: " + nat_type[1]
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Public Address: " + ex_ip + ":" + str(ex_port)
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Private Address: " + in_ip + ":" + str(in_port)
+        return nat_type, ex_ip, ex_port, in_ip
+
+    def get_nat_type(self, callback=None):
+        """
+        When a callback parameter is supplied it will always be
+        called. When the NAT-type is already known the callback will
+        be made instantly. Otherwise, the callback will be made when
+        the NAT discovery has finished.
+        """
+        if self.nat_type:
+            if callback:
+                callback(self.nat_type)
+            return self.nat_type
+        else:
+            if callback:
+                self._nat_callbacks.append(callback)
+            self.try_start()
+            return "Unknown NAT/Firewall"
+
+    def _perform_nat_type_notification(self):
+        nat_type = self.get_nat_type()
+        callbacks = self._nat_callbacks
+        self._nat_callbacks = []
+
+        for callback in callbacks:
+            try:
+                callback(nat_type)
+            except:
+                pass
+
+    def nat_discovery(self):
+        """
+        Main method of the class: launches nat discovery algorithm
+        """
+        in_port = self.session.get_puncturing_internal_port()
+        stun_servers = self.session.get_stun_servers()
+        random.seed()
+        random.shuffle(stun_servers)
+        stun1 = stun_servers[1]  # NOTE(review): assumes at least two stun servers configured -- IndexError otherwise
+        stun2 = stun_servers[0]
+        pingback_servers = self.session.get_pingback_servers()
+        random.shuffle(pingback_servers)
+
+        if DEBUG: print >> sys.stderr, "NATCheck:", 'Starting ConnectionCheck on %s %s %s' % (in_port, stun1, stun2)
+
+        performed_nat_type_notification = False
+
+        # Check what kind of NAT the peer is behind
+        nat_type, ex_ip, ex_port, in_ip = self.natcheck(in_port, stun1, stun2)
+        self.nat_type = nat_type[1]
+
+        # notify any callbacks interested in the nat_type only
+        self._perform_nat_type_notification()
+        performed_nat_type_notification = True
+
+
+        # If there is any callback interested, check the UDP timeout of the NAT the peer is behind
+        if len(self.natcheck_reply_callbacks):
+
+            if nat_type[0] > 0:
+                for pingback in pingback_servers:
+                    if DEBUG: print >> sys.stderr, "NatCheck: pingback is:", pingback
+                    self.nat_timeout = self.timeout_check(pingback)
+                    if self.nat_timeout <= 0: break  # NOTE(review): stops at the first non-positive timeout -- presumably 'no answer'; confirm against GetTimeout
+                if DEBUG: print >> sys.stderr, "NATCheck: Nat UDP timeout is: ", str(self.nat_timeout)
+
+            self.nat_params = [nat_type[1], nat_type[0], self.nat_timeout, ex_ip, int(ex_port), in_ip, in_port]
+            if DEBUG: print >> sys.stderr, "NATCheck:", str(self.nat_params)
+
+            # notify any callbacks interested in sending a natcheck_reply message
+            for reply_callback in self.natcheck_reply_callbacks:
+                reply_callback(self.nat_params)
+            self.natcheck_reply_callbacks = []
+
+        if not performed_nat_type_notification:
+            self._perform_nat_type_notification()
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/DialbackMsgHandler.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/DialbackMsgHandler.py
new file mode 100644 (file)
index 0000000..69b677d
--- /dev/null
@@ -0,0 +1,467 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# The dialback-message extension serves to (1)~see if we're externally reachable
+# and (2)~to tell us what our external IP address is. When an overlay connection
+# is made while we're in dialback mode, we will send a DIALBACK_REQUEST message
+# over the overlay connection. The peer is then supposed to initiate a new
+# BT connection with infohash 0x00 0x00 ... 0x01 and send a DIALBACK_REPLY over
+# that connection. Those connections are referred to as ReturnConnections.
+#
+# TODO: security problem: if a malicious peer connects to us 7 times and tells
+# us the same bad external IP each time, we believe him. Sol: only use locally
+# initiated conns + IP address check (BC2 message could be used to attack
+# still)
+#
+# TODO: Arno,2007-09-18: Bittorrent mainline tracker e.g. 
+# http://tracker.publish.bittorrent.com:6969/announce
+# now also returns your IP address in the reply, i.e. there is a
+# {'external ip': '\x82%\xc1@'}
+# in the dict. We should use this info.
+#
+
+import sys
+from time import time
+from random import shuffle
+from traceback import print_exc,print_stack
+from threading import currentThread
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+
+from BaseLib.Core.NATFirewall.ReturnConnHandler import ReturnConnHandler
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_THIRD
+from BaseLib.Core.Utilities.utilities import *
+from BaseLib.Core.simpledefs import *
+
+DEBUG = False
+
+#
+# Constants
+# 
+
+REPLY_WAIT = 60 # seconds
+REPLY_VALIDITY = 2*24*3600.0    # seconds
+
+# Normally, one would allow just one majority to possibly exist. However,
+# as current Buddycast has a lot of stale peer addresses, let's make
+# PEERS_TO_ASK not 5 but 7.
+#
+PEERS_TO_AGREE = 4   # peers have to say X is my IP before I believe them
+YOURIP_PEERS_TO_AGREE = 16 # peers have to say X is my IP via 'yourip' in EXTEND hs before I believe them
+PEERS_TO_ASK   = 7   # maximum number of outstanding requests 
+MAX_TRIES      = 35  # 5 times 7 peers
+
+class DialbackMsgHandler:
+    
+    __single = None
+    
+    def __init__(self):
+        """ Set up singleton state with safe defaults; the real wiring to
+        the overlay/network machinery happens later in register(). """
+        if DialbackMsgHandler.__single:
+            raise RuntimeError, "DialbackMsgHandler is singleton"
+        DialbackMsgHandler.__single = self
+
+        self.peers_asked = {}   # permid -> {'dns':(ip,port),'reqtime':ts} outstanding requests
+        self.myips = []         # [ip, timestamp] opinions collected from DIALBACK_REPLYs
+        self.consensusip = None # IP address according to peers
+        self.fromsuperpeer = False  # was consensusip taken from a trusted superpeer?
+        self.dbreach = False    # Did I get any DIALBACK_REPLY?
+        self.btenginereach = False # Did BT engine get incoming connections?
+        self.ntries = 0        
+        self.active = False     # Need defaults for test code
+        self.rawserver = None
+        self.launchmany = None
+        self.peer_db = None
+        self.superpeer_db = None
+        self.trust_superpeers = None
+        self.old_ext_ip = None
+        self.myips_according_to_yourip = []  # opinions from 'yourip' in EXTEND handshakes
+        self.returnconnhand = ReturnConnHandler.getInstance()
+        
+
+    def getInstance(*args, **kw):
+        """ Return the process-wide DialbackMsgHandler, creating it on first
+        use. NOTE(review): lazy init is not guarded by a lock - confirm all
+        callers run on the same (main) thread. """
+        if DialbackMsgHandler.__single is None:
+            DialbackMsgHandler(*args, **kw)
+        return DialbackMsgHandler.__single
+    getInstance = staticmethod(getInstance)
+        
+    def register(self,overlay_bridge,launchmany,rawserver,config):
+        """ Called by MainThread """
+        self.overlay_bridge = overlay_bridge
+        self.rawserver = rawserver
+        self.launchmany = launchmany
+        self.peer_db = launchmany.peer_db 
+        self.superpeer_db = launchmany.superpeer_db 
+        self.active = config['dialback_active'],
+        self.trust_superpeers = config['dialback_trust_superpeers']
+        self.returnconnhand.register(self.rawserver,launchmany.multihandler,launchmany.listen_port,config['overlay_max_message_length'])
+        self.returnconnhand.register_conns_callback(self.network_handleReturnConnConnection)
+        self.returnconnhand.register_recv_callback(self.network_handleReturnConnMessage)
+        self.returnconnhand.start_listening()
+
+        self.old_ext_ip = launchmany.get_ext_ip()
+
+
+    def register_yourip(self,launchmany):
+        """ Called by MainThread. Stores launchmany so that
+        network_btengine_extend_yourip can report a 'yourip' consensus. """
+        self.launchmany = launchmany
+
+
+    def olthread_handleSecOverlayConnection(self,exc,permid,selversion,locally_initiated):
+        """
+        Called from OverlayApps to signal there is an overlay-connection,
+        see if we should ask it to dialback
+        """
+        # Called by overlay thread
+        if DEBUG:
+            print >> sys.stderr,"dialback: handleConnection",exc,"v",selversion,"local",locally_initiated
+        if selversion < OLPROTO_VER_THIRD:
+            return True
+        
+        if exc is not None:
+            # Connection lost: drop any outstanding dialback request to it.
+            try:
+                del self.peers_asked[permid]
+            except:
+                if DEBUG:
+                    print >> sys.stderr,"dialback: handleConnection: Got error on connection that we didn't ask for dialback"
+                pass
+            # NOTE(review): this path returns None while the others return
+            # True - confirm callers ignore the return value on error.
+            return
+        
+        if self.consensusip is None:
+            self.ntries += 1
+            if self.ntries >= MAX_TRIES:
+                if DEBUG:
+                    print >> sys.stderr,"dialback: tried too many times, giving up"
+                return True
+            
+            if self.dbreach or self.btenginereach:
+                self.launchmany.set_activity(NTFY_ACT_GET_EXT_IP_FROM_PEERS)
+            else:
+                self.launchmany.set_activity(NTFY_ACT_REACHABLE)
+
+            # Also do this when the connection is not locally initiated.
+            # That tells us that we're connectable, but it doesn't tell us
+            # our external IP address.
+            # NOTE(review): register() stores config['dialback_active'] with a
+            # trailing comma, so self.active is a 1-tuple and this test is
+            # always true - confirm and fix in register().
+            if self.active:
+                self.olthread_attempt_request_dialback(permid)
+        return True
+            
+    def olthread_attempt_request_dialback(self,permid):
+        # Called by overlay thread
+        if DEBUG:
+            print >> sys.stderr,"dialback: attempt dialback request",show_permid_short(permid)
+                    
+        dns = self.olthread_get_dns_from_peerdb(permid)
+        ipinuse = False
+
+        # 1. Remove peers we asked but didn't succeed in connecting back 
+        threshold = time()-REPLY_WAIT
+        newdict = {}
+        for permid2,peerrec in self.peers_asked.iteritems():
+            if peerrec['reqtime'] >= threshold:
+                newdict[permid2] = peerrec
+            if peerrec['dns'][0] == dns[0]:
+                ipinuse = True
+        self.peers_asked = newdict
+
+        # 2. Already asked?
+        if permid in self.peers_asked or ipinuse or len(self.peers_asked) >= PEERS_TO_ASK:
+            # ipinuse protects a little against attacker that want us to believe
+            # we have a certain IP address.
+            if DEBUG:
+                pipa = permid in self.peers_asked
+                lpa = len(self.peers_asked)
+                print >> sys.stderr,"dialback: No request made to",show_permid_short(permid),"already asked",pipa,"IP in use",ipinuse,"nasked",lpa
+
+            return
+        dns = self.olthread_get_dns_from_peerdb(permid)
+        
+        # 3. Ask him to dialback
+        peerrec = {'dns':dns,'reqtime':time()}
+        self.peers_asked[permid] = peerrec
+        self.overlay_bridge.connect(permid,self.olthread_request_connect_callback)
+    
+    def olthread_request_connect_callback(self,exc,dns,permid,selversion):
+        """ Overlay-connect callback: on success send a DIALBACK_REQUEST
+        (empty payload) to the peer; requires protocol version >= THIRD. """
+        # Called by overlay thread
+        if exc is None:
+            if selversion >= OLPROTO_VER_THIRD:
+                self.overlay_bridge.send(permid, DIALBACK_REQUEST+'',self.olthread_request_send_callback)
+            elif DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REQUEST: peer speaks old protocol, weird",show_permid_short(permid)
+        elif DEBUG:
+            print >> sys.stderr,"dialback: DIALBACK_REQUEST: error connecting to",show_permid_short(permid),exc
+
+
+    def olthread_request_send_callback(self,exc,permid):
+        """ Send callback for DIALBACK_REQUEST; failures are only logged. """
+        # Called by overlay thread
+        if exc is not None:
+            if DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REQUEST error sending to",show_permid_short(permid),exc
+            pass
+
+    def olthread_handleSecOverlayMessage(self,permid,selversion,message):
+        """
+        Handle incoming DIALBACK_REQUEST messages
+
+        Returns True if the message was accepted for processing, False for
+        unknown message types.
+        """
+        # Called by overlay thread
+        t = message[0]  # first byte is the message ID
+        
+        if t == DIALBACK_REQUEST:
+            if DEBUG:
+                print >> sys.stderr,"dialback: Got DIALBACK_REQUEST",len(message),show_permid_short(permid)
+            return self.olthread_process_dialback_request(permid, message, selversion)
+        else:
+            if DEBUG:
+                print >> sys.stderr,"dialback: UNKNOWN OVERLAY MESSAGE", ord(t)
+            return False
+
+
+    def olthread_process_dialback_request(self,permid,message,selversion):
+        """ Validate a DIALBACK_REQUEST (must be exactly the 1-byte message
+        ID) and open a return connection to the requester's address so the
+        network thread can send a DIALBACK_REPLY. """
+        # Called by overlay thread
+        # 1. Check
+        if len(message) != 1:
+            if DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REQUEST: message too big"
+            return False
+
+        # 2. Retrieve peer's IP address
+        # NOTE(review): dns may be None when the peer is not in peer_db -
+        # confirm connect_dns tolerates that.
+        dns = self.olthread_get_dns_from_peerdb(permid)
+
+        # 3. Send back reply
+        # returnconnhand uses the network thread to do stuff, so the callback
+        # will be made by the network thread
+        self.returnconnhand.connect_dns(dns,self.network_returnconn_reply_connect_callback)
+
+        # 4. Message processed OK, don't know about sending of reply though
+        return True
+
+
+    def network_returnconn_reply_connect_callback(self,exc,dns):
+        """ Return-connection established: bencode the peer's IP as we see
+        it and send it back as a DIALBACK_REPLY. """
+        # Called by network thread
+        
+        if not currentThread().getName().startswith("NetworkThread"):
+            print >>sys.stderr,"dialback: network_returnconn_reply_connect_callback: called by",currentThread().getName()," not NetworkThread"
+            print_stack()
+        
+        if exc is None:
+            hisip = str(dns[0]) # Arno, 2010-01-28: protection against DB returning Unicode IPs 
+            try:
+                reply = bencode(hisip)
+                if DEBUG:
+                    print >> sys.stderr,"dialback: DIALBACK_REPLY: sending to",dns
+                self.returnconnhand.send(dns, DIALBACK_REPLY+reply, self.network_returnconn_reply_send_callback)
+            except:
+                print_exc()
+                # NOTE(review): only this path returns False; the other paths
+                # return None - confirm the caller ignores the return value.
+                return False
+        elif DEBUG:
+            print >> sys.stderr,"dialback: DIALBACK_REPLY: error connecting to",dns,exc
+
+    def network_returnconn_reply_send_callback(self,exc,dns):
+        """ Send callback for DIALBACK_REPLY; failures are only logged. """
+        # Called by network thread
+        if DEBUG:
+            print >> sys.stderr,"dialback: DIALBACK_REPLY: send callback:",dns,exc
+
+        
+        if exc is not None:
+            if DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REPLY: error sending to",dns,exc
+            pass
+
+    #
+    # Receipt of connection that would carry DIALBACK_REPLY 
+    #
+    def network_handleReturnConnConnection(self,exc,dns,locally_initiated):
+        """ Connection-level callback for return connections; nothing to do
+        here, DIALBACK_REPLY handling happens in the message callback. """
+        # Called by network thread
+        if DEBUG:
+            print >> sys.stderr,"dialback: DIALBACK_REPLY: Got connection from",dns,exc
+        pass
+
+    def network_handleReturnConnMessage(self,dns,message):
+        """ Dispatch a message received on a return connection: hand a
+        DIALBACK_REPLY to the overlay thread and close the connection.
+        Returns False for unknown message types. """
+        # Called by network thread
+        t = message[0]  # first byte is the message ID
+        
+        if t == DIALBACK_REPLY:
+            if DEBUG:
+                print >> sys.stderr,"dialback: Got DIALBACK_REPLY",len(message),dns
+
+            # Hand over processing to overlay thread
+            olthread_process_dialback_reply_lambda = lambda:self.olthread_process_dialback_reply(dns, message)
+            self.overlay_bridge.add_task(olthread_process_dialback_reply_lambda,0)
+        
+            # We're done and no longer need the return connection, so
+            # call close explicitly
+            self.returnconnhand.close(dns)
+            return True
+        else:
+            if DEBUG:
+                print >> sys.stderr,"dialback: UNKNOWN RETURNCONN MESSAGE", ord(t)
+            return False
+
+
+    def olthread_process_dialback_reply(self,dns,message):
+        """ Process a DIALBACK_REPLY: accept it only from a peer we asked,
+        bdecode and validate the reported IP, then either trust it outright
+        (superpeer, when configured) or tally it until PEERS_TO_AGREE peers
+        agree. Returns True on success, False on any validation failure. """
+        # Called by overlay thread
+        
+        # 1. Yes, we're reachable, now just matter of determining ext IP
+        self.dbreach = True
+        
+        # 2. Authentication: did I ask this peer?
+        permid = self.olthread_permid_of_asked_peer(dns)
+        if permid is None:
+            if DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REPLY: Got reply from peer I didn't ask",dns
+            return False
+
+        del self.peers_asked[permid]
+
+        # 3. See what he sent us
+        try:
+            myip = bdecode(message[1:])
+        except:
+            print_exc()
+            if DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REPLY: error becoding"
+            return False
+        if not isValidIP(myip):
+            if DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REPLY: invalid IP"
+            return False
+
+
+        # 4. See if superpeer, then we're done, trusted source 
+        if self.trust_superpeers:
+            superpeers = self.superpeer_db.getSuperPeers()
+            if permid in superpeers:
+                if DEBUG:
+                    print >> sys.stderr,"dialback: DIALBACK_REPLY: superpeer said my IP address is",myip,"setting it to that"
+                self.consensusip = myip
+                self.fromsuperpeer = True
+        else:
+            # 5, 6. 7, 8. Record this peers opinion and see if we get a 
+            # majority vote.
+            #
+            self.myips,consensusip = tally_opinion(myip,self.myips,PEERS_TO_AGREE)
+            if self.consensusip is None:
+                self.consensusip = consensusip 
+
+        # 8. Change IP address if different
+        # NOTE(review): this callback fires again on every later reply once
+        # consensusip is set - confirm dialback_got_ext_ip_callback is
+        # idempotent.
+        if self.consensusip is not None:
+            
+            self.launchmany.dialback_got_ext_ip_callback(self.consensusip)
+            if DEBUG:
+                print >> sys.stderr,"dialback: DIALBACK_REPLY: I think my IP address is",self.old_ext_ip,"others say",self.consensusip,", setting it to latter"
+
+        # 9. Notify GUI that we are connectable
+        self.launchmany.dialback_reachable_callback()
+
+        return True
+    
+    
+
+    #
+    # Information from other modules
+    #
+    def network_btengine_reachable_callback(self):
+        """ Called by network thread when the BT engine saw an incoming
+        connection; records reachability and notifies the GUI. """
+        if self.launchmany is not None:
+            self.launchmany.dialback_reachable_callback()
+            
+        # network thread updating our state. Ignoring concurrency, as this is a
+        # one time op.
+        self.btenginereach = True
+
+    def isConnectable(self):
+        """ Called by overlay (BuddyCast) and network (Rerequester) thread 
+        and now also any thread via Session.get_externally_reachable().
+
+        True once either a DIALBACK_REPLY arrived or the BT engine saw an
+        incoming connection. """
+
+        # network thread updating our state. Ignoring concurrency, as these
+        # variables go from False to True once and stay there, or remain False
+        return self.dbreach or self.btenginereach
+
+
+    def network_btengine_extend_yourip(self,myip):
+        """ Called by Connecter when we receive an EXTEND handshake that 
+        contains an yourip line. Tallies the opinion and, once
+        YOURIP_PEERS_TO_AGREE peers agree, records the consensus address.
+        
+        TODO: weigh opinion based on whether we locally initiated the connection
+        from a trusted tracker response, or that the address came from ut_pex.
+        """
+        self.myips_according_to_yourip, yourip_consensusip = tally_opinion(myip,self.myips_according_to_yourip,YOURIP_PEERS_TO_AGREE)
+        if DEBUG:
+            print >> sys.stderr,"dialback: yourip: someone said my IP is",myip
+        if yourip_consensusip is not None:
+            self.launchmany.yourip_got_ext_ip_callback(yourip_consensusip)
+            if DEBUG:
+                print >> sys.stderr,"dialback: yourip: I think my IP address is",self.old_ext_ip,"others via EXTEND hs say",yourip_consensusip,"recording latter as option"
+
+    #
+    # Internal methods
+    #
+    def olthread_get_dns_from_peerdb(self,permid):
+        """ Look up (ip, port) for permid in peer_db, resolving hostnames to
+        IP addresses. Returns None if the peer is unknown. """
+        dns = None
+        peer = self.peer_db.getPeer(permid)
+        #print >>sys.stderr,"dialback: get_dns_from_peerdb: Got peer",peer
+        if peer:
+            ip = self.to_real_ip(peer['ip'])
+            dns = (ip, int(peer['port']))
+        return dns
+
+    def to_real_ip(self,hostname_or_ip):
+        """ If it's a hostname convert it to IP address first.
+        Returns None when resolution fails.
+        NOTE(review): this module has no direct 'import socket'; the name
+        presumably arrives via the star-import from utilities - confirm,
+        otherwise both attempts below raise NameError (swallowed by the
+        bare excepts) and None is returned. """
+        ip = None
+        try:
+            """ Speed up: don't go to DNS resolver unnecessarily """
+            socket.inet_aton(hostname_or_ip)
+            ip = hostname_or_ip
+        except:
+            try:
+                ip = socket.gethostbyname(hostname_or_ip)
+            except:
+                print_exc()
+        return ip
+
+        
+    def olthread_permid_of_asked_peer(self,dns):
+        """ Return the permid we asked to dial back to address dns, or None
+        if no outstanding request matches. """
+        for permid,peerrec in self.peers_asked.iteritems():
+            if peerrec['dns'] == dns:
+                # Yes, we asked this peer
+                return permid
+        return None
+
+
+def tally_opinion(myip,oplist,requiredquorum):
+    """ Record myip as one peer's opinion in oplist, drop opinions older
+    than REPLY_VALIDITY, and return (pruned oplist, consensus IP or None).
+    Consensus is the first IP reaching requiredquorum votes; a second
+    quorum, if any, is deliberately ignored. """
+
+    consensusip = None
+
+    # 5. Ordinary peer, just add his opinion
+    oplist.append([myip,time()])
+    if DEBUG:
+        print >> sys.stderr,"dialback: DIALBACK_REPLY: peer said I have IP address",myip
+
+    # 6. Remove stale opinions
+    newlist = []
+    threshold = time()-REPLY_VALIDITY
+    for pair in oplist:
+        if pair[1] >= threshold:
+            newlist.append(pair)
+    oplist = newlist
+    
+    # 7. See if we have X peers that agree
+    opinions = {}
+    for pair in oplist:
+        ip = pair[0]
+        if not (ip in opinions):
+            opinions[ip] = 1
+        else:
+            opinions[ip] += 1
+
+    for o in opinions:
+        if opinions[o] >= requiredquorum:
+            # We have a quorum
+            if consensusip is None:
+                consensusip = o
+                if DEBUG:
+                    print >> sys.stderr,"dialback: DIALBACK_REPLY: Got consensus on my IP address being",consensusip
+            else:
+                # Hmmmm... more than one consensus
+                pass
+
+    return oplist,consensusip
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/NatCheck.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/NatCheck.py
new file mode 100644 (file)
index 0000000..e42d36f
--- /dev/null
@@ -0,0 +1,211 @@
+# Written by Lucia D'Acunto
+# see LICENSE.txt for license information
+
+import socket
+import sys
+
+DEBUG = False
+
+def Test1(udpsock, serveraddr):
+    """
+    The client sends a request to a server asking it to send the
+    response back to the address and port the request came from
+    """
+
+    retVal = {"resp":False, "ex_ip":None, "ex_port":None}
+    BUFSIZ = 1024
+    reply = ""
+    request = "ping1"
+
+    udpsock.sendto(request, serveraddr)
+
+    try:
+        reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
+    except socket.timeout:
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,)
+        return retVal
+
+    except ValueError, (strerror):
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror)
+        return retVal
+    except socket.error, (errno, strerror):
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror)
+        return retVal
+
+    ex_ip, ex_port = reply.split(":")
+
+    retVal["resp"] = True
+    retVal["ex_ip"] = ex_ip
+    retVal["ex_port"] = ex_port
+
+    return retVal
+
+def Test2(udpsock, serveraddr):
+    """
+    The client sends a request asking to receive an echo from a
+    different address and a different port on the address and port the
+    request came from
+
+    Returns {"resp": bool}; timeouts and socket errors yield resp=False.
+    """
+
+    retVal = {"resp":False}
+    BUFSIZ = 1024
+    request = "ping2"
+
+    udpsock.sendto(request, serveraddr)
+
+    try:
+        reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
+    except socket.timeout:        
+        #if DEBUG: print >> sys.stderr, "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,)
+        return retVal
+    except ValueError, (strerror):
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror)
+        return retVal
+    except socket.error, (errno, strerror):
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror)
+        return retVal
+
+    retVal["resp"] = True
+
+    return retVal
+
+def Test3(udpsock, serveraddr):
+    """
+    The client sends a request asking to receive an echo from the same
+    address but from a different port on the address and port the
+    request came from
+    """
+
+    retVal = {"resp":False, "ex_ip":None, "ex_port":None}
+    BUFSIZ = 1024
+    reply = ""
+    request = "ping3"
+
+    udpsock.sendto(request, serveraddr)
+
+    try:
+        reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
+    except socket.timeout:
+        #if DEBUG: print >> sys.stderr, "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,)
+        return retVal
+    except ValueError, (strerror):
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror)
+        return retVal
+    except socket.error, (errno, strerror):
+        if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror)
+        return retVal
+
+    ex_ip, ex_port = reply.split(":")
+
+    retVal["resp"] = True
+    retVal["ex_ip"] = ex_ip
+    retVal["ex_port"] = ex_port
+
+    return retVal
+
+# Returns information about the NAT the client is behind
+def GetNATType(in_port, serveraddr1, serveraddr2):
+    """
+    Returns the NAT type according to the STUN algorithm, as well as the external
+    address (ip, port) and the internal address of the host
+    """
+
+    serveraddr1 = ('stun1.tribler.org',6701)
+    serveraddr2 = ('stun2.tribler.org',6702)
+    
+    nat_type, ex_ip, ex_port, in_ip = [-1, "Unknown"], "0.0.0.0", "0", "0.0.0.0"
+
+    # Set up the socket
+    udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    udpsock.settimeout(5)
+    try:
+        udpsock.bind(('',in_port))
+    except socket.error, err:
+        print >> sys.stderr, "Couldn't bind a udp socket on port %d : %s" % (in_port, err)
+        return (nat_type, ex_ip, ex_port, in_ip)
+    try:
+        # Get the internal IP address
+        connectaddr = ('tribler.org',80)
+        s = socket.socket()
+        s.connect(connectaddr)
+        in_ip = s.getsockname()[0]
+        del s
+        if DEBUG: print >> sys.stderr, "NATCheck: getting the internal ip address by connecting to tribler.org:80", in_ip
+    except socket.error, err:
+        print >> sys.stderr, "Couldn't connect to %s:%i" % (connectaddr[0], connectaddr[1])
+        return (nat_type, ex_ip, ex_port, in_ip)
+
+    """
+        EXECUTE THE STUN ALGORITHM
+    """
+
+    # Do Test I
+    ret = Test1(udpsock, serveraddr1)
+
+    if DEBUG: print >> sys.stderr, "NATCheck:", "Test I reported: " + str(ret)
+
+    if ret["resp"] == False:
+        nat_type[1] = "Blocked"
+
+    else:
+        ex_ip = ret["ex_ip"]
+        ex_port = ret["ex_port"]
+
+        if ret["ex_ip"] == in_ip: # No NAT: check for firewall
+
+            if DEBUG: print >> sys.stderr, "NATCheck:", "No NAT"
+
+            # Do Test II
+            ret = Test2(udpsock, serveraddr1)
+            if DEBUG: print >> sys.stderr, "NATCheck:", "Test II reported: " + str(ret)
+
+            if ret["resp"] == True:
+                nat_type[0] = 0
+                nat_type[1] = "Open Internet"
+            else:
+                if DEBUG: print >> sys.stderr, "NATCheck:", "There is a Firewall"
+
+                # Do Test III
+                ret = Test3(udpsock, serveraddr1)
+                if DEBUG: print >> sys.stderr, "NATCheck:", "Test III reported: " + str(ret)
+
+                if ret["resp"] == True:
+                    nat_type[0] = 2
+                    nat_type[1] = "Restricted Cone Firewall"
+                else:
+                    nat_type[0] = 3
+                    nat_type[1] = "Port Restricted Cone Firewall"
+
+        else: # There is a NAT
+            if DEBUG: print >> sys.stderr, "NATCheck:", "There is a NAT"
+
+            # Do Test II
+            ret = Test2(udpsock, serveraddr1)
+            if DEBUG: print >> sys.stderr, "NATCheck:", "Test II reported: " + str(ret)
+            if ret["resp"] == True:
+                nat_type[0] = 1
+                nat_type[1] = "Full Cone NAT"
+            else:
+                #Do Test I using a different echo server
+                ret = Test1(udpsock, serveraddr2)
+                if DEBUG: print >> sys.stderr, "NATCheck:", "Test I reported: " + str(ret)
+
+                if ex_ip == ret["ex_ip"] and ex_port == ret["ex_port"]: # Public address is constant: consistent translation
+
+                    # Do Test III
+                    ret = Test3(udpsock, serveraddr1)
+                    if DEBUG: print >> sys.stderr, "NATCheck:", "Test III reported: " + str(ret)
+
+                    if ret["resp"] == True:
+                        nat_type[0] = 2
+                        nat_type[1] = "Restricted Cone NAT"
+                    else:
+                        nat_type[0] = 3
+                        nat_type[1] = "Port Restricted Cone NAT"
+
+                else:
+                    nat_type[0] = -1
+                    nat_type[1] = "Symmetric NAT"
+
+    udpsock.close()
+    return (nat_type, ex_ip, ex_port, in_ip)
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/NatCheckMsgHandler.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/NatCheckMsgHandler.py
new file mode 100644 (file)
index 0000000..1eae139
--- /dev/null
@@ -0,0 +1,427 @@
+# Written by Lucia D'Acunto
+# see LICENSE.txt for license information
+
+from time import strftime
+from traceback import print_exc
+import datetime
+import random
+import socket
+import sys
+import thread
+
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_NATCHECK, CRAWLER_NATTRAVERSAL
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.NATFirewall.ConnectionCheck import ConnectionCheck
+from BaseLib.Core.NATFirewall.NatTraversal import tryConnect, coordinateHolePunching
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH, OLPROTO_VER_NINETH, SecureOverlay
+from BaseLib.Core.Statistics.Crawler import Crawler
+from BaseLib.Core.Utilities.utilities import show_permid, show_permid_short
+from types import IntType, StringType, ListType, TupleType
+from BaseLib.Core.simpledefs import *
+
+DEBUG = False
+
+PEERLIST_LEN = 100
+
+class NatCheckMsgHandler:
+    """
+    Singleton handling CRAWLER_NATCHECK / CRAWLER_NATTRAVERSAL overlay
+    messages, on both the crawler side and the regular-peer side.
+    """
+
+    __single = None
+
+    def __init__(self):
+        # Enforce the singleton pattern; use getInstance() instead.
+        if NatCheckMsgHandler.__single:
+            raise RuntimeError, "NatCheckMsgHandler is singleton"
+        NatCheckMsgHandler.__single = self
+        # Reply callbacks queued until the local NAT check completes.
+        self.crawler_reply_callbacks = []
+        self._secure_overlay = SecureOverlay.getInstance()
+
+        self.crawler = Crawler.get_instance()
+        if self.crawler.am_crawler():
+            # Crawler side: append-mode log files for NAT-check and
+            # NAT-traversal results, plus hole-punching bookkeeping.
+            self._file = open("natcheckcrawler.txt", "a")
+            self._file.write("\n".join(("# " + "*" * 80, strftime("%Y/%m/%d %H:%M:%S"), "# Crawler started\n")))
+            self._file.flush()
+            self._file2 = open("nattraversalcrawler.txt", "a")
+            self._file2.write("\n".join(("# " + "*" * 80, strftime("%Y/%m/%d %H:%M:%S"), "# Crawler started\n")))
+            self._file2.flush()
+            # Sliding window (max PEERLIST_LEN) of peers available for
+            # hole-punching attempts.
+            self.peerlist = []
+            self.holePunchingIP = socket.gethostbyname(socket.gethostname())
+            # request_id -> (first replying peer info, its reply) or (None, None)
+            self.trav = {}
+
+        else:
+            self._file = None
+
+    @staticmethod
+    def getInstance(*args, **kw):
+        # Return the singleton instance, creating it on first use.
+        if NatCheckMsgHandler.__single is None:
+            NatCheckMsgHandler(*args, **kw)
+        return NatCheckMsgHandler.__single
+
+    def register(self, launchmany):
+        # Hook this handler up to the running session; called once at startup.
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: register"
+
+        self.session = launchmany.session
+        # permid of the peer whose NATCHECK request is currently pending.
+        self.doNatCheckSender = None
+        self.registered = True
+
+    def doNatCheck(self, target_permid, selversion, request_callback):
+        """
+        The nat-check initiator_callback.
+
+        Sends a CRAWLER_NATCHECK request (empty payload) via
+        request_callback if the peer's overlay protocol version is recent
+        enough. Returns True when the request was sent, False otherwise.
+        """
+
+        # for Tribler versions < 4.5.0 : do nothing
+        # TODO: change OLPROTO_VER_EIGHTH to OLPROTO_VER_SEVENTH
+        if selversion < OLPROTO_VER_NINETH:
+            if DEBUG:
+                print >> sys.stderr, "NatCheckMsgHandler: Tribler version too old for NATCHECK: do nothing"
+            return False
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: do NATCHECK"
+
+        # send the message
+        request_callback(CRAWLER_NATCHECK, "", callback=self.doNatCheckCallback)
+
+        return True
+
+    def doNatCheckCallback(self, exc, permid):
+
+        if exc is not None:
+            return False
+            if DEBUG:
+                print >> sys.stderr, "NATCHECK_REQUEST was sent to", show_permid_short(permid), exc
+
+        # Register peerinfo on file
+        self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"),
+                                    "REQUEST",
+                                    show_permid(permid),
+                                    str(self._secure_overlay.get_dns_from_peerdb(permid)),
+                                    "\n")))
+        self._file.flush()
+        return True
+
+    def gotDoNatCheckMessage(self, sender_permid, selversion, channel_id, payload, reply_callback):
+        """
+        The handle-request callback.
+
+        Remembers the requesting crawler and starts the local NAT-type
+        detection; the reply is sent asynchronously from
+        natthreadcb_natCheckReplyCallback. Returns False if the check
+        could not be started.
+        """
+
+        self.doNatCheckSender = sender_permid
+        # Queue the reply callback: the NAT check runs on another thread.
+        self.crawler_reply_callbacks.append(reply_callback)
+
+        try:
+            if DEBUG:
+                print >>sys.stderr,"NatCheckMsgHandler: start_nat_type_detect()"
+            conn_check = ConnectionCheck.getInstance(self.session)
+            conn_check.try_start(self.natthreadcb_natCheckReplyCallback)
+        except:
+            print_exc()
+            return False
+
+        return True
+        
+    def natthreadcb_natCheckReplyCallback(self, ncr_data):
+        """
+        Called (from the NAT-check thread) with the detection result;
+        bencodes it and answers every crawler request queued so far.
+        """
+        if DEBUG:
+            print >> sys.stderr, "NAT type: ", ncr_data
+
+        # send the message to the peer who has made the NATCHECK request, if any
+        if self.doNatCheckSender is not None:
+            try:
+                ncr_msg = bencode(ncr_data)
+            except:
+                print_exc()
+                if DEBUG: print >> sys.stderr, "error ncr_data:", ncr_data
+                return False
+            if DEBUG:
+                print >> sys.stderr, "NatCheckMsgHandler:", ncr_data
+
+            # todo: make sure that natthreadcb_natCheckReplyCallback is always called for a request
+            # send replies to all the requests that have been received so far
+            for reply_callback in self.crawler_reply_callbacks:
+                reply_callback(ncr_msg, callback=self.natCheckReplySendCallback)
+            self.crawler_reply_callbacks = []
+
+
+    def natCheckReplySendCallback(self, exc, permid):
+        if DEBUG:
+            print >> sys.stderr, "NATCHECK_REPLY was sent to", show_permid_short(permid), exc
+        if exc is not None:
+            return False
+        return True
+
+    def gotNatCheckReplyMessage(self, permid, selversion, channel_id, channel_data, error, payload, request_callback):
+        """
+        The handle-reply callback.
+
+        Logs the peer's NAT-check result to the crawler log file; for
+        peers with a recent enough overlay version, also stores the peer
+        for later hole-punching attempts.
+        """
+        # NOTE(review): self._file / self.peerlist only exist on the
+        # crawler side (see __init__) — presumably this handler is only
+        # registered on the crawler; confirm against the caller.
+        if error:
+            if DEBUG:
+                print >> sys.stderr, "NatCheckMsgHandler: gotNatCheckReplyMessage"
+                print >> sys.stderr, "NatCheckMsgHandler: error", error
+
+            # generic error: another crawler already obtained these results
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"),
+                                        "  REPLY",
+                                        show_permid(permid),
+                                        str(self._secure_overlay.get_dns_from_peerdb(permid)),
+                                        "ERROR(%d)" % error,
+                                        payload,
+                                        "\n")))
+            self._file.flush()
+
+        else:
+            try:
+                recv_data = bdecode(payload)
+            except:
+                print_exc()
+                print >> sys.stderr, "bad encoded data:", payload
+                return False
+
+            try:    # check natCheckReply message
+                self.validNatCheckReplyMsg(recv_data)
+            except RuntimeError, e:
+                print >> sys.stderr, e
+                return False
+
+            if DEBUG:
+                print >> sys.stderr, "NatCheckMsgHandler: received NAT_CHECK_REPLY message: ", recv_data
+
+            # Register peerinfo on file
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"),
+                                        "  REPLY",
+                                        show_permid(permid),
+                                        str(self._secure_overlay.get_dns_from_peerdb(permid)),
+                                        ":".join([str(x) for x in recv_data]),
+                                        "\n")))
+            self._file.flush()
+
+            # for Tribler versions < 5.0 : do nothing
+            if selversion < OLPROTO_VER_NINETH:
+                if DEBUG:
+                    print >> sys.stderr, "NatCheckMsgHandler: Tribler version too old for NATTRAVERSAL: do nothing"
+                return True
+
+            if DEBUG:
+                print >> sys.stderr, "NatCheckMsgHandler: do NATTRAVERSAL"
+
+            # Save peer in peerlist (bounded: drop the oldest entry first)
+            if len(self.peerlist) == PEERLIST_LEN:
+                del self.peerlist[0]
+            self.peerlist.append([permid,recv_data[1],recv_data[2]])
+            if DEBUG:
+                print >> sys.stderr, "NatCheckMsgHandler: peerlist length is: ", len(self.peerlist)
+
+            # Try to perform hole punching
+            if len(self.peerlist) >= 2:
+                self.tryHolePunching()
+
+        return True
+
+    def validNatCheckReplyMsg(self, ncr_data):
+
+        if not type(ncr_data) == ListType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. It must be a list of parameters."
+            return False
+            
+        if not type(ncr_data[0]) == StringType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The first element in the list must be a string."
+            return False
+            
+        if not type(ncr_data[1]) == IntType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The second element in the list must be an integer."
+            return False
+            
+        if not type(ncr_data[2]) == IntType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The third element in the list must be an integer."
+            return False
+            
+        if not type(ncr_data[3]) == StringType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The forth element in the list must be a string."
+            return False
+            
+        if not type(ncr_data[4]) == IntType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The fifth element in the list must be an integer."
+            return False
+            
+        if not type(ncr_data[5]) == StringType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The sixth element in the list must be a string."
+            return False
+            
+        if not type(ncr_data[6]) == IntType:
+            raise RuntimeError, "NatCheckMsgHandler: received data is not valid. The seventh element in the list must be an integer."
+            return False
+
+    def tryHolePunching(self):
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: first element in peerlist", self.peerlist[len(self.peerlist)-1]
+            print >> sys.stderr, "NatCheckMsgHandler: second element in peerlist", self.peerlist[len(self.peerlist)-2]
+
+        holePunchingPort = random.randrange(3200, 4200, 1)
+        holePunchingAddr = (self.holePunchingIP, holePunchingPort)
+        
+        peer1 = self.peerlist[len(self.peerlist)-1]
+        peer2 = self.peerlist[len(self.peerlist)-2]
+
+        request_id = str(show_permid_short(peer1[0]) + show_permid_short(peer2[0]) + str(random.randrange(0, 1000, 1)))
+
+        self.udpConnect(peer1[0], request_id, holePunchingAddr)
+        self.udpConnect(peer2[0], request_id, holePunchingAddr)
+
+        # Register peerinfo on file
+        self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"),
+                                    "REQUEST",
+                                    request_id,
+                                    show_permid(peer1[0]),
+                                    str(peer1[1]),
+                                    str(peer1[2]),
+                                    str(self._secure_overlay.get_dns_from_peerdb(peer1[0])),
+                                    show_permid(peer2[0]),
+                                    str(peer2[1]),
+                                    str(peer2[2]),
+                                    str(self._secure_overlay.get_dns_from_peerdb(peer2[0])),
+                                    "\n")))
+        self._file2.flush()
+
+        self.trav[request_id] = (None, None)
+        thread.start_new_thread(coordinateHolePunching, (peer1, peer2, holePunchingAddr))
+
+    def udpConnect(self, permid, request_id, holePunchingAddr):
+        """
+        Ask peer permid (via a CRAWLER_NATTRAVERSAL request) to contact
+        the hole-punching coordinator; payload is "request_id:host:port".
+        Returns False only when the payload could not be bencoded.
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: request UDP connection"
+
+        mh_data = request_id + ":" + holePunchingAddr[0] + ":" + str(holePunchingAddr[1])
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: udpConnect message is", mh_data
+
+        try:
+            mh_msg = bencode(mh_data)
+        except:
+            print_exc()
+            if DEBUG: print >> sys.stderr, "NatCheckMsgHandler: error mh_data:", mh_data
+            return False
+
+        # send the message
+        self.crawler.send_request(permid, CRAWLER_NATTRAVERSAL, mh_msg, frequency=0, callback=self.udpConnectCallback)
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: request for", show_permid_short(permid), "sent to crawler"
+
+    def udpConnectCallback(self, exc, permid):
+
+        if exc is not None:
+            if DEBUG:
+                print >> sys.stderr, "NATTRAVERSAL_REQUEST failed to", show_permid_short(permid), exc
+
+            # Register peerinfo on file
+            self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"),
+                                    "REQUEST FAILED",
+                                    show_permid(permid),
+                                    str(self._secure_overlay.get_dns_from_peerdb(permid)),
+                                    "\n")))
+            return False
+
+        if DEBUG:
+            print >> sys.stderr, "NATTRAVERSAL_REQUEST was sent to", show_permid_short(permid), exc
+        return True
+        
+    def gotUdpConnectRequest(self, permid, selversion, channel_id, mh_msg, reply_callback):
+        """
+        Handle a CRAWLER_NATTRAVERSAL request: decode the
+        "request_id:host:port" payload, run the hole-punching attempt via
+        tryConnect(), and send "request_id:<YES|NO|ERR>" back.
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: gotUdpConnectRequest from", show_permid_short(permid)
+
+        try:
+            mh_data = bdecode(mh_msg)
+        except:
+            print_exc()
+            print >> sys.stderr, "NatCheckMsgHandler: bad encoded data:", mh_msg
+            return False
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: gotUdpConnectRequest is", mh_data
+
+        # Payload layout: "request_id:host:port"
+        try:
+            request_id, host, port = mh_data.split(":")
+        except:
+            print_exc()
+            print >> sys.stderr, "NatCheckMsgHandler: error in received data:", mh_data
+            return False
+
+        coordinator = (host, int(port))
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: coordinator address is", coordinator
+
+        # tryConnect blocks until the hole-punching attempt finishes.
+        mhr_data = request_id + ":" + tryConnect(coordinator)
+
+        # Report back to coordinator
+        try:
+            mhr_msg = bencode(mhr_data)
+        except:
+            print_exc()
+            print >> sys.stderr, "NatCheckMsgHandler: error in encoding data:", mhr_data
+            return False
+
+        reply_callback(mhr_msg, callback=self.udpConnectReplySendCallback)
+
+    def udpConnectReplySendCallback(self, exc, permid):
+
+        if DEBUG:
+            print >> sys.stderr, "NATTRAVERSAL_REPLY was sent to", show_permid_short(permid), exc
+        if exc is not None:
+            return False
+        return True
+
+        
+    def gotUdpConnectReply(self, permid, selversion, channel_id, channel_data, error, mhr_msg, request_callback):
+        """
+        Handle a CRAWLER_NATTRAVERSAL reply ("request_id:result").
+
+        self.trav tracks each pending attempt: (None, None) until the
+        first peer replies, then (peer-info, reply); when the second
+        peer's reply arrives both results are logged and the entry is
+        removed.
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: gotMakeHoleReplyMessage"
+
+        try:
+            mhr_data = bdecode(mhr_msg)
+        except:
+            print_exc()
+            print >> sys.stderr, "NatCheckMsgHandler: bad encoded data:", mhr_msg
+            return False
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: message is", mhr_data
+
+        try:
+            request_id, reply = mhr_data.split(":")
+        except:
+            print_exc()
+            print >> sys.stderr, "NatCheckMsgHandler: error in received data:", mhr_data
+            return False
+
+        if DEBUG:
+            print >> sys.stderr, "NatCheckMsgHandler: request_id is", request_id
+
+        if request_id in self.trav:
+            if DEBUG:
+                print >> sys.stderr, "NatCheckMsgHandler: request_id is in the list"
+            peer, value = self.trav[request_id]
+            if peer == None: # first peer reply
+                if DEBUG:
+                    print >> sys.stderr, "NatCheckMsgHandler: first peer reply"
+                self.trav[request_id] = ( (permid, self._secure_overlay.get_dns_from_peerdb(permid)), reply )
+            elif type(peer) == TupleType: # second peer reply
+                if DEBUG:
+                    print >> sys.stderr, "NatCheckMsgHandler: second peer reply"
+
+                # Register peerinfo on file
+                self._file2.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"),
+                                                    "  REPLY",
+                                                    request_id,
+                                                    show_permid(peer[0]),
+                                                    str(peer[1]),
+                                                    value,
+                                                    show_permid(permid),
+                                                    str(self._secure_overlay.get_dns_from_peerdb(permid)),
+                                                    reply,
+                                                    "\n")))
+
+                del self.trav[request_id]
+
+        self._file2.flush()
+
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/NatTraversal.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/NatTraversal.py
new file mode 100644 (file)
index 0000000..e301ed8
--- /dev/null
@@ -0,0 +1,178 @@
+from time import strftime
+from traceback import print_exc
+import socket
+import sys
+
+DEBUG = False
+
+def coordinateHolePunching(peer1, peer2, holePunchingAddr):
+
+    if DEBUG:
+        print >> sys.stderr, "NatTraversal: coordinateHolePunching at", holePunchingAddr
+
+    # Set up the sockets
+    try :
+        udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        udpsock.bind(holePunchingAddr)
+        udpsock.settimeout(60)
+
+    except socket.error, (errno, strerror) :
+
+        if udpsock :
+            udpsock.close()
+
+        if DEBUG:
+            print >> sys.stderr, "NatTraversal: Could not open socket: %s" % (strerror)
+
+        return
+
+    if DEBUG:
+        print >> sys.stderr, "NatTraversal: waiting for connection..."
+
+    # Receive messages
+    peeraddr2 = None
+    while True:
+
+        try:
+            data, peeraddr1 = udpsock.recvfrom(1024)
+            if not data:
+                continue
+            else:
+                if DEBUG:
+                    print >> sys.stderr, "NatTraversal:", strftime("%Y/%m/%d %H:%M:%S"), "...connected from: ", peeraddr1
+                if peeraddr2 == None:
+                    peeraddr2 = peeraddr1
+                elif peeraddr2 != peeraddr1:        
+                    udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2)
+                    udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2)
+                    udpsock.sendto(peeraddr1[0] + ":" + str(peeraddr1[1]), peeraddr2)
+                    udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1)
+                    udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1)
+                    udpsock.sendto(peeraddr2[0] + ":" + str(peeraddr2[1]), peeraddr1)
+                    break
+
+        except socket.timeout, error:
+            if DEBUG:
+                print >> sys.stderr, "NatTraversal: timeout with peers", error
+            udpsock.close()
+            break
+
+    # Close socket
+    udpsock.close()
+
+def tryConnect(coordinator):
+    """
+    Peer side of UDP hole punching.
+
+    Pings the coordinator, waits for the partner's "host:port" address,
+    then exchanges "hello" datagrams with the partner. Returns "YES" on
+    success, "NO" when the partner never answers, "ERR" on coordinator
+    timeout or malformed data.
+    """
+
+    # Set up the socket
+    udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    udpsock.settimeout(5)
+
+    # Send messages (three times: UDP is lossy)
+    udpsock.sendto("ping",coordinator)
+    udpsock.sendto("ping",coordinator)
+    udpsock.sendto("ping",coordinator)
+    if DEBUG:
+        print >> sys.stderr, "NatTraversal: sending ping to ", coordinator
+
+    # Wait for response from the coordinator
+
+    while True:
+        data = None
+        addr = None
+        try:
+            data, addr = udpsock.recvfrom(1024)
+        except socket.timeout, (strerror):
+            if DEBUG:
+                print >> sys.stderr, "NatTraversal: timeout with coordinator"
+            return "ERR"
+
+        # Ignore datagrams from anyone but the coordinator.
+        if addr == coordinator:
+            if DEBUG:
+                print >> sys.stderr, "NatTraversal: received", data, "from coordinator"
+            break
+
+        if DEBUG:
+            print >> sys.stderr, "NatTraversal: received", data, "from", addr
+
+    #success = False
+    #try:
+    #    host, port = data.split(":")
+    #except:
+    #    print_exc()
+    #    print >> sys.stderr, "NatCheckMsgHandler: error in received data:", data
+    #    return success
+    # peer = (host, int(port))
+    # for i in range(3):
+    #     udpsock.sendto("hello",peer)
+    #     udpsock.sendto("hello",peer)
+    #     udpsock.sendto("hello",peer)
+
+    #     try:
+    #         data, addr = udpsock.recvfrom(1024)
+
+    #     except socket.timeout, (strerror):
+    #         if DEBUG:
+    #             print >> sys.stderr, "NatTraversal: first timeout", strerror
+    #             print >> sys.stderr, "NatTraversal: resend"
+
+    #     else:
+    #         success = True
+    #         break
+
+    # The coordinator's datagram carries the partner's public endpoint.
+    try:
+        host, port = data.split(":")
+    except:
+        print_exc()
+        print >> sys.stderr, "NatCheckMsgHandler: error in received data:", data
+        return "ERR"
+
+    peer = (host, int(port))
+    udpsock.sendto("hello",peer)
+    udpsock.sendto("hello",peer)
+    udpsock.sendto("hello",peer)
+
+    # Wait for response (one resend round before giving up)
+    data = None
+    addr = None
+
+    while True:
+        try:
+            data, addr = udpsock.recvfrom(1024)
+        except socket.timeout, (strerror):
+            if DEBUG:
+                print >> sys.stderr, "NatTraversal: first timeout", strerror
+                print >> sys.stderr, "NatTraversal: resend"
+
+            udpsock.sendto("hello", peer)
+            udpsock.sendto("hello", peer)
+            udpsock.sendto("hello", peer)
+
+            try:
+                data, addr = udpsock.recvfrom(1024)
+            except socket.timeout, (strerror):
+                if DEBUG:
+                    print >> sys.stderr, "NatTraversal: second timeout", strerror
+
+                return "NO"
+
+        # data received, check address
+        if addr == peer: # peer is not symmetric NAT
+            break
+
+        if addr[0] == peer[0]: # peer has a symmetric NAT; adopt the port it used
+            peer = addr
+            break
+
+
+    udpsock.sendto("hello",peer)
+    udpsock.sendto("hello",peer)
+    udpsock.sendto("hello",peer)
+
+    # Close socket
+    udpsock.close()
+
+    if DEBUG:
+        print >> sys.stderr, "NatTraversal: message from", addr, "is", data
+
+    return "YES"
+
+        
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/ReturnConnHandler.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/ReturnConnHandler.py
new file mode 100644 (file)
index 0000000..904a0f4
--- /dev/null
@@ -0,0 +1,603 @@
+# Written by Arno Bakker, Bram Cohen, Jie Yang
+# see LICENSE.txt for license information
+#
+# This class receives all connections and messages destined for 
+# infohash = 0x00 0x00 ... 0x01
+# The peer sends a DIALBACK_REPLY message, we send no reply.
+#
+
+import sys
+from struct import pack,unpack
+from time import time
+from sets import Set
+from cStringIO import StringIO
+from threading import currentThread
+from socket import gethostbyname
+from traceback import print_exc,print_stack
+
+from BaseLib.Core.BitTornado.__init__ import createPeerID
+from BaseLib.Core.BitTornado.BT1.MessageID import protocol_name,option_pattern,getMessageName
+from BaseLib.Core.BitTornado.BT1.convert import tobinary,toint
+
+
+DEBUG = False
+
+#
+# Public definitions
+#
+# Overlay infohash reserved for dialback return connections:
+# nineteen zero bytes followed by 0x01.
+dialback_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'
+
+#
+# Private definitions
+#
+
+# States for overlay connection (NOTE: value 3 is not defined here)
+STATE_INITIAL = 0
+STATE_HS_FULL_WAIT = 1
+STATE_HS_PEERID_WAIT = 2
+STATE_DATA_WAIT = 4
+STATE_CLOSED = 5
+
+# Misc
+EXPIRE_THRESHOLD =      30    # seconds; keep consistent with sockethandler
+EXPIRE_CHECK_INTERVAL = 60     # seconds
+
+
+class ReturnConnHandler:
+    """
+    Singleton that receives all connections and messages destined for the
+    dialback return-connection infohash (see module header).
+    """
+    __single = None
+
+    def __init__(self):
+        # Enforce the singleton pattern; use getInstance() instead.
+        if ReturnConnHandler.__single:
+            raise RuntimeError, "ReturnConnHandler is Singleton"
+        ReturnConnHandler.__single = self
+
+    #
+    # Interface for upper layer
+    #
+    def getInstance(*args, **kw):
+        if ReturnConnHandler.__single is None:
+            ReturnConnHandler(*args, **kw)
+        return ReturnConnHandler.__single
+    getInstance = staticmethod(getInstance)
+
+    def register(self,rawserver,multihandler,mylistenport,max_len):
+        """ Called by MainThread """
+        self.rawserver = rawserver # real rawserver, not overlay_bridge
+        self.sock_hand = self.rawserver.sockethandler
+        self.multihandler = multihandler
+        # Dedicated rawserver for the dialback infohash.
+        self.dialback_rawserver = multihandler.newRawServer(dialback_infohash, 
+                                              self.rawserver.doneflag,
+                                              protocol_name)
+        # NOTE(review): create_my_peer_id is defined elsewhere in this
+        # module (not visible in this chunk).
+        self.myid = create_my_peer_id(mylistenport)
+        self.max_len = max_len
+        self.iplport2oc = {}    # (IP,listen port) -> ReturnConnection
+        self.usermsghandler = None
+        self.userconnhandler = None
+
+    def resetSingleton(self):
+        """ For testing purposes """
+        ReturnConnHandler.__single = None 
+
+    def start_listening(self):
+        """ Called by MainThread """
+        self.dialback_rawserver.start_listening(self)
+
+    def connect_dns(self,dns,callback):
+        """ Connects to the indicated endpoint. Non-blocking.
+            
+            Pre: "dns" must be an IP address, not a hostname.
+            
+            Network thread calls "callback(exc,dns)" when the connection
+            is established or when an error occurs during connection 
+            establishment. In the former case, exc is None, otherwise
+            it contains an Exception.
+
+            The established connection will auto close after EXPIRE_THRESHOLD
+            seconds of inactivity.
+        """
+        # Called by overlay thread
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: connect_dns",dns
+        # To prevent concurrency problems on sockets the calling thread 
+        # delegates to the network thread.
+        # NOTE(review): Task is defined elsewhere in this module (not
+        # visible in this chunk).
+        task = Task(self._connect_dns,dns,callback)
+        self.rawserver.add_task(task.start, 0) 
+
+
+    def send(self,dns,msg,callback):
+        """ Sends a message to the indicated dns. Non-blocking.
+            
+            Pre: connection to permid must have been established successfully.
+
+            Network thread calls "callback(exc,dns)" when the message is sent
+            or when an error occurs during sending. In the former case, exc 
+            is None, otherwise it contains an Exception.
+        """
+        # Called by overlay thread
+        # To prevent concurrency problems on sockets the calling thread 
+        # delegates to the network thread.
+        task = Task(self._send,dns,msg,callback)
+        self.rawserver.add_task(task.start, 0)
+
+
+
+    def close(self,dns):
+        """ Closes any connection to indicated permid. Non-blocking.
+            
+            Pre: connection to permid must have been established successfully.
+
+            Network thread calls "callback(exc,permid,selver)" when the connection
+            is closed.
+        """
+        # Called by overlay thread
+        # To prevent concurrency problems on sockets the calling thread 
+        # delegates to the network thread.
+        task = Task(self._close,dns)
+        self.rawserver.add_task(task.start, 0)
+
+
+    def register_recv_callback(self,callback):
+        """ Register a callback to be called when receiving a message from 
+            any permid. Non-blocking.
+
+            Network thread calls "callback(exc,permid,selver,msg)" when a message 
+            is received. The callback is not called on errors e.g. remote 
+            connection close.
+        """
+        self.usermsghandler = callback
+
+    def register_conns_callback(self,callback):
+        """ Register a callback to be called when receiving a connection from 
+            any permid. Non-blocking.
+
+            Network thread calls "callback(exc,permid,selver,locally_initiated)" 
+            when a connection is established (locally initiated or remote), or
+            when a connection is closed locally or remotely. In the former case, 
+            exc is None, otherwise it contains an Exception.
+
+            Note that this means that if a callback is registered via this method,
+            both this callback and the callback passed to a connect() method 
+            will be called.
+        """
+        self.userconnhandler = callback
+
+
+    #
+    # Internal methods
+    #
+    def _connect_dns(self,dns,callback):
+        # Called by network thread
+        # Reuse an existing connection to this (ip, port) if one is known;
+        # otherwise start a new one and queue the callback on it.
+        try:
+            if DEBUG:
+                print >> sys.stderr,"dlbreturn: actual connect_dns",dns
+            iplport = ip_and_port2str(dns[0],dns[1])
+            oc = None
+            try:
+                oc = self.iplport2oc[iplport]
+            except KeyError:
+                pass
+            if oc is None:
+                oc = self.start_connection(dns)
+                self.iplport2oc[iplport] = oc
+                oc.queue_callback(dns,callback)
+            else:
+                callback(None,dns)
+        except Exception,exc:
+            if DEBUG:
+                print_exc(file=sys.stderr)
+            callback(exc,dns)
+
+    def _send(self,dns,message,callback):
+        # Called by network thread
+        # Deliver on an existing connection only; error out otherwise.
+        try:
+            iplport = ip_and_port2str(dns[0],dns[1])
+            oc = None
+            try:
+                oc = self.iplport2oc[iplport]
+            except KeyError:
+                pass
+            if oc is None:
+                callback(KeyError('Not connected to dns'),dns)
+            else:
+                oc.send_message(message)
+                callback(None,dns)
+        except Exception,exc:
+            if DEBUG:
+                print_exc(file=sys.stderr)
+            callback(exc,dns)
+
+
+    def _close(self,dns):
+        # Called by network thread
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: actual close",dns
+        try:
+            iplport = ip_and_port2str(dns[0],dns[1])
+            oc = None
+            try:
+                oc = self.iplport2oc[iplport]
+            except KeyError:
+                pass
+            if oc is None:
+                if DEBUG:
+                    print >> sys.stderr,"dlbreturn: error - actual close, but no connection to peer in admin"
+            else:
+                oc.close()
+        except Exception,e:
+            print_exc(file=sys.stderr)
+
+    #
+    # Interface for SocketHandler
+    #
+    def external_connection_made(self,singsock):
+        """ incoming connection (never used) """
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: external_connection_made",singsock.get_ip(),singsock.get_port()
+        oc = ReturnConnection(self,singsock,self.rawserver)
+        singsock.set_handler(oc)
+
+    def connection_flushed(self,singsock):
+        """ sockethandler flushes connection """
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: connection_flushed",singsock.get_ip(),singsock.get_port()
+        pass
+
+    #
+    # Interface for ServerPortHandler
+    #
+    def externally_handshaked_connection_made(self, singsock, options, msg_remainder):
+        """ incoming connection, handshake partially read, identified
+            as an overlay connection (used always)
+        """
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: externally_handshaked_connection_made",\
+                singsock.get_ip(),singsock.get_port()
+        oc = ReturnConnection(self,singsock,self.rawserver,ext_handshake = True, options = options)
+        singsock.set_handler(oc)
+        # Feed any bytes read past the handshake into the new connection.
+        if msg_remainder:
+            oc.data_came_in(singsock,msg_remainder)
+        return True
+
+
+    #
+    # Interface for ReturnConnection
+    #
+    def got_connection(self,oc):
+        # Register a fully handshaked ReturnConnection in the admin. Returns
+        # False when oc is a duplicate and has been closed, True otherwise.
+        if DEBUG:
+            print >>sys.stderr,"dlbreturn: Got connection from",oc.get_ip(),"listen",oc.get_listen_port()
+        
+        ret = True
+        iplport = ip_and_port2str(oc.get_ip(),oc.get_listen_port())
+        known = iplport in self.iplport2oc
+        if not known:
+            self.iplport2oc[iplport] = oc
+        elif known and not oc.is_locally_initiated():
+            # Locally initiated connections will already be registered,
+            # so if it's not a local connection and we already have one 
+            # we have a duplicate, and we close the new one.
+            if DEBUG:
+                print >> sys.stderr,"dlbreturn: got_connection:", \
+                    "closing because we already have a connection to",iplport
+            self.cleanup_admin_and_callbacks(oc,
+                     Exception('closing because we already have a connection to peer'))
+            ret = False
+            
+        if ret:
+            # Connection accepted: fire any queued send callbacks and notify
+            # the user-level connection handler, if one was registered.
+            oc.dequeue_callbacks()
+            if self.userconnhandler is not None:
+                try:
+                    self.userconnhandler(None,(oc.get_ip(),oc.get_listen_port()),oc.is_locally_initiated())
+                except:
+                    # Catchall
+                    print_exc(file=sys.stderr)
+        return ret
+
+    def local_close(self,oc):
+        """ our side is closing the connection """
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: local_close"
+        self.cleanup_admin_and_callbacks(oc,Exception('local close'))
+
+    def connection_lost(self,oc):
+        """ overlay connection telling us to clear admin """
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: connection_lost"
+        self.cleanup_admin_and_callbacks(oc,Exception('connection lost'))
+
+    def got_message(self,dns,message):
+        """ received message from peer, pass to upper layer """
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: got_message",getMessageName(message[0])
+        if self.usermsghandler is None:
+            if DEBUG:
+                print >> sys.stderr,"dlbreturn: User receive callback not set"
+            return
+        try:
+            ret = self.usermsghandler(dns,message)
+            if ret is None:
+                # The handler must return True (keep connection) or False
+                # (close it); None indicates a broken handler, treat as False.
+                if DEBUG:
+                    print >> sys.stderr,"dlbreturn: INTERNAL ERROR:", \
+                        "User receive callback returned None, not True or False"
+                ret = False
+            return ret
+        except:
+            # Catch all
+            print_exc(file=sys.stderr)
+            return False
+
+
+    def get_max_len(self):
+        return self.max_len
+   
+    def get_my_peer_id(self):
+        return self.myid
+    
+    def measurefunc(self,length):
+        # Hook called with the size of each received chunk; deliberately a
+        # no-op here (no bandwidth accounting for return connections).
+        pass
+
+    def start_connection(self,dns):
+        # Open a new locally-initiated connection to dns = (ip,port) and
+        # return the ReturnConnection wrapping the new socket.
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: Attempt to connect to",dns
+        singsock = self.sock_hand.start_connection(dns)
+        oc = ReturnConnection(self,singsock,self.rawserver,
+                               locally_initiated=True,specified_dns=dns)
+        singsock.set_handler(oc)
+        return oc
+
+    def cleanup_admin_and_callbacks(self,oc,exc):
+        # Fail oc's queued callbacks with exc, remove it from the admin, and
+        # notify the user-level connection handler of the close.
+        oc.cleanup_callbacks(exc)
+        self.cleanup_admin(oc)
+        if self.userconnhandler is not None:
+            self.userconnhandler(exc,(oc.get_ip(),oc.get_listen_port()),oc.is_locally_initiated())
+
+    def cleanup_admin(self,oc):
+        iplports = []
+        d = 0
+        for key in self.iplport2oc.keys():
+            #print "***** iplport2oc:", key, self.iplport2oc[key]
+            if self.iplport2oc[key] == oc:
+                del self.iplport2oc[key]
+                #print "*****!!! del", key, oc
+                d += 1
+        
+
+class Task:
+    def __init__(self,method,*args, **kwargs):
+        self.method = method
+        self.args = args
+        self.kwargs = kwargs
+
+    def start(self):
+        if DEBUG:
+            print >> sys.stderr,"dlbreturn: task: start",self.method
+            #print_stack(file=sys.stderr)
+        self.method(*self.args,**self.kwargs)
+    
+
+class ReturnConnection:
+    """ State machine for one overlay return connection: performs the
+        BT-style handshake and then exchanges length-prefixed messages.
+        Driven by data_came_in(); each read_* handler consumes one chunk
+        and returns the (length, handler) pair for the next chunk, or
+        None to close the connection. """
+    def __init__(self,handler,singsock,rawserver,locally_initiated = False,
+                 specified_dns = None, ext_handshake = False,options = None):
+        # Called by network thread
+        self.handler = handler        
+        self.singsock = singsock # for writing
+        self.rawserver = rawserver
+        self.buffer = StringIO()
+        self.cb_queue = []
+        self.listen_port = None
+        self.options = None
+        self.locally_initiated = locally_initiated
+        self.specified_dns = specified_dns
+        self.last_use = time()
+
+        self.state = STATE_INITIAL
+        # Send our side of the handshake immediately.
+        self.write(chr(len(protocol_name)) + protocol_name + 
+                option_pattern + dialback_infohash + self.handler.get_my_peer_id())
+        if ext_handshake:
+            # ServerPortHandler already consumed most of the handshake; we
+            # only still expect the 20-byte remote peer ID.
+            self.state = STATE_HS_PEERID_WAIT
+            self.next_len = 20
+            self.next_func = self.read_peer_id
+            self.set_options(options)
+        else:
+            self.state = STATE_HS_FULL_WAIT
+            self.next_len = 1
+            self.next_func = self.read_header_len
+            
+        # Leave autoclose here instead of ReturnConnHandler, as that doesn't record
+        # remotely-initiated ReturnConnections before authentication is done.
+        self.rawserver.add_task(self._dlbconn_auto_close, EXPIRE_CHECK_INTERVAL)
+
+    #
+    # Interface for SocketHandler
+    #
+    def data_came_in(self, singsock, data):
+        """ sockethandler received data """
+        # now we got something we can ask for the peer's real port
+        dummy_port = singsock.get_port(True)
+
+        if DEBUG:
+            print >> sys.stderr,"dlbconn: data_came_in",singsock.get_ip(),singsock.get_port()
+        self.handler.measurefunc(len(data))
+        self.last_use = time()
+        # Accumulate bytes in self.buffer until next_len bytes are available,
+        # then hand the chunk to next_func; repeat until data is exhausted.
+        while 1:
+            if self.state == STATE_CLOSED:
+                return
+            i = self.next_len - self.buffer.tell()
+            if i > len(data):
+                self.buffer.write(data)
+                return
+            self.buffer.write(data[:i])
+            data = data[i:]
+            m = self.buffer.getvalue()
+            self.buffer.reset()
+            self.buffer.truncate()
+            try:
+                #if DEBUG:
+                #    print >> sys.stderr,"dlbconn: Trying to read",self.next_len,"using",self.next_func
+                x = self.next_func(m)
+            except:
+                # Park the state machine in the dead state before re-raising.
+                self.next_len, self.next_func = 1, self.read_dead
+                if DEBUG:
+                    print_exc(file=sys.stderr)
+                raise
+            if x is None:
+                if DEBUG:
+                    print >> sys.stderr,"dlbconn: next_func returned None",self.next_func
+                self.close()
+                return
+            self.next_len, self.next_func = x
+
+    def connection_lost(self,singsock):
+        """ kernel or socket handler reports connection lost """
+        if DEBUG:
+            print >> sys.stderr,"dlbconn: connection_lost",singsock.get_ip(),singsock.get_port(),self.state
+        if self.state != STATE_CLOSED:
+            self.state = STATE_CLOSED
+            self.handler.connection_lost(self)
+
+    def connection_flushed(self,singsock):
+        """ sockethandler flushes connection """
+        pass
+
+    # 
+    # Interface for ReturnConnHandler
+    #
+    def send_message(self,message):
+        # Frame the message with a 4-byte length prefix before writing.
+        self.last_use = time()
+        s = tobinary(len(message))+message
+        if DEBUG:
+            print >> sys.stderr,"dlbconn: Sending message",len(message)
+        self.write(s)
+
+    def is_locally_initiated(self):
+        return self.locally_initiated
+
+    def get_ip(self):
+        return self.singsock.get_ip()
+
+    def get_port(self):
+        return self.singsock.get_port()
+
+    def get_listen_port(self):
+        # Listen port as decoded from the remote peer ID during the handshake.
+        return self.listen_port
+
+    def queue_callback(self,dns,callback):
+        # Queue a send callback until the connection is established.
+        if callback is not None:
+            self.cb_queue.append(callback)
+
+    def dequeue_callbacks(self):
+        # Connection established: fire all queued callbacks with success.
+        try:
+            for callback in self.cb_queue:
+                callback(None,self.specified_dns)
+            self.cb_queue = []
+        except Exception,e:
+            print_exc(file=sys.stderr)
+
+
+    def cleanup_callbacks(self,exc):
+        # Connection failed or closed: fire all queued callbacks with exc.
+        if DEBUG:
+            print >> sys.stderr,"dlbconn: cleanup_callbacks: #callbacks is",len(self.cb_queue)
+        try:
+            for callback in self.cb_queue:
+                ## Failure connecting
+                if DEBUG:
+                   print >> sys.stderr,"dlbconn: cleanup_callbacks: callback is",callback
+                callback(exc,self.specified_dns)
+        except Exception,e:
+            print_exc(file=sys.stderr)
+
+    #
+    # Internal methods
+    #
+    def read_header_len(self, s):
+        if ord(s) != len(protocol_name):
+            return None
+        return len(protocol_name), self.read_header
+
+    def read_header(self, s):
+        if s != protocol_name:
+            return None
+        return 8, self.read_reserved
+
+    def read_reserved(self, s):
+        if DEBUG:
+            print >> sys.stderr,"dlbconn: Reserved bits:", `s`
+        self.set_options(s)
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):
+        if s != dialback_infohash:
+            return None
+        return 20, self.read_peer_id
+
+    def read_peer_id(self, s):
+        # Final handshake step: decode the embedded listen port and register
+        # the connection; from here on we expect length-prefixed messages.
+        self.unauth_peer_id = s
+        self.listen_port = decode_listen_port(self.unauth_peer_id)
+        self.state = STATE_DATA_WAIT
+        if not self.got_connection():
+            self.close()
+            return
+        return 4, self.read_len
+    
+    
+    def got_connection(self):
+        return self.handler.got_connection(self)
+
+    def read_len(self, s):
+        l = toint(s)
+        if l > self.handler.get_max_len():
+            return None
+        return l, self.read_message
+
+    def read_message(self, s):
+        
+        if DEBUG:
+            print >>sys.stderr,"dlbconn: read_message len",len(s),self.state
+        
+        if s != '':
+            if self.state == STATE_DATA_WAIT:
+                if not self.handler.got_message((self.get_ip(),self.get_listen_port()),s):
+                    return None
+            else:
+                if DEBUG:
+                    print >> sys.stderr,"dlbconn: Received message while in illegal state, internal error!"
+                return None
+        return 4, self.read_len
+
+    def read_dead(self, s):
+        # Sink state entered after a handler raised; any further data closes.
+        return None
+
+    def write(self,s):
+        self.singsock.write(s)
+
+    def set_options(self,options):
+        self.options = options
+
+    def close(self):
+        if DEBUG:
+            print >> sys.stderr,"dlbconn: we close()",self.get_ip(),self.get_port()
+        self.state_when_error = self.state
+        if self.state != STATE_CLOSED:
+            self.state = STATE_CLOSED
+            self.handler.local_close(self)
+            self.singsock.close()
+        return
+
+    def _dlbconn_auto_close(self):
+        # Periodic task: close after EXPIRE_THRESHOLD seconds of inactivity,
+        # otherwise re-schedule the check.
+        if (time() - self.last_use) > EXPIRE_THRESHOLD:
+            self.close()
+        else:
+            self.rawserver.add_task(self._dlbconn_auto_close, EXPIRE_CHECK_INTERVAL)
+
+def create_my_peer_id(my_listen_port):
+    myid = createPeerID()
+    myid = myid[:14] + pack('<H', my_listen_port) + myid[16:]
+    return myid
+
+def decode_listen_port(peerid):
+    bin = peerid[14:16]
+    tup = unpack('<H', bin)
+    return tup[0]
+
+def ip_and_port2str(ip,port):
+    return ip+':'+str(port)
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/TimeoutCheck.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/TimeoutCheck.py
new file mode 100644 (file)
index 0000000..d383a7e
--- /dev/null
@@ -0,0 +1,93 @@
+# Written by Lucia D'Acunto
+# see LICENSE.txt for license information
+
+from socket import *
+import sys
+import thread
+import threading
+
+
+DEBUG = False
+
+
+to = -1 # timeout default value
+lck = threading.Lock()
+evnt = threading.Event()
+
+
+# Sending pings to the pingback server and waiting for a reply
+def pingback(ping, pingbacksrvr):
+    # Runs in its own thread. Sets the module-global 'to' to this ping's
+    # delay when a pong is received, and signals 'evnt' either on timeout
+    # or when the 145-second (largest) probe answers.
+
+    global to, lck, evnt
+
+    # Set up the socket
+    udpsock = socket(AF_INET, SOCK_DGRAM)
+    udpsock.connect(pingbacksrvr)
+    udpsock.settimeout(ping+10)
+    
+    if DEBUG: print >> sys.stderr, "TIMEOUTCHECK:", "-> ping"
+
+    # Send the ping to the server specifying the delay of the reply
+    # (sent three times, presumably to compensate for loss -- TODO confirm)
+    pingMsg = (str("ping:"+str(ping)))
+    udpsock.send(pingMsg)
+    udpsock.send(pingMsg)
+    udpsock.send(pingMsg)
+
+    # Wait for reply from the server
+    while True:
+
+        # NOTE(review): rcvaddr is never assigned a real value; the debug
+        # print below always shows None.
+        rcvaddr = None
+
+        try:
+            reply = udpsock.recv(1024)
+
+        except timeout: # No reply from the server: timeout passed
+
+            if udpsock:
+                udpsock.close()
+
+            if DEBUG: print >> sys.stderr, "TIMEOUTCHECK:", "UDP connection to the pingback server has timed out for ping", ping
+
+            lck.acquire()
+            evnt.set()
+            evnt.clear()
+            lck.release()
+            break
+
+        if DEBUG: print >> sys.stderr, pingbacksrvr
+        if DEBUG: print >> sys.stderr, rcvaddr
+
+        if reply:
+            data = reply.split(':')
+            if DEBUG: print >> sys.stderr, data, "received from the pingback server"
+
+            if data[0] == "pong":
+                if DEBUG: print >> sys.stderr, "TIMEOUTCHECK:", "<-", data[0], "after", data[1], "seconds"
+                to = ping
+                # 145 is the largest delay probed; only then wake GetTimeout.
+                if int(data[1])==145:
+                    lck.acquire()
+                    evnt.set()
+                    evnt.clear()
+                    lck.release()
+                return
+
+        # NOTE(review): this unconditional return means at most one datagram
+        # is processed, making 'while True' redundant, and the socket is not
+        # closed on this path -- verify intended behaviour.
+        return
+
+
+# Main method of the library: launches nat-timeout discovery algorithm
+def GetTimeout(pingbacksrvr):
+    """
+    Returns the NAT timeout for UDP traffic
+    """
+    
+    # Reply delays (seconds) probed in parallel, one thread per delay.
+    pings = [25, 35, 55, 85, 115, 145]
+
+    # Send pings and wait for replies
+    for ping in pings:
+        thread.start_new_thread(pingback, (ping, pingbacksrvr))
+
+    # Block until a pingback thread signals a conclusive result.
+    # NOTE(review): pingback does evnt.set() immediately followed by
+    # evnt.clear(); if that happens before this wait() the wakeup can be
+    # missed -- confirm this race is acceptable.
+    global evnt
+    evnt.wait()
+
+    if DEBUG: print >> sys.stderr, "TIMEOUTCHECK: timeout is", to
+    return to
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/TimeoutFinder.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/TimeoutFinder.py
new file mode 100644 (file)
index 0000000..cd25024
--- /dev/null
@@ -0,0 +1,112 @@
+# Written by Gertjan Halkes
+# see LICENSE.txt for license information
+
+
+import struct
+import time
+import sys
+
+DEBUG = False
+
+class TimeoutFinder:
+    """ Probe the NAT/firewall UDP timeout by asking a pingback server to
+        echo packets after decreasing delays; reports the result (or -1)
+        through the optional reportback callback. """
+    # Delays (seconds), largest first; one socket/probe per entry.
+    PINGBACK_TIMES = [ 245, 235, 175, 115, 85, 55, 25, 10 ]
+    PINGBACK_ADDRESS = ("m23trial-udp.tribler.org", 7396)
+
+    def __init__(self, rawserver, initial_ping, reportback = None):
+        self.sockets = []
+        self.rawserver = rawserver
+        self.timeout_found = -1
+        self.initial_ping = initial_ping
+        self.reportback = reportback
+        self.timeout_index = 0
+
+        # Stagger the pings by 1 second to ensure minimum impact on other traffic
+        rawserver.add_task(self.ping, 1)
+        rawserver.add_task(self.report_done, TimeoutFinder.PINGBACK_TIMES[0] + 5)
+
+
+    def ping(self):
+        # Open a fresh UDP socket per probe so each delay gets its own
+        # NAT mapping; schedules itself until all delays have been sent.
+        sock = self.rawserver.create_udpsocket(0, "0.0.0.0")
+        self.sockets.append(sock)
+        self.rawserver.start_listening_udp(sock, self)
+        if self.initial_ping:
+            # Delay 0 asks the server to echo the delay value back first;
+            # the delayed ping is then sent from data_came_in.
+            sock.sendto(struct.pack("!Id", 0, float(TimeoutFinder.PINGBACK_TIMES[self.timeout_index])),
+                TimeoutFinder.PINGBACK_ADDRESS)
+        else:
+            sock.sendto(struct.pack("!Id", TimeoutFinder.PINGBACK_TIMES[self.timeout_index],
+                time.time()), TimeoutFinder.PINGBACK_ADDRESS)
+        self.timeout_index += 1
+        if self.timeout_index < len(TimeoutFinder.PINGBACK_TIMES):
+            self.rawserver.add_task(self.ping, 1)
+
+
+    def data_came_in(self, address, data):
+        # Packet format "!Id": 4-byte delay + 8-byte double timestamp.
+        if len(data) != 12:
+            return
+        #FIXME: the address should be checked, but that can only be done if
+        # the address is in dotted-decimal notation
+        #~ if address != TimeoutFinder.PINGBACK_ADDRESS:
+            #~ return
+
+        timeout = struct.unpack("!Id", data)
+        if timeout[0] == 0:
+            # Echo of an initial ping: now send the real delayed ping on the
+            # socket belonging to this delay value.
+            to_find = int(timeout[1])
+            for i in range(0, len(TimeoutFinder.PINGBACK_TIMES)):
+                if to_find == TimeoutFinder.PINGBACK_TIMES[i]:
+                    self.sockets[i].sendto(struct.pack("!Id", to_find, time.time()), TimeoutFinder.PINGBACK_ADDRESS)
+                    break
+        else:
+            if DEBUG:
+                print >>sys.stderr, ("Received ping with %d delay" % (timeout[0]))
+            # Each reply overwrites timeout_found; the largest delay that
+            # traverses the NAT arrives last and therefore wins.
+            self.timeout_found = timeout[0]
+            #FIXME: log reception of packet
+
+    def report_done(self):
+        # Fires after the longest delay + 5s: tear down and report the result.
+        for i in self.sockets:
+            self.rawserver.stop_listening_udp(i)
+            i.close()
+
+        if self.reportback:
+            self.reportback(self.timeout_found, self.initial_ping)
+
+
+if __name__ == "__main__":
+    # Stand-alone test driver: run both probe variants (with and without an
+    # initial ping) against the default pingback server and print results.
+    import BaseLib.Core.BitTornado.RawServer as RawServer
+    from threading import Event
+    import thread
+    from traceback import print_exc
+    import os
+
+    def fail(e):
+        print "Fatal error: " + str(e)
+        print_exc()
+
+    def error(e):
+        print "Non-fatal error: " + str(e)
+
+    def report(timeout, initial_ping):
+        if initial_ping:
+            with_ = "with"
+        else:
+            with_ = "without"
+
+        if DEBUG:
+            print >>sys.stderr, ("Timeout %s initial ping: %d" % (with_, timeout))
+
+    DEBUG = True
+
+    # NOTE(review): this log file is opened but never written to or closed.
+    log = open("log-timeout.txt", "w")
+
+    rawserver_ = RawServer.RawServer(Event(),
+                           60.0,
+                           300.0,
+                           False,
+                           failfunc = fail,
+                           errorfunc = error)
+    thread.start_new_thread(rawserver_.listen_forever, (None,))
+    time.sleep(0.5)
+    TimeoutFinder(rawserver_, False, report)
+    TimeoutFinder(rawserver_, True, report)
+
+    print "TimeoutFinder started, press enter to quit"
+    sys.stdin.readline()
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/UDPPuncture.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/UDPPuncture.py
new file mode 100644 (file)
index 0000000..9e43f00
--- /dev/null
@@ -0,0 +1,1065 @@
+# Written by Gertjan Halkes
+# see LICENSE.txt for license information
+#
+# NATSwarm implementation for testing NAT/firewall puncturing
+# This module creates UDP "connections" and tries to connect to other
+# peers in the NATSwarm. PEX is used to find more peers.
+
+import guessip
+import time
+import socket
+import sys
+import errno
+import random
+from collections import deque
+import TimeoutFinder
+
+DEBUG = False
+
+#NOTE: the current implementation allows PEX_ADD and PEX_DEL messages to name
+#  the same peer. Although these events will be rare, we may want to do something
+#  about it.
+
+
+# Packet format definitions:
+# Each packet starts with a single byte packet type. After this, the contents
+# is type dependent:
+# Connect: 1 byte version number, 4 byte ID, 1 byte NAT/fw state,
+#    1 byte NAT/fw state version.
+# Your IP: 4 bytes IPv4 address, 2 bytes port number.
+# Forward connection request: 4 bytes ID.
+# Reverse connect: 4 bytes ID, 4 bytes IPv4 address, 2 bytes port number,
+#    1 byte NAT/fw state, 1 byte NAT/fw state version.
+#  NAT/fw state may not yet be known through PEX, but we need it for future PEX.
+#  Furthermore, we may not learn it through the remote peer's connect, as that may
+#  not reach us due to filtering.
+# PEX add: 1 byte number of addresses. Per address:
+#    4 bytes ID, 4 bytes IPv4 address, 2 bytes port, 1 byte NAT/fw state,
+#    1 byte NAT/fw state version.
+# PEX del: 1 byte number of addresses. Per address:
+#     4 bytes ID.
+# Close: 1 byte reason
+# Update NAT/fw state: 1 byte NAT/fw state, 1 byte NAT/fw state version.
+# Peer unknown: 4 bytes ID.
+#
+# NAT/fw state is encoded as follows: the least significant 2 bits (0 and 1)
+# encode the NAT state: 0 UNKNOWN, 1 NONE, 2 A(P)DM. Bits 2 and 3 encode
+# the filtering state: 0 UNKNOWN, 1 EIF/NONE, 2 A(P)DF
+
+# Packet sequence for connection setup through rendez-vous:
+# A -> B  CONNECT (in all likelyhood dropped at NAT/fw)
+# A -> R  FW_CONNECT_REQ
+# R -> B  REV_CONNECT
+# B -> A  CONNECT
+# A -> B  YOUR_IP
+# B -> A  YOUR_IP
+#
+# NOTE: it is important that three packets are exchanged on the connection,
+# because some NAT/firewalls (most notably linux based ones) use an increased
+# timeout if they detect that the 'connection' is more than a simple
+# transaction.
+
+# Information to keep for each peer:
+# - IP/port/NATfw state
+# - List of peers through which we heard of this peer
+# - Whether a connection attempt was already made
+# - To which other peers we have advertised this peer, and the FW state we
+#   advertised so updates can be sent
+
+# WARNING: copied from SocketHandler. An import would be better, to keep this
+# definition in one place
+if sys.platform == 'win32':
+    # Winsock has no EWOULDBLOCK errno; WSAEWOULDBLOCK (10035) is its equivalent.
+    SOCKET_BLOCK_ERRORCODE=10035    # WSAEWOULDBLOCK
+else:
+    SOCKET_BLOCK_ERRORCODE=errno.EWOULDBLOCK
+
+
+class UDPHandler:
+    TRACKER_ADDRESS = "m23trial-udp.tribler.org"
+    #~ TRACKER_ADDRESS = "localhost"
+
+    # Define message types
+    CONNECT = chr(0)  # Connection request, directly sent to target
+    YOUR_IP = chr(1)  # Information regarding remote ip as seen by local peer
+    FW_CONNECT_REQ = chr(2)  # Request to forward a reverse connection request
+    REV_CONNECT = chr(3)  # Reverse connection request, for NAT/firewall state setup
+    PEX_ADD = chr(4)  # Notify peer of other known peers
+    PEX_DEL = chr(5)  # Notify peer of peers that are no longer available
+    CLOSE = chr(6)  # Close connection
+    UPDATE_NATFW_STATE = chr(7)  # Notify peer of changed NAT state
+    PEER_UNKNOWN = chr(8)  # Response to FW_CONNECT_REQ if the requested peer is unknown
+    KEEP_ALIVE = chr(9)  # Simple keep-alive message
+
+    # Connection reset error codes
+    CLOSE_NORMAL = chr(0)
+    CLOSE_TOO_MANY = chr(1)
+    CLOSE_LEN = chr(2)
+    CLOSE_PROTO_VER, = chr(3)
+    CLOSE_GARBAGE = chr(4)
+    CLOSE_NOT_CONNECTED = chr(5)
+    CLOSE_STATE_CORRUPT = chr(6)
+
+    # Enumerate NAT states
+    # Note that the difference EIM and NONE is irrelevant for our purposes,
+    # as both are connectable if combined with EIF
+    NAT_UNKNOWN, NAT_NONE, NAT_APDM = range(0, 3)
+    # There is a distinction between EIF and no filtering, because the latter
+    # does not require keep-alives. However, we need keep-alives anyway for
+    # the connections so the distinction is irrelevant.
+    FILTER_UNKNOWN, FILTER_NONE, FILTER_APDF = range(0, 3)
+
+    # Number of connections to be made before a decision is made about NAT/fw state
+    RECV_CONNECT_THRESHOLD = 4
+    # Number of connections before scaling the numbers (prevent overflow, allow change)
+    RECV_CONNECT_SCALE_THRESHOLD = 64
+    # Fixed threshold above which the filter state is assumed to be FILTER_NONE. This is to
+    # make sure that a few (or rather quite a few) missing packets or TIVs don't screw up a
+    # peer's idea of its filtering type.
+    FIXED_THRESHOLD = 7
+
+    def __init__(self, rawserver, check_crawler, port = 0):
+        # initialise connections now because it is used in shutdown which will
+        # be called for Crawler instances as well
+        self.connections = {}
+        if check_crawler:
+            from BaseLib.Core.Statistics.Crawler import Crawler
+            crawler = Crawler.get_instance()
+            if crawler.am_crawler():
+                return
+
+        # initialise connections now because it is used in shutdown which will
+        # be called for Crawler instances as well
+        self.connections = {}
+
+        # 17/03/10 Boudewijn: obsolete code, see the same code a few
+        # lines above that include the check_crawler if/else check
+        # from BaseLib.Core.Statistics.Crawler import Crawler
+        # crawler = Crawler.get_instance()
+        # if crawler.am_crawler():
+        #     return
+        
+        self.rawserver = rawserver
+        self.socket = rawserver.create_udpsocket(port, "0.0.0.0")
+        self.known_peers = {}
+        self.nat_type = UDPHandler.NAT_UNKNOWN
+        self.filter_type = UDPHandler.FILTER_UNKNOWN
+        self.max_connections = 100
+        self.connect_threshold = 75
+        self.recv_unsolicited = 0
+        self.recv_connect_total = 0
+        self.recv_address = 0
+        self.recv_different_address = 0
+        self.sendqueue = deque([])
+        self.last_connect = 0
+        self.last_info_dump = time.time()
+        self.natfw_version = 1
+        self.keepalive_intvl = 100
+        self.done = False
+        self.reporter = None
+        self.last_sends = {}
+
+        rawserver.start_listening_udp(self.socket, self)
+
+        # Contact NATSwarm tracker peer after 5 seconds
+        if port == 9473:
+            self.tracker = True
+
+            # Tracker needs a known ID, so set it to all zero
+            self.id = "\0\0\0\0"
+            # Tracker should accept many more connections than other nodes
+            self.max_connections = 1000
+            rawserver.add_task(self.check_for_timeouts, 10)
+        else:
+            self.tracker = False
+
+            # Create a 4 byte random ID
+            self.id = (chr(random.getrandbits(8)) + chr(random.getrandbits(8)) +
+                chr(random.getrandbits(8)) + chr(random.getrandbits(8)))
+            if DEBUG:
+                debug("My ID: %s" % self.id.encode('hex'))
+            rawserver.add_task(self.bootstrap, 5)
+            TimeoutFinder.TimeoutFinder(rawserver, False, self.timeout_report)
+            TimeoutFinder.TimeoutFinder(rawserver, True, self.timeout_report)
+
+            if not DEBUG:
+                if check_crawler:
+                    #~ from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
+                    from BaseLib.Core.Statistics.PunctureCrawler import get_reporter_instance
+                    self.reporter = get_reporter_instance()
+
+        if self.reporter:
+            my_wan_ip = guessip.get_my_wan_ip()
+            if my_wan_ip == None and sys.platform == 'win32':
+                try:
+                    import os
+                    for line in os.popen("netstat -nr").readlines():
+                        words = line.split()
+                        if words[0] == '0.0.0.0':
+                            my_wan_ip = words[3]
+                            break
+                except:
+                    pass
+            if my_wan_ip == None:
+                my_wan_ip = 'Unknown'
+            self.reporter.add_event("UDPPuncture", "ID:%s;IP:%s" % (self.id.encode('hex'), my_wan_ip))
+
+    def shutdown(self):
+        self.done = True
+        for connection in self.connections.values():
+            self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NORMAL, connection.address)
+            self.delete_closed_connection(connection)
+
+    def data_came_in(self, address, data):
+        # Entry point for every incoming datagram: dispatch to the existing
+        # UDPConnection for this address, or set one up on a valid CONNECT.
+        if DEBUG:
+            debug("Data came (%d) in from address %s:%d" % (ord(data[0]), address[0], address[1]))
+        connection = self.connections.get(address)
+        if not connection:
+            if data[0] == UDPHandler.CLOSE:
+                # Prevent storms of packets, by not responding to this
+                return
+            if data[0] != UDPHandler.CONNECT:
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NOT_CONNECTED, address)
+                return
+            if len(data) != 8:
+                # CONNECT: type + version + 4-byte ID + NAT/fw state + state version
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN, address)
+                return
+            if data[1] != chr(0):
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_PROTO_VER, address)
+                return
+
+
+            if self.check_connection_count():
+                # No slot could be freed: refuse the new connection.
+                if self.reporter:
+                    self.reporter.add_event("UDPPuncture", "OCTM:%s,%d,%s" % (address[0], address[1], data[2:6].encode('hex')))
+                
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_TOO_MANY, address)
+                return
+
+            id = data[2:6]
+            connection = self.known_peers.get(id)
+            if not connection:
+                # Create new connection state and add to table
+                connection = UDPConnection(address, id, self)
+                self.known_peers[id] = connection
+            elif connection.address != address:
+                if connection.connection_state == UDPConnection.CONNECT_ESTABLISHED:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_STATE_CORRUPT, address)
+                    return
+
+                # ADPM NAT-boxes will have different address, so if we sent a
+                # connect already we will have done so to a different address.
+                try:
+                    del self.connections[connection.address]
+                except:
+                    pass
+                # As we knew this peer under a different address, we have to
+                # set the address to the one we actually use.
+                connection.address = address
+
+            if not address in self.last_sends:
+                self.incoming_connect(address, True) # Update NAT and Filter states
+            self.connections[address] = connection
+
+        if not connection.handle_msg(data):
+            self.delete_closed_connection(connection)
+
+    def check_connection_count(self):
+        # Returns False when a slot is free for a new connection (possibly
+        # after evicting the oldest non-tracker connection), True when the
+        # new connection must be refused.
+        # If we still have open slots, we can simply connect
+        if len(self.connections) < self.max_connections:
+            return False
+
+        if DEBUG:
+            debug("  Connection threshold reached, trying to find an old connection")
+        # Find oldest connection, and close if it is older than 5 minutes
+        oldest = None
+        oldest_time = 1e308
+        for connection in self.connections.itervalues():
+            if (not connection.tracker) and connection.connected_since < oldest_time:
+                oldest_time = connection.connected_since
+                oldest = connection
+
+        if not oldest:
+            return True
+
+        # The tracker evicts regardless of age; normal peers keep connections
+        # that are younger than 5 minutes.
+        if (not self.tracker) and oldest.connected_since > time.time() - 300:
+            if DEBUG:
+                debug("  All connections are under 5 minutes old")
+            return True
+
+        if DEBUG:
+            debug("  Closing connection to %s %s:%d" % (oldest.id.encode('hex'), oldest.address[0], oldest.address[1]))
+        oldest.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NORMAL)
+        self.delete_closed_connection(oldest)
+        return False
+
+    def incoming_connect(self, address, unsolicited):
+        # Update our NAT/filter-type estimate from one incoming connect and
+        # readvertise our state to all peers when the estimate changes.
+        if self.tracker:
+            return
+
+        if unsolicited:
+            self.recv_unsolicited += 1
+        self.recv_connect_total += 1
+
+        # Halve both counters periodically to prevent overflow and to let
+        # the estimate adapt to changed network conditions.
+        if self.recv_connect_total > UDPHandler.RECV_CONNECT_SCALE_THRESHOLD:
+            self.recv_connect_total >>= 1
+            self.recv_unsolicited >>= 1
+        # Check if we have enough data-points to say something sensible about
+        # our NAT/fw state.
+        if self.recv_connect_total > UDPHandler.RECV_CONNECT_THRESHOLD:
+            if DEBUG:
+                debug("Setting filter state (recv total %d, recv unsol %d)" %
+                    (self.recv_connect_total, self.recv_unsolicited))
+            update_filter = False
+            if self.recv_unsolicited > self.recv_connect_total / 2 or self.recv_unsolicited > UDPHandler.FIXED_THRESHOLD:
+                if self.filter_type != UDPHandler.FILTER_NONE or self.nat_type != UDPHandler.NAT_NONE:
+                    update_filter = True
+                    self.filter_type = UDPHandler.FILTER_NONE
+                    self.nat_type = UDPHandler.NAT_NONE
+            elif self.filter_type != UDPHandler.FILTER_APDF:
+                update_filter = True
+                self.filter_type = UDPHandler.FILTER_APDF
+
+            if update_filter:
+                # The state version is a single byte; peers use it to order
+                # NAT/fw state updates, so it wraps at 255.
+                self.natfw_version += 1
+                if self.natfw_version > 255:
+                    self.natfw_version = 0
+                if self.reporter:
+                    self.reporter.add_event("UDPPuncture", "UNAT:%d,%d,%d" % (self.nat_type,
+                        self.filter_type, self.natfw_version))
+                map(lambda x: x.readvertise_nat(), self.connections.itervalues())
+
<parameter name="content">+    def incoming_ip(self, address):
+        """Update the NAT estimate from a YOUR_IP report of our WAN address.
+
+        Counts how often peers report a *different* external address for
+        us; frequent changes indicate an address-and-port-dependent-mapping
+        (APDM) NAT.  Bumps the natfw version and re-advertises on change.
+        """
+        if self.tracker:
+            return
+
+        self.recv_address += 1
+        # First report only seeds the baseline address.
+        if self.recv_address == 1:
+            self.reported_wan_address = address
+            return
+
+        # Halve both counters so old samples fade out (exponential decay).
+        if self.recv_address > UDPHandler.RECV_CONNECT_SCALE_THRESHOLD:
+            self.recv_address >>= 1
+            self.recv_different_address >>= 1
+
+        if self.reported_wan_address != address:
+            self.reported_wan_address = address
+            self.recv_different_address += 1
+
+        # Check if we have enough data-points to say something sensible about
+        # our NAT/fw state.
+        if self.recv_address > UDPHandler.RECV_CONNECT_THRESHOLD:
+            if DEBUG:
+                debug("Setting nat state (recv addr %d, recv diff %d)" %
+                    (self.recv_address, self.recv_different_address))
+            update_nat = False
+            if self.recv_different_address > self.recv_address / 2:
+                if self.nat_type != UDPHandler.NAT_APDM:
+                    update_nat = True
+                    self.nat_type = UDPHandler.NAT_APDM
+                    self.filter_type = UDPHandler.FILTER_APDF
+            elif self.nat_type != UDPHandler.NAT_NONE:
+                update_nat = True
+                self.nat_type = UDPHandler.NAT_NONE
+
+            if update_nat:
+                # Version counter wraps at 255 (single byte on the wire).
+                self.natfw_version += 1
+                if self.natfw_version > 255:
+                    self.natfw_version = 0
+                if self.reporter:
+                    self.reporter.add_event("UDPPuncture", "UNAT:%d,%d,%d" % (self.nat_type,
+                        self.filter_type, self.natfw_version))
+                map(lambda x: x.readvertise_nat(), self.connections.itervalues())
</parameter>
+
+    def bootstrap(self):
+        """Resolve the well-known tracker and register it as a permanent peer.
+
+        DNS failure aborts silently; the resolved tracker connection is
+        pinned by a sentinel advertiser entry so it is never garbage
+        collected.  Ends by kicking the periodic maintenance task.
+        """
+        if DEBUG:
+            debug("Starting bootstrap")
+        try:
+            address = socket.gethostbyname(UDPHandler.TRACKER_ADDRESS)
+        except:
+            return
+        # NOTE(review): hard-coded guard address -- presumably the DNS
+        # catch-all the tracker name resolves to when decommissioned.
+        if address == '130.161.211.245':
+            return # Don't connect to catch-all address
+        tracker = UDPConnection((address, 9473), "\0\0\0\0", self)
+        # Make sure this is never removed, by setting an address that we will never receive
+        tracker.advertised_by[("0.0.0.0", 0)] = 1e308
+        tracker.nat_type = UDPHandler.NAT_NONE
+        tracker.filter_type = UDPHandler.FILTER_NONE
+        tracker.tracker = True
+        self.known_peers[tracker.id] = tracker
+        self.check_for_timeouts()
+
+    def sendto(self, data, address):
+        """Send a datagram, queueing it if the socket would block.
+
+        If a backlog already exists the packet is appended to preserve
+        ordering.  On EWOULDBLOCK the packet is queued and a drain task is
+        scheduled; any other socket error silently drops the packet.
+        """
+        if DEBUG:
+            debug("Sending data (%d) to address %s:%d" % (ord(data[0]), address[0], address[1]))
+        if len(self.sendqueue) > 0:
+            self.sendqueue.append((data, address))
+            return
+
+        try:
+            self.socket.sendto(data, address)
+        except socket.error, error:
+            if error[0] == SOCKET_BLOCK_ERRORCODE:
+                self.sendqueue.append((data, address))
+                self.rawserver.add_task(self.process_sendqueue, 0.1)
+
+    def process_sendqueue(self):
+        """Drain the queued datagrams in FIFO order.
+
+        Stops and reschedules itself when the socket blocks again; the
+        entry is only popped after a send attempt that did not block.
+        """
+        while len(self.sendqueue) > 0:
+            data, address = self.sendqueue[0]
+            try:
+                self.socket.sendto(data, address)
+            except socket.error, error:
+                if error[0] == SOCKET_BLOCK_ERRORCODE:
+                    self.rawserver.add_task(self.process_sendqueue, 0.1)
+                    return
+            self.sendqueue.popleft()
+
+    def check_nat_compatible(self, peer):
+        """Return False only for the known-impossible pairing: our NAT is
+        address/port-dependent-mapping while the peer filters unknown
+        sources -- rendez-vous cannot help there."""
+        #~ if self.filter_type == UDPHandler.FILTER_APDF and peer.nat_type == UDPHandler.NAT_APDM:
+            #~ return False
+        if self.nat_type == UDPHandler.NAT_APDM and peer.filter_type == UDPHandler.FILTER_APDF:
+            return False
+        return True
+
+    def check_for_timeouts(self):
+        """Periodic (10s) maintenance task.
+
+        Expires the last-send cache, prunes connections that are too old
+        or timed out, tries to open new connections up to the connect
+        threshold, and sends keep-alives / PEX updates.  Reschedules
+        itself unless shutdown is in progress.
+        """
+        if self.done:
+            return
+
+        now = time.time()
+        
+        # Remove info about last sends after 5 minutes
+        close_list = []
+        for address in self.last_sends.iterkeys():
+            if self.last_sends[address] < now - 300:
+                close_list.append(address)
+        for address in close_list:
+            del self.last_sends[address]
+
+        # Close connections older than 10 minutes, if the number of connections is more
+        # than the connect threshold. However, only discard upto 1/3 of the connect
+        # threshold.
+        if (not self.tracker) and len(self.connections) >= self.connect_threshold:
+            if DEBUG:
+                debug("Closing connections older than 10 minutes")
+            close_list = []
+            for connection in self.connections.itervalues():
+                if (not connection.tracker) and connection.connected_since < now - 600:
+                    if DEBUG:
+                        debug("  Closing connection to %s %s:%d" % (connection.id.encode('hex'),
+                            connection.address[0], connection.address[1]))
+                    close_list.append(connection)
+
+            for connection in close_list:
+                connection.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NORMAL)
+                self.delete_closed_connection(connection)
+                if len(self.connections) < self.connect_threshold / 1.5:
+                    break
+
+        # Check to see if we should try to make new connections
+        if ((not self.tracker) and len(self.connections) < self.connect_threshold and
+                self.last_connect < now - 20):
+            unconnected_peers = list(set(self.known_peers.iterkeys()) - set(ConnectionIteratorByID(self.connections)))
+            random.shuffle(unconnected_peers)
+            while len(unconnected_peers) > 0:
+                peer = self.known_peers[unconnected_peers.pop()]
+                # Only connect to peers that are not connected (should be all, but just in case)
+                if peer.connection_state != UDPConnection.CONNECT_NONE:
+                    continue
+                if not self.check_nat_compatible(peer):
+                    continue
+                # Don't connect to peers with who we have communicated in the last five minutes
+                if peer.last_comm > now - 300:
+                    continue
+
+                if not self.try_connect(peer):
+                    continue
+                self.last_connect = now
+                break
+
+        need_advert_time = now - self.keepalive_intvl
+        timeout_time = now - 250
+        can_advert_time = now - 30
+
+        close_list = []
+        pex_only = 0
+
+        # Find all the connections that have timed out and put them in a separate list
+        for connection in self.connections.itervalues():
+            if (connection.connection_state == UDPConnection.CONNECT_SENT and
+                    connection.last_received < can_advert_time):
+                if connection.connection_tries < 0:
+                    if DEBUG:
+                        debug("Dropping connection with %s:%d (timeout)" %
+                            (connection.address[0], connection.address[1]))
+                    close_list.append(connection)
+                elif not self.try_connect(connection):
+                    if DEBUG:
+                        debug("Too many retries %s:%d" % (connection.address[0], connection.address[1]))
+                    close_list.append(connection)
+            elif connection.last_received < timeout_time:
+                if DEBUG:
+                    debug("Dropping connection with %s:%d (timeout)" %
+                        (connection.address[0], connection.address[1]))
+                close_list.append(connection)
+
+        # Close all the connections
+        for connection in close_list:
+            self.delete_closed_connection(connection)
+
+        # Check whether we need to send keep-alives or PEX messages
+        for connection in self.connections.itervalues():
+            if connection.last_send < need_advert_time:
+                # If there is a need for a keep-alive, first check if we also
+                # have PEX info or changed NAT/fw state, because we might as
+                # well send that instead of an empty keep-alive
+                if (connection.advertise_nat or len(connection.pex_add) != 0 or len(connection.pex_del) != 0):
+                    connection.send_pex() or connection.sendto(UDPHandler.KEEP_ALIVE)
+                else:
+                    connection.sendto(UDPHandler.KEEP_ALIVE)
+            # NOTE(review): in the condition below 'and' binds tighter than
+            # 'or', so when advertise_nat is set the last_advert/pex_only
+            # throttles are bypassed -- presumably the whole disjunction was
+            # meant to be parenthesised. TODO confirm intent.
+            elif (connection.advertise_nat or (len(connection.pex_add) != 0 or len(connection.pex_del) != 0) and
+                    connection.last_advert < can_advert_time and pex_only < 35):
+                if connection.send_pex():
+                    pex_only += 1
+
+        # Reschedule this task in 10 seconds
+        self.rawserver.add_task(self.check_for_timeouts, 10)
+
+        # Debug info
+        if DEBUG:
+            if self.last_info_dump + 60 < now:
+                self.last_info_dump = now
+                for connection in self.known_peers.itervalues():
+                    msg = "Peer %d %s %s:%d,%d,%d: Advertisers:" % (connection.connection_state,
+                        connection.id.encode('hex'), connection.address[0],
+                        connection.address[1], connection.nat_type, connection.filter_type)
+                    for advertiser in connection.advertised_by.iterkeys():
+                        msg += " %s:%d" % (advertiser[0], advertiser[1])
+                    debug(msg)
+
+    def try_connect(self, peer):
+        """Attempt to connect to a known peer, with rendez-vous if needed.
+
+        Sends a CONNECT directly and, when the peer is believed to be
+        filtered, additionally asks a random advertiser to relay a
+        FW_CONNECT_REQ.  At most 3 attempts per peer.  Returns True when
+        an attempt was made and the peer was moved to the active
+        connections table, False otherwise.
+        """
+        # Don't try to connect to peers that we can't arange a rendez-vous for
+        # when we think we need it
+        if peer.filter_type != UDPHandler.FILTER_NONE and len(peer.advertised_by) == 0:
+            return False
+        
+        if peer.connection_tries > 2:
+            return False
+        peer.connection_tries += 1
+
+        if DEBUG:
+            debug("Found compatible peer at %s:%d attempt %d" % (peer.address[0], peer.address[1], peer.connection_tries))
+
+        # Always send connect, to ensure the other peer's idea of its firewall
+        # is maintained correctly
+        if self.reporter:
+            self.reporter.add_event("UDPPuncture", "OCON%d:%s,%d,%s,%d,%d,%d" % (peer.connection_tries, peer.address[0],
+                peer.address[1], peer.id.encode('hex'), peer.nat_type, peer.filter_type, peer.natfw_version))
+        peer.sendto(UDPHandler.CONNECT + chr(0) + self.id +
+            natfilter_to_byte(self.nat_type, self.filter_type) + chr(self.natfw_version))
+
+        # Request a rendez-vous
+        if peer.filter_type != UDPHandler.FILTER_NONE:
+            if DEBUG:
+                debug("Rendez-vous needed")
+            # Pick a random advertising peer for rendez vous
+            rendezvous_peers = list(peer.advertised_by.iterkeys())
+            random.shuffle(rendezvous_peers)
+            rendezvous_addr = rendezvous_peers[0]
+            rendezvous = self.connections.get(rendezvous_addr)
+            if rendezvous:
+                if self.reporter:
+                    self.reporter.add_event("UDPPuncture", "OFWC:%s,%d,%s,%s" % (rendezvous.address[0],
+                        rendezvous.address[1], rendezvous.id.encode('hex'), peer.id.encode('hex')))
+                rendezvous.sendto(UDPHandler.FW_CONNECT_REQ + peer.id)
+
+        peer.connection_state = UDPConnection.CONNECT_SENT
+        peer.last_received = time.time()
+        self.connections[peer.address] = peer
+        return True
+
+    def delete_closed_connection(self, connection):
+        """Remove a connection from the active table and reset its state.
+
+        Remembers recent outgoing traffic in last_sends (so retries are not
+        mis-counted as unsolicited connects), forgets the peer entirely
+        when nobody advertises it anymore, and -- if it was established --
+        queues a PEX_DEL for it on every remaining connection.
+        """
+        del self.connections[connection.address]
+        orig_state = connection.connection_state
+        connection.connection_state = UDPConnection.CONNECT_NONE
+        connection.last_comm = time.time()
+        # Save the fact that we have sent something to this address, to ensure that retries won't be
+        # counted as proper incomming connects without prior communication
+        if connection.last_send > time.time() - 300:
+            self.last_sends[connection.address] = connection.last_send
+        connection.last_send = 0
+        connection.last_received = 0
+        connection.last_advert = 0
+        # "\0\0\0\0" is the tracker id: its NAT/filter state is fixed.
+        if connection.id == "\0\0\0\0":
+            connection.nat_type = UDPHandler.NAT_NONE
+            connection.filter_type = UDPHandler.FILTER_NONE
+            connection.natfw_version = 0
+        else:
+            connection.nat_type = UDPHandler.NAT_UNKNOWN
+            connection.filter_type = UDPHandler.FILTER_UNKNOWN
+            connection.natfw_version = 0
+        connection.pex_add.clear()
+        connection.pex_del.clear()
+        connection.connection_tries = -1
+        if len(connection.advertised_by) == 0:
+            try:
+                del self.known_peers[connection.id]
+            except:
+                pass
+        map(lambda x: x.remove_advertiser(connection.address), self.known_peers.itervalues())
+        if orig_state == UDPConnection.CONNECT_ESTABLISHED:
+            map(lambda x: x.pex_del.append(connection), self.connections.itervalues())
+
+    def timeout_report(self, timeout, initial_ping):
+        """Adapt the keep-alive interval to a measured firewall timeout.
+
+        Lowers keepalive_intvl to (timeout - 15), but never below 30s,
+        and only for measurements taken on the initial ping.
+        """
+        if DEBUG:
+            debug("Timeout reported: %d %d" % (timeout, initial_ping))
+        if self.reporter:
+            self.reporter.add_event("UDPPuncture", "TOUT:%d,%d" % (timeout, initial_ping))
+        if initial_ping:
+            # Don't want to set the timeout too low, even if the firewall is acting funny
+            if timeout > 45 and timeout - 15 < self.keepalive_intvl:
+                self.keepalive_intvl = timeout - 15
+
+class ConnectionIteratorByID:
+    """Iterate over the peer ids of a connections dict.
+
+    Python 2 iterator (implements next()); used to build the set of
+    connected ids in UDPHandler.check_for_timeouts.
+    """
+    def __init__(self, connections):
+        self.value_iterator = connections.itervalues()
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        # Yield the 4-byte id of the next connection object.
+        value = self.value_iterator.next()
+        return value.id
+
+class UDPConnection:
+    """State and wire-protocol handling for one (attempted) UDP peer.
+
+    Implements the connect handshake, rendez-vous relaying, PEX (peer
+    exchange) gossip and NAT/firewall state updates.  All datagrams go
+    out through the owning UDPHandler.
+    """
+    # Connection life-cycle states.
+    CONNECT_NONE, CONNECT_SENT, CONNECT_ESTABLISHED = range(0, 3)
+
+    def __init__(self, address, id, handler):
+        # (host, port) of the remote peer.
+        self.address = address
+        self.handler = handler
+        self.connection_state = UDPConnection.CONNECT_NONE
+        self.nat_type = UDPHandler.NAT_UNKNOWN
+        self.filter_type = UDPHandler.FILTER_UNKNOWN
+        self.natfw_version = 0
+        # advertiser address -> timestamp we learned of this peer from it.
+        self.advertised_by = {}
+        # Queues of UDPConnection objects to advertise / retract in the
+        # next PEX message.
+        self.pex_add = deque([])
+        self.pex_del = deque([])
+        self.last_comm = 0
+        self.last_send = 0
+        self.last_advert = 0
+        self.last_received = 0
+        self.connected_since = 0
+        self.advertise_nat = False
+        self.tracker = False
+        # 4-byte peer identifier as used on the wire.
+        self.id = id
+        self.connection_tries = -1
+
+    def sendto(self, data):
+        # Send via the handler and record the send time for keep-alives.
+        self.handler.sendto(data, self.address)
+        self.last_send = time.time()
+
+    def handle_msg(self, data):
+        """Process one incoming datagram for this connection.
+
+        A datagram is either a single CONNECT (fixed 8 bytes) or a
+        concatenation of other messages, parsed in a loop.  Returns False
+        when the connection must be dropped by the caller.
+        """
+        self.last_received = time.time()
+        if data[0] == UDPHandler.CONNECT:
+            if DEBUG:
+                debug("  Message %d" % ord(data[0]))
+            if len(data) != 8:
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                return False
+
+            if ord(data[1]) != 0:
+                # Protocol version mismatch
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_PROTO_VER)
+                return False
+
+            if data[2:6] != self.id or self.connection_state == UDPConnection.CONNECT_ESTABLISHED:
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_STATE_CORRUPT)
+                return False
+                
+            if self.handler.reporter:
+                self.handler.reporter.add_event("UDPPuncture", "ICON-AC:%s,%d,%s" % (self.address[0],
+                    self.address[1], data[2:6].encode('hex')))
+
+            # Trackers hand out a random sample; peers just share everyone.
+            if self.handler.tracker:
+                peers = self.handler.connections.values()
+                random.shuffle(peers)
+                self.pex_add.extend(peers)
+            else:
+                self.pex_add.extend(self.handler.connections.itervalues())
+
+            self.connected_since = time.time()
+
+            # Reply with the sender's external address plus initial PEX.
+            message = UDPHandler.YOUR_IP + address_to_string(self.address)
+            message += self.pex_string(self.pex_add, 1024 - len(message), True)
+            self.sendto(message)
+            self.last_advert = self.connected_since
+            self.nat_type, self.filter_type = byte_to_natfilter(data[6])
+            self.natfw_version = ord(data[7])
+
+            self.connection_state = UDPConnection.CONNECT_ESTABLISHED
+            map(lambda x: x.pex_add.append(self), self.handler.connections.itervalues())
+            self.pex_add.pop() # Remove ourselfves from our own pex_add list
+            return True
+
+        if self.connection_state == UDPConnection.CONNECT_NONE:
+            # Other messages cannot be the first message in the stream. Drop this connection
+            return False
+
+        while len(data) > 0:
+            if DEBUG:
+                debug("  Message %d len %d" % (ord(data[0]), len(data)))
+            if data[0] == UDPHandler.YOUR_IP:
+                # 6-byte payload: our WAN address as seen by the peer.
+                if len(data) < 7:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                my_addres = string_to_address(data[1:7])
+                if DEBUG:
+                    debug("    My IP: %s:%d" % (my_addres[0], my_addres[1]))
+                if self.handler.reporter:
+                    self.handler.reporter.add_event("UDPPuncture", "IYIP:%s,%d,%s" % (my_addres[0], my_addres[1], self.id.encode('hex')))
+
+                self.handler.incoming_ip(my_addres)
+
+                # First YOUR_IP on an outgoing connect completes the handshake.
+                if self.connection_state == UDPConnection.CONNECT_SENT:
+                    self.pex_add.extend(self.handler.connections.itervalues())
+
+                    message = UDPHandler.YOUR_IP + address_to_string(self.address)
+                    message += self.pex_string(self.pex_add, 1024 - len(message), True)
+                    self.sendto(message)
+                    self.last_advert = time.time()
+                    self.connected_since = time.time()
+
+                    self.connection_state = UDPConnection.CONNECT_ESTABLISHED
+
+                    map(lambda x: x.pex_add.append(self), self.handler.connections.itervalues())
+                    self.pex_add.pop() # Remove ourselfves from our own pex_add list
+                data = data[7:]
+
+            elif data[0] == UDPHandler.FW_CONNECT_REQ:
+                # Peer asks us to relay a rendez-vous request to another peer.
+                if len(data) < 5:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                remote = data[1:5]
+                connection = self.handler.known_peers.get(remote)
+                if connection:
+                    if DEBUG:
+                        debug("    Rendez vous requested for peer %s %s:%d" % (
+                            remote.encode('hex'), connection.address[0], connection.address[1]))
+                    if self.handler.reporter:
+                        self.handler.reporter.add_event("UDPPuncture", "IFRQ:%s,%d,%s,%s,%d,%s" % (self.address[0],
+                            self.address[1], self.id.encode('hex'), connection.address[0], connection.address[1],
+                            remote.encode('hex')))
+                else:
+                    if DEBUG:
+                        debug("    Rendez vous requested for peer %s (unknown)" % (
+                            remote.encode('hex')))
+                    if self.handler.reporter:
+                        self.handler.reporter.add_event("UDPPuncture", "IFRQ:%s,%d,%s,Unknown,Unknown,%s" % (self.address[0],
+                            self.address[1], self.id.encode('hex'), remote.encode('hex')))
+
+                if connection:
+                    #FIXME: should we delay this action by some time to ensure the direct connect arives first?
+                    # If we do, we should recheck whether we are connected to the requested peer!
+                    connection.sendto(UDPHandler.REV_CONNECT + self.id + address_to_string(self.address) +
+                        natfilter_to_byte(self.nat_type, self.filter_type) +
+                        chr(self.natfw_version))
+                else:
+                    self.sendto(UDPHandler.PEER_UNKNOWN + remote)
+
+                data = data[5:]
+
+            elif data[0] == UDPHandler.REV_CONNECT:
+                # A rendez-vous relay: some peer wants us to connect back.
+                if len(data) < 13:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                remote = string_to_address(data[5:11])
+                if self.handler.reporter:
+                    self.handler.reporter.add_event("UDPPuncture", "IRRQ:%s,%d,%s,%s,%d,%s" % (self.address[0],
+                        self.address[1], self.id.encode('hex'), remote[0], remote[1], data[1:5].encode('hex')))
+                connection = self.handler.connections.get(remote)
+                if connection:
+                    pass
+                elif self.handler.check_connection_count():
+                    # NOTE(review): on this branch 'connection' is None (the
+                    # lookup above missed), so the attribute accesses below
+                    # raise AttributeError when a reporter is set -- this
+                    # should presumably report 'remote' and data[1:5] instead.
+                    if self.handler.reporter:
+                        self.handler.reporter.add_event("UDPPuncture", "OCTM-IRRQ:%s,%d,%s" % (connection.address[0],
+                            connection.address[1], connection.id.encode('hex')))
+                    self.handler.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_TOO_MANY, remote)
+                else:
+                    self.handler.incoming_connect(remote, False) # Update NAT and Filter states
+                    remote_id = data[1:5]
+                    connection = self.handler.known_peers.get(remote_id)
+                    if not connection:
+                        connection = UDPConnection(remote, remote_id, self.handler)
+                        self.handler.known_peers[remote_id] = connection
+                    elif connection.address != remote:
+                        self.sendto(UDPHandler.PEER_UNKNOWN + remote_id)
+                        data = data[13:]
+                        continue
+
+                    if compare_natfw_version(ord(data[12]), connection.natfw_version):
+                        connection.nat_type, connection.filter_type = byte_to_natfilter(data[11])
+                        connection.natfw_version = ord(data[12])
+
+                    self.handler.connections[remote] = connection
+                    connection.connection_state = UDPConnection.CONNECT_SENT
+                    if self.handler.reporter:
+                        self.handler.reporter.add_event("UDPPuncture", "OCON-IRRQ:%s,%d,%s" % (connection.address[0],
+                            connection.address[1], connection.id.encode('hex')))
+                    # NOTE(review): sends self.natfw_version (this incoming
+                    # connection's counter) where try_connect sends the
+                    # handler's own -- presumably should be
+                    # self.handler.natfw_version. TODO confirm.
+                    connection.sendto(UDPHandler.CONNECT + chr(0) + self.handler.id +
+                        natfilter_to_byte(self.handler.nat_type, self.handler.filter_type) +
+                        chr(self.natfw_version))
+                data = data[13:]
+
+            elif data[0] == UDPHandler.PEX_ADD:
+                # 12 bytes per entry: id(4) + address(6) + nat/filter(1) + version(1).
+                if len(data) < 2:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                addresses = ord(data[1])
+                if len(data) < 2 + 12 * addresses:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                for i in range(0, addresses):
+                    id = data[2 + i * 12:2 + i * 12 + 4]
+                    address = string_to_address(data[2 + i * 12 + 4:2 + i * 12 + 10])
+                    peer = self.handler.known_peers.get(id)
+                    if not peer:
+                        peer = UDPConnection(address, id, self.handler)
+                        peer.natfw_version = ord(data[2 + i * 12 + 11])
+                        peer.nat_type, peer.filter_type = byte_to_natfilter(data[2 + i * 12 + 10])
+                        self.handler.known_peers[id] = peer
+                    #FIXME: should we check the received address here as well?
+
+                    peer.advertised_by[self.address] = time.time()
+                    if DEBUG:
+                        nat_type, filter_type = byte_to_natfilter(data[2 + i * 12 + 10])
+                        debug("    Received peer %s %s:%d NAT/fw:%d,%d" % (id.encode('hex'),
+                            address[0], address[1], nat_type, filter_type))
+                    # Adopt the advertised NAT/fw state if it is newer, and
+                    # gossip the change onwards.
+                    if compare_natfw_version(ord(data[2 + i * 12 + 11]), peer.natfw_version):
+                        peer.natfw_version = ord(data[2 + i * 12 + 11])
+                        peer.nat_type, peer.filter_type = byte_to_natfilter(data[2 + i * 12 + 10])
+                        if peer.connection_state == UDPConnection.CONNECT_ESTABLISHED:
+                            map(lambda x: x.pex_add.append(peer), self.handler.connections.itervalues())
+                            peer.pex_add.pop() # Remove ourselfves from our own pex_add list
+
+                data = data[2 + addresses * 12:]
+
+            elif data[0] == UDPHandler.PEX_DEL:
+                # NOTE(review): the length guard checks 4 bytes per entry but
+                # the parse loop below consumes 6 per entry -- TODO confirm
+                # which stride the sender uses (pex_string emits 4-byte ids).
+                if len(data) < 2:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                addresses = ord(data[1])
+                if len(data) < 2 + 4 * addresses:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                for i in range(0, addresses):
+                    id = data[2 + i * 6:2 + i * 6 + 4]
+                    if DEBUG:
+                        debug("    Received peer %s" % (id.encode('hex')))
+                    peer = self.handler.known_peers.get(id)
+                    if not peer or not self.address in peer.advertised_by:
+                        continue
+
+                    del peer.advertised_by[self.address]
+                    if len(peer.advertised_by) == 0 and peer.connection_state == UDPConnection.CONNECT_NONE:
+                        del self.handler.known_peers[id]
+
+                data = data[2 + addresses * 6:]
+
+            elif data[0] == UDPHandler.CLOSE:
+                # NOTE(review): ord(data[1]) is read before the len==2 check;
+                # a bare 1-byte CLOSE raises IndexError when DEBUG is on.
+                if DEBUG:
+                    debug("    Reason %d" % ord(data[1]))
+                if len(data) == 2 and data[1] == UDPHandler.CLOSE_TOO_MANY and self.handler.reporter:
+                    self.handler.reporter.add_event("UDPPuncture", "ICLO:%s,%d,%s" % (self.address[0],
+                        self.address[1], self.id.encode('hex')))
+                return False
+            elif data[0] == UDPHandler.UPDATE_NATFW_STATE:
+                # Peer announces a new NAT/filter state; adopt if newer and gossip.
+                if len(data) < 3:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+                if compare_natfw_version(ord(data[2]), self.natfw_version):
+                    self.natfw_version = ord(data[2])
+                    self.nat_type, self.filter_type = byte_to_natfilter(data[1])
+                    if DEBUG:
+                        debug("    Type: %d, %d" % (self.nat_type, self.filter_type))
+                    map(lambda x: x.pex_add.append(self), self.handler.connections.itervalues())
+                    self.pex_add.pop() # Remove ourselfves from our own pex_add list
+                data = data[3:]
+
+            elif data[0] == UDPHandler.PEER_UNKNOWN:
+                # WARNING: there is a big security issue here: we trust the
+                # remote peer to send us the address that we sent it. However,
+                # if the peer is malicious it may send us another address. This
+                # can all be verified, but then we need to keep track of lots
+                # more state which I don't want to do for the current
+                # implementation.
+                if len(data) < 5:
+                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN)
+                    return False
+
+                remote = data[1:5]
+                peer = self.handler.known_peers.get(remote)
+                if not peer:
+                    data = data[5:]
+                    continue
+
+                # The rendez-vous we used doesn't know the peer: drop that
+                # advertiser and, if possible, retry via another one.
+                if self.address in peer.advertised_by:
+                    del peer.advertised_by[self.address]
+                    if len(peer.advertised_by) == 0 and peer.connection_state == UDPConnection.CONNECT_NONE:
+                        del self.handler.known_peers[remote]
+                        data = data[5:]
+                        continue
+
+                if len(peer.advertised_by) > 0 and peer.connection_state == UDPConnection.CONNECT_SENT:
+                    rendezvous_addr = peer.advertised_by.iterkeys().next()
+                    rendezvous = self.handler.connections.get(rendezvous_addr)
+                    #FIXME: handle unconnected peers! I.e. delete from advertised_by list and goto next
+                    if rendezvous:
+                        if self.handler.reporter:
+                            self.handler.reporter.add_event("UDPPuncture", "OFWC-RTR:%s,%d,%s,%s" % (rendezvous.address[0],
+                                rendezvous.address[1], rendezvous.id.encode('hex'), peer.id.encode('hex')))
+                        rendezvous.sendto(UDPHandler.FW_CONNECT_REQ + remote)
+
+                data = data[5:]
+            elif data[0] == UDPHandler.KEEP_ALIVE:
+                data = data[1:]
+            else:
+                self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_GARBAGE)
+                return False
+
+        return True
+
+    def readvertise_nat(self):
+        # Flag our own NAT/fw state for inclusion in the next PEX/keep-alive.
+        self.advertise_nat = True
+
+    def remove_advertiser(self, address):
+        # Forget that 'address' ever advertised this peer (no-op if unknown).
+        try:
+            del self.advertised_by[address]
+        except:
+            pass
+
+    def send_pex(self):
+        """Send pending NAT-state and PEX updates in one datagram.
+
+        Returns True when something was sent, False when there was
+        nothing to say.  Trackers never gossip; their queues are dropped.
+        """
+        self.last_advert = time.time()
+
+        message = ""
+        if self.advertise_nat:
+            self.advertise_nat = False
+            message += (UDPHandler.UPDATE_NATFW_STATE +
+                natfilter_to_byte(self.handler.nat_type, self.handler.filter_type) +
+                chr(self.handler.natfw_version))
+
+        if self.tracker:
+            self.pex_add.clear()
+            self.pex_del.clear()
+        else:
+            if len(self.pex_add) > 0:
+                message += self.pex_string(self.pex_add, 1023, True)
+            if len(self.pex_del) > 0:
+                message += self.pex_string(self.pex_del, 1023 - len(message), False)
+        if len(message) > 0:
+            self.sendto(message)
+            return True
+        return False
+
+    def pex_string(self, items, max_size, add):
+        """Serialize queued connections into a PEX_ADD/PEX_DEL message.
+
+        Consumes from 'items' (a deque) up to the entry budget implied by
+        max_size bytes; skips ourselves, trackers, duplicates, and peers
+        whose state doesn't match the message kind.  Returns "" when no
+        entry qualified.
+        """
+        retval = ""
+        num_added = 0
+        added = set()
+        # Entry budget: 12 bytes each for ADD, 4 for DEL (2-byte header).
+        if add:
+            max_size = (max_size - 2) / 12
+        else:
+            max_size = (max_size - 2) / 4
+
+        while len(items) > 0 and max_size > num_added:
+            connection = items.popleft()
+            if DEBUG:
+                debug("- peer %s:%d (%d, %d) state %d" % (connection.address[0], connection.address[1],
+                    connection.nat_type, connection.filter_type, connection.connection_state))
+            if connection != self and (not connection.tracker) and (not connection.address in added) and (
+                    (add and connection.connection_state == UDPConnection.CONNECT_ESTABLISHED) or
+                    ((not add) and connection.connection_state != UDPConnection.CONNECT_ESTABLISHED)):
+                added.add(connection.address)
+                if add:
+                    retval += (connection.id + address_to_string(connection.address) +
+                        natfilter_to_byte(connection.nat_type, connection.filter_type) +
+                        chr(connection.natfw_version))
+                else:
+                    retval += connection.id
+                num_added += 1
+
+        if DEBUG:
+            debug("- created pex string: " + retval.encode('hex'))
+        if num_added == 0:
+            return ""
+        if add:
+            return UDPHandler.PEX_ADD + chr(num_added) + retval
+        else:
+            return UDPHandler.PEX_DEL + chr(num_added) + retval
+
+# Utility functions for often used conversions
+def address_to_string(address):
+    # Pack (ip, port) into 6 bytes: 4-byte network-order IP + 2-byte big-endian port.
+    return socket.inet_aton(address[0]) + chr(address[1] >> 8) + chr(address[1] & 255)
+
+def string_to_address(address):
+    # Inverse of address_to_string: first 4 bytes are the IP, last 2 the port.
+    return socket.inet_ntoa(address[0:4]), (ord(address[4]) << 8) + ord(address[5])
+
+def natfilter_to_byte(nat_type, filter_type):
+    # Encode NAT type (low 2 bits) and filter type (bits 2-3) into one byte.
+    return chr((nat_type & 3) + ((filter_type & 3) << 2))
+
+def byte_to_natfilter(byte):
+    # Inverse of natfilter_to_byte: returns (nat_type, filter_type).
+    return ord(byte) & 3, (ord(byte) >> 2) & 3
+
+def compare_natfw_version(a, b):
+    # True iff version a is "newer" than b on the mod-256 circle (shortest
+    # distance wins, analogous to serial-number arithmetic).
+    return ((a - b + 256) % 256) < ((b - a + 256) % 256)
+
+if __name__ == "__main__":
+    import BaseLib.Core.BitTornado.RawServer as RawServer
+    from threading import Event
+    import thread
+    from traceback import print_exc
+    import os
+
+    def fail(e):
+        print "Fatal error: " + str(e)
+        print_exc()
+
+    def error(e):
+        print "Non-fatal error: " + str(e)
+
+    DEBUG = True
+    def debug(msg):
+        if 'log' in globals():
+            log.write("%.2f: %s\n" % (time.time(), msg))
+            log.flush()
+        print "%.2f: %s" % (time.time(), msg)
+        sys.stdout.flush()
+
+    if len(sys.argv) == 2:
+        log = open("log-%s.txt" % sys.argv[1], "w")
+    else:
+        log = open("log-%d.txt" % os.getpid(), "w")
+
+    rawserver = RawServer.RawServer(Event(),
+                           60.0,
+                           300.0,
+                           False,
+                           failfunc = fail,
+                           errorfunc = error)
+    thread.start_new_thread(rawserver.listen_forever, (None,))
+    if len(sys.argv) < 2:
+        port = 0
+    else:
+        port = int(sys.argv[1])
+    udp_handler = UDPHandler(rawserver, False, port)
+    
+    if sys.argv == "12345":
+        udp_handler.connect_threshold = 0
+
+    print "UDPHandler started, press enter to quit"
+    sys.stdin.readline()
+    udp_handler.shutdown()
+    print "Log left in " + log.name
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/UPnPThread.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/UPnPThread.py
new file mode 100644 (file)
index 0000000..6bc9c60
--- /dev/null
@@ -0,0 +1,114 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+from threading import Event,Thread
+from traceback import print_exc
+
+from BaseLib.Core.BitTornado.natpunch import UPnPWrapper, UPnPError
+
+DEBUG = False
+
+
+class UPnPThread(Thread):
+    """ Thread to run the UPnP code. Moved out of main startup-
+        sequence for performance. As you can see this thread won't
+        exit until the client exits. This is due to a funky problem
+        with UPnP mode 2. That uses Win32/COM API calls to find and
+        talk to the UPnP-enabled firewall. This mechanism apparently
+        requires all calls to be carried out by the same thread.
+        This means we cannot let the final DeletePortMapping(port) 
+        (==UPnPWrapper.close(port)) be done by a different thread,
+        and we have to make this one wait until client shutdown.
+
+        Arno, 2006-11-12
+    """
+
+    def __init__(self,upnp_type,ext_ip,listen_port,error_func,got_ext_ip_func):
+        # upnp_type: 0 = disabled, otherwise selects the UPnPWrapper
+        #   mechanism (mode 1 cannot report the external IP address).
+        # ext_ip: locally guessed external IP, passed to the wrapper.
+        # error_func(upnp_type, listen_port, errcode, ...) reports failures.
+        # got_ext_ip_func(ip) receives the firewall-reported external IP.
+        Thread.__init__(self)
+        self.setDaemon(True)
+        self.setName( "UPnP"+self.getName() )
+        
+        self.upnp_type = upnp_type
+        self.locally_guessed_ext_ip = ext_ip
+        self.listen_port = listen_port
+        self.error_func = error_func
+        self.got_ext_ip_func = got_ext_ip_func 
+        self.shutdownevent = Event()
+
+    def run(self):
+        # Open TCP and UDP mappings for listen_port on the firewall, then
+        # block until shutdown() so that close() runs on this same thread
+        # (see class docstring for the COM thread-affinity requirement).
+        if self.upnp_type > 0:
+            self.upnp_wrap = UPnPWrapper.getInstance()
+            self.upnp_wrap.register(self.locally_guessed_ext_ip)
+
+            # Disabled Gertjan's UPnP logging for m24
+            #from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
+            #reporter = get_reporter_instance()
+
+            if self.upnp_wrap.test(self.upnp_type):
+                # Disabled Gertjan's UPnP logging for m24
+                #reporter.add_event("UPnP", "Init'ed")
+                try:
+                    shownerror=False
+                    # Get external IP address from firewall
+                    if self.upnp_type != 1: # Mode 1 doesn't support getting the IP address
+                        ret = self.upnp_wrap.get_ext_ip()
+                        if ret == None:
+                            shownerror=True
+                            self.error_func(self.upnp_type,self.listen_port,0)
+                        else:
+                            self.got_ext_ip_func(ret)
+
+                    # Do open_port irrespective of whether get_ext_ip()
+                    # succeeds, UPnP mode 1 doesn't support get_ext_ip()
+                    # get_ext_ip() must be done first to ensure we have the 
+                    # right IP ASAP.
+                    
+                    # Open TCP listen port on firewall
+                    ret = self.upnp_wrap.open(self.listen_port,iproto='TCP')
+                    if ret == False and not shownerror:
+                        self.error_func(self.upnp_type,self.listen_port,0)
+
+                    # Open UDP listen port on firewall
+                    ret = self.upnp_wrap.open(self.listen_port,iproto='UDP')
+                    if ret == False and not shownerror:
+                        self.error_func(self.upnp_type,self.listen_port,0,listenproto='UDP')
+                    # Disabled Gertjan's UPnP logging for m24
+                    #reporter.add_event("UPnP", "UDP:%d" % ret)
+                
+                except UPnPError,e:
+                    self.error_func(self.upnp_type,self.listen_port,1,e)
+            else:
+                # Disabled Gertjan's UPnP logging for m24
+                #reporter.add_event("UPnP", "Init failed")
+                if self.upnp_type != 3:
+                    self.error_func(self.upnp_type,self.listen_port,2)
+                elif DEBUG:
+                    print >>sys.stderr,"upnp: thread: Initialization failed, but didn't report error because UPnP mode 3 is now enabled by default"
+
+        # Now that the firewall is hopefully open, activate other services
+        # here. For Buddycast we don't have an explicit notification that it
+        # can go ahead. It will start 15 seconds after client startup, which
+        # is assumed to be sufficient for UPnP to open the firewall.
+        ## dmh.start_active()
+
+        if self.upnp_type > 0:
+            if DEBUG:
+                print >>sys.stderr,"upnp: thread: Waiting till shutdown"
+            self.shutdownevent.wait()
+            # Don't write to sys.stderr, that sometimes doesn't seem to exist
+            # any more?! Python garbage collection funkiness of module sys import?
+            # The GUI is definitely gone, so don't use self.error_func()
+            if DEBUG:
+                print "upnp: thread: Shutting down, closing port on firewall"
+            try:
+                self.upnp_wrap.close(self.listen_port,iproto='TCP')
+                self.upnp_wrap.close(self.listen_port,iproto='UDP')
+            except Exception,e:
+                print "upnp: thread: close port at shutdown threw",e
+                print_exc()
+
+        # End of UPnPThread
+
+    def shutdown(self):
+        # Wake run() so it can delete the port mappings and exit.
+        self.shutdownevent.set()
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/__init__.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/guessip.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/guessip.py
new file mode 100644 (file)
index 0000000..fea807d
--- /dev/null
@@ -0,0 +1,161 @@
+# Written by Arno Bakker, Jan David Mol
+# see LICENSE.txt for license information
+#
+# Code to guess the IP address of a host by which it is reachable on the
+# Internet, given the host is not behind a firewall or NAT.
+#
+# For all OSes (Linux,Windows,MacOS X) we first look at the routing table to
+# see what the gateway for the default route is. We then try to establish
+# our IP address that's on the same network as the gateway. That is our
+# external/WAN address.
+#
+# This code does not support IPv6, that is, IPv6 address are ignored.
+#
+# Arno, Jan David, 2006-06-30
+#
+import os
+import sys
+import socket
+from traceback import print_exc
+
+DEBUG = False
+
+def get_my_wan_ip():
+    """ Guess this host's WAN-facing IPv4 address by dispatching to the
+        platform-specific implementation. Returns the address as a string,
+        or None on any error (errors are printed, never raised). """
+    try:
+        if sys.platform == 'win32':
+            return get_my_wan_ip_win32()
+        elif sys.platform == 'darwin':
+            return get_my_wan_ip_darwin()
+        else:
+            return get_my_wan_ip_linux()
+    except:
+        print_exc()
+        return None
+
+def get_my_wan_ip_win32():
+    """ Windows: read the default gateway from "netstat -nr", then scan
+        "ipconfig /all" for the adapter whose default gateway matches it;
+        that adapter's last-seen IP address is our WAN address.
+        Returns None if it cannot be determined.
+        NOTE(review): parses localized English command output — presumably
+        breaks on non-English Windows; confirm. """
+    
+    routecmd = "netstat -nr"
+    ifcmd = "ipconfig /all"
+
+    gwip = None
+    for line in os.popen(routecmd).readlines():
+        words = line.split()
+        if len(words) >= 3:
+            if words[0] == 'Default' and words[1] == 'Gateway:':
+                gwip = words[-1]
+                if DEBUG:
+                    print "netstat found default gateway",gwip
+                break
+
+    myip = None
+    mywanip = None
+    ingw = 0
+    for line in os.popen(ifcmd).readlines():
+        words = line.split()
+        if len(words) >= 3:
+            if (words[0] == 'IP' and words[1] == 'Address.') or (words[1] == 'IP' and words[2] == 'Address.'): # Autoconfiguration entry
+                try:
+                    # getaddrinfo with AF_INET doubles as an "is this IPv4?" test
+                    socket.getaddrinfo(words[-1],None,socket.AF_INET)
+                    myip = words[-1]
+                    if DEBUG:
+                        print "ipconfig found IP address",myip
+                except socket.gaierror:
+                    if DEBUG:
+                        print "ipconfig ignoring IPv6 address",words[-1]
+                    pass
+            elif words[0] == 'Default' and words[1] == 'Gateway':
+                if words[-1] == ':':
+                    if DEBUG:
+                        print "ipconfig ignoring empty default gateway"
+                    pass
+                else:
+                    ingw = 1
+        if ingw >= 1:
+            # Assumption: the "Default Gateway" list can only have 2 entries,
+            # one for IPv4, one for IPv6. Since we don't know the order, look
+            # at both.
+            gwip2 = None
+            ingw = (ingw + 1) % 3
+            try:
+                socket.getaddrinfo(words[-1],None,socket.AF_INET)
+                gwip2 = words[-1]
+                if DEBUG:
+                    print "ipconfig found default gateway",gwip2
+            except socket.gaierror:
+                if DEBUG:
+                    print "ipconfig ignoring IPv6 default gateway",words[-1]
+                pass
+            if gwip == gwip2:
+                mywanip = myip
+                break
+    return mywanip
+
+
+def get_my_wan_ip_linux():
+    routecmd = '/bin/netstat -nr'         
+    ifcmd = '/sbin/ifconfig -a'
+
+    gwif = None
+    gwip = None
+    for line in os.popen(routecmd).readlines():
+        words = line.split()
+        if len(words) >= 3:
+            if words[0] == '0.0.0.0':
+                gwif = words[-1]
+                gwip = words[1]
+                if DEBUG:
+                    print "netstat found default gateway",gwip
+                break
+
+    mywanip = None
+    for line in os.popen(ifcmd).readlines():
+        words = line.split()
+        if len(words) >= 2:
+            if words[0] == gwif:
+                flag = True
+            elif words[0] == 'inet':
+                words2 = words[1].split(':') # "inet addr:130.37.192.1" line
+                if len(words2) == 2:
+                    mywanip = words2[1]
+                    break
+                else:
+                    flag = False
+            else:
+                flag = False
+    return mywanip
+
+
+def get_my_wan_ip_darwin():
+    """ Mac OS X: find the default route's interface in the "netstat -nr"
+        routing table, then return that interface's IPv4 address from
+        "ifconfig -a" output. Returns None if it cannot be determined. """
+    routecmd = '/usr/sbin/netstat -nr'         
+    ifcmd = '/sbin/ifconfig -a'
+
+    gwif = None
+    gwip = None
+    for line in os.popen(routecmd).readlines():
+        words = line.split()
+        if len(words) >= 3:
+            if words[0] == 'default':
+                gwif = words[-1]
+                gwip = words[1]
+                if DEBUG:
+                    print "netstat found default gateway",gwip
+                break
+
+    mywanip = None
+    flag = False
+    for line in os.popen(ifcmd).readlines():
+        words = line.split()
+        if len(words) >= 2:
+            # Interface header lines look like "en0: flags=...", so the
+            # interface name carries a trailing colon here.
+            if words[0] == "%s:" % gwif:
+                flag = True
+            elif words[0] == 'inet' and flag:
+                mywanip = words[1] # "inet 130.37.192.1" line
+                break
+    return mywanip
+
+
+
+if __name__ == "__main__":
+    # Manual test: print the guessed WAN IP with debug tracing enabled.
+    DEBUG = True
+    ip = get_my_wan_ip()
+    print "External IP address is",ip
diff --git a/instrumentation/next-share/BaseLib/Core/NATFirewall/upnp.py b/instrumentation/next-share/BaseLib/Core/NATFirewall/upnp.py
new file mode 100644 (file)
index 0000000..5eb23aa
--- /dev/null
@@ -0,0 +1,300 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# Platform independent UPnP client
+#
+# References: 
+#   - UPnP Device Architecture 1.0, www.upnp.org
+#   - From Internet Gateway Device IGD V1.0:
+#     * WANIPConnection:1 Service Template Version 1.01
+#
+
+import sys
+import socket
+from cStringIO import StringIO
+import urllib
+import urllib2
+from urlparse import urlparse
+import xml.sax as sax
+from xml.sax.handler import ContentHandler
+from traceback import print_exc
+
+UPNP_WANTED_SERVICETYPES = ['urn:schemas-upnp-org:service:WANIPConnection:1','urn:schemas-upnp-org:service:WANPPPConnection:1']
+
+DEBUG = False
+
+class UPnPPlatformIndependent:
+    """ Pure-Python UPnP IGD client: discovers Internet Gateway Devices via
+        SSDP multicast, then talks SOAP to their WANIPConnection /
+        WANPPPConnection services to add/remove port mappings and query the
+        external IP address. """
+
+    def __init__(self):
+        # Maps location URL to a dict containing servicetype and control URL
+        self.services = {}
+        self.lastdiscovertime = 0
+
+    def discover(self):
+        """ Attempts to discover any UPnP services for a few seconds.
+            If any are found, they are stored in self.services 
+        """
+        #if self.lastdiscovertime != 0 and self.lastdiscovertime + DISCOVER_WAIT < time.time():
+        #    if DEBUG:
+        #        print >> sys.stderr,"upnp: discover: Already did a discovery recently"
+        #    return
+
+        # SSDP M-SEARCH over UDP multicast (UPnP Device Architecture 1.0)
+        maxwait = 4
+        req = 'M-SEARCH * HTTP/1.1\r\n'
+        req += 'HOST: 239.255.255.250:1900\r\n'
+        req += 'MAN: "ssdp:discover"\r\n'  # double quotes obligatory
+        req += 'MX: '+str(maxwait)+'\r\n'
+        req += 'ST: ssdp:all\r\n'          # no double quotes
+        req += '\r\n\r\n'
+
+        try:
+            self.s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
+            self.s.settimeout(maxwait+2.0)
+            self.s.sendto(req,('239.255.255.250',1900))
+            while True: # exited by socket.timeout exception only
+                if DEBUG:
+                    print >> sys.stderr,"upnp: discover: Wait 4 reply"
+                (rep,sender) = self.s.recvfrom(1024)
+
+                if DEBUG:
+                    print >> sys.stderr,"upnp: discover: Got reply from",sender
+                    #print >> sys.stderr,"upnp: discover: Saying:",rep
+                repio = StringIO(rep)
+                while True:
+                    line = repio.readline()
+                    #print >> sys.stderr,"LINE",line
+                    if line == '':
+                        break
+                    if line[-2:] == '\r\n':
+                        line = line[:-2]
+                    idx = line.find(':')
+                    if idx == -1:
+                        continue
+                    key = line[:idx]
+                    key = key.lower()
+                    #print >> sys.stderr,"key",key
+                    if key.startswith('location'):
+                        # Careful: MS Internet Connection Sharing returns "Location:http://bla", so no space
+                        location = line[idx+1:].strip() 
+                        desc = self.get_description(location)
+                        self.services[location] = self.parse_services(desc)
+
+        except:
+            # The socket timeout is the normal way out of the recv loop;
+            # any other error is treated as "no (more) services found".
+            if DEBUG:
+                print_exc()
+
+    def found_wanted_services(self):
+        """ Return True if WANIPConnection or WANPPPConnection were found by discover() """
+        for location in self.services:
+            for servicetype in UPNP_WANTED_SERVICETYPES:
+                if self.services[location]['servicetype'] == servicetype:
+                    return True
+        return False
+        
+
+    def add_port_map(self,internalip,port,iproto='TCP'):
+        """ Sends an AddPortMapping request to a relevant IGD found by discover()
+            
+            Raises UPnPError in case the IGD returned an error reply,
+            Raises Exception in case of any other error
+        """
+        srch = self.do_soap_request('AddPortMapping',port,iproto=iproto,internalip=internalip)
+        if srch is not None:
+            se = srch.get_error()
+            if se is not None:
+                raise se
+
+    def del_port_map(self,port,iproto='TCP'):
+        """ Sends a DeletePortMapping request to a relevant IGD found by discover()
+
+            Raises UPnPError in case the IGD returned an error reply,
+            Raises Exception in case of any other error
+        """
+        srch = self.do_soap_request('DeletePortMapping',port,iproto=iproto)
+        if srch is not None:
+            se = srch.get_error()
+            if se is not None:
+                raise se
+
+    def get_ext_ip(self):
+        """ Sends a GetExternalIPAddress request to a relevant IGD found by discover()
+
+            Raises UPnPError in case the IGD returned an error reply,
+            Raises Exception in case of any other error
+        """
+        srch = self.do_soap_request('GetExternalIPAddress')
+        if srch is not None:
+            se = srch.get_error()
+            if se is not None:
+                raise se
+            else:
+                return srch.get_ext_ip()
+
+    #
+    # Internal methods
+    #
+    def do_soap_request(self,methodname,port=-1,iproto='TCP',internalip=None):
+        # Sends the SOAP request to the first discovered wanted service and
+        # returns its parsed SOAPResponseContentHandler, or None if no wanted
+        # service was discovered. NOTE(review): despite the callers'
+        # docstrings saying "all relevant IGDs", the loop returns after the
+        # first match.
+        for location in self.services:
+            for servicetype in UPNP_WANTED_SERVICETYPES:
+                if self.services[location]['servicetype'] == servicetype:
+                    o = urlparse(location)
+                    endpoint = o[0]+'://'+o[1]+self.services[location]['controlurl']
+                    # test: provoke error
+                    #endpoint = o[0]+'://'+o[1]+'/bla'+self.services[location]['controlurl']
+                    if DEBUG:
+                        print >> sys.stderr,"upnp: "+methodname+": Talking to endpoint ",endpoint
+                    (headers,body) = self.create_soap_request(methodname,port,iproto=iproto,internalip=internalip)
+                    #print body
+                    try:
+                        req = urllib2.Request(url=endpoint,data=body,headers=headers)
+                        f = urllib2.urlopen(req)
+                        resp = f.read()
+                    except urllib2.HTTPError,e:
+                        # SOAP faults arrive as HTTP error statuses; the body
+                        # still carries the UPnP error code/description.
+                        resp = e.fp.read()
+                        if DEBUG:
+                            print_exc()
+                    srch = SOAPResponseContentHandler(methodname)
+                    if DEBUG:
+                        print >> sys.stderr,"upnp: "+methodname+": response is",resp
+                    try:
+                        srch.parse(resp)
+                    except sax.SAXParseException,e:
+                        # Our test linux-IGD appears to return an incomplete
+                        # SOAP error reply. Handle this.
+                        se = srch.get_error()
+                        if se is None:
+                            raise e
+                        # otherwise we were able to parse the error reply
+                    return srch
+
+    def get_description(self,url):
+        # Fetch the device description XML advertised in the SSDP reply.
+        if DEBUG:
+            print >> sys.stderr,"upnp: discover: Reading description from",url
+        f = urllib.urlopen(url)
+        data = f.read()
+        #print >> sys.stderr,"upnp: description: Got",data
+        return data
+
+    def parse_services(self,desc):
+        # Parse a description document into a {servicetype, controlurl} dict.
+        dch = DescriptionContentHandler()
+        dch.parse(desc)
+        return dch.services
+
+    def create_soap_request(self,methodname,port=-1,iproto="TCP",internalip=None):
+        # Builds the (headers, body) pair for a WANIPConnection:1 SOAP call.
+        # For AddPortMapping the internal and external port are the same.
+        headers = {}
+        #headers['Host'] = endpoint
+        #headers['Accept-Encoding'] = 'identity'
+        headers['Content-type'] = 'text/xml; charset="utf-8"'
+        headers['SOAPAction'] = '"urn:schemas-upnp-org:service:WANIPConnection:1#'+methodname+'"'
+        headers['User-Agent'] = 'Mozilla/4.0 (compatible; UPnP/1.0; Windows 9x)'
+
+        body = ''
+        body += '<?xml version="1.0"?>'
+        body += '<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"'
+        body += ' SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
+        body += '<SOAP-ENV:Body><m:'+methodname+' xmlns:m="urn:schemas-upnp-org:service:WANIPConnection:1">'
+        if methodname == 'AddPortMapping':
+            externalport = port
+            internalport = port
+            internalclient = internalip
+            body += '<NewRemoteHost xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="string"></NewRemoteHost>'
+            body += '<NewExternalPort xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="ui2">'+str(externalport)+'</NewExternalPort>'
+            body += '<NewProtocol xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="string">'+iproto+'</NewProtocol>'
+            body += '<NewInternalPort xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="ui2">'+str(internalport)+'</NewInternalPort>'
+            body += '<NewInternalClient xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="string">'+internalclient+'</NewInternalClient>'
+            body += '<NewEnabled xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="boolean">1</NewEnabled>'
+            body += '<NewPortMappingDescription xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="string">Insert description here</NewPortMappingDescription>'
+            body += '<NewLeaseDuration xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="ui4">0</NewLeaseDuration>'
+        elif methodname == 'DeletePortMapping':
+            externalport = port
+            body += '<NewRemoteHost xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="string"></NewRemoteHost>'
+            body += '<NewExternalPort xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="ui2">'+str(externalport)+'</NewExternalPort>'
+            body += '<NewProtocol xmlns:dt="urn:schemas-microsoft-com:datatypes" dt:dt="string">'+iproto+'</NewProtocol>'
+        body += '</m:'+methodname+'></SOAP-ENV:Body>'
+        body += '</SOAP-ENV:Envelope>'
+        return (headers,body)
+
+
+class UPnPError(Exception):
+    """ Raised when an IGD returns a UPnP error reply; carries the numeric
+        errorcode and the human-readable errordesc from the SOAP fault. """
+    def __init__(self,errorcode,errordesc):
+        Exception.__init__(self)
+        self.errorcode = errorcode
+        self.errordesc = errordesc
+
+    def __str__(self):
+        return 'UPnP Error %d: %s' % (self.errorcode, self.errordesc)
+
+
+#
+# Internal classes
+#
+
+class DescriptionContentHandler(ContentHandler):
+    """ SAX handler that collects the serviceType and controlURL element
+        values of an IGD description document into self.services.
+        NOTE(review): only the *last* serviceType/controlURL seen is kept,
+        and characters() may be invoked with partial text chunks by the SAX
+        parser — confirm this holds for the descriptions encountered. """
+
+    def __init__(self):
+        ContentHandler.__init__(self)
+        self.services = {}
+
+    def parse(self,desc):
+        # desc: the description document as an XML string.
+        sax.parseString(desc,self)
+
+    def endDocument(self):
+        if DEBUG:
+            print >> sys.stderr,"upnp: discover: Services found",self.services
+
+    def endElement(self, name):
+        #print >> sys.stderr,"endElement",name
+        n = name.lower()
+        if n == 'servicetype':
+            self.services['servicetype'] = self.content
+        elif n == 'controlurl':
+            self.services['controlurl'] = self.content
+            
+    def characters(self, content):
+        # print >> sys.stderr,"content",content
+        self.content = content
+
+
+class SOAPResponseContentHandler(ContentHandler):
+    """ SAX handler for a SOAP reply to the given method: captures the
+        NewExternalIPAddress value (for GetExternalIPAddress) and/or a UPnP
+        errorCode/errorDescription pair. """
+
+    def __init__(self,methodname):
+        ContentHandler.__init__(self)
+        self.methodname = methodname
+        self.ip = None  
+        self.errorset = False
+        self.errorcode = 0
+        self.errordesc = 'No error'
+        self.content = None
+
+    def parse(self,resp):
+        # resp: the SOAP reply as an XML string.
+        sax.parseString(resp,self)
+
+    def get_ext_ip(self):
+        # None unless a GetExternalIPAddress reply was parsed successfully.
+        return self.ip
+
+    def get_error(self):
+        # Returns a UPnPError if the reply carried an error, else None.
+        if self.errorset:
+            return UPnPError(self.errorcode,self.methodname+": "+self.errordesc)
+        else:
+            return None
+
+    def endElement(self, name):
+        n = name.lower()
+        if self.methodname == 'GetExternalIPAddress' and n.endswith('newexternalipaddress'):
+            self.ip = self.content
+        elif n== 'errorcode':
+            self.errorset = True
+            self.errorcode = int(self.content)
+        elif n == 'errordescription':
+            self.errorset = True
+            self.errordesc = self.content
+            
+    def characters(self, content):
+        #print >>sys.stderr,"upnp: GOT CHARACTERS",content
+        self.content = content
+
+if __name__ == '__main__':
+    # Manual test: discover IGDs on the LAN and query the external IP.
+    u = UPnPPlatformIndependent()
+    u.discover()
+    print >> sys.stderr,"IGD say my external IP address is",u.get_ext_ip()
+    #u.add_port_map('130.37.193.64',6881)
diff --git a/instrumentation/next-share/BaseLib/Core/Overlay/MetadataHandler.py b/instrumentation/next-share/BaseLib/Core/Overlay/MetadataHandler.py
new file mode 100644 (file)
index 0000000..b144a0b
--- /dev/null
@@ -0,0 +1,633 @@
+# Written by Jie Yang, Arno Bakker
+# see LICENSE.txt for license information
+import sys
+import os
+from BaseLib.Core.Utilities.Crypto import sha
+from time import time, ctime
+from traceback import print_exc, print_stack
+from sets import Set
+from threading import currentThread
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.Utilities.utilities import isValidInfohash, show_permid_short, sort_dictlist, bin2str, get_collected_torrent_filename
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FOURTH, OLPROTO_VER_ELEVENTH 
+from BaseLib.TrackerChecking.TorrentChecking import TorrentChecking
+from BaseLib.Core.osutils import getfreespace,get_readable_torrent_name
+from BaseLib.Core.CacheDB.CacheDBHandler import BarterCastDBHandler
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import PopularityDBHandler
+from BaseLib.Core.TorrentDef import TorrentDef
+
DEBUG = False

# When True, each torrent sent/received is also accounted in the BarterCast
# reputation DB (see olthread_bartercast_torrentexchange below).
BARTERCAST_TORRENTS = False

# Python no recursive imports?
# from overlayswarm import overlay_infohash
overlay_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

# Refuse to collect .torrent files larger than this (bytes).
Max_Torrent_Size = 2*1024*1024    # 2MB torrent = 6GB ~ 250GB content
+
+
+class MetadataHandler:
+    
+    __single = None
+    
    def __init__(self):
        """Singleton constructor: use getInstance(), never call directly."""
        if MetadataHandler.__single:
            raise RuntimeError, "MetadataHandler is singleton"
        MetadataHandler.__single = self
        # Negative sentinel: the real count is lazily fetched from the
        # torrent DB the first time check_overflow() runs.
        self.num_torrents = -100
        # Rough average size of a .torrent (25 KB), used for free-disk
        # estimates before a download completes.
        self.avg_torrent_size = 25*(2**10)
        self.initialized = False
        self.registered = False
        self.popularity_db = PopularityDBHandler.getInstance()


    def getInstance(*args, **kw):
        # Return the process-wide MetadataHandler, creating it on first use.
        if MetadataHandler.__single is None:
            MetadataHandler(*args, **kw)
        return MetadataHandler.__single
    getInstance = staticmethod(getInstance)
+        
    def register(self, overlay_bridge, dlhelper, launchmany, config):
        """Wire the handler into the running session; must be called once at
        startup before any metadata messages are handled.

        overlay_bridge -- used to connect/send to peers and schedule tasks
        dlhelper       -- download helper notified when a torrent arrives
        launchmany     -- provides the torrent DB and activity notifications
        config         -- session config dict (collecting dir, rate, limits)
        """
        self.registered = True
        self.overlay_bridge = overlay_bridge
        self.dlhelper = dlhelper
        self.launchmany = launchmany
        self.torrent_db = launchmany.torrent_db 
        self.config = config
        # 'stop_collecting_threshold' is expressed in MB; keep bytes here.
        self.min_free_space = self.config['stop_collecting_threshold']*(2**20)
        #if self.min_free_space <= 0:
        #    self.min_free_space = 200*(2**20)    # at least 200 MB left on disk
        self.config_dir = os.path.abspath(self.config['state_dir'])
        self.torrent_dir = os.path.abspath(self.config['torrent_collecting_dir'])
        print >>sys.stderr,"metadata: collect dir is",self.torrent_dir 
        assert os.path.isdir(self.torrent_dir)
        self.free_space = self.get_free_space()
        print >> sys.stderr, "Available space for database and collecting torrents: %d MB," % (self.free_space/(2**20)), "Min free space", self.min_free_space/(2**20), "MB"
        self.max_num_torrents = self.init_max_num_torrents = int(self.config['torrent_collecting_max_torrents'])
        # 'torrent_collecting_rate' is in KB/s; store bytes/s internally.
        self.upload_rate = 1024 * int(self.config['torrent_collecting_rate'])   # 5KB/s
        self.num_collected_torrents = 0
        self.recently_collected_torrents = [] # list of infohashes
        self.upload_queue = []                # pending GET_METADATA replies
        self.requested_torrents = Set()       # infohashes we asked peers for
        self.next_upload_time = 0             # rate-control gate (epoch secs)
        self.initialized = True
        self.rquerytorrenthandler = None
        # Prune the collection shortly after startup if it already overflows.
        self.delayed_check_overflow(5)

    def register2(self,rquerytorrenthandler):
        # Optional second registration step for the remote-query extension.
        self.rquerytorrenthandler = rquerytorrenthandler
+
+
    def handleMessage(self,permid,selversion,message):
        """Dispatch an incoming overlay message by its one-byte message ID.

        Returns True to keep the overlay connection open, False to close it
        (unknown message type).
        """
        t = message[0]
        
        if t == GET_METADATA:   # the other peer requests a torrent
            if DEBUG:
                print >> sys.stderr,"metadata: Got GET_METADATA",len(message),show_permid_short(permid)
            return self.send_metadata(permid, message, selversion)
        elif t == METADATA:     # the other peer sends me a torrent
            if DEBUG:
                print >> sys.stderr,"metadata: Got METADATA",len(message),show_permid_short(permid),selversion, currentThread().getName()
            return self.got_metadata(permid, message, selversion)
        else:
            if DEBUG:
                print >> sys.stderr,"metadata: UNKNOWN OVERLAY MESSAGE", ord(t)
            return False
+
    def send_metadata_request(self, permid, infohash, selversion=-1, caller="BC"):
        """Ask peer `permid` for the torrent file identified by `infohash`.

        selversion -- agreed overlay-protocol version; -1 means we are not
                      currently connected and must connect first.
        caller     -- origin of the request ("BC" BuddyCast, "dlhelp" ...).
        Returns True unless the infohash is invalid or sending failed.
        """
        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
        if DEBUG:
            print >> sys.stderr,"metadata: Connect to send GET_METADATA to",show_permid_short(permid)
        if not isValidInfohash(infohash):
            return False
        
        filename,metadata = self.torrent_exists(infohash)
        if filename is not None:    # torrent already exists on disk
            if DEBUG:
                print >> sys.stderr,"metadata: send_meta_req: Already on disk??!"
            self.notify_torrent_is_in(infohash, metadata, filename)
            return True
        
        if caller == "dlhelp":
            self.requested_torrents.add(infohash)
        
        # Refuse to collect when disk space is (estimated to be) too low;
        # refresh the cached figure before giving up for real.
        if self.min_free_space != 0 and (self.free_space - self.avg_torrent_size < self.min_free_space):   # no space to collect
            self.free_space = self.get_free_space()
            if self.free_space - self.avg_torrent_size < self.min_free_space:
                self.warn_disk_full()
                return True

        try:
            # Optimization: don't connect if we're connected, although it won't 
            # do any harm.
            if selversion == -1: # not currently connected
                self.overlay_bridge.connect(permid,lambda e,d,p,s:self.get_metadata_connect_callback(e,d,p,s,infohash))
            else:
                self.get_metadata_connect_callback(None,None,permid,selversion,infohash)
            
        except:
            print_exc()
            return False
        return True
+
+    def torrent_exists(self, infohash):
+        # if the torrent is already on disk, put it in db
+        
+        file_name = get_collected_torrent_filename(infohash)
+        torrent_path = os.path.join(self.torrent_dir, file_name)
+        if not os.path.exists(torrent_path):
+            return None,None
+        else:
+            metadata = self.read_torrent(torrent_path)
+            if not self.valid_metadata(infohash, metadata):
+                return None
+            self.addTorrentToDB(torrent_path, infohash, metadata, source="BC", extra_info={})
+            return file_name, metadata
+
    def get_metadata_connect_callback(self,exc,dns,permid,selversion,infohash):
        """Connection-attempt callback: on success (exc is None), bencode the
        infohash and send a GET_METADATA message; record the request so the
        eventual METADATA reply is accepted by got_metadata()."""
        if exc is None:
            if DEBUG:
                print >> sys.stderr,"metadata: Sending GET_METADATA to",show_permid_short(permid)
            ## Create metadata_request according to protocol version
            try:
                metadata_request = bencode(infohash)
                self.overlay_bridge.send(permid, GET_METADATA + metadata_request,self.get_metadata_send_callback)
                self.requested_torrents.add(infohash)
            except:
                print_exc()
        elif DEBUG:
            print >> sys.stderr,"metadata: GET_METADATA: error connecting to",show_permid_short(permid)

    def get_metadata_send_callback(self,exc,permid):
        # Send-completion callback; failures are only logged (best effort).
        if exc is not None:
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: error sending to",show_permid_short(permid),exc
            pass
        else:
            pass
+        
    def send_metadata(self, permid, message, selversion):
        """Handle an incoming GET_METADATA request.

        Looks the requested infohash up in the torrent DB; if we have a live,
        existing .torrent file, queue an upload task (actual sending is rate
        limited via checking_upload_queue).  Returns False only on malformed
        requests; "I don't have it" keeps the connection open (True).
        """
        try:
            infohash = bdecode(message[1:])
        except:
            print_exc()
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: error becoding"
            return False
        if not isValidInfohash(infohash):
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: invalid hash"
            return False

        # TODO:
        res = self.torrent_db.getOne(('torrent_file_name', 'status_id'), infohash=bin2str(infohash))
        if not res:
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: not in database", infohash
            return True    # don't close connection because I don't have the torrent
        torrent_file_name, status_id = res
        if status_id == self.torrent_db._getStatusID('dead'):
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: Torrent was dead"
            return True
        if not torrent_file_name:
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: no torrent file name"
            return True
        torrent_path = os.path.join(self.torrent_dir, torrent_file_name)
        if not os.path.isfile(torrent_path):
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: not existing", res, torrent_path
            return True
        
        # Queue the upload; send immediately only if the rate-control gate
        # (next_upload_time) has already passed.
        task = {'permid':permid, 'infohash':infohash, 'torrent_path':torrent_path, 'selversion':selversion}
        self.upload_queue.append(task)
        if int(time()) >= self.next_upload_time:
            self.checking_upload_queue()
        
        return True
+
    def read_and_send_metadata(self, permid, infohash, torrent_path, selversion):
        """Read a .torrent from disk and send it to `permid` as METADATA.

        Private torrents are never sent.  For new-enough peers the torrent
        may be sent as a compact P2P-URL instead of the full file, and
        tracker-status fields (seeders/leechers/last check) are appended.
        Returns the number of bytes sent, or 0 if nothing was sent.
        """
        torrent_data = self.read_torrent(torrent_path)
        if torrent_data:
            # Arno: Don't send private torrents
            try:
                metainfo = bdecode(torrent_data)
                if 'info' in metainfo and 'private' in metainfo['info'] and metainfo['info']['private']:
                    if DEBUG:
                        print >> sys.stderr,"metadata: Not sending torrent", `torrent_path`,"because it is private"
                    return 0
            except:
                print_exc()
                return 0
            

            if DEBUG:
                print >> sys.stderr,"metadata: sending torrent", `torrent_path`, len(torrent_data)
                
            torrent = {}
            torrent['torrent_hash'] = infohash
            # P2PURLs: If URL compat then send URL
            tdef = TorrentDef.load_from_dict(metainfo)
            if selversion >= OLPROTO_VER_ELEVENTH and tdef.get_url_compat():
                torrent['metatype'] = URL_MIME_TYPE
                torrent['metadata'] = tdef.get_url()
            else:
                torrent['metatype'] = TSTREAM_MIME_TYPE
                torrent['metadata'] = torrent_data
                
            if selversion >= OLPROTO_VER_FOURTH:
                data = self.torrent_db.getTorrent(infohash)
                if data is None:
                    # DB inconsistency
                    return 0
                nleechers = data.get('leecher', -1)
                nseeders = data.get('seeder', -1)
                last_check_ago = int(time()) - data.get('last_check_time', 0)    # relative time
                if last_check_ago < 0:
                    last_check_ago = 0
                status = data.get('status', 'unknown')
                
                torrent.update({'leecher':nleechers,
                                'seeder':nseeders,
                                'last_check_time':last_check_ago,
                                'status':status})


            return self.do_send_metadata(permid, torrent, selversion)
        else:    # deleted before sending it
            # File vanished (or was oversized): drop the stale DB entry.
            self.torrent_db.deleteTorrent(infohash, delete_file=True, commit=True)
            if DEBUG:
                print >> sys.stderr,"metadata: GET_METADATA: no torrent data to send"
            return 0
+
    def do_send_metadata(self, permid, torrent, selversion):
        """Bencode the prepared `torrent` dict and send it as a METADATA
        message; returns the message payload length (used for rate control)."""
        metadata_request = bencode(torrent)
        if DEBUG:
            print >> sys.stderr,"metadata: send metadata", len(metadata_request)
        ## Optimization: we know we're currently connected
        self.overlay_bridge.send(permid,METADATA + metadata_request,self.metadata_send_callback)
        
        # BarterCast: add bytes of torrent to BarterCastDB
        # Save exchanged KBs in BarterCastDB
        if permid != None and BARTERCAST_TORRENTS:
            self.overlay_bridge.add_task(lambda:self.olthread_bartercast_torrentexchange(permid, 'uploaded'), 0)
        
        return len(metadata_request)
     
    def olthread_bartercast_torrentexchange(self, permid, up_or_down):
        """Account an (estimated average-sized) torrent exchange with `permid`
        in the BarterCast DB; `up_or_down` is 'uploaded' or 'downloaded'.
        Must run on the overlay thread (scheduled via overlay_bridge)."""
        if up_or_down != 'uploaded' and up_or_down != 'downloaded':
            return
        
        bartercastdb = BarterCastDBHandler.getInstance()
        
        # The exact torrent size is not tracked here; use the average.
        torrent_kb = float(self.avg_torrent_size) / 1024
        name = bartercastdb.getName(permid)
        my_permid = bartercastdb.my_permid

        if DEBUG:
            print >> sys.stderr, "bartercast: Torrent (%d KB) %s to/from peer %s" % (torrent_kb, up_or_down, `name`)

        if torrent_kb > 0:
            bartercastdb.incrementItem((my_permid, permid), up_or_down, torrent_kb)
+                
+
    def metadata_send_callback(self,exc,permid):
        # METADATA send-completion callback; failures are only logged.
        if exc is not None:
            if DEBUG:
                print >> sys.stderr,"metadata: METADATA: error sending to",show_permid_short(permid),exc
            pass

    def read_torrent(self, torrent_path):
        """Read a .torrent file and return its raw bytes, or None on error
        or when the file exceeds Max_Torrent_Size.

        NOTE(review): the size check happens after the whole file is read, so
        an oversized file is still loaded into memory once — TODO consider
        checking os.path.getsize() first.
        """
        try:
            f = open(torrent_path, "rb")
            torrent_data = f.read()
            f.close()
            torrent_size = len(torrent_data)
            if DEBUG:
                print >> sys.stderr,"metadata: read torrent", `torrent_path`, torrent_size
            if torrent_size > Max_Torrent_Size:
                return None
            return torrent_data
        except:
            print_exc()
            return None
+
+
    def addTorrentToDB(self, filename, torrent_hash, metadata, source='BC', extra_info={}, hack=False):
        """ Arno: no need to delegate to olbridge, this is already run by OverlayThread

        Register a collected .torrent file in the torrent DB, notify the GUI,
        and remember the infohash in the recently-collected queue.

        filename     -- path of the stored .torrent file
        torrent_hash -- its infohash
        metadata     -- raw torrent bytes (unused here; file is re-parsed)
        extra_info   -- optional tracker stats forwarded into the DB record
        """
        # 03/02/10 Boudewijn: addExternalTorrent now requires a
        # torrentdef, consequently we provide the filename through the
        # extra_info dictionary
        torrentdef = TorrentDef.load(filename)
        if not 'filename' in extra_info:
            extra_info['filename'] = filename
        torrent = self.torrent_db.addExternalTorrent(torrentdef, source, extra_info)
        if torrent is None:
            return

        # Arno, 2008-10-20: XXX torrents are filtered out in the final display stage
        self.launchmany.set_activity(NTFY_ACT_GOT_METADATA,unicode('"'+torrent['name']+'"'),torrent['category'])

        if self.initialized:
            self.num_torrents += 1 # for free disk limitation
        
            # Without sender-supplied stats, probe the tracker ourselves.
            if not extra_info:
                self.refreshTrackerStatus(torrent)
            
            # Bounded FIFO of the last 50 collected infohashes (served to
            # peers via getRecentlyCollectedTorrents).
            if len(self.recently_collected_torrents) < 50:    # Queue of 50
                self.recently_collected_torrents.append(torrent_hash)
            else:
                self.recently_collected_torrents.pop(0)
                self.recently_collected_torrents.append(torrent_hash)
+        
+
    def set_overflow(self, max_num_torrent):
        # Reset both the current and the initial collection-size limit.
        self.max_num_torrents = self.init_max_num_torrents = max_num_torrent
        
    def delayed_check_overflow(self, delay=2):
        # Schedule check_overflow() on the overlay thread after `delay` secs.
        if not self.initialized:
            return
        self.overlay_bridge.add_task(self.check_overflow, delay)
        
    def delayed_check_free_space(self, delay=2):
        # NOTE(review): despite the name and the `delay` parameter, this
        # refreshes the cached free-space figure immediately and ignores
        # `delay` — TODO confirm whether scheduling was intended.
        self.free_space = self.get_free_space()
        
    def check_overflow(self):    # check if there are too many torrents relative to the free disk space
        """If the collection exceeds max_num_torrents, delete enough old
        torrents to bring it down to ~95% of the limit."""
        if self.num_torrents < 0:
            # Lazily replace the -100 sentinel set in __init__ with the
            # actual count from the DB.
            self.num_torrents = self.torrent_db.getNumberCollectedTorrents()
            #print >> sys.stderr, "**** torrent collectin self.num_torrents=", self.num_torrents

        if DEBUG:
            print >>sys.stderr,"metadata: check overflow: current", self.num_torrents, "max", self.max_num_torrents
        
        if self.num_torrents > self.max_num_torrents:
            num_delete = int(self.num_torrents - self.max_num_torrents*0.95)
            print >> sys.stderr, "** limit space::", self.num_torrents, self.max_num_torrents, num_delete
            self.limit_space(num_delete)
            
    def limit_space(self, num_delete):
        # Ask the DB to free `num_delete` torrents, then refresh counters.
        deleted = self.torrent_db.freeSpace(num_delete)
        if deleted:
            self.num_torrents = self.torrent_db.getNumberCollectedTorrents()
            self.free_space = self.get_free_space()
+        
+        
    def save_torrent(self, infohash, metadata, source='BC', extra_info={}):
        """Persist a received torrent to the collecting dir and register it
        in the DB.  Returns the stored file name, or None when the handler is
        uninitialized or the disk is (nearly) full."""
        # check if disk is full before save it to disk and database
        if not self.initialized:
            return None

        self.check_overflow()
            
        # Re-measure free space when the cached figure looks too low, and
        # periodically (every 10th collected torrent) to limit syscalls.
        if self.min_free_space != 0 and (self.free_space - len(metadata) < self.min_free_space or self.num_collected_torrents % 10 == 0):
            self.free_space = self.get_free_space()
            if self.free_space - len(metadata) < self.min_free_space:
                self.warn_disk_full()
                return None
        
        file_name = get_collected_torrent_filename(infohash)
        if DEBUG:
            print >> sys.stderr,"metadata: Storing torrent", sha(infohash).hexdigest(),"in",file_name
        
        save_path = self.write_torrent(metadata, self.torrent_dir, file_name)
        if save_path:
            self.num_collected_torrents += 1
            self.free_space -= len(metadata)
            self.addTorrentToDB(save_path, infohash, metadata, source=source, extra_info=extra_info)
            # check if space is enough and remove old torrents
            
        return file_name
        
        
    def refreshTrackerStatus(self, torrent):
        "Upon the reception of a new discovered torrent, directly check its tracker"
        if DEBUG:
            print >> sys.stderr, "metadata: checking tracker status of new torrent"
        # TorrentChecking runs in its own thread; fire-and-forget.
        check = TorrentChecking(torrent['infohash'])
        check.start()
+        
    def write_torrent(self, metadata, dir, name):
        """Write raw torrent bytes to dir/name (creating dir if needed) and
        return the full path, or None on any I/O error.

        NOTE(review): parameters `dir` and local `file` shadow Python
        builtins; harmless here but worth renaming in a later cleanup.
        """
        try:
            if not os.access(dir,os.F_OK):
                os.mkdir(dir)
            save_path = os.path.join(dir, name)
            file = open(save_path, 'wb')
            file.write(metadata)
            file.close()
            if DEBUG:
                print >> sys.stderr,"metadata: write torrent", `save_path`, len(metadata), hash(metadata)
            return save_path
        except:
            print_exc()
            print >> sys.stderr, "metadata: write torrent failed"
            return None

    def valid_metadata(self, infohash, metadata):
        """Return True iff `metadata` bdecodes into a torrent whose computed
        infohash equals the expected `infohash`; False (with a log line) on
        mismatch or any parse error."""
        try:
            metainfo = bdecode(metadata)
            tdef = TorrentDef.load_from_dict(metainfo)
            got_infohash = tdef.get_infohash()
            if infohash != got_infohash:
                print >> sys.stderr, "metadata: infohash doesn't match the torrent " + \
                "hash. Required: " + `infohash` + ", but got: " + `got_infohash`
                return False
            return True
        except:
            print_exc()
            #print >> sys.stderr, "problem metadata:", repr(metadata)
            return False
+        
    def got_metadata(self, permid, message, selversion):    
        """ receive torrent file from others

        Handle an incoming METADATA message: validate it, store the torrent
        via save_torrent(), and notify interested components.  Unsolicited or
        already-known torrents are silently accepted (return True keeps the
        connection open); only malformed messages return False.
        """
        # Arno, 2007-06-20: Disabled the following code. What's this? Somebody sends 
        # us something and we refuse? Also doesn't take into account download help 
        #and remote-query extension.
        
        #if self.upload_rate <= 0:    # if no upload, no download, that's the game
        #    return True    # don't close connection
        
        try:
            message = bdecode(message[1:])
        except:
            print_exc()
            return False
        if not isinstance(message, dict):
            return False
        try:
            infohash = message['torrent_hash']
            if not isValidInfohash(infohash):
                # 19/02/10 Boudewijn: isValidInfohash either returns
                # True or raises a ValueError. So this part of the
                # code will never be reached...
                return False
            
            assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
            assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
            
            #print >>sys.stderr,"metadata: got_metadata:",get_collected_torrent_filename(infohash)

            # Only accept torrents we actually asked for, and only once.
            if not infohash in self.requested_torrents:    # got a torrent which was not requested
                return True
            if self.torrent_db.hasMetaData(infohash):
                return True

            # P2PURL: new-enough peers may send a compact URL instead of the
            # full .torrent; convert it back to .torrent form for storage.
            goturl = False
            if selversion >= OLPROTO_VER_ELEVENTH:
                if 'metatype' in message and message['metatype'] == URL_MIME_TYPE:
                    try:
                        tdef = TorrentDef.load_from_url(message['metadata'])
                        # Internal storage format is still .torrent file
                        metainfo = tdef.get_metainfo()
                        metadata = bencode(metainfo)
                        goturl = True
                    except:
                        print_exc()
                        return False
                else:
                    metadata = message['metadata']
            else:
                metadata = message['metadata']
                    
            if not self.valid_metadata(infohash, metadata):
                return False
            
            if DEBUG:
                torrent_size = len(metadata)
                if goturl:
                    mdt = "URL"
                else:
                    mdt = "torrent" 
                print >> sys.stderr,"metadata: Recvd",mdt,`infohash`,sha(infohash).hexdigest(), torrent_size
            
            # Tracker stats piggy-backed on the message (protocol >= FOURTH).
            extra_info = {}
            if selversion >= OLPROTO_VER_FOURTH:
                try:
                    extra_info = {'leecher': message.get('leecher', -1),
                              'seeder': message.get('seeder', -1),
                              'last_check_time': message.get('last_check_time', -1),
                              'status':message.get('status', 'unknown')}
                except Exception, msg:
                    print_exc()
                    print >> sys.stderr, "metadata: wrong extra info in msg - ", message
                    extra_info = {}
                
            filename = self.save_torrent(infohash, metadata, extra_info=extra_info)
            self.requested_torrents.remove(infohash)
            
            #if DEBUG:
            #    print >>sys.stderr,"metadata: Was I asked to dlhelp someone",self.dlhelper

            if filename is not None:
                self.notify_torrent_is_in(infohash,metadata,filename)
            
            
            # BarterCast: add bytes of torrent to BarterCastDB
            # Save exchanged KBs in BarterCastDB
            if permid is not None and BARTERCAST_TORRENTS:
                self.overlay_bridge.add_task(lambda:self.olthread_bartercast_torrentexchange(permid, 'downloaded'), 0)
                
                
        except Exception, e:
            print_exc()
            print >> sys.stderr,"metadata: Received metadata is broken",e, message.keys()
            return False
        
        return True
+
    def notify_torrent_is_in(self,infohash,metadata,filename):
        """Tell the download helper and the remote-query handler (when
        registered) that the torrent identified by `infohash` is available."""
        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
        if self.dlhelper is not None:
            self.dlhelper.metadatahandler_received_torrent(infohash, metadata)
        if self.rquerytorrenthandler is not None:
            self.rquerytorrenthandler.metadatahandler_got_torrent(infohash,metadata,filename)
        
    def get_num_torrents(self):
        # May still be the -100 sentinel until check_overflow() has run.
        return self.num_torrents
    
    def warn_disk_full(self):
        """Notify the GUI that the collecting disk/partition is full."""
        if DEBUG:
            print >> sys.stderr,"metadata: send_meta_req: Disk full!"
        drive,dir = os.path.splitdrive(os.path.abspath(self.torrent_dir))
        if not drive:
            # Non-Windows: no drive letter, report the directory instead.
            drive = dir
        self.launchmany.set_activity(NTFY_ACT_DISK_FULL, drive)
        
    def get_free_space(self):
        """Free bytes on the collecting directory's volume; 0 when not yet
        registered or when the platform query fails."""
        if not self.registered:
            return 0
        try:
            freespace = getfreespace(self.torrent_dir)
            return freespace
        except:
            print >> sys.stderr, "meta: cannot get free space of", self.torrent_dir
            print_exc()
            return 0

    def set_rate(self, rate):
        # `rate` is in KB/s; stored internally in bytes/s.
        self.upload_rate = rate * 1024
        
    def set_min_free_space(self, min_free_space):
        # `min_free_space` is in MB; stored internally in bytes.
        self.min_free_space = min_free_space*(2**20)
+
    def checking_upload_queue(self):
        """ check the upload queue every 5 seconds, and send torrent out if the queue 
            is not empty and the max upload rate is not reached.
            It is used for rate control
        """

        if DEBUG:
            print >> sys.stderr, "metadata: checking_upload_queue, length:", len(self.upload_queue), "now:", ctime(time()), "next check:", ctime(self.next_upload_time)
        if self.upload_rate > 0 and int(time()) >= self.next_upload_time and len(self.upload_queue) > 0:
            task = self.upload_queue.pop(0)
            permid = task['permid']
            infohash = task['infohash']
            torrent_path = task['torrent_path']
            selversion = task['selversion']
            sent_size = self.read_and_send_metadata(permid, infohash, torrent_path, selversion)
            # Delay the next send long enough that the average rate stays at
            # upload_rate (integer division; +1 guarantees progress).
            idel = sent_size / self.upload_rate + 1
            self.next_upload_time = int(time()) + idel
            # Re-schedule ourselves to drain the rest of the queue.
            self.overlay_bridge.add_task(self.checking_upload_queue, idel)
+
    def getRecentlyCollectedTorrents(self, num, selversion):
        """
        This method returns a list of collected torrents. It is called by the 
        method hat creates BC message.
        @change: changed by Rahim. Since overlay version 10, the returned list should contain the swarm size info for the torrents.
        @param num: Maximum length of result list. If num=0 it means that the returned list is unlimited.
        @param selversion: Version of the overlay protocol that two communication nodes agreed on.
        """
        if selversion >= OLPROTO_VER_ELEVENTH: ## Amended list with swarm size info is returned. 
            if not self.initialized:
                return []
            else: 
                # Note: num=0 yields [-0:] == the whole list, i.e. unlimited.
                collectedList=self.recently_collected_torrents[-1*num:] # this is list of infohashes
                if len(collectedList) >0:
                    swarmSizeList= self.popularity_db.calculateSwarmSize(collectedList, content='Infohash' , toBC=True)
                # Each entry becomes [infohash, seeders, leechers, time, ...];
                # when collectedList is empty the loop body never runs, so the
                # unguarded swarmSizeList reference is safe.
                for index in range(0,len(collectedList)):
                    collectedList[index]=[collectedList[index]]
                    collectedList[index].append(swarmSizeList[index][1]) # appends number of seeders
                    collectedList[index].append(swarmSizeList[index][2]) # appends number of leechers
                    collectedList[index].append(swarmSizeList[index][3]) # appends current time 
                    collectedList[index].append(swarmSizeList[index][4]) # appends 
                return collectedList;
                
        else:
            if not self.initialized:
                return []
            return self.recently_collected_torrents[-1*num:]    # get the last ones
+
+            
+
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/Overlay/OverlayApps.py b/instrumentation/next-share/BaseLib/Core/Overlay/OverlayApps.py
new file mode 100644 (file)
index 0000000..af46bf7
--- /dev/null
@@ -0,0 +1,367 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# All applications on top of the SecureOverlay should be started here.
+#
+from MetadataHandler import MetadataHandler
+from threading import Lock
+from threading import currentThread
+from time import time
+from traceback import print_exc
+import sys
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BuddyCast.buddycast import BuddyCastFactory
+from BaseLib.Core.ProxyService.CoordinatorMessageHandler import CoordinatorMessageHandler
+from BaseLib.Core.ProxyService.HelperMessageHandler import HelperMessageHandler
+from BaseLib.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
+from BaseLib.Core.NATFirewall.NatCheckMsgHandler import NatCheckMsgHandler
+from BaseLib.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler 
+from BaseLib.Core.SocialNetwork.RemoteQueryMsgHandler import RemoteQueryMsgHandler
+from BaseLib.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler
+from BaseLib.Core.SocialNetwork.SocialNetworkMsgHandler import SocialNetworkMsgHandler
+from BaseLib.Core.Statistics.Crawler import Crawler
+from BaseLib.Core.Statistics.DatabaseCrawler import DatabaseCrawler
+from BaseLib.Core.Statistics.FriendshipCrawler import FriendshipCrawler
+from BaseLib.Core.Statistics.SeedingStatsCrawler import SeedingStatsCrawler
+from BaseLib.Core.Statistics.VideoPlaybackCrawler import VideoPlaybackCrawler
+from BaseLib.Core.Statistics.RepexCrawler import RepexCrawler
+from BaseLib.Core.Statistics.PunctureCrawler import PunctureCrawler
+from BaseLib.Core.Statistics.ChannelCrawler import ChannelCrawler
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.Subtitles.SubtitlesHandler import SubtitlesHandler
+from BaseLib.Core.Subtitles.SubtitlesSupport import SubtitlesSupport
+from BaseLib.Core.Subtitles.PeerHaveManager import PeersHaveManager
+
+DEBUG = False
+
+class OverlayApps:
+    # Code to make this a singleton
+    __single = None
+
+    def __init__(self):
+        if OverlayApps.__single:
+            raise RuntimeError, "OverlayApps is Singleton"
+        OverlayApps.__single = self 
+        self.coord_handler = None
+        self.help_handler = None
+        self.metadata_handler = None
+        self.buddycast = None
+        self.collect = None
+        self.dialback_handler = None
+        self.socnet_handler = None
+        self.rquery_handler = None
+        self.chquery_handler = None
+        self.friendship_handler = None
+        self.msg_handlers = {}  # message-ID -> handler callable, filled by register_msg_handler()
+        self.connection_handlers = []  # callables invoked on every overlay (dis)connect
+        self.text_mode = None
+        self.requestPolicyLock = Lock()
+        
+    def getInstance(*args, **kw):
+        if OverlayApps.__single is None:
+            OverlayApps(*args, **kw)
+        return OverlayApps.__single
+    getInstance = staticmethod(getInstance)
+
+    def register(self, overlay_bridge, session, launchmany, config, requestPolicy):
+        self.overlay_bridge = overlay_bridge
+        self.launchmany = launchmany
+        self.requestPolicy = requestPolicy
+        self.text_mode = config.has_key('text_mode')
+        
+        # OverlayApps gets all messages, and demultiplexes 
+        overlay_bridge.register_recv_callback(self.handleMessage)
+        overlay_bridge.register_conns_callback(self.handleConnection)
+
+        # Arno, 2010-01-28: Start with crawler support, other mods depend on
+        # that, e.g. BuddyCast
+        i_am_crawler = False  # flipped below iff crawler.am_crawler(); forwarded to buddycast.register()
+        if config['crawler']:
+            crawler = Crawler.get_instance(session)
+            self.register_msg_handler([CRAWLER_REQUEST], crawler.handle_request)
+
+            database_crawler = DatabaseCrawler.get_instance()
+            crawler.register_message_handler(CRAWLER_DATABASE_QUERY, database_crawler.handle_crawler_request, database_crawler.handle_crawler_reply)
+            seeding_stats_crawler = SeedingStatsCrawler.get_instance()
+            crawler.register_message_handler(CRAWLER_SEEDINGSTATS_QUERY, seeding_stats_crawler.handle_crawler_request, seeding_stats_crawler.handle_crawler_reply)
+            friendship_crawler = FriendshipCrawler.get_instance(session)
+            crawler.register_message_handler(CRAWLER_FRIENDSHIP_STATS, friendship_crawler.handle_crawler_request, friendship_crawler.handle_crawler_reply)
+            natcheck_handler = NatCheckMsgHandler.getInstance()
+            natcheck_handler.register(launchmany)
+            crawler.register_message_handler(CRAWLER_NATCHECK, natcheck_handler.gotDoNatCheckMessage, natcheck_handler.gotNatCheckReplyMessage)
+            crawler.register_message_handler(CRAWLER_NATTRAVERSAL, natcheck_handler.gotUdpConnectRequest, natcheck_handler.gotUdpConnectReply)
+            videoplayback_crawler = VideoPlaybackCrawler.get_instance()
+            crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, videoplayback_crawler.handle_event_crawler_request, videoplayback_crawler.handle_event_crawler_reply)
+            crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, videoplayback_crawler.handle_info_crawler_request, videoplayback_crawler.handle_info_crawler_reply)
+            repex_crawler = RepexCrawler.get_instance(session)
+            crawler.register_message_handler(CRAWLER_REPEX_QUERY, repex_crawler.handle_crawler_request, repex_crawler.handle_crawler_reply)
+            puncture_crawler = PunctureCrawler.get_instance()
+            crawler.register_message_handler(CRAWLER_PUNCTURE_QUERY, puncture_crawler.handle_crawler_request, puncture_crawler.handle_crawler_reply)
+            channel_crawler = ChannelCrawler.get_instance()
+            crawler.register_message_handler(CRAWLER_CHANNEL_QUERY, channel_crawler.handle_crawler_request, channel_crawler.handle_crawler_reply)
+
+            if crawler.am_crawler():
+                i_am_crawler = True
+                # we will only accept CRAWLER_REPLY messages when we are actually a crawler
+                self.register_msg_handler([CRAWLER_REPLY], crawler.handle_reply)
+                self.register_connection_handler(crawler.handle_connection)
+
+                if "database" in sys.argv:
+                    # allows access to tribler database (boudewijn)
+                    crawler.register_crawl_initiator(database_crawler.query_initiator)
+
+                if "videoplayback" in sys.argv:
+                    # allows access to video-playback statistics (boudewijn)
+                    crawler.register_crawl_initiator(videoplayback_crawler.query_initiator)
+
+                if "seedingstats" in sys.argv:
+                    # allows access to seeding statistics (Boxun)
+                    crawler.register_crawl_initiator(seeding_stats_crawler.query_initiator, frequency=60*30)
+
+                if "friendship" in sys.argv:
+                    # allows access to friendship statistics (Ali)
+                    crawler.register_crawl_initiator(friendship_crawler.query_initiator)
+
+                if "natcheck" in sys.argv:
+                    # allows access to nat-check statistics (Lucia)
+                    crawler.register_crawl_initiator(natcheck_handler.doNatCheck, 3600)
+                
+                if "repex" in sys.argv:
+                    # allows access to RePEX log statistics (Raynor Vliegendhart)
+                    crawler.register_crawl_initiator(repex_crawler.query_initiator)
+
+                if "puncture" in sys.argv:
+                    # allows access to UDPPuncture log statistics (Gertjan)
+                    crawler.register_crawl_initiator(puncture_crawler.query_initiator)
+                
+                if "channel" in sys.argv:
+                    # allows access to tribler channels' database (nitin)
+                    crawler.register_crawl_initiator(channel_crawler.query_initiator)
+        else:
+            self.register_msg_handler([CRAWLER_REQUEST, CRAWLER_REPLY], self.handleDisabledMessage)
+
+
+        # Create handler for metadata messages in two parts, as 
+        # download help needs to know the metadata_handler and we need
+        # to know the download helper handler.
+        # Part 1:
+        self.metadata_handler = MetadataHandler.getInstance()
+
+        if config['download_help']:
+            # Create handler for messages to dlhelp coordinator
+            self.coord_handler = CoordinatorMessageHandler(launchmany)
+            self.register_msg_handler(HelpHelperMessages, self.coord_handler.handleMessage)
+
+            # Create handler for messages to dlhelp helper
+            self.help_handler = HelperMessageHandler()
+            self.help_handler.register(session,self.metadata_handler,config['download_help_dir'],config.get('coopdlconfig', False))
+            self.register_msg_handler(HelpCoordinatorMessages, self.help_handler.handleMessage)
+
+        # Part 2:
+        self.metadata_handler.register(overlay_bridge, self.help_handler, launchmany, config)
+        self.register_msg_handler(MetadataMessages, self.metadata_handler.handleMessage)
+        
+        
+        # 13-04-2010 Andrea: subtitles collecting
+        if not config['subtitles_collecting'] : 
+            self.subtitles_handler = None
+        else:
+            self.subtitles_handler = SubtitlesHandler.getInstance()
+            self.subtitles_handler.register(self.overlay_bridge, self.launchmany.richmetadataDbHandler, self.launchmany.session)
+            
+            self.peersHaveManger = PeersHaveManager.getInstance()
+            if not self.peersHaveManger.isRegistered():
+                self.peersHaveManger.register(self.launchmany.richmetadataDbHandler, self.overlay_bridge)
+            # I'm not sure if this is the best place to init this
+            self.subtitle_support = SubtitlesSupport.getInstance()
+                                                           
+            keypair = self.launchmany.session.keypair
+            permid = self.launchmany.session.get_permid()
+            self.subtitle_support._register(self.launchmany.richmetadataDbHandler,
+                                           self.subtitles_handler, 
+                                           self.launchmany.channelcast_db, permid, 
+                                           keypair, self.peersHaveManger,
+                                           self.overlay_bridge)
+            
+            # cleanup the subtitles database at the first launch  
+            self.subtitle_support.runDBConsinstencyRoutine()
+            
+        
+        
+        if not config['torrent_collecting']:
+            self.torrent_collecting_solution = 0
+        else:
+            self.torrent_collecting_solution = config['buddycast_collecting_solution']
+        
+        if config['buddycast']:
+            # Create handler for Buddycast messages
+            
+            self.buddycast = BuddyCastFactory.getInstance(superpeer=config['superpeer'], log=config['overlay_log'])
+            # Using buddycast to handle torrent collecting since they are dependent
+            self.buddycast.register(overlay_bridge, launchmany, 
+                                    launchmany.rawserver_fatalerrorfunc,
+                                    self.metadata_handler, 
+                                    self.torrent_collecting_solution,
+                                    config['start_recommender'],config['buddycast_max_peers'],i_am_crawler)
+            
+            self.register_msg_handler(BuddyCastMessages, self.buddycast.handleMessage)
+            self.register_connection_handler(self.buddycast.handleConnection)
+
+        if config['dialback']:
+            self.dialback_handler = DialbackMsgHandler.getInstance()
+            # The Dialback mechanism needs the real rawserver, not the overlay_bridge
+            self.dialback_handler.register(overlay_bridge, launchmany, launchmany.rawserver, config)
+            self.register_msg_handler([DIALBACK_REQUEST],
+                                      self.dialback_handler.olthread_handleSecOverlayMessage)
+            self.register_connection_handler(self.dialback_handler.olthread_handleSecOverlayConnection)
+        else:
+            self.register_msg_handler([DIALBACK_REQUEST], self.handleDisabledMessage)
+
+        if config['socnet']:
+            self.socnet_handler = SocialNetworkMsgHandler.getInstance()
+            self.socnet_handler.register(overlay_bridge, launchmany, config)
+            self.register_msg_handler(SocialNetworkMessages,self.socnet_handler.handleMessage)
+            self.register_connection_handler(self.socnet_handler.handleConnection)
+
+            self.friendship_handler = FriendshipMsgHandler.getInstance()
+            self.friendship_handler.register(overlay_bridge, launchmany.session)
+            self.register_msg_handler(FriendshipMessages,self.friendship_handler.handleMessage)
+            self.register_connection_handler(self.friendship_handler.handleConnection)
+
+        if config['rquery']:
+            self.rquery_handler = RemoteQueryMsgHandler.getInstance()
+            self.rquery_handler.register(overlay_bridge,launchmany,config,self.buddycast,log=config['overlay_log'])
+            self.register_msg_handler(RemoteQueryMessages,self.rquery_handler.handleMessage)
+            self.register_connection_handler(self.rquery_handler.handleConnection)
+        
+        if config['subtitles_collecting']:
+            hndl = self.subtitles_handler.getMessageHandler()
+            self.register_msg_handler(SubtitleMessages, hndl)
+        
+        self.rtorrent_handler = RemoteTorrentHandler.getInstance()
+        self.rtorrent_handler.register(overlay_bridge,self.metadata_handler,session)
+        self.metadata_handler.register2(self.rtorrent_handler)
+
+        # Add notifier as connection handler
+        self.register_connection_handler(self.notifier_handles_connection)
+        
+        if config['buddycast']:
+            # Arno: to prevent concurrency between mainthread and overlay
+            # thread where BuddyCast schedules tasks
+            self.buddycast.register2()
+    
+    def early_shutdown(self):
+        """ Called as soon as Session shutdown is initiated. Used to start
+        shutdown tasks that takes some time and that can run in parallel
+        to checkpointing, etc.
+        """
+        # Called by OverlayThread
+        if self.friendship_handler is not None:
+            self.friendship_handler.shutdown()
+            
+        
+    def register_msg_handler(self, ids, handler):
+        """ 
+        ids is the [ID1, ID2, ..] where IDn is a sort of message ID in overlay
+        swarm. Each ID can only be handled by one handler, but a handler can 
+        handle multiple IDs
+        """
+        for id in ids:
+            if DEBUG:
+                print >> sys.stderr,"olapps: Message handler registered for",getMessageName(id)
+            self.msg_handlers[id] = handler
+
+    def register_connection_handler(self, handler):
+        """
+            Register a handler for if a connection is established
+            handler-function is called like:
+            handler(exc,permid,selversion,locally_initiated)
+        """
+        assert handler not in self.connection_handlers, 'This connection_handler is already registered'
+        if DEBUG:
+            print >> sys.stderr, "olapps: Connection handler registered for", handler
+        self.connection_handlers.append(handler)
+
+    def handleMessage(self,permid,selversion,message):
+        """ demultiplex message stream to handlers """
+        
+        # Check auth
+        if not self.requestAllowed(permid, message[0]):
+            if DEBUG:
+                print >> sys.stderr, "olapps: Message not allowed", getMessageName(message[0])
+            return False
+
+        if message[0] in self.msg_handlers:
+            # This is a one byte id. (For instance a regular
+            # BitTorrent message)
+            id_ = message[0]
+        else:
+            if DEBUG:
+                print >> sys.stderr, "olapps: No handler found for", getMessageName(message[0:2])
+            return False
+
+        if DEBUG:
+            print >> sys.stderr, "olapps: handleMessage", getMessageName(id_), "v" + str(selversion)
+
+        try:
+            if DEBUG:
+                st = time()
+                ret = self.msg_handlers[id_](permid, selversion, message)
+                et = time()
+                diff = et - st
+                if diff > 0:
+                    print >> sys.stderr,"olapps: ",getMessageName(id_),"returned",ret,"TOOK %.5f" % diff
+                return ret
+            else:
+                return self.msg_handlers[id_](permid, selversion, message)
+        except:
+            # Catch all
+            print_exc()
+            return False
+
+    def handleDisabledMessage(self, *args):
+        return True  # claim the message was handled; a False return would make the caller close the connection
+
+    def handleConnection(self,exc,permid,selversion,locally_initiated):
+        """ An overlay-connection was established. Notify interested parties. """
+
+        if DEBUG:
+            print >> sys.stderr,"olapps: handleConnection",exc,selversion,locally_initiated,currentThread().getName()
+
+        for handler in self.connection_handlers:
+            try:
+                #if DEBUG:
+                #    print >> sys.stderr,"olapps: calling connection handler:",'%s.%s' % (handler.__module__, handler.__name__)
+                handler(exc,permid,selversion,locally_initiated)
+            except:
+                print >> sys.stderr, 'olapps: Exception during connection handler calling'
+                print_exc()
+    
+    def requestAllowed(self, permid, messageType):
+        self.requestPolicyLock.acquire()
+        try:
+            rp = self.requestPolicy
+        finally:
+            self.requestPolicyLock.release()
+        allowed = rp.allowed(permid, messageType)
+        if DEBUG:
+            if allowed:
+                word = 'allowed'
+            else:
+                word = 'denied'
+            print >> sys.stderr, 'olapps: Request type %s from %s was %s' % (getMessageName(messageType), show_permid_short(permid), word)
+        return allowed
+    
+    def setRequestPolicy(self, requestPolicy):
+        self.requestPolicyLock.acquire()
+        try:
+            self.requestPolicy = requestPolicy
+        finally:
+            self.requestPolicyLock.release()
+        
+    
+    def notifier_handles_connection(self, exc,permid,selversion,locally_initiated):
+        # Notify interested parties (that use the notifier/observer structure) about a connection
+        self.launchmany.session.uch.notify(NTFY_PEERS, NTFY_CONNECTION, permid, True)
diff --git a/instrumentation/next-share/BaseLib/Core/Overlay/OverlayThreadingBridge.py b/instrumentation/next-share/BaseLib/Core/Overlay/OverlayThreadingBridge.py
new file mode 100644 (file)
index 0000000..672b266
--- /dev/null
@@ -0,0 +1,228 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# This class bridges between the OverlayApps class and the SecureOverlay
+# and ensures that all upcalls made by the NetworkThread via the SecureOverlay
+# are handed over to a different thread, the OverlayThread that propagates the
+# upcall to the OverlayApps.
+# 
+
+import sys
+from threading import currentThread
+from traceback import print_exc
+
+from BaseLib.Core.Overlay.SecureOverlay import CloseException
+from BaseLib.Core.BitTornado.BT1.MessageID import getMessageName
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from BaseLib.Utilities.TimedTaskQueue import TimedTaskQueue
+import threading
+
+DEBUG = False
+
+class OverlayThreadingBridge:
+
+    __single = None
+    lock = threading.Lock()
+
+    def __init__(self):
+        if OverlayThreadingBridge.__single:
+            raise RuntimeError, "OverlayThreadingBridge is Singleton"
+        OverlayThreadingBridge.__single = self 
+        
+        self.secover = None
+        self.olapps = None
+        self.olappsmsghandler = None
+        self.olappsconnhandler = None
+
+        # Current impl of wrapper: single thread
+        self.tqueue = TimedTaskQueue(nameprefix="Overlay")
+
+    def getInstance(*args, **kw):
+        # Singleton pattern with double-checking
+        if OverlayThreadingBridge.__single is None:
+            OverlayThreadingBridge.lock.acquire()   
+            try:
+                if OverlayThreadingBridge.__single is None:
+                    OverlayThreadingBridge(*args, **kw)
+            finally:
+                OverlayThreadingBridge.lock.release()
+        return OverlayThreadingBridge.__single
+    getInstance = staticmethod(getInstance)
+
+    def resetSingleton(self):
+        """ For testing purposes """
+        OverlayThreadingBridge.__single = None 
+
+    def register_bridge(self,secover,olapps):
+        """ Called by MainThread """
+        self.secover = secover
+        self.olapps = olapps
+        
+        secover.register_recv_callback(self.handleMessage)
+        secover.register_conns_callback(self.handleConnection)
+
+    #
+    # SecOverlay interface
+    #
+    def register(self,launchmanycore,max_len):
+        """ Called by MainThread """
+        self.secover.register(launchmanycore,max_len)
+
+        # FOR TESTING ONLY
+        self.iplport2oc = self.secover.iplport2oc
+
+    def get_handler(self):
+        return self.secover
+
+    def start_listening(self):
+        """ Called by MainThread """
+        self.secover.start_listening()
+
+    def register_recv_callback(self,callback):
+        """ Called by MainThread """
+        self.olappsmsghandler = callback
+
+    def register_conns_callback(self,callback):
+        """ Called by MainThread """
+        self.olappsconnhandler = callback
+
+    def handleConnection(self,exc,permid,selversion,locally_initiated,hisdns):
+        """ Called by NetworkThread """
+        # called by SecureOverlay.got_auth_connection() or cleanup_admin_and_callbacks()
+        if DEBUG:
+            print >>sys.stderr,"olbridge: handleConnection",exc,show_permid_short(permid),selversion,locally_initiated,hisdns,currentThread().getName()
+        
+        def olbridge_handle_conn_func():
+            # Called by OverlayThread
+
+            if DEBUG:
+                print >>sys.stderr,"olbridge: handle_conn_func",exc,show_permid_short(permid),selversion,locally_initiated,hisdns,currentThread().getName()
+             
+            try:
+                if hisdns:
+                    self.secover.add_peer_to_db(permid,hisdns,selversion)
+                    
+                if self.olappsconnhandler is not None:    # self.olappsconnhandler = OverlayApps.handleConnection 
+                    self.olappsconnhandler(exc,permid,selversion,locally_initiated)
+            except:
+                print_exc()
+                
+            if isinstance(exc,CloseException):
+                self.secover.update_peer_status(permid,exc.was_auth_done())
+                
+        self.tqueue.add_task(olbridge_handle_conn_func,0)
+        
+    def handleMessage(self,permid,selversion,message):
+        """ Called by NetworkThread """
+        #ProxyService_
+        #
+        # DEBUG
+        #print "### olbridge: handleMessage", show_permid_short(permid), selversion, getMessageName(message[0]), currentThread().getName()
+        #
+        #_ProxyService
+        
+        if DEBUG:
+            print >>sys.stderr,"olbridge: handleMessage",show_permid_short(permid),selversion,getMessageName(message[0]),currentThread().getName()
+        
+        def olbridge_handle_msg_func():
+            # Called by OverlayThread
+            
+            if DEBUG:
+                print >>sys.stderr,"olbridge: handle_msg_func",show_permid_short(permid),selversion,getMessageName(message[0]),currentThread().getName()
+             
+            try:
+                if self.olappsmsghandler is None:
+                    ret = True
+                else:
+                    ret = self.olappsmsghandler(permid,selversion,message)
+            except:
+                print_exc()
+                ret = False
+            if ret == False: # handler rejected the message (or raised) -> drop the overlay connection
+                if DEBUG:
+                    print >>sys.stderr,"olbridge: olbridge_handle_msg_func closing!",show_permid_short(permid),selversion,getMessageName(message[0]),currentThread().getName()
+                self.close(permid)
+                
+        self.tqueue.add_task(olbridge_handle_msg_func,0)
+        return True  # always report success here; the real outcome is decided later on the OverlayThread
+
+
+    def connect_dns(self,dns,callback):
+        """ Called by OverlayThread/NetworkThread """
+        
+        if DEBUG:
+            print >>sys.stderr,"olbridge: connect_dns",dns
+        
+        def olbridge_connect_dns_callback(cexc,cdns,cpermid,cselver):
+            # Called by network thread
+
+            if DEBUG:
+                print >>sys.stderr,"olbridge: connect_dns_callback",cexc,cdns,show_permid_short(cpermid),cselver
+             
+            olbridge_connect_dns_callback_lambda = lambda:callback(cexc,cdns,cpermid,cselver)
+            self.add_task(olbridge_connect_dns_callback_lambda,0)
+            
+        self.secover.connect_dns(dns,olbridge_connect_dns_callback)
+
+
+    def connect(self,permid,callback):
+        """ Called by OverlayThread """
+
+        if DEBUG:
+            print >>sys.stderr,"olbridge: connect",show_permid_short(permid), currentThread().getName()
+        
+        def olbridge_connect_callback(cexc,cdns,cpermid,cselver):
+            # Called by network thread
+            
+            if DEBUG:
+                print >>sys.stderr,"olbridge: connect_callback",cexc,cdns,show_permid_short(cpermid),cselver, callback, currentThread().getName()
+
+             
+            olbridge_connect_callback_lambda = lambda:callback(cexc,cdns,cpermid,cselver)
+            # Jie: postpone to call this callback to schedule it after the peer has been added to buddycast connection list
+            # Arno, 2008-09-15: No-no-no
+            self.add_task(olbridge_connect_callback_lambda,0)    
+            
+        self.secover.connect(permid,olbridge_connect_callback)
+
+
+    def send(self,permid,msg,callback):
+        """ Called by OverlayThread """
+
+        if DEBUG:
+            print >>sys.stderr,"olbridge: send",show_permid_short(permid),len(msg)
+
+        def olbridge_send_callback(cexc,cpermid):
+            # Called by network thread
+            
+            if DEBUG:
+                print >>sys.stderr,"olbridge: send_callback",cexc,show_permid_short(cpermid)
+
+             
+            olbridge_send_callback_lambda = lambda:callback(cexc,cpermid)
+            self.add_task(olbridge_send_callback_lambda,0)
+        
+        self.secover.send(permid,msg,olbridge_send_callback)
+
+    def close(self,permid):
+        """ Called by OverlayThread """
+        self.secover.close(permid)
+        
+    def add_task(self,task,t=0,ident=None):
+        """ Called by OverlayThread """
+        self.tqueue.add_task(task,t,ident)
+        
+#===============================================================================
+#    # Jie: according to Arno's suggestion, commit on demand instead of periodically
+#    def periodic_commit(self):
+#        period = 5*60    # commit every 5 min
+#        try:
+#            db = SQLiteCacheDB.getInstance()
+#            db.commit()
+#        except:
+#            period = period*2
+#        self.add_task(self.periodic_commit, period)
+#        
+#===============================================================================
+        
+        
diff --git a/instrumentation/next-share/BaseLib/Core/Overlay/SecureOverlay.py b/instrumentation/next-share/BaseLib/Core/Overlay/SecureOverlay.py
new file mode 100644 (file)
index 0000000..f8014fa
--- /dev/null
@@ -0,0 +1,950 @@
+# Written by Arno Bakker, Bram Cohen, Jie Yang, George Milescu
+# see LICENSE.txt for license information
+#
+# Please apply networking code fixes also to DialbackConnHandler.py
+
+from cStringIO import StringIO
+from struct import pack,unpack
+from threading import currentThread
+from time import time
+from traceback import print_exc,print_stack
+import sys
+
+from BaseLib.Core.BitTornado.BT1.MessageID import protocol_name,option_pattern,getMessageName
+from BaseLib.Core.BitTornado.BT1.convert import tobinary,toint
+from BaseLib.Core.BitTornado.__init__ import createPeerID
+from BaseLib.Core.CacheDB.sqlitecachedb import safe_dict,bin2str
+from BaseLib.Core.Overlay.permid import ChallengeResponse
+from BaseLib.Core.Utilities.utilities import show_permid_short,hostname_or_ip2ip
+from BaseLib.Core.simpledefs import *
+
+DEBUG = False
+
+#
+# Public definitions
+#
+# The all-zeros "infohash" identifies the overlay swarm itself; it is 20
+# bytes long, matching a real BitTorrent infohash.
+overlay_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+
+# Overlay-protocol version numbers in use in the wild
+OLPROTO_VER_FIRST  = 1  # Internally used only.
+OLPROTO_VER_SECOND = 2  # First public release, >= 3.3.4
+OLPROTO_VER_THIRD  = 3  # Second public release, >= 3.6.0, Dialback, BuddyCast2
+OLPROTO_VER_FOURTH = 4  # Third public release, >= 3.7.0, BuddyCast3
+OLPROTO_VER_FIFTH = 5   # Fourth public release, >= 4.0.0, SOCIAL_OVERLAP
+OLPROTO_VER_SIXTH = 6   # Fifth public release, >= 4.1.0, extra BC fields, remote query
+OLPROTO_VER_SEVENTH = 7 # Sixth public release, >= 4.5.0, supports CRAWLER_REQUEST and CRAWLER_REPLY messages
+OLPROTO_VER_EIGHTH = 8  # Seventh public release, >= 5.0, supporting BuddyCast with clicklog info.
+OLPROTO_VER_NINETH = 9  # Eighth public release, >= 5.1, additional torrent_size in remote search query reply.
+OLPROTO_VER_TENTH = 10  # Nineth public release, M18, simplified the VOD statistics (this code is not likely to be used in public, but still).
+OLPROTO_VER_ELEVENTH = 11  # Tenth public release, trial M23, swarm size info part of BC message
+OLPROTO_VER_TWELFTH = 12  # 11th public release M24, SIMPLE+METADATA query + ChannelCast BASE64.
+OLPROTO_VER_THIRTEENTH = 13 # 12th public release >= 5.2, ChannelCast binary.
+OLPROTO_VER_FOURTEENTH = 14 # 13th public release >= M30, ProxyService + Subtitle dissemination through ChannelCast + SUBS and GET_SUBS messages 
+
+# Overlay-swarm protocol version numbers
+OLPROTO_VER_CURRENT = OLPROTO_VER_FOURTEENTH
+
+OLPROTO_VER_LOWEST = OLPROTO_VER_SECOND
+# All protocol versions this peer can speak: [LOWEST, CURRENT] inclusive.
+SupportedVersions = range(OLPROTO_VER_LOWEST, OLPROTO_VER_CURRENT+1)
+
+#
+# Private definitions
+#
+
+# States for overlay connection
+STATE_INITIAL = 0
+STATE_HS_FULL_WAIT = 1
+STATE_HS_PEERID_WAIT = 2
+STATE_AUTH_WAIT = 3
+STATE_DATA_WAIT = 4
+STATE_CLOSED = 5
+
+# Misc
+EXPIRE_THRESHOLD =      300    # seconds::  keep consistent with sockethandler
+EXPIRE_CHECK_INTERVAL = 60     # seconds
+# Sentinel returned by get_remote_listen_port() when the port is unknown.
+NO_REMOTE_LISTEN_PORT_KNOWN = -481
+
+
+class SecureOverlay:
+    __single = None
+
+    def __init__(self):
+        """ Private constructor; use SecureOverlay.getInstance() instead. """
+        if SecureOverlay.__single:
+            raise RuntimeError, "SecureOverlay is Singleton"
+        SecureOverlay.__single = self 
+        self.olproto_ver_current = OLPROTO_VER_CURRENT
+        self.usermsghandler = None
+        self.userconnhandler = None
+        # ARNOCOMMENT: Remove this, DB should be fast enough. Don't want caches allover
+        self.dns = safe_dict()    # permid -> (ip,port) cache
+
+       
+    #
+    # Interface for upper layer
+    #
+    def getInstance(*args, **kw):
+        # Lazily create the singleton on first use.
+        if SecureOverlay.__single is None:
+            SecureOverlay(*args, **kw)
+        return SecureOverlay.__single
+    getInstance = staticmethod(getInstance)
+
+    def register(self,launchmanycore, max_len):
+        """ Wire the overlay to the session's rawserver, peer database and
+            identity. max_len bounds the length of accepted overlay
+            messages (see OverlayConnection.read_len). """
+        self.lm = launchmanycore
+        self.rawserver = self.lm.rawserver
+        self.sock_hand = self.rawserver.sockethandler
+        self.multihandler = self.lm.multihandler
+        self.overlay_rawserver = self.multihandler.newRawServer(overlay_infohash, 
+                                                                self.rawserver.doneflag,
+                                                                protocol_name)
+        self.max_len = max_len
+        self.iplport2oc = {}    # (IP,listen port) -> OverlayConnection
+        self.peer_db = self.lm.peer_db
+        self.mykeypair = self.lm.session.keypair
+        self.permid = self.lm.session.get_permid()
+        self.myip = self.lm.get_ext_ip()
+        self.myport = self.lm.session.get_listen_port()
+        # create_my_peer_id is defined elsewhere in this module.
+        self.myid = create_my_peer_id(self.myport)
+
+        # 25/01/10 boudewijn: because there is no 'keep alive' message
+        # the last_activity check is prone to get false positives.
+        # The higher-ups decided that this feature should be removed
+        # entirely.
+        # self.last_activity = time()
+
+    def resetSingleton(self):
+        """ For testing purposes """
+        SecureOverlay.__single = None 
+
+    def start_listening(self):
+        # Start accepting overlay connections for the overlay swarm.
+        self.overlay_rawserver.start_listening(self)
+        # self.overlay_rawserver.add_task(self.secover_mon_netwact, 2)
+
+    # 25/01/10 boudewijn: because there is no 'keep alive' message the
+    # last_activity check is prone to get false positives.  The
+    # higher-ups decided that this feature should be removed entirely.
+    # def secover_mon_netwact(self):
+    #     """
+    #     periodically notify the network status
+    #     """
+    #     diff = time() - self.last_activity
+    #     if diff > 120 + 1:
+    #         # 120 is set as the check_period for buddycast until a
+    #         # KEEP_ALIVE message is send
+    #         msg = "no network"
+    #     else:
+    #         msg = "network active"
+    #     self.lm.set_activity(NTFY_ACT_ACTIVE, msg, diff)
+    #     self.overlay_rawserver.add_task(self.secover_mon_netwact, 2)
+
+    def connect_dns(self,dns,callback):
+        """ Connects to the indicated endpoint and determines the permid 
+            at that endpoint. Non-blocking. 
+            
+            Pre: "dns" must be an IP address, not a hostname.
+            
+            Network thread calls "callback(exc,dns,permid,selver)" when the connection
+            is established or when an error occurs during connection 
+            establishment. In the former case, exc is None, otherwise
+            it contains an Exception.
+
+            The established connection will auto close after EXPIRE_THRESHOLD
+            seconds of inactivity.
+        """
+        if DEBUG:
+            print >> sys.stderr,"secover: connect_dns",dns
+        # To prevent concurrency problems on sockets the calling thread 
+        # delegates to the network thread.
+        task = Task(self._connect_dns,dns,callback)
+        self.rawserver.add_task(task.start, 0)
+
+
+    def connect(self,permid,callback):
+        """ Connects to the indicated permid. Non-blocking.
+            
+            Network thread calls "callback(exc,dns,permid,selver)" when the connection
+            is established or when an error occurs during connection 
+            establishment. In the former case, exc is None, otherwise
+            it contains an Exception.
+
+            The established connection will auto close after EXPIRE_THRESHOLD
+            seconds of inactivity.
+        """
+        if DEBUG:
+            print >> sys.stderr,"secover: connect",show_permid_short(permid), currentThread().getName()
+        # To prevent concurrency problems on sockets the calling thread 
+        # delegates to the network thread.
+        
+        # The (ip,port) lookup is done here, on the calling thread, because
+        # it may hit the peer database (see get_dns_from_peerdb).
+        dns = self.get_dns_from_peerdb(permid)
+        task = Task(self._connect,permid,dns,callback)
+
+        if DEBUG:
+            print >> sys.stderr,"secover: connect",show_permid_short(permid),"currently at",dns
+        
+        self.rawserver.add_task(task.start, 0)
+
+
+    def send(self,permid,msg,callback):
+        """ Sends a message to the indicated permid. Non-blocking.
+            
+            Pre: connection to permid must have been established successfully.
+
+            Network thread calls "callback(exc,permid)" when the message is sent
+            or when an error occurs during sending. In the former case, exc 
+            is None, otherwise it contains an Exception.
+        """
+        # To prevent concurrency problems on sockets the calling thread 
+        # delegates to the network thread.
+        dns = self.get_dns_from_peerdb(permid)
+        task = Task(self._send,permid,dns,msg,callback)
+        self.rawserver.add_task(task.start, 0)
+
+
+
+    def close(self,permid):
+        """ Closes any connection to indicated permid. Non-blocking.
+            
+            Pre: connection to permid must have been established successfully.
+
+            Note: this method takes no callback. When the connection is
+            actually closed, the handler registered via
+            register_conns_callback() is invoked with a CloseException.
+        """
+        # To prevent concurrency problems on sockets the calling thread 
+        # delegates to the network thread.
+        task = Task(self._close,permid)
+        self.rawserver.add_task(task.start, 0)
+
+
+    def register_recv_callback(self,callback):
+        """ Register a callback to be called when receiving a message from 
+            any permid. Non-blocking.
+
+            Network thread calls "callback(permid,selver,msg)" when a message 
+            is received (see got_message below; no exc argument is passed). 
+            The callback is not called on errors e.g. remote 
+            connection close.
+            
+            The callback must return True to keep the connection open.
+        """
+        self.usermsghandler = callback
+
+    def register_conns_callback(self,callback):
+        """ Register a callback to be called when receiving a connection from 
+            any permid. Non-blocking.
+
+            Network thread calls "callback(exc,permid,selver,locally_initiated,hisdns)" 
+            when a connection is established (locally initiated or remote), or
+            when a connection is closed locally or remotely. In the former case, 
+            exc is None, otherwise it contains an Exception. hisdns is the
+            peer's (ip,listen port), or None when unknown.
+
+            Note that this means that if a callback is registered via this method,
+            both this callback and the callback passed to a connect() method 
+            will be called.
+        """
+        self.userconnhandler = callback
+
+
+    #
+    # Internal methods
+    #
+    def _connect_dns(self,dns,callback):
+        try:
+            if DEBUG:
+                print >> sys.stderr,"secover: actual connect_dns",dns
+            if dns[0] == self.myip and int(dns[1]) == self.myport:
+                callback(KeyError('IP and port of the target is the same as myself'),dns,None,0)
+            iplport = ip_and_port2str(dns[0],dns[1])
+            oc = None
+            try:
+                oc = self.iplport2oc[iplport]
+            except KeyError:
+                pass
+            if oc is None:
+                oc = self.start_connection(dns)
+                self.iplport2oc[iplport] = oc
+            if not oc.is_auth_done():
+                oc.queue_callback(dns,callback)
+            else:
+                callback(None,dns,oc.get_auth_permid(),oc.get_sel_proto_ver())
+        except Exception,exc:
+            if DEBUG:
+                print_exc()
+            callback(exc,dns,None,0)
+
+    def _connect(self,expectedpermid,dns,callback):
+        if DEBUG:
+            print >> sys.stderr,"secover: actual connect",show_permid_short(expectedpermid), currentThread().getName()
+        if expectedpermid == self.permid:
+            callback(KeyError('The target permid is the same as my permid'),None,expectedpermid,0)
+        try:
+            oc = self.get_oc_by_permid(expectedpermid)
+            if oc is None:
+                if dns is None:
+                    callback(KeyError('IP address + port for permid unknown'),dns,expectedpermid,0)
+                else:
+                    self._connect_dns(dns,lambda exc,dns2,peerpermid,selver:\
+                          self._whoishe_callback(exc,dns2,peerpermid,selver,expectedpermid,callback))
+            else:
+                # We already have a connection to this permid
+                self._whoishe_callback(None,(oc.get_ip(),oc.get_auth_listen_port()),expectedpermid,oc.get_sel_proto_ver(),expectedpermid,callback)
+        except Exception,exc:
+            if DEBUG:
+                print_exc()
+            callback(exc,None,expectedpermid,0)
+
+    def _whoishe_callback(self,exc,dns,peerpermid,selver,expectedpermid,callback):
+        """ Called by network thread after the permid on the other side is known
+            or an error occured. Verifies that the peer who answered at the
+            recorded address is the permid the caller wanted.
+        """
+        try:
+            if exc is None:
+                # Connect went OK
+                if peerpermid == expectedpermid:
+                    callback(None,dns,expectedpermid,selver)
+                else:
+                    # Someone else answered the phone
+                    callback(KeyError('Recorded IP address + port now of other permid'),
+                                     dns,expectedpermid,0)
+            else:
+                callback(exc,dns,expectedpermid,0)
+        except Exception,exc:
+            if DEBUG:
+                print_exc()
+            callback(exc,dns,expectedpermid,0)
+
+    def _send(self,permid,dns,message,callback):
+        # Network-thread part of send(): requires an established,
+        # authenticated connection whose permid still matches.
+        if DEBUG:
+            print >> sys.stderr,"secover: actual send",getMessageName(message[0]),\
+                        "to",show_permid_short(permid), currentThread().getName()
+        try:
+            if dns is None:
+                callback(KeyError('IP address + port for permid unknown'),permid)
+            else:
+                iplport = ip_and_port2str(dns[0],dns[1])
+                oc = None
+                try:
+                    oc = self.iplport2oc[iplport]
+                except KeyError:
+                    pass
+                if oc is None:
+                    callback(KeyError('Not connected to permid'),permid)
+                elif oc.is_auth_done():
+                    if oc.get_auth_permid() == permid:
+                        oc.send_message(message)
+                        callback(None,permid)
+                    else:
+                        callback(KeyError('Recorded IP address + port now of other permid'),permid)
+                else:
+                    callback(KeyError('Connection not yet established'),permid)
+        except Exception,exc:
+            if DEBUG:
+                print_exc()
+            callback(exc,permid)
+
+
+    def _close(self,permid):
+        # Network-thread part of close(); silently ignores unknown permids.
+        if DEBUG:
+            print >> sys.stderr,"secover: actual close",show_permid_short(permid)
+        try:
+            oc = self.get_oc_by_permid(permid)
+            if not oc:
+                if DEBUG:
+                    print >> sys.stderr,"secover: error - actual close, but no connection to peer in admin"
+            else:
+                oc.close()
+        except Exception,e:
+            print_exc()
+
+    #
+    # Interface for SocketHandler
+    #
+    def get_handler(self):
+        # SocketHandler asks this object who handles incoming traffic.
+        return self
+    
+    def external_connection_made(self,singsock):
+        """ incoming connection (never used) """
+        if DEBUG:
+            print >> sys.stderr,"secover: external_connection_made",singsock.get_ip(),singsock.get_port()
+        # self.last_activity = time()
+        oc = OverlayConnection(self,singsock,self.rawserver)
+        singsock.set_handler(oc)
+
+    def connection_flushed(self,singsock):
+        """ sockethandler flushes connection """
+        if DEBUG:
+            print >> sys.stderr,"secover: connection_flushed",singsock.get_ip(),singsock.get_port()
+    
+    #
+    # Interface for ServerPortHandler
+    #
+    def externally_handshaked_connection_made(self, singsock, options, msg_remainder):
+        """ incoming connection, with the handshake partially read already to
+            identify it as an overlay connection (used always)
+        """
+        if DEBUG:
+            print >> sys.stderr,"secover: externally_handshaked_connection_made",\
+                singsock.get_ip(),singsock.get_port()
+        oc = OverlayConnection(self,singsock,self.rawserver,ext_handshake = True, options = options)
+        singsock.set_handler(oc)
+        # Replay any bytes read past the handshake into the new connection.
+        if msg_remainder:
+            oc.data_came_in(singsock,msg_remainder)
+        return True
+
+
+    #
+    # Interface for OverlayConnection
+    #
+    def got_auth_connection(self,oc):
+        """ authentication of peer via identity protocol successful.
+            Returns True to keep the connection, False when it was closed
+            as bogus or duplicate. """
+        if DEBUG:
+            print >> sys.stderr,"secover: got_auth_connection", \
+                show_permid_short(oc.get_auth_permid()),oc.get_ip(),oc.get_auth_listen_port(), currentThread().getName()
+
+        # For outgoing connections the authenticated listen port must match
+        # the port we dialed; otherwise drop the connection.
+        if oc.is_locally_initiated() and oc.get_port() != oc.get_auth_listen_port():
+            if DEBUG:
+                print >> sys.stderr,"secover: got_auth_connection: closing because auth", \
+                    "listen port not as expected",oc.get_port(),oc.get_auth_listen_port()
+            self.cleanup_admin_and_callbacks(oc,Exception('closing because auth listen port not as expected'))
+            return False
+
+        # self.last_activity = time()
+
+        ret = True
+        iplport = ip_and_port2str(oc.get_ip(),oc.get_auth_listen_port())
+        known = iplport in self.iplport2oc
+        if not known:
+            self.iplport2oc[iplport] = oc
+        elif known and not oc.is_locally_initiated():
+            # Locally initiated connections will already be registered,
+            # so if it's not a local connection and we already have one 
+            # we have a duplicate, and we close the new one.
+            if DEBUG:
+                print >> sys.stderr,"secover: got_auth_connection:", \
+                    "closing because we already have a connection to",iplport
+            self.cleanup_admin_and_callbacks(oc,
+                     Exception('closing because we already have a connection to peer'))
+            ret = False
+            
+        if ret:
+            if oc.is_auth_done():
+                hisdns = (oc.get_ip(),oc.get_auth_listen_port())
+            else:
+                hisdns = None
+
+            #if DEBUG:
+            #    print >>sys.stderr,"secover: userconnhandler is",self.userconnhandler
+            
+            if self.userconnhandler is not None:
+                try:
+                    self.userconnhandler(None,oc.get_auth_permid(),oc.get_sel_proto_ver(),oc.is_locally_initiated(),hisdns)
+                except:
+                    # Catch all
+                    print_exc()
+            # Fire any connect callbacks queued while auth was in progress.
+            oc.dequeue_callbacks()
+        return ret
+
+    def local_close(self,oc):
+        """ our side is closing the connection """
+        if DEBUG:
+            print >> sys.stderr,"secover: local_close"
+        self.cleanup_admin_and_callbacks(oc,CloseException('local close',oc.is_auth_done()))
+
+    def connection_lost(self,oc):
+        """ overlay connection telling us to clear admin """
+        if DEBUG:
+            print >> sys.stderr,"secover: connection_lost"
+        self.cleanup_admin_and_callbacks(oc,CloseException('connection lost',oc.is_auth_done()))
+
+
+    def got_message(self,permid,message,selversion):
+        """ received message from authenticated peer, pass to upper layer.
+            Returns the handler's verdict: True keeps the connection open,
+            False (or an unset handler) does not. """
+        if DEBUG:
+            print >> sys.stderr,"secover: got_message",getMessageName(message[0]),\
+                            "v"+str(selversion)
+        # self.last_activity = time()
+        if self.usermsghandler is None:
+            if DEBUG:
+                print >> sys.stderr,"secover: User receive callback not set"
+            return
+        try:
+            
+            #if DEBUG:
+            #    print >>sys.stderr,"secover: usermsghandler is",self.usermsghandler
+            
+            ret = self.usermsghandler(permid,selversion,message)
+            if ret is None:
+                if DEBUG:
+                    print >> sys.stderr,"secover: INTERNAL ERROR:", \
+                        "User receive callback returned None, not True or False"
+                ret = False
+            elif DEBUG:
+                print >> sys.stderr,"secover: message handler returned",ret
+            return ret
+        except:
+            # Catch all
+            print_exc()
+            return False
+
+        
+    def get_max_len(self):
+        # Maximum allowed overlay-message length (set via register()).
+        return self.max_len
+    
+    def get_my_peer_id(self):
+        # Our peer id, built by create_my_peer_id() from our listen port.
+        return self.myid
+
+    def get_my_keypair(self):
+        return self.mykeypair
+
+    def measurefunc(self,length):
+        # Hook called per received chunk; deliberately a no-op here.
+        pass
+
+    #
+    # Interface for OverlayThreadingBridge
+    #
+    def get_dns_from_peerdb(self,permid,use_cache=True):
+        # Called by any thread, except NetworkThread
+        # Resolve permid -> (ip,port): first the in-memory cache, then the
+        # peer database (which may block; hence the NetworkThread check).
+        
+        if currentThread().getName().startswith("NetworkThread"):
+            print >>sys.stderr,"secover: get_dns_from_peerdb: called by NetworkThread!"
+            print_stack()
+        
+        dns = self.dns.get(permid, None)
+
+        if not dns:
+            values = ('ip', 'port')
+            peer = self.peer_db.getOne(values, permid=bin2str(permid))
+            if peer and peer[0] and peer[1]:
+                ip = hostname_or_ip2ip(peer[0])
+                dns = (ip, int(peer[1]))
+        return dns
+    def add_peer_to_db(self,permid,dns,selversion):
+        """ add a connected peer to database """
+        # Called by OverlayThread
+        
+        if currentThread().getName().startswith("NetworkThread"):
+            print >>sys.stderr,"secover: add_peer_to_peerdb: called by NetworkThread!"
+            print_stack()
+        if DEBUG:
+            print >>sys.stderr,"secover: add_peer_to_peerdb: called by",currentThread().getName()
+        
+        self.dns[permid] = dns    # cache it to avoid querying db later
+        now = int(time())
+        peer_data = {'permid':permid, 'ip':dns[0], 'port':dns[1], 'oversion':selversion, 'last_seen':now, 'last_connected':now}
+        self.peer_db.addPeer(permid, peer_data, update_dns=True, update_connected=True, commit=True)
+        #self.peer_db.updateTimes(permid, 'connected_times', 1, commit=True)
+        
+
+    def update_peer_status(self,permid,authwasdone):
+        """ update last_seen and last_connected in peer db when close """
+        # Called by OverlayThread
+        
+        if currentThread().getName().startswith("NetworkThread"):
+            print >>sys.stderr,"secover: update_peer_status: called by NetworkThread!"
+            print_stack()
+        
+        now = int(time())
+        if authwasdone:
+            self.peer_db.updatePeer(permid, last_seen=now, last_connected=now)
+            # Notify observers that the (authenticated) connection went away.
+            self.lm.session.uch.notify(NTFY_PEERS, NTFY_CONNECTION, permid, False)
+    #
+    # Interface for debugging
+    #
+    def debug_get_live_connections(self):
+        """ return a list of (permid,dns) tuples of the peers with which we 
+            are connected. Like all methods here it must be called by the network thread
+        """
+        live_conn = []
+        for iplport in self.iplport2oc:
+            oc = self.iplport2oc[iplport]
+            if oc:
+                peer_permid = oc.get_auth_permid()
+                if peer_permid:
+                    live_conn.append((peer_permid,(oc.get_ip(),oc.get_port())))
+        return live_conn
+
+
+    #
+    # Internal methods
+    #
+    def start_connection(self,dns):
+        # Open an outgoing socket to dns and wrap it in an (as yet
+        # unauthenticated) OverlayConnection.
+        if DEBUG:
+            print >> sys.stderr,"secover: Attempt to connect to",dns
+        singsock = self.sock_hand.start_connection(dns)
+        oc = OverlayConnection(self,singsock,self.rawserver,
+                               locally_initiated=True,specified_dns=dns)
+        singsock.set_handler(oc)
+        return oc
+
+    def cleanup_admin_and_callbacks(self,oc,exc):
+        # Fail any queued connect callbacks, drop the admin entries, and
+        # (if the peer was authenticated) notify the conns callback.
+        oc.cleanup_callbacks(exc)
+        self.cleanup_admin(oc)
+        if oc.is_auth_done() and self.userconnhandler is not None:
+            self.userconnhandler(exc,oc.get_auth_permid(),oc.get_sel_proto_ver(),
+                                 oc.is_locally_initiated(),None)
+
+    def cleanup_admin(self,oc):
+        # Remove every (ip+port) -> oc mapping for this connection.
+        # NOTE(review): iplports and d are computed but never used.
+        iplports = []
+        d = 0
+        # .keys() returns a list here (Python 2), so deleting entries while
+        # iterating is safe.
+        for key in self.iplport2oc.keys():
+            #print "***** iplport2oc:", key, self.iplport2oc[key]
+            if self.iplport2oc[key] == oc:
+                del self.iplport2oc[key]
+                #print "*****!!! del", key, oc
+                d += 1
+        
+    def get_oc_by_permid(self, permid):
+        """ return the OverlayConnection instance given a permid """
+
+        for iplport in self.iplport2oc:
+            oc = self.iplport2oc[iplport]
+            if oc.get_auth_permid() == permid:
+                return oc
+        return None
+
+
+
+class Task:
+    def __init__(self,method,*args, **kwargs):
+        self.method = method
+        self.args = args
+        self.kwargs = kwargs
+
+    def start(self):
+        if DEBUG:
+            print >> sys.stderr,"secover: task: start",self.method
+            #print_stack()
+        self.method(*self.args,**self.kwargs)
+
+    
+class CloseException(Exception):
+    def __init__(self,msg=None,authdone=False):
+        Exception.__init__(self,msg)
+        self.authdone= authdone
+
+    def __str__(self):
+        return str(self.__class__)+': '+Exception.__str__(self)
+
+    def was_auth_done(self):
+        return self.authdone
+    
+
+class OverlayConnection:
+    def __init__(self,handler,singsock,rawserver,locally_initiated = False,
+                 specified_dns = None, ext_handshake = False,options = None):
+        self.handler = handler        
+        self.singsock = singsock # for writing
+        self.rawserver = rawserver
+        self.buffer = StringIO()    # accumulates bytes until next_len is reached
+        self.cb_queue = []          # connect callbacks waiting for auth to finish
+        self.auth_permid = None
+        self.unauth_peer_id = None
+        self.auth_peer_id = None
+        self.auth_listen_port = None
+        self.low_proto_ver = 0
+        self.cur_proto_ver = 0
+        self.sel_proto_ver = 0
+        self.options = None
+        self.locally_initiated = locally_initiated
+        self.specified_dns = specified_dns
+        self.last_use = time()      # updated on traffic; drives idle expiry
+
+        # Send our side of the BT-style handshake immediately.
+        self.state = STATE_INITIAL
+        self.write(chr(len(protocol_name)) + protocol_name + 
+                option_pattern + overlay_infohash + self.handler.get_my_peer_id())
+        if ext_handshake:
+            # ServerPortHandler already consumed the handshake up to (but
+            # not including) the peer id, so resume parsing there.
+            self.state = STATE_HS_PEERID_WAIT
+            self.next_len = 20
+            self.next_func = self.read_peer_id
+            self.set_options(options)
+        else:
+            self.state = STATE_HS_FULL_WAIT
+            self.next_len = 1
+            self.next_func = self.read_header_len
+            
+        # Leave autoclose here instead of SecureOverlay, as that doesn't record
+        # remotely-initiated OverlayConnections before authentication is done.
+        self.rawserver.add_task(self._olconn_auto_close, EXPIRE_CHECK_INTERVAL)
+
+    #
+    # Interface for SocketHandler
+    #
+    def data_came_in(self, singsock, data):
+        """ sockethandler received data """
+        # now we got something we can ask for the peer's real port
+        dummy_port = singsock.get_port(True)
+
+        if DEBUG:
+            print >> sys.stderr,"olconn: data_came_in",singsock.get_ip(),singsock.get_port()
+        self.handler.measurefunc(len(data))
+        self.last_use = time()
+        # Incremental parser: buffer bytes until next_len are available,
+        # then feed that chunk to next_func, which returns the
+        # (length, handler) pair for the next chunk, or None to close.
+        while 1:
+            if self.state == STATE_CLOSED:
+                return
+            i = self.next_len - self.buffer.tell()
+            if i > len(data):
+                self.buffer.write(data)
+                return
+            self.buffer.write(data[:i])
+            data = data[i:]
+            m = self.buffer.getvalue()
+            self.buffer.reset()
+            self.buffer.truncate()
+            try:
+                if DEBUG:
+                    print >> sys.stderr,"olconn: Trying to read",self.next_len #,"using",self.next_func
+                x = self.next_func(m)
+            except:
+                # Park the parser in a dead state before re-raising so any
+                # further data is rejected.
+                self.next_len, self.next_func = 1, self.read_dead
+                if DEBUG:
+                    print_exc()
+                raise
+            if x is None:
+                if DEBUG:
+                    print >> sys.stderr,"olconn: next_func returned None",self.next_func
+                self.close()
+                return
+            self.next_len, self.next_func = x
+
+    def connection_lost(self,singsock):
+        """ kernel or socket handler reports connection lost """
+        if DEBUG:
+            print >> sys.stderr,"olconn: connection_lost",singsock.get_ip(),singsock.get_port(),self.state
+        if self.state != STATE_CLOSED:
+            self.state = STATE_CLOSED
+            self.handler.connection_lost(self)
+
+    def connection_flushed(self,singsock):
+        """ sockethandler flushes connection """
+        pass
+
+    # 
+    # Interface for SecureOverlay
+    #
+    def send_message(self,message):
+        # Frame the message with a 4-byte length prefix (see read_len).
+        self.last_use = time()
+        s = tobinary(len(message))+message
+        self.write(s)
+
+    def is_locally_initiated(self):
+        # True when we dialed out; False for incoming connections.
+        return self.locally_initiated
+
+    def get_ip(self):
+        return self.singsock.get_ip()
+
+    def get_port(self):
+        return self.singsock.get_port()
+
+    def is_auth_done(self):
+        # Authentication is complete once the peer's permid is known.
+        return self.auth_permid is not None
+
+    def get_auth_permid(self):
+        return self.auth_permid
+
+    def get_auth_listen_port(self):
+        return self.auth_listen_port
+
+    def get_remote_listen_port(self):
+        # Best-known listen port of the peer: the authenticated value if
+        # available, else the port we dialed, else a sentinel.
+        if self.is_auth_done():
+            return self.auth_listen_port
+        elif self.is_locally_initiated():
+            return self.specified_dns[1]
+        else:
+            return NO_REMOTE_LISTEN_PORT_KNOWN
+
+    def get_low_proto_ver(self):
+        return self.low_proto_ver
+
+    def get_cur_proto_ver(self):
+        return self.cur_proto_ver
+
+    def get_sel_proto_ver(self):
+        # Protocol version negotiated for this connection.
+        return self.sel_proto_ver
+
+    def queue_callback(self,dns,callback):
+        # NOTE(review): the dns argument is accepted but unused; queued
+        # callbacks later fire with self.specified_dns instead.
+        if callback is not None:
+            self.cb_queue.append(callback)
+
+    def dequeue_callbacks(self):
+        """ Fire all queued connect callbacks after successful auth. """
+        try:
+            permid = self.get_auth_permid()
+            for callback in self.cb_queue:
+                callback(None,self.specified_dns,permid,self.get_sel_proto_ver())
+            self.cb_queue = []
+        except Exception,e:
+            print_exc()
+
+
+    def cleanup_callbacks(self,exc):
+        """ Fire all queued connect callbacks with the given error. """
+        if DEBUG:
+            print >> sys.stderr,"olconn: cleanup_callbacks: #callbacks is",len(self.cb_queue)
+        try:
+            for callback in self.cb_queue:
+                ## Failure connecting
+                if DEBUG:
+                    print >> sys.stderr,"olconn: cleanup_callbacks: callback is",callback
+                callback(exc,self.specified_dns,self.get_auth_permid(),0)
+        except Exception,e:
+            print_exc()
+
+    #
+    # Interface for ChallengeResponse
+    #
+    def get_unauth_peer_id(self):
+        # Peer id as read from the handshake, before authentication.
+        return self.unauth_peer_id
+
+    def got_auth_connection(self,singsock,permid,peer_id):
+        """ authentication of peer via identity protocol successful """
+        self.auth_permid = str(permid)
+        self.auth_peer_id = peer_id
+        # The peer's listen port is encoded in its peer id
+        # (decode_auth_listen_port is defined elsewhere in this module).
+        self.auth_listen_port = decode_auth_listen_port(peer_id)
+
+        self.state = STATE_DATA_WAIT
+
+        # SecureOverlay may reject the connection (bogus port, duplicate).
+        if not self.handler.got_auth_connection(self):
+            self.close()
+            return
+
+    #
+    # Internal methods
+    #
+    def read_header_len(self, s):
+        if ord(s) != len(protocol_name):
+            return None
+        return len(protocol_name), self.read_header
+
+    def read_header(self, s):
+        if s != protocol_name:
+            return None
+        return 8, self.read_reserved
+
+    def read_reserved(self, s):
+        if DEBUG:
+            print >> sys.stderr,"olconn: Reserved bits:", `s`
+        self.set_options(s)
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):  # handshake step 4: infohash must be the dedicated overlay-swarm infohash
+        if s != overlay_infohash:
+            return None
+        return 20, self.read_peer_id  # next: 20-byte peer id
+
+    def read_peer_id(self, s):  # handshake step 5: 20-byte peer id carries the supported overlay-protocol versions
+        self.unauth_peer_id = s
+        
+        [self.low_proto_ver,self.cur_proto_ver] = get_proto_version_from_peer_id(self.unauth_peer_id)
+        self.sel_proto_ver = select_supported_protoversion(self.low_proto_ver,self.cur_proto_ver)
+        if not self.sel_proto_ver:  # None = no mutually supported protocol version
+            if DEBUG:
+                print >> sys.stderr,"olconn: We don't support peer's version of the protocol"
+            return None
+        elif DEBUG:
+            print >> sys.stderr,"olconn: Selected protocol version",self.sel_proto_ver
+
+        if self.cur_proto_ver <= 2:
+            # Arno, 2010-02-04: Kick TorrentSwapper clones, still around 
+            print >>sys.stderr,"olconn: Kicking ancient peer",`self.unauth_peer_id`,self.get_ip()
+            return None
+
+        self.state = STATE_AUTH_WAIT  # subsequent messages belong to the challenge/response exchange
+        self.cr = ChallengeResponse(self.handler.get_my_keypair(),self.handler.get_my_peer_id(),self)
+        if self.locally_initiated:
+            self.cr.start_cr(self)  # initiator side opens the exchange with a CHALLENGE
+        return 4, self.read_len  # handshake done: switch to 4-byte length-prefixed framing
+    
+
+    def read_len(self, s):  # s is the 4-byte big-endian length prefix of the next message
+        l = toint(s)
+        if l > self.handler.get_max_len():  # guard against oversized/hostile length values
+            return None
+        return l, self.read_message
+
+    def read_message(self, s):  # dispatch a complete message; empty s is a keepalive and is ignored
+        if s != '':
+            if self.state == STATE_AUTH_WAIT:  # still authenticating: feed the challenge/response machine
+                if not self.cr.got_message(self,s):
+                    return None
+            elif self.state == STATE_DATA_WAIT:  # authenticated: hand off to the overlay message handler
+                if not self.handler.got_message(self.auth_permid,s,self.sel_proto_ver):
+                    return None
+            else:
+                if DEBUG:
+                    print >> sys.stderr,"olconn: Received message while in illegal state, internal error!"
+                return None
+        return 4, self.read_len  # go back to reading the next length prefix
+
+    def read_dead(self, s):  # terminal state handler: always abort further reads
+        return None
+
+    def write(self,s):  # send raw bytes over the underlying single socket
+        self.singsock.write(s)
+
+    def set_options(self,options):  # store the 8 reserved handshake option bytes
+        self.options = options
+
+    def close(self):  # locally-initiated close; idempotent (no-op once state is STATE_CLOSED)
+        if DEBUG:
+            print >> sys.stderr,"olconn: we close()",self.get_ip(),self.get_port()
+            #print_stack()
+        self.state_when_error = self.state  # remember the state we were in, for error reporting
+        if self.state != STATE_CLOSED:
+            self.state = STATE_CLOSED
+            self.handler.local_close(self)  # notify handler before tearing down the socket
+            self.singsock.close()
+        return
+
+    def _olconn_auto_close(self):  # periodic idle check: close after EXPIRE_THRESHOLD of no use, else re-schedule
+        if (time() - self.last_use) > EXPIRE_THRESHOLD:
+            self.close()
+        else:
+            self.rawserver.add_task(self._olconn_auto_close, EXPIRE_CHECK_INTERVAL)
+
+
+#
+# Internal functions
+#
+def create_my_peer_id(my_listen_port):  # build a 20-byte peer id: bytes 14:16 = listen port, 16:18 = lowest, 18:20 = current proto version (all little-endian uint16)
+    myid = createPeerID()
+    myid = myid[:16] + pack('<H', OLPROTO_VER_LOWEST) + pack('<H', OLPROTO_VER_CURRENT)
+    myid = myid[:14] + pack('<H', my_listen_port) + myid[16:]
+    return myid
+
+def get_proto_version_from_peer_id(peerid):  # inverse of create_my_peer_id for the version fields
+    """ overlay swarm versioning solution- use last 4 bytes in PeerID """
+
+    low_ver_str = peerid[16:18]
+    cur_ver_str = peerid[18:20]
+    low_ver = unpack('<H', low_ver_str)[0]
+    cur_ver = unpack('<H', cur_ver_str)[0]
+    return [low_ver,cur_ver]  # [lowest supported, current] protocol versions
+
+def is_proto_version_supported(low_ver,cur_ver):  # True if the peer's [low_ver,cur_ver] range overlaps ours
+    if cur_ver != OLPROTO_VER_CURRENT:
+        if low_ver > OLPROTO_VER_CURRENT:    # the other's version is too high
+            return False
+        if cur_ver < OLPROTO_VER_LOWEST:     # the other's version is too low
+            return False           
+        if cur_ver < OLPROTO_VER_CURRENT and \
+           cur_ver not in SupportedVersions:   # the other's version is not supported
+            return False
+    return True
+
+def select_supported_protoversion(his_low_ver,his_cur_ver):  # pick the highest mutually supported version, or None
+    selected = None
+    if his_cur_ver != OLPROTO_VER_CURRENT:
+        if his_low_ver > OLPROTO_VER_CURRENT:    # the other's low version is too high
+            return selected
+        if his_cur_ver < OLPROTO_VER_LOWEST:     # the other's current version is too low
+            return selected        
+        if his_cur_ver < OLPROTO_VER_CURRENT and \
+           his_cur_ver not in SupportedVersions:   # the other's current version is not supported (peer of this version is abandoned)
+            return selected
+        
+    selected = min(his_cur_ver,OLPROTO_VER_CURRENT)
+    return selected
+
+def decode_auth_listen_port(peerid):  # extract the little-endian uint16 listen port packed at peer id bytes 14:16
+    bin = peerid[14:16]
+    tup = unpack('<H', bin)
+    return tup[0]
+
+def ip_and_port2str(ip,port):  # format an (ip, port) pair as "ip:port"
+    return ip+':'+str(port)
diff --git a/instrumentation/next-share/BaseLib/Core/Overlay/__init__.py b/instrumentation/next-share/BaseLib/Core/Overlay/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/Overlay/permid.py b/instrumentation/next-share/BaseLib/Core/Overlay/permid.py
new file mode 100644 (file)
index 0000000..be4a2fc
--- /dev/null
@@ -0,0 +1,410 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+from BaseLib.Core.Utilities.Crypto import sha
+from base64 import encodestring
+from copy import deepcopy
+import traceback,os
+
+from M2Crypto import Rand,EC
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+DEBUG = False
+
+# Internal constants
+keypair_ecc_curve = EC.NID_sect233k1  # ECC curve used for all PermID keypairs (sect233k1)
+num_random_bits = 1024*8 # bits of randomness per challenge (i.e. 1024 bytes)
+
+# Protocol states
+STATE_INITIAL = 0        # nothing exchanged yet
+STATE_AWAIT_R1 = 1       # we sent CHALLENGE, awaiting RESPONSE1
+STATE_AWAIT_R2 = 2       # we sent RESPONSE1, awaiting RESPONSE2
+STATE_AUTHENTICATED = 3  # mutual authentication succeeded
+STATE_FAILED = 4         # protocol violated; connection must be dropped
+
+# Exported functions
+def init():  # seed M2Crypto's RNG from the OS entropy pool; call once at startup
+    Rand.rand_seed(os.urandom(num_random_bits/8))
+
+def exit():  # symmetric shutdown hook for init(); currently nothing to release
+    pass
+
+def generate_keypair():  # create a fresh EC keypair on the configured curve
+    ec_keypair=EC.gen_params(keypair_ecc_curve)
+    ec_keypair.gen_key()
+    return ec_keypair
+
+def read_keypair(keypairfilename):  # load a private+public EC keypair from a PEM file
+    return EC.load_key(keypairfilename)
+
+def read_pub_key(pubfilename):  # load just the public EC key from a PEM file
+    return EC.load_pub_key(pubfilename)
+
+def save_keypair(keypair,keypairfilename):  # write the keypair to disk; cipher=None means unencrypted
+    keypair.save_key(keypairfilename, None)    
+
+def save_pub_key(keypair,pubkeyfilename):  # write only the public half of the keypair to disk
+    keypair.save_pub_key(pubkeyfilename)    
+
+
+# def show_permid(permid):
+# See Tribler/utilities.py
+
+def permid_for_user(permid):  # human-displayable form of a binary PermID
+    # Full BASE64-encoded 
+    return encodestring(permid).replace("\n","")
+
+# For convenience
+def sign_data(plaintext,ec_keypair):  # SHA-1 digest then ECDSA sign; returns ASN.1 DER signature blob
+    digest = sha(plaintext).digest()
+    return ec_keypair.sign_dsa_asn1(digest)
+
+def verify_data(plaintext,permid,blob):  # verify blob against plaintext; permid is a DER-encoded public key
+    pubkey = EC.pub_key_from_der(permid)
+    digest = sha(plaintext).digest()
+    return pubkey.verify_dsa_asn1(digest,blob)
+
+def verify_data_pubkeyobj(plaintext,pubkey,blob):  # as verify_data, but takes an already-parsed EC pubkey object
+    digest = sha(plaintext).digest()
+    return pubkey.verify_dsa_asn1(digest,blob)
+
+
+# Internal functions
+
+#
+# The following methods and ChallengeResponse class implement a
+# Challenge/Response identification protocol, notably the
+# ISO/IEC 9798-3 protocol, as described in $10.3.3 (ii) (2) of the 
+# ``Handbook of Applied Cryptography''by  Alfred J. Menezes et al.
+#
+
+def generate_challenge():  # returns [raw random bytes, bencoded wire form] for a new challenge
+    randomB = Rand.rand_bytes(num_random_bits/8)
+    return [randomB,bencode(randomB)]
+
+def check_challenge(cdata):  # decode a received CHALLENGE; None on malformed bencoding or wrong length
+    try:
+        randomB = bdecode(cdata)
+    except:
+        return None
+    if len(randomB) != num_random_bits/8:
+        return None
+    else:
+        return randomB
+
+def generate_response1(randomB,peeridB,keypairA):  # build RESPONSE1: our cert, our fresh nonce, B's id, and signature over (rA,rB,B)
+    randomA = Rand.rand_bytes(num_random_bits/8)
+    response1 = {}
+    response1['certA'] = str(keypairA.pub().get_der())
+    response1['rA'] = randomA
+    response1['B'] = peeridB
+    response1['SA'] = sign_response(randomA,randomB,peeridB,keypairA)
+    return [randomA,bencode(response1)]  # [our raw nonce, bencoded wire form]
+
+def check_response1(rdata1,randomB,peeridB):  # validate RESPONSE1; returns [peer nonce, peer pubkey] or [None,None]
+    try:
+        response1 = bdecode(rdata1)
+    except:
+        return [None,None]
+    if response1['B'] != peeridB:  # NOTE(review): missing keys raise KeyError outside the try -- caller must catch; confirm
+        return [None,None]
+    pubA_der = response1['certA']
+    pubA = EC.pub_key_from_der(pubA_der)
+    sigA = response1['SA']
+    randomA = response1['rA']
+    if verify_response(randomA,randomB,peeridB,pubA,sigA):  # signature proves A holds the key for certA
+        return [randomA,pubA]
+    else:
+        return [None,None]
+    
+def generate_response2(randomA,peeridA,randomB,keypairB):  # build RESPONSE2: our cert, A's id, signature over (rB,rA,A)
+    response2 = {}
+    response2['certB'] = str(keypairB.pub().get_der())
+    response2['A'] = peeridA
+    response2['SB'] = sign_response(randomB,randomA,peeridA,keypairB)
+    return bencode(response2)
+
+def check_response2(rdata2,randomA,peeridA,randomB,peeridB):  # validate RESPONSE2; returns peer pubkey or None
+    try:
+        response2 = bdecode(rdata2)
+    except:
+        return None
+    if response2['A'] != peeridA:  # NOTE(review): missing keys raise KeyError outside the try -- caller must catch; confirm
+        return None
+    pubB_der = response2['certB']
+    pubB = EC.pub_key_from_der(pubB_der)
+    sigB = response2['SB']
+    if verify_response(randomB,randomA,peeridA,pubB,sigB):  # signature proves B holds the key for certB
+        return pubB
+    else:
+        return None
+
+def sign_response(randomA,randomB,peeridB,keypairA):  # ECDSA-sign SHA-1 of bencoded [randomA, randomB, peeridB]
+    list = [ randomA, randomB, peeridB ]
+    blist = bencode(list)
+    digest = sha(blist).digest()
+    blob = keypairA.sign_dsa_asn1(digest)
+    return blob
+
+def verify_response(randomA,randomB,peeridB,pubA,sigA):  # verify the counterpart of sign_response with pubA
+    list = [ randomA, randomB, peeridB ]
+    blist = bencode(list)
+    digest = sha(blist).digest()
+    return pubA.verify_dsa_asn1(digest,sigA)
+    
+
+# External functions
+
+def create_torrent_signature(metainfo,keypairfilename):  # sign the bencoded metainfo in-place, adding 'signature' and 'signer' keys
+    keypair = EC.load_key(keypairfilename)
+    bmetainfo = bencode(metainfo)
+    digester = sha(bmetainfo[:])
+    digest = digester.digest()
+    sigstr = keypair.sign_dsa_asn1(digest)
+    metainfo['signature'] = sigstr
+    metainfo['signer'] = str(keypair.pub().get_der())  # DER pubkey so receivers can verify
+    
+def verify_torrent_signature(metainfo):  # verify a metainfo signed by create_torrent_signature; input is not modified
+    r = deepcopy(metainfo)  # work on a copy: signature covers metainfo *without* the two added keys
+    signature = r['signature']
+    signer = r['signer']
+    del r['signature']
+    del r['signer']
+    bmetainfo = bencode(r)
+    digester = sha(bmetainfo[:])
+    digest = digester.digest()
+    return do_verify_torrent_signature(digest,signature,signer)
+
+
+# Internal
+
+def do_verify_torrent_signature(digest,sigstr,permid):  # low-level verify; never raises, returns bool
+    if permid is None:
+        return False
+    try:
+        ecpub = EC.pub_key_from_der(permid)
+        if ecpub is None:
+            return False
+        intret = ecpub.verify_dsa_asn1(digest,sigstr)
+        return intret == 1  # M2Crypto returns an int; 1 means the signature verified
+    except Exception, e:
+        print >> sys.stderr,"permid: Exception in verify_torrent_signature:",str(e) 
+        return False
+
+
+# Exported classes
+class PermIDException(Exception): pass
+
+class ChallengeResponse:
+    """ Exchange Challenge/Response via Overlay Swarm """
+
+    def __init__(self, my_keypair, my_id, secure_overlay):  # my_id: our BT peer id; secure_overlay: callback target for got_auth_connection
+        self.my_keypair = my_keypair
+        self.permid = str(my_keypair.pub().get_der())  # our PermID = DER-encoded public key
+        self.my_id = my_id
+        self.secure_overlay = secure_overlay
+
+        self.my_random = None
+        self.peer_id = None
+        self.peer_random = None
+        self.peer_pub = None
+        self.state = STATE_INITIAL
+        # Calculate message limits:
+        [dummy_random,cdata] = generate_challenge()
+        [dummy_random1,rdata1] = generate_response1(dummy_random,my_id,self.my_keypair)
+        rdata2 = generate_response2(dummy_random,my_id,dummy_random,self.my_keypair)
+        self.minchal = 1+len(cdata) # 1+ = message type
+        self.minr1 = 1+len(rdata1) - 1 # Arno: hack, also here, just to be on the safe side
+        self.minr2 = 1+len(rdata2) - 1 # Arno: hack, sometimes the official minimum is too big
+
+    def starting_party(self,locally_initiated):  # True iff we are the initiator and should send the CHALLENGE
+        if self.state == STATE_INITIAL and locally_initiated:
+            self.state = STATE_AWAIT_R1
+            return True
+        else:
+            return False
+
+    def create_challenge(self):  # make a fresh nonce, remember it, return its wire encoding
+        [self.my_random,cdata] = generate_challenge()
+        return cdata
+
+    def got_challenge_event(self,cdata,peer_id):  # responder side: validate CHALLENGE, return RESPONSE1 payload; raises PermIDException on error
+        if self.state != STATE_INITIAL:
+            self.state = STATE_FAILED
+            if DEBUG:
+                print >> sys.stderr, "Got unexpected CHALLENGE message"
+            raise PermIDException
+        self.peer_random = check_challenge(cdata)
+        if self.peer_random is None:
+            self.state = STATE_FAILED
+            if DEBUG:
+                print >> sys.stderr,"Got bad CHALLENGE message"
+            raise PermIDException
+        self.peer_id = peer_id
+        [self.my_random,rdata1] = generate_response1(self.peer_random,peer_id,self.my_keypair)
+        self.state = STATE_AWAIT_R2
+        return rdata1
+
+    def got_response1_event(self,rdata1,peer_id):  # initiator side: validate RESPONSE1, authenticate peer, return RESPONSE2 payload
+        if self.state != STATE_AWAIT_R1:
+            self.state = STATE_FAILED
+            if DEBUG:
+                print >> sys.stderr,"Got unexpected RESPONSE1 message"
+            raise PermIDException
+        [randomA,peer_pub] = check_response1(rdata1,self.my_random,self.my_id)
+        
+        if randomA is None or peer_pub is None:
+            self.state = STATE_FAILED
+            if DEBUG:
+                print >> sys.stderr,"Got bad RESPONSE1 message"
+            raise PermIDException
+        
+        # avoid being connected by myself
+        peer_permid = str(peer_pub.get_der())
+        if self.permid == peer_permid:
+            self.state = STATE_FAILED
+            if DEBUG:
+                print >> sys.stderr,"Got the same Permid as myself"
+            raise PermIDException
+        
+        self.peer_id = peer_id
+        self.peer_random = randomA
+        self.peer_pub = peer_pub
+        self.set_peer_authenticated()
+        rdata2 = generate_response2(self.peer_random,self.peer_id,self.my_random,self.my_keypair)
+        return rdata2
+
+    def got_response2_event(self,rdata2):  # responder side: validate RESPONSE2 and mark peer authenticated
+        if self.state != STATE_AWAIT_R2:
+            self.state = STATE_FAILED
+            if DEBUG:
+                print >> sys.stderr,"Got unexpected RESPONSE2 message"
+            raise PermIDException
+        self.peer_pub = check_response2(rdata2,self.my_random,self.my_id,self.peer_random,self.peer_id)
+        if self.peer_pub is None:
+            self.state = STATE_FAILED
+            if DEBUG:
+                print >> sys.stderr,"Got bad RESPONSE2 message, authentication failed."
+            raise PermIDException
+        else:
+            # avoid being connected by myself
+            peer_permid = str(self.peer_pub.get_der())
+            if self.permid == peer_permid:
+                self.state = STATE_FAILED
+                if DEBUG:
+                    print >> sys.stderr,"Got the same Permid as myself"
+                raise PermIDException
+            else:
+                self.set_peer_authenticated()
+
+    def set_peer_authenticated(self):  # terminal success state
+        if DEBUG:
+            print >> sys.stderr,"permid: Challenge response successful!"
+        self.state = STATE_AUTHENTICATED
+
+    def get_peer_authenticated(self):  # True once the exchange completed successfully
+        return self.state == STATE_AUTHENTICATED
+    
+    def get_peer_permid(self):  # peer's PermID (DER pubkey); raises unless authenticated
+        if self.state != STATE_AUTHENTICATED:
+            raise PermIDException
+        return self.peer_pub.get_der()
+
+    def get_auth_peer_id(self):  # peer's BT peer id; raises unless authenticated
+        if self.state != STATE_AUTHENTICATED:
+            raise PermIDException
+        return self.peer_id
+
+    def get_challenge_minlen(self):  # minimum valid wire length of a CHALLENGE message
+        return self.minchal
+
+    def get_response1_minlen(self):  # minimum valid wire length of a RESPONSE1 message
+        return self.minr1
+
+    def get_response2_minlen(self):  # minimum valid wire length of a RESPONSE2 message
+        return self.minr2
+
+#---------------------------------------
+
+    def start_cr(self, conn):  # entry point for the connection: initiator sends the first CHALLENGE
+        if not self.get_peer_authenticated() and self.starting_party(conn.is_locally_initiated()):
+            self.send_challenge(conn)
+
+    def send_challenge(self, conn):  # send CHALLENGE message (1-byte type + bencoded nonce)
+        cdata = self.create_challenge()
+        conn.send_message(CHALLENGE + str(cdata) )
+
+    def got_challenge(self, cdata, conn):  # responder: answer a CHALLENGE with RESPONSE1
+        rdata1 = self.got_challenge_event(cdata, conn.get_unauth_peer_id())
+        conn.send_message(RESPONSE1 + rdata1)
+
+    def got_response1(self, rdata1, conn):  # initiator: answer RESPONSE1 with RESPONSE2 and report the authenticated connection
+        rdata2 = self.got_response1_event(rdata1, conn.get_unauth_peer_id())
+        conn.send_message(RESPONSE2 + rdata2)
+        # get_peer_permid() throws exception if auth has failed
+        self.secure_overlay.got_auth_connection(conn,self.get_peer_permid(),self.get_auth_peer_id())
+     
+    def got_response2(self, rdata2, conn):  # responder: finish the exchange and report the authenticated connection
+        self.got_response2_event(rdata2)
+        if self.get_peer_authenticated():
+            #conn.send_message('')    # Send KeepAlive message as reply
+            self.secure_overlay.got_auth_connection(conn,self.get_peer_permid(),self.get_auth_peer_id())
+
+
+    def got_message(self, conn, message):
+        """ Handle message for PermID exchange and return if the message is valid """
+        
+        if not conn:
+            return False
+        t = message[0]  # 1-byte message type
+        if message[1:]:
+            msg = message[1:]  # NOTE(review): msg stays unbound for a type-only message; handlers below would raise NameError -- confirm unreachable
+            
+        if t == CHALLENGE:
+            if len(message) < self.get_challenge_minlen():
+                if DEBUG:
+                    print >> sys.stderr,"permid: Close on bad CHALLENGE: msg len",len(message)
+                self.state = STATE_FAILED
+                return False
+            try:
+                self.got_challenge(msg, conn)
+            except Exception,e:
+                if DEBUG:
+                    print >> sys.stderr,"permid: Close on bad CHALLENGE: exception",str(e)
+                    traceback.print_exc()
+                return False
+        elif t == RESPONSE1:
+            if len(message) < self.get_response1_minlen():
+                if DEBUG:
+                    print >> sys.stderr,"permid: Close on bad RESPONSE1: msg len",len(message)
+                self.state = STATE_FAILED
+                return False
+            try:
+                self.got_response1(msg, conn)
+            except Exception,e:
+                if DEBUG:
+                    print >> sys.stderr,"permid: Close on bad RESPONSE1: exception",str(e)
+                    traceback.print_exc()
+                return False
+        elif t == RESPONSE2:
+            if len(message) < self.get_response2_minlen():
+                if DEBUG:
+                    print >> sys.stderr,"permid: Close on bad RESPONSE2: msg len",len(message)
+                self.state = STATE_FAILED
+                return False
+            try:
+                self.got_response2(msg, conn)
+            except Exception,e:
+                if DEBUG:
+                    print >> sys.stderr,"permid: Close on bad RESPONSE2: exception",str(e)
+                    traceback.print_exc()
+                return False
+        else:
+            return False
+        return True
+
+if __name__ == '__main__':  # smoke test: just seed the RNG
+    init()
+#    ChallengeResponse(None, None)
diff --git a/instrumentation/next-share/BaseLib/Core/ProxyService/Coordinator.py b/instrumentation/next-share/BaseLib/Core/ProxyService/Coordinator.py
new file mode 100644 (file)
index 0000000..4d0cfa7
--- /dev/null
@@ -0,0 +1,802 @@
+# Written by Pawel Garbacki, Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# TODO: when ASK_FOR_HELP cannot be sent, mark this in the interface
+
+from traceback import print_exc
+import copy
+import sys
+from threading import Lock
+
+from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from BaseLib.Core.CacheDB.CacheDBHandler import PeerDBHandler, TorrentDBHandler
+#from BaseLib.Core.Session import Session
+from BaseLib.Core.Overlay.SecureOverlay import OverlayConnection
+from BaseLib.Core.BitTornado.bencode import bencode
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+from BaseLib.Core.BitTornado.BT1.MessageID import ASK_FOR_HELP, STOP_HELPING, REQUEST_PIECES, CANCEL_PIECE, JOIN_HELPERS, RESIGN_AS_HELPER, DROPPED_PIECE
+from BaseLib.Core.ProxyService.ProxyServiceUtil import *
+from mailcap import show  # NOTE(review): stdlib mailcap defines no 'show' -- looks like an accidental IDE auto-import; verify and remove
+
+# Print debug messages
+DEBUG = False
+# ???
+MAX_ROUNDS = 137  # NOTE(review): not referenced in this file's visible code -- purpose unclear, confirm before relying on it
+
+
+class Coordinator:
+
+    def __init__(self, infohash, num_pieces):  # one Coordinator per download; infohash identifies the torrent
+        # Number of pieces in the torrent
+        self.num_pieces = num_pieces 
+        
+        # Vector for reserved-state information per piece
+        self.reserved_pieces = [False] * num_pieces
+        # Torrent infohash
+        self.infohash = infohash # readonly so no locking on this
+
+        # List of sent challenges 
+        self.sent_challenges_by_challenge = {}  # challenge -> permid
+        self.sent_challenges_by_permid = {}     # permid -> challenge (reverse map)
+
+        # List of asked helpers 
+        self.asked_helpers_lock = Lock()
+        self.asked_helpers = [] # protected by asked_helpers_lock
+        
+        # List of confirmed helpers 
+        self.confirmed_helpers_lock = Lock()
+        self.confirmed_helpers = [] # protected by confirmed_helpers_lock
+        
+        # Dictionary for keeping evidence of helpers and the pieces requested to them
+        # Key: permid of a helper
+        # Value: list of pieces requested to the helper 
+        self.requested_pieces = {} 
+        
+        # optimization
+        # List of reserved pieces ???
+        self.reserved = []
+        
+        # Tribler overlay swarm (bridge to the overlay thread)
+        self.overlay_bridge = OverlayThreadingBridge.getInstance()
+        
+        # BT1Download object
+        self.downloader = None  # set later by the download machinery -- TODO confirm where
+
+
+    #
+    # Send messages
+    # 
+
+    #
+    # Interface for Core API. 
+    # 
+    def send_ask_for_help(self, peerList, force = False):
+        """ Asks for help to all the peers in peerList that have not been asked before
+        
+        Called by ask_coopdl_helpers in SingleDownload
+        
+        @param peerList: A list of peer objects for the peers that will be contacted for helping, containing ['permid','ip','port']
+        @param force: If True, all the peers in peerList will be contacted for help, regardless of previous help requests being sent to them 
+        """
+        if DEBUG:
+            for peer in peerList:
+                print >> sys.stderr, "coordinator: i was requested to send help request to", show_permid_short(peer['permid'])
+                
+        try:
+            # List of helpers to be contacted for help
+            newly_asked_helpers = []
+            if force:
+                # Contact all peers for help, regardless of previous help requests being sent to them
+                newly_asked_helpers = peerList
+            else:
+                # TODO: optimize the search below
+                # TODO: if a candidate is in the asked_helpers list, remember the last time it was asked for help
+                # and wait for a timeout before asking it again
+                # Check which of the candidate helpers is already a helper
+                self.confirmed_helpers_lock.acquire()
+                try:
+                    for candidate in peerList:
+                        flag = 0
+                        for confirmed_helper in self.confirmed_helpers:
+                            if self.samePeer(candidate,confirmed_helper):
+                                # the candidate is already a helper 
+                                flag = 1
+                                break
+                            
+                        if flag == 0:
+                            # candidate has never been asked for help
+                            newly_asked_helpers.append(candidate)
+                            # Remember that this candidate has now been asked for help.
+                            # NOTE(review): comment/code mismatch in the original -- the code appends, so a
+                            # candidate re-asked after a failed attempt may appear twice; confirm intended.
+                            self.asked_helpers.append(candidate)
+                finally:
+                    self.confirmed_helpers_lock.release()
+
+            # List of permid's for the peers to be asked for help
+            permidlist = []
+            for peer in newly_asked_helpers:
+                # ???
+                peer['round'] = 0
+                permidlist.append(peer['permid'])
+                
+                # Generate a random challenge - random number on 8 bytes (62**8 possible combinations)
+                challenge = generate_proxy_challenge()
+                
+                # Save permid - challenge pair
+                self.sent_challenges_by_challenge[challenge] = peer['permid']
+                self.sent_challenges_by_permid[peer['permid']] = challenge
+                
+            # Send the help request
+            olthread_send_request_help_lambda = lambda:self.olthread_send_ask_for_help(permidlist)
+            self.overlay_bridge.add_task(olthread_send_request_help_lambda,0)  # 0 = run as soon as possible on the overlay thread
+        except Exception,e:
+            print_exc()
+            print >> sys.stderr, "coordinator: Exception while requesting help",e
+        
+        
+
+    def olthread_send_ask_for_help(self,permidlist):
+        """ Creates a bridge connection for the help request to be sent
+        
+        Called by the overlay thread.
+        
+        @param permidlist: A list of permids for the peers that will be contacted for helping
+        """
+        for permid in permidlist:
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_send_ask_for_help connecting to",show_permid_short(permid)
+            
+            # Connect to the peer designated by permid
+            self.overlay_bridge.connect(permid,self.olthread_ask_for_help_connect_callback)
+
+
+    def olthread_ask_for_help_connect_callback(self,exc,dns,permid,selversion):
+        """ Sends the help request message on the connection with the peer
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param dns:
+        @param permid: the permid of the peer that is contacted for helping
+        @param selversion 
+        """
+        if exc is None:
+            # Peer is reachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_ask_for_help_connect_callback sending help request to",show_permid_short(permid)
+            
+            # get the peer challenge
+            challenge = self.sent_challenges_by_permid[permid]
+            
+            # Create message according to protocol version
+            message = ASK_FOR_HELP + self.infohash + bencode(challenge)
+            
+            # Connect using Tribler Overlay Swarm
+            self.overlay_bridge.send(permid, message, self.olthread_ask_for_help_send_callback)
+        else:
+            # Peer is unreachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_ask_for_help_connect_callback: error connecting to",show_permid_short(permid),exc
+            # Remove peer from the list of asked peers
+            self.remove_unreachable_helper(permid)
+
+
+    def olthread_ask_for_help_send_callback(self,exc,permid):
+        """ Callback function for error checking in network communication
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param permid: the permid of the peer that is contacted for helping
+        """
+        if exc is not None:
+            # Peer is unreachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_ask_for_help_send_callback: error sending to",show_permid_short(permid),exc
+            # Remove peer from the list of asked peers
+            self.remove_unreachable_helper(permid)
+
+
+    def remove_unreachable_helper(self,permid):
+        """ Remove a peer from the list of asked helpers
+        
+        Called by the overlay thread.
+        
+        @param permid: the permid of the peer to be removed from the list
+        """
+        self.asked_helpers_lock.acquire()
+        try:
+            # Search the peers with permid != from the given permid
+            new_asked_helpers = []  # rebuild the list rather than mutating it in place
+            for peer in self.asked_helpers:
+                if peer['permid'] != permid:
+                    new_asked_helpers.append(peer)
+            self.asked_helpers = new_asked_helpers
+        except Exception,e:
+            print_exc()
+            print >> sys.stderr, "coordinator: Exception in remove_unreachable_helper",e
+        finally:
+            self.asked_helpers_lock.release()
+
+
+
+
+
+    def send_stop_helping(self,peerList, force = False):
+        """ Asks for all the peers in peerList to stop helping
+        
+        Called by stop_coopdl_helpers in SingleDownload
+        
+        @param peerList: A list of peer objects (containing ['permid','ip','port']) for the peers that will be asked to stop helping
+        @param force: If True, all the peers in peerList will be asked to stop helping for help, regardless of previous help requests being sent to them 
+        """
+        if DEBUG:
+            for peer in peerList:
+                print >> sys.stderr, "coordinator: i was requested to send a stop helping request to", show_permid_short(peer)
+                
+
+        # TODO: optimize the search below
+        try:
+            if force:
+                # Tell all peers in the peerList to stop helping, regardless of previous help requests being sent to them
+                to_stop_helpers = peerList
+            else:
+                # Who in the peerList is actually a helper currently?
+                # List of peers that will be asked to stop helping
+                to_stop_helpers = []
+                
+                
+                # Search and update the confirmed_helpers list
+                self.confirmed_helpers_lock.acquire()
+                try:
+                    for candidate in peerList:
+                        # For each candidate
+                        # Search the candidate in the confirmed_helpers list
+                        for confirmed_helper in self.confirmed_helpers:
+                            if self.samePeer(candidate, confirmed_helper):
+                                # candidate was asked for help
+                                to_stop_helpers.append(candidate)
+                                break
+    
+                    # Who of the confirmed helpers gets to stay?
+                    to_keep_helpers = []
+                    for confirmed_helper in self.confirmed_helpers:
+                        flag = 0
+                        for candidate in to_stop_helpers:
+                            if self.samePeer(candidate,confirmed_helper):
+                                # candidate was asked for help
+                                flag = 1
+                                break
+                        if flag == 0:
+                            # candidate was not asked for help
+                            to_keep_helpers.append(confirmed_helper)
+        
+                    # Update confirmed_helpers
+                    self.confirmed_helpers = to_keep_helpers
+                finally:
+                    self.confirmed_helpers_lock.release()
+
+                
+                # Search and update the asked_helpers list
+                self.asked_helpers_lock.acquire()
+                try:
+                    for candidate in peerList:
+                        # Search the candidate in the asked_helpers list
+                        # TODO: if the same helper is both in confirmed_helpers and asked_helpers
+                        # than it will be added twice to the to_stop_helpers list 
+                        for asked_helper in self.asked_helpers:
+                            if self.samePeer(candidate, asked_helper):
+                                # candidate was asked for help
+                                to_stop_helpers.append(candidate)
+                                break
+                    # Who of the asked helpers gets to stay?
+                    to_keep_helpers = []
+                    for asked_helper in self.asked_helpers:
+                        flag = 0
+                        for candidate in to_stop_helpers:
+                            if self.samePeer(candidate,asked_helper):
+                                # candidate was asked for help
+                                flag = 1
+                                break
+                        if flag == 0:
+                            # candidate was not asked for help
+                            to_keep_helpers.append(asked_helper)
+        
+                    # Update asked_helpers
+                    self.asked_helpers = to_keep_helpers
+                finally:
+                    self.asked_helpers_lock.release()
+
+            # List of permid's for the peers that are asked to stop helping 
+            permidlist = []
+            for peer in to_stop_helpers:
+                permidlist.append(peer['permid'])
+
+            # Ask peers to stop helping
+            olthread_send_stop_help_lambda = lambda:self.olthread_send_stop_help(permidlist)
+            self.overlay_bridge.add_task(olthread_send_stop_help_lambda,0)  # 0 = run as soon as possible on the overlay thread
+        except Exception,e:
+            print_exc()
+            print >> sys.stderr, "coordinator: Exception in send_stop_helping",e
+
+
+    def olthread_send_stop_help(self,permidlist):
+        """ Creates a bridge connection for the stop helping request to be sent
+        
+        Called by the overlay thread.
+        
+        @param permidlist: list of the peer permid's to be asked to stop helping
+        """
+        for permid in permidlist:
+            if DEBUG:
+                print >> sys.stderr, "coordinator: error connecting to", show_permid_short(permid), "for stopping help"
+            self.overlay_bridge.connect(permid,self.olthread_stop_help_connect_callback)
+
+
+    def olthread_stop_help_connect_callback(self,exc,dns,permid,selversion):
+        """ Sends the STOP_HELPING message on the established connection with the peer
+        
+        Called by the overlay thread once the connection attempt completes.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param dns: connection endpoint information (unused here)
+        @param permid: the permid of the peer that is contacted to stop helping
+        @param selversion: selected overlay protocol version (unused here)
+        """
+        if exc is None:
+            # Peer is reachable
+            # STOP_HELPING payload is just the infohash of this download
+            message = STOP_HELPING + self.infohash
+            self.overlay_bridge.send(permid, message, self.olthread_stop_help_send_callback)
+        elif DEBUG:
+            # Peer is not reachable
+            print >> sys.stderr, "coordinator: olthread_stop_help_connect_callback: error connecting to",show_permid_short(permid),exc
+
+
+    def olthread_stop_help_send_callback(self,exc,permid):
+        """ Callback function for error checking in network communication
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param permid: the permid of the peer that is contacted to stop helping
+        """
+        if exc is not None:
+            # Peer is unreachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: STOP_HELPING: error sending to",show_permid_short(permid),exc
+
+
+
+
+
+    def send_request_pieces(self, piece, peerid):
+        """ Send messages to helpers to request the pieces in pieceList
+        
+        Called by next() in PiecePicker
+        
+        @param piece: The piece that will be requested to one of the helpers
+        @param peerid: The peerid of the helper that will be requested for the piece
+        """
+        if DEBUG:
+            print >>sys.stderr, "coordinator: send_request_pieces: will send requests for piece", piece
+                
+        try:
+            # Choose one of the confirmed helpers
+            chosen_permid = self.choose_helper(peerid);
+            
+            # Store the helper identification data and the piece requested to it
+            if chosen_permid in self.requested_pieces:
+                # The peer is already in the dictionary: a previous request was sent to it
+                current_requested_pieces = self.requested_pieces.get(chosen_permid)
+                # Check if the piece was not requested before
+                if piece in current_requested_pieces:
+                    # The piece has already been requested to that helper. No re-requests in this version
+                    if DEBUG:
+                        print >> sys.stderr, "coordinator: send_request_pieces: piece", piece, "was already requested to another helper"
+                    return
+                current_requested_pieces.append(piece)
+                self.requested_pieces[chosen_permid] = current_requested_pieces
+            else:
+                # The peer is not in the dictionary: no previous requests were sent to it
+                self.requested_pieces[chosen_permid] = [piece]
+
+            # Sent the request message to the helper
+            olthread_send_request_help_lambda = lambda:self.olthread_send_request_pieces(chosen_permid, piece)
+            self.overlay_bridge.add_task(olthread_send_request_help_lambda,0)
+        except Exception,e:
+            print_exc()
+            print >> sys.stderr, "coordinator: Exception while requesting piece",piece,e
+        
+
+    def olthread_send_request_pieces(self, permid, piece):
+        """ Creates a bridge connection for the piece request message to be sent
+        
+        Called by the overlay thread.
+        
+        @param permid: The permid of the peer that will be contacted
+        @param piece: The piece that will be requested
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: olthread_send_request_pieces connecting to", show_permid_short(permid), "to request piece", piece
+        # Connect to the peer designated by permid
+        olthread_reserve_pieces_connect_callback_lambda = lambda e,d,p,s:self.olthread_request_pieces_connect_callback(e,d,p,s,piece)
+        self.overlay_bridge.connect(permid, olthread_reserve_pieces_connect_callback_lambda)
+
+
+    def olthread_request_pieces_connect_callback(self, exc, dns, permid, selversion, piece):
+        """ Sends the REQUEST_PIECES message on the connection with the helper
+        
+        Called by the overlay thread. (Docstring fixed: it previously claimed
+        this sends the join_helpers message.)
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param dns: connection endpoint information (unused here)
+        @param permid: the permid of the helper that is requested a piece
+        @param selversion: selected overlay protocol version (unused here)
+        @param piece: the requested piece (bencoded into the message payload)
+        """
+        if exc is None:
+            # Peer is reachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_request_pieces_connect_callback sending help request to", show_permid_short(permid), "for piece", piece
+            
+            # REQUEST_PIECES payload: infohash followed by the bencoded piece
+            message = REQUEST_PIECES + self.infohash + bencode(piece)
+            
+            # Send via the Tribler overlay swarm
+            self.overlay_bridge.send(permid, message, self.olthread_request_pieces_send_callback)
+        else:
+            # Peer is unreachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_request_pieces_connect_callback: error connecting to",show_permid_short(permid),exc
+            # Remove peer from the list of asked peers
+            self.remove_unreachable_helper(permid)
+
+
+    def olthread_request_pieces_send_callback(self,exc,permid):
+        """ Callback function for error checking in network communication
+        
+        Called by the overlay thread after the REQUEST_PIECES send completes.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param permid: the permid of the peer that is contacted for helping
+        """
+        if exc is not None:
+            # Peer is unreachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_request_pieces_send_callback: error sending to",show_permid_short(permid),exc
+            # Remove peer from the list of asked peers
+            self.remove_unreachable_helper(permid)
+
+
+    def choose_helper(self, peerid):
+        """ The method returns one of the confirmed helpers, to be contacted for help for a specific piece
+        
+        Called by send_request_pieces
+        @param peerid: The peerid of the helper that will be requested to download a piece
+        @return: the permid of that peer
+        """
+
+        # The helper's peerid carries a challenge; map it back through
+        # sent_challenges_by_challenge to the permid it was sent to.
+        # NOTE(review): this raises KeyError if the decoded challenge was
+        # never recorded - confirm callers only pass peerids of known helpers.
+        helper_challenge = decode_challenge_from_peerid(peerid)
+        chosen_helper = self.sent_challenges_by_challenge[helper_challenge]
+        
+        # Alternative (disabled) selection policy: pick a random helper from
+        # the confirmed helper list.
+#        chosen_helper = random.choice(self.confirmed_helpers)
+        
+        return chosen_helper
+
+
+
+
+
+    def send_cancel_piece(self, piece):
+        """ Send a cancel message for the specified piece
+        
+        Called by TODO
+        
+        @param piece: The piece that will be canceled to the respective helper
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: i will cancel the request for piece", piece
+            
+        try:
+            # Check if the piece was reserved before
+            all_requested_pieces = self.requested_pieces.values()
+            if piece not in all_requested_pieces:
+                if DEBUG:
+                    print >> sys.stderr, "coordinator: piece", piece, "was not requested to any peer"
+                return
+            
+            # Find the peer that was requested to download the piece
+            for helper in self.requested_pieces.keys():
+                his_pieces = self.requested_pieces[helper]
+                if piece in his_pieces:
+                    if DEBUG:
+                        print >> sys.stderr, "coordinator: canceling piece", piece, "to peer", show_permid_short(helper)
+                    # Sent the cancel message to the helper
+                    olthread_send_cancel_piece_lambda = lambda:self.olthread_send_cancel_piece(chosen_permid, piece)
+                    self.overlay_bridge.add_task(olthread_send_cancel_piece_lambda,0)
+        except Exception,e:
+            print_exc()
+            print >> sys.stderr, "coordinator: Exception while requesting piece",piece,e
+        
+
+    def olthread_send_cancel_piece(self, permid, piece):
+        """ Creates a bridge connection for the piece cancel message to be sent
+        
+        Called by the overlay thread.
+        
+        @param permid: The permid of the peer that will be contacted
+        @param piece: The piece that will be canceled
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: olthread_send_cancel_piece connecting to", show_permid_short(permid), "to cancel piece", piece
+        # Connect to the peer designated by permid
+        self.overlay_bridge.connect(permid, piece, self.olthread_cancel_piece_connect_callback)
+
+
+    def olthread_cancel_piece_connect_callback(self, exc, dns, permid, piece, selversion):
+        """ Sends the cancel piece message on the connection with the peer
+        
+        Called by the overlay thread.
+        
+        NOTE(review): the parameter order here places 'piece' before
+        'selversion', unlike olthread_request_pieces_connect_callback which
+        appends the piece last - callers must bind arguments accordingly.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param dns: connection endpoint information (unused here)
+        @param permid: the permid of the helper that is requested a piece
+        @param piece: the canceled piece
+        @param selversion: selected overlay protocol version (unused here)
+        """
+        if exc is None:
+            # Peer is reachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_cancel_piece_connect_callback sending a cancel request to", show_permid_short(permid), "for piece", piece
+            
+            # CANCEL_PIECE payload: infohash followed by the bencoded piece
+            message = CANCEL_PIECE + self.infohash + bencode(piece)
+            
+            # Send via the Tribler overlay swarm
+            self.overlay_bridge.send(permid, message, self.olthread_cancel_piece_send_callback)
+        else:
+            # Peer is unreachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_cancel_piece_connect_callback: error connecting to",show_permid_short(permid),exc
+            # Remove peer from the list of asked peers
+            self.remove_unreachable_helper(permid)
+
+
+    def olthread_cancel_piece_send_callback(self,exc,permid):
+        """ Callback function for error checking in network communication
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param permid: the permid of the peer that is contacted for helping
+        """
+        if exc is not None:
+            # Peer is unreachable
+            if DEBUG:
+                print >> sys.stderr, "coordinator: olthread_cancel_piece_send_callback: error sending to",show_permid_short(permid),exc
+            # Remove peer from the list of asked peers
+            self.remove_unreachable_helper(permid)
+
+
+    
+    
+    
+    #
+    # Got (received) messages
+    # 
+    def got_join_helpers(self,permid,selversion):
+        """ Mark the peer as an active helper
+        
+        @param permid: The permid of the node sending the message
+        @param selversion:
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: received a JOIN_HELPERS message from", show_permid_short(permid)
+
+        #Search the peer in the asked_helpers list, remove it from there, and put it in the confirmed_helpers list.
+        self.asked_helpers_lock.acquire()
+        try:
+            # Search the peers with permid != from the given permid
+            new_asked_helpers = []
+            for peer in self.asked_helpers:
+                if peer['permid'] != permid:
+                    new_asked_helpers.append(peer)
+                else:
+                    # Keep a reference to the peer, to add it to the confirmed_helpers list
+                    #
+                    # If there are more than one peer with the same peerid in the asked_helpers list
+                    # than only add the last one to the confirmed_helpers list. 
+                    confirmed_helper = peer
+            self.asked_helpers = new_asked_helpers
+        finally:
+            self.asked_helpers_lock.release()
+        
+        self.confirmed_helpers_lock.acquire()
+        self.confirmed_helpers.append(confirmed_helper)
+        self.confirmed_helpers_lock.release()
+        
+
+
+
+
+    def got_resign_as_helper(self,permid,selversion):
+        """ Remove the peer from the list of active helpers (and form the list of asked helpers)
+        
+        @param permid: The permid of the node sending the message
+        @param selversion:
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: received a RESIGN_AS_HELPER message from", show_permid_short(permid)
+
+        #Search the peer in the asked_helpers list and remove it from there
+        self.asked_helpers_lock.acquire()
+        try:
+            # Search the peers with permid != from the given permid
+            new_asked_helpers = []
+            for peer in self.asked_helpers:
+                if peer['permid'] != permid:
+                    new_asked_helpers.append(peer)
+            self.asked_helpers = new_asked_helpers
+        finally:
+            self.asked_helpers_lock.release()
+
+        #Search the peer in the confirmed_helpers list and remove it from there
+        self.confirmed_helpers_lock.acquire()
+        try:
+            # Search the peers with permid != from the given permid
+            new_confirmed_helpers = []
+            for peer in self.confirmed_helpers:
+                if peer['permid'] != permid:
+                    new_confirmed_helpers.append(peer)
+            self.confirmed_helpers = new_confirmed_helpers
+        finally:
+            self.confirmed_helpers_lock.release()
+
+
+
+
+
+    def got_dropped_piece(self, permid, piece, selversion):
+        """ Handle a DROPPED_PIECE message from a helper (not implemented yet)
+        
+        @param permid: The permid of the node sending the message
+        @param piece: The piece that was dropped
+        @param selversion:
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: received a DROPPED_PIECE message from", show_permid_short(permid)
+
+        # TODO: no action is taken yet for dropped pieces
+        pass
+
+
+
+
+
+    def got_proxy_have(self,permid,selversion, aggregated_string):
+        """ Take the list of pieces the helper sent and combine it with the numhaves in the piece picker
+        
+        @param permid: The permid of the node sending the message
+        @param selversion:
+        @param aggregated_string: a bitstring of available pieces built by the helper based on HAVE messages it received
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: received a PROXY_HAVE message from", show_permid_short(permid)
+
+        # Disabled payload-length validation kept for reference:
+#        if len(aggregated_string) != self.num_pieces:
+#            print >> sys.stderr, "coordinator: got_proxy_have: invalid payload in received PROXY_HAVE message. self.num_pieces=", self.num_pieces, "len(aggregated_string)=", len(aggregated_string)
+
+        # Get the recorded peer challenge
+        # NOTE(review): raises KeyError if no challenge was ever sent to this
+        # permid - confirm the message handler filters unknown helpers first.
+        peer_challenge = self.sent_challenges_by_permid[permid]
+        
+        # Search for the download connection whose peerid carries this challenge
+        for d in self.downloader.downloads:
+            peer_id = d.connection.get_id()
+            if peer_challenge == decode_challenge_from_peerid(peer_id):
+                # Connection found: store the helper's availability bitfield
+                #new_have_list = map(sum, zip(d.have, piece_list))
+                d.proxy_have = Bitfield(length=self.downloader.numpieces, bitstring=aggregated_string)
+                break
+
+
+
+
+
+    # Returns a copy of the asked helpers list
+    def network_get_asked_helpers_copy(self):
+        """ Returns a COPY of the list. We need 'before' and 'after' info here,
+        so the caller is not allowed to update the current confirmed_helpers
+        
+        NOTE(review): despite the name ("asked helpers"), this method copies
+        and returns self.confirmed_helpers - confirm which list callers expect.
+        """
+        if DEBUG:
+            print >> sys.stderr, "coordinator: network_get_asked_helpers_copy: Number of helpers:",len(self.confirmed_helpers)
+        self.confirmed_helpers_lock.acquire()
+        try:
+            return copy.deepcopy(self.confirmed_helpers)
+        finally:
+            self.confirmed_helpers_lock.release()
+
+    # Compares peers a and b 
+    def samePeer(self,a,b):
+        """ Compares peers a and b
+        
+        @param a: First peer to compare
+        @param b: Second peer to compare
+        @return: True, if the peers are identical. False, if the peers are different
+        """
+        if a.has_key('permid'):
+            if b.has_key('permid'):
+                if a['permid'] == b['permid']:
+                    return True
+        # TODO: Why, if permid's are different, the function returns True ???
+        if a['ip'] == b['ip'] and a['port'] == b['port']:
+            return True
+        else:
+            return False
+
+
+
+
+
+    #
+    # Interface for Encrypter.Connection
+    #
+    # TODO: rename this function
+    # TODO: change ip param to permid
+    # Returns True if the peer with the given IP is a confirmed helper
+    def is_helper_ip(self, ip):
+        """ Used by Coordinator's Downloader (via Encrypter) to see what 
+        connections are helpers
+        
+        @param ip: the IP address to look up in the confirmed_helpers list
+        @return: True if some confirmed helper has this IP, False otherwise
+        """
+        # called by network thread; the lock guards confirmed_helpers against
+        # concurrent updates from the overlay thread
+        self.confirmed_helpers_lock.acquire()
+        try:
+            for peer in self.confirmed_helpers:
+                if peer['ip'] == ip:
+                    return True
+            return False
+        finally:
+            self.confirmed_helpers_lock.release()
+
+
+
+
+
+    #
+    # Interface for CoordinatorMessageHandler
+    #
+    # TODO: rename this function
+    # Return True if the peer is a helper
+    # permid = permid of the peer
+    def network_is_helper_permid(self, permid):
+        """ Used by CoordinatorMessageHandler to check if RESERVE_PIECES is from good source (if the permid is a helper)
+        
+        NOTE(review): unlike is_helper_ip, this reads confirmed_helpers
+        without taking confirmed_helpers_lock - confirm this is safe for the
+        overlay thread.
+        """
+        # called by overlay thread
+        for peer in self.confirmed_helpers:
+            if peer['permid'] == permid:
+                return True
+        return False
+
+
+
+
+
+
+
+
+    # TODO: rename this function
+    # Returns the list of reserved pieces
+    def network_get_reserved(self):
+        """ Return self.reserved (the reserved pieces list) without copying. """
+        return self.reserved
+
+
+
+
+
+    # Set downloader object
+    def set_downloader(self, downloader):
+        """ Method used to set a reference to the downloader object
+        
+        Called by BT1Download, after it creates the Downloader object
+        
+        @param downloader: A reference to the downloader object for the current download
+        """
+        self.downloader = downloader
diff --git a/instrumentation/next-share/BaseLib/Core/ProxyService/CoordinatorMessageHandler.py b/instrumentation/next-share/BaseLib/Core/ProxyService/CoordinatorMessageHandler.py
new file mode 100644 (file)
index 0000000..e052c93
--- /dev/null
@@ -0,0 +1,256 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# SecureOverlay message handler for a Coordinator
+#
+import sys
+
+from BaseLib.Core.BitTornado.bencode import bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from BaseLib.Core.simpledefs import *
+
+DEBUG = False
+
+class CoordinatorMessageHandler:
+    def __init__(self, launchmany):
+        # Launchmany ???
+        self.launchmany = launchmany
+
+    def handleMessage(self, permid, selversion, message):
+        """ Handle the received message and call the appropriate function to solve it.
+        
+        As there are multiple coordinator instances, one for each download/upload, the right coordinator instance must be found prior to making a call to it's methods.
+            
+        @param permid: The permid of the peer who sent the message
+        @param selversion:
+        @param message: The message received
+        """
+        
+        type = message[0]
+        if DEBUG:
+            print >> sys.stderr, "coordinator message handler: received the message", getMessageName(type), "from", show_permid_short(permid)
+
+        # Call the appropriate function 
+        if type == JOIN_HELPERS:
+            return self.got_join_helpers(permid, message, selversion)
+        elif type == RESIGN_AS_HELPER:
+            return self.got_resign_as_helper(permid, message, selversion)
+        elif type == DROPPED_PIECE:
+            return self.got_dropped_piece(permid, message, selversion)
+        elif type == PROXY_HAVE:
+            return self.got_proxy_have(permid, message, selversion)
+
+
+
+
+
+    def got_join_helpers(self, permid, message, selversion):
+        """ Handle the JOIN_HELPERS message.
+        
+        @param permid: The permid of the peer who sent the message
+        @param message: The message received
+        @param selversion:
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: network_got_join_helpers: got_join_helpers"
+
+        try:
+            infohash = message[1:21]
+        except:
+            print >> sys.stderr, "coordinator: network_got_join_helpers: warning - bad data in JOIN_HELPERS"
+            return False
+
+        # Add a task to find the appropriate Coordinator object method 
+        network_got_join_helpers_lambda = lambda:self.network_got_join_helpers(permid, infohash, selversion)
+        self.launchmany.rawserver.add_task(network_got_join_helpers_lambda, 0)
+
+        return True
+
+
+    def network_got_join_helpers(self, permid, infohash, selversion):
+        """ Find the appropriate Coordinator object and call it's method.
+        
+        Called by the network thread.
+        
+        @param permid: The permid of the peer who sent the message
+        @param infohash: The infohash sent by the remote peer
+        @param selversion:
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: network_got_join_helpers: network_got_join_helpers"
+
+        # Get coordinator object
+        coord_obj = self.launchmany.get_coopdl_role_object(infohash, COOPDL_ROLE_COORDINATOR)
+        if coord_obj is None:
+            # There is no coordinator object associated with this infohash
+            if DEBUG:
+                print >> sys.stderr, "coordinator: network_got_join_helpers: There is no coordinator object associated with this infohash"
+            return
+
+        # Call the coordinator method
+        coord_obj.got_join_helpers(permid, selversion)
+
+
+
+
+
+    def got_resign_as_helper(self, permid, message, selversion):
+        """ Handle the RESIGN_AS_HELPER message.
+        
+        @param permid: The permid of the peer who sent the message
+        @param message: The message received
+        @param selversion:
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: got_resign_as_helper"
+
+        try:
+            infohash = message[1:21]
+        except:
+            print >> sys.stderr, "coordinator warning: bad data in RESIGN_AS_HELPER"
+            return False
+
+        # Add a task to find the appropriate Coordinator object method 
+        network_got_resign_as_helper_lambda = lambda:self.network_got_resign_as_helper(permid, infohash, selversion)
+        self.launchmany.rawserver.add_task(network_got_resign_as_helper_lambda, 0)
+
+        return True
+
+
+    def network_got_resign_as_helper(self, permid, infohash, selversion):
+        """ Find the appropriate Coordinator object and call it's method.
+        
+        Called by the network thread.
+        
+        @param permid: The permid of the peer who sent the message
+        @param infohash: The infohash sent by the remote peer
+        @param selversion:
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: network_got_resign_as_helper"
+
+        # Get coordinator object
+        coord_obj = self.launchmany.get_coopdl_role_object(infohash, COOPDL_ROLE_COORDINATOR)
+        if coord_obj is None:
+            # There is no coordinator object associated with this infohash
+            if DEBUG:
+                print >> sys.stderr, "coordinator: network_got_resign_as_helper: There is no coordinator object associated with this infohash"
+            return
+
+        # Call the coordinator method
+        coord_obj.got_resign_as_helper(permid, selversion)
+
+
+
+
+
+    def got_dropped_piece(self, permid, message, selversion):
+        """ Handle the DROPPED_PIECE message.
+        
+        @param permid: The permid of the peer who sent the message
+        @param message: The message received
+        @param selversion:
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: got_dropped_piece"
+
+        try:
+            infohash = message[1:21]
+            piece = bdecode(message[22:])
+        except:
+            print >> sys.stderr, "coordinator warning: bad data in DROPPED_PIECE"
+            return False
+
+        # Add a task to find the appropriate Coordinator object method 
+        network_got_dropped_piece_lambda = lambda:self.network_got_dropped_piece(permid, infohash, peice, selversion)
+        self.launchmany.rawserver.add_task(network_got_dropped_piece_lambda, 0)
+
+        return True
+
+
+    def network_got_dropped_piece(self, permid, infohash, piece, selversion):
+        """ Find the appropriate Coordinator object and call it's method.
+        
+        Called by the network thread.
+        
+        @param permid: The permid of the peer who sent the message
+        @param infohash: The infohash sent by the remote peer
+        @param piece: The piece that is dropped
+        @param selversion:
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: network_got_dropped_piece"
+
+        # Get coordinator object
+        coord_obj = self.launchmany.get_coopdl_role_object(infohash, COOPDL_ROLE_COORDINATOR)
+        if coord_obj is None:
+            # There is no coordinator object associated with this infohash
+            if DEBUG:
+                print >> sys.stderr, "coordinator: network_got_dropped_piece: There is no coordinator object associated with this infohash"
+            return
+
+        # Call the coordinator method
+        coord_obj.got_dropped_piece_(permid, piece, selversion)
+
+
+
+
+
+    def got_proxy_have(self, permid, message, selversion):
+        """ Handle the PROXY_HAVE message.
+        
+        @param permid: The permid of the peer who sent the message
+        @param message: The message received
+        @param selversion:
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: network_got_proxy_have: got_proxy_have"
+
+        try:
+            infohash = message[1:21]
+            aggregated_string = bdecode(message[21:])
+        except:
+            print >> sys.stderr, "coordinator: network_got_proxy_have: warning - bad data in PROXY_HAVE"
+            return False
+
+        # Add a task to find the appropriate Coordinator object method 
+        network_got_proxy_have_lambda = lambda:self.network_got_proxy_have(permid, infohash, selversion, aggregated_string)
+        self.launchmany.rawserver.add_task(network_got_proxy_have_lambda, 0)
+
+        return True
+
+
+    def network_got_proxy_have(self, permid, infohash, selversion, aggregated_string):
+        """ Find the appropriate Coordinator object and call it's method.
+        
+        Called by the network thread.
+        
+        @param permid: The permid of the peer who sent the message
+        @param infohash: The infohash sent by the remote peer
+        @param selversion:
+        @param aggregated_string: a bitstring of pieces the helper built based on HAVE messages
+        """
+
+        if DEBUG:
+            print >> sys.stderr, "coordinator: network_got_proxy_have: network_got_proxy_have"
+
+        # Get coordinator object
+        coord_obj = self.launchmany.get_coopdl_role_object(infohash, COOPDL_ROLE_COORDINATOR)
+        if coord_obj is None:
+            # There is no coordinator object associated with this infohash
+            if DEBUG:
+                print >> sys.stderr, "coordinator: network_got_proxy_have: There is no coordinator object associated with this infohash"
+            return
+
+        # Call the coordinator method
+        coord_obj.got_proxy_have(permid, selversion, aggregated_string)
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/ProxyService/Helper.py b/instrumentation/next-share/BaseLib/Core/ProxyService/Helper.py
new file mode 100644 (file)
index 0000000..1a66f9e
--- /dev/null
@@ -0,0 +1,543 @@
+# Written by Pawel Garbacki, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+from traceback import print_exc
+from time import time
+from collections import deque
+from threading import Lock
+
+from BaseLib.Core.BitTornado.bencode import bencode
+from BaseLib.Core.BitTornado.BT1.MessageID import ASK_FOR_HELP, STOP_HELPING, REQUEST_PIECES, CANCEL_PIECE, JOIN_HELPERS, RESIGN_AS_HELPER, DROPPED_PIECE, PROXY_HAVE, PROXY_UNHAVE
+
+from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+from BaseLib.Core.CacheDB.CacheDBHandler import PeerDBHandler, TorrentDBHandler 
+from BaseLib.Core.Utilities.utilities import show_permid_short
+
+# Completion is recomputed in Helper.is_complete() only once every MAX_ROUNDS calls (see the round counter there)
+MAX_ROUNDS = 200
+# Set to True to enable verbose debug logging to stderr for this module
+DEBUG = False
+
+class SingleDownloadHelperInterface:
+    """ This interface should contain all methods that the PiecePiecker/Helper
+        calls on the SingleDownload class.
+    """
+    def __init__(self):
+        self.frozen_by_helper = False  # True while the Helper has paused (frozen) this download
+
+    def helper_set_freezing(self,val):
+        self.frozen_by_helper = val  # set/clear the frozen flag (see Helper.restart)
+
+    def is_frozen_by_helper(self):
+        return self.frozen_by_helper
+
+    def is_choked(self):
+        pass  # expected to be overridden by SingleDownload (see class docstring)
+
+    def helper_forces_unchoke(self):
+        pass  # expected to be overridden by SingleDownload (see class docstring)
+
+    def _request_more(self, new_unchoke = False):
+        pass  # expected to be overridden by SingleDownload (see class docstring)
+
+
+class Helper:
+    def __init__(self, torrent_hash, num_pieces, coordinator_permid, coordinator = None):
+        
+        self.torrent_hash = torrent_hash
+        self.coordinator = coordinator
+
+        if coordinator_permid is not None and coordinator_permid == '':  # '' is used as a "no coordinator" sentinel; normalize it to None
+            self.coordinator_permid = None
+        else:
+            self.coordinator_permid = coordinator_permid
+
+        # Look up the coordinator's ip and port in the peer database (if known)
+        self.coordinator_ip = None  # see is_coordinator()
+        self.coordinator_port = -1
+        if self.coordinator_permid is not None:
+            peerdb = PeerDBHandler.getInstance()
+            peer = peerdb.getPeer(coordinator_permid)
+            if peer is not None:
+                self.coordinator_ip = peer['ip']
+                self.coordinator_port = peer['port']
+        
+        self.overlay_bridge = OverlayThreadingBridge.getInstance()
+        
+        self.reserved_pieces = [False] * num_pieces  # pieces requested of us by the coordinator (see got_request_pieces)
+        self.ignored_pieces = [False] * num_pieces   # pieces we should skip (cleared when the coordinator requests them)
+        self.distr_reserved_pieces = [False] * num_pieces  # distributed reservation state, compared to marker in is_complete()
+
+        self.requested_pieces = deque()        # FIFO of coordinator-requested piece numbers, drained by next_request()
+        self.requested_pieces_lock = Lock()    # guards requested_pieces (accessed from multiple threads)
+        
+        self.counter = 0
+        self.completed = False
+        self.marker = [True] * num_pieces  # all-True reference list used for the completion comparison in is_complete()
+        self.round = 0                     # call counter for is_complete(), wraps at MAX_ROUNDS
+        self.encoder = None                # set later via set_encoder()
+        self.continuations = []            # downloads waiting to be restarted by notify()
+        self.outstanding = None            # the single download currently waiting for a coordinator reply
+        self.last_req_time = 0
+        
+        # The challenge sent by the coordinator
+        self.challenge = None
+        
+
+    def test(self):
+        result = self.reserve_piece(10,None)  # NOTE(review): reserve_piece is not defined in this class — presumably a debug-only helper; confirm
+        print >> sys.stderr,"reserve piece returned: " + str(result)
+        print >> sys.stderr,"Test passed"
+
+
+
+
+
+    def notify(self):
+        """ Called by HelperMessageHandler to "wake up" the download that's
+            waiting for its coordinator to reserve it a piece 
+        """
+        if self.outstanding is None:
+            if DEBUG:
+                print >> sys.stderr,"helper: notify: No continuation waiting?"
+        else:
+            if DEBUG:
+                print >> sys.stderr,"helper: notify: Waking downloader"
+            sdownload = self.outstanding
+            self.outstanding = None # must be reset to None before calling self.restart()
+            self.restart(sdownload)
+            
+            #self.send_reservation()
+            l = self.continuations[:] # copy just to be sure
+            self.continuations = []
+            for sdownload in l:
+                self.restart(sdownload)
+
+    def restart(self,sdownload):
+        """ Unfreeze a download and make it request pieces again.
+        """
+        # Chokes can get in while we're waiting for reply from coordinator.
+        # But as we were called from _request_more() we were not choked
+        # just before, so pretend we didn't see the message yet.
+        if sdownload.is_choked():
+            sdownload.helper_forces_unchoke()
+        sdownload.helper_set_freezing(False)
+        sdownload._request_more()
+
+
+
+
+
+    #
+    # Send messages
+    # 
+
+    def send_join_helpers(self, permid):
+        """ Send a confirmation to the coordinator that the current node will provide proxy services
+        
+        Called by self.got_ask_for_help()
+        
+        @param permid: The permid of the node that will become coordinator
+        """
+
+        if DEBUG:
+            print "helper: send_join_helpers: sending a join_helpers message to", show_permid_short(permid)
+
+        olthread_send_join_helpers_lambda = lambda:self.olthread_send_join_helpers()
+        self.overlay_bridge.add_task(olthread_send_join_helpers_lambda,0)
+
+        
+    def olthread_send_join_helpers(self):
+        """ Creates a bridge connection for the join helpers message to be sent
+        
+        Called by the overlay thread.
+        """
+        # TODO: ??? We need to create the message according to protocol version, so need to pass all info.
+        olthread_join_helpers_connect_callback_lambda = lambda e,d,p,s:self.olthread_join_helpers_connect_callback(e,d,p,s)
+        self.overlay_bridge.connect(self.coordinator_permid,olthread_join_helpers_connect_callback_lambda)
+
+
+    def olthread_join_helpers_connect_callback(self,exc,dns,permid,selversion):
+        """ Sends the join helpers message on the connection with the coordinator
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param dns: (ip, port) address of the peer (unused here)
+        @param permid: the permid of the coordinator
+        @param selversion: selected overlay protocol version (unused here)
+        """
+        if exc is None:
+            # Create message according to protocol version
+            message = JOIN_HELPERS + self.torrent_hash
+
+            if DEBUG:
+                print >> sys.stderr,"helper: olthread_join_helpers_connect_callback: Sending JOIN_HELPERS to",show_permid_short(permid)
+
+            self.overlay_bridge.send(permid, message, self.olthread_join_helpers_send_callback)
+        elif DEBUG:
+            # The coordinator is unreachable
+            print >> sys.stderr,"helper: olthread_join_helpers_connect_callback: error connecting to",show_permid_short(permid),exc
+
+
+    def olthread_join_helpers_send_callback(self, exc, permid):
+        """ Callback function for error checking in network communication
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param permid: the permid of the peer that is contacted for helping
+        """
+
+        if exc is not None:
+            if DEBUG:
+                print >> sys.stderr,"helper: olthread_join_helpers_send_callback: error sending message to",show_permid_short(permid),exc
+        
+        pass
+
+
+
+
+
+    def send_proxy_have(self, aggregated_haves):
+        """ Send a list of aggregated have and bitfield information
+        
+        Called by Downloader.aggregate_and_send_haves
+        
+        @param aggregated_haves: A Bitfield object, containing an aggregated list of stored haves
+        """
+
+        if DEBUG:
+            print "helper: send_proxy_have: sending a proxy_have message to", show_permid_short(self.coordinator_permid)
+
+        aggregated_string = aggregated_haves.tostring()
+        olthread_send_proxy_have_lambda = lambda:self.olthread_send_proxy_have(aggregated_string)
+        self.overlay_bridge.add_task(olthread_send_proxy_have_lambda,0)
+
+        
+    def olthread_send_proxy_have(self, aggregated_string):
+        """ Creates a bridge connection for the proxy_have message to be sent
+        
+        Called by the overlay thread.
+        
+        @param aggregated_string: a bitstring of available pieces
+        """
+        # TODO: ??? We need to create the message according to protocol version, so need to pass all info.
+        olthread_proxy_have_connect_callback_lambda = lambda e,d,p,s:self.olthread_proxy_have_connect_callback(e,d,p,s,aggregated_string)
+        self.overlay_bridge.connect(self.coordinator_permid,olthread_proxy_have_connect_callback_lambda)
+
+
+    def olthread_proxy_have_connect_callback(self,exc,dns,permid,selversion, aggregated_string):
+        """ Sends the proxy_have message on the connection with the coordinator
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param dns: (ip, port) address of the peer (unused here)
+        @param permid: the permid of the coordinator
+        @param selversion: selected (buddycast?) version
+        @param aggregated_string: a bitstring of available pieces
+        """
+        if exc is None:
+            # Create message according to protocol version
+            message = PROXY_HAVE + self.torrent_hash + bencode(aggregated_string)
+
+            if DEBUG:
+                print >> sys.stderr,"helper: olthread_proxy_have_connect_callback: Sending PROXY_HAVE to",show_permid_short(permid)
+
+            self.overlay_bridge.send(permid, message, self.olthread_proxy_have_send_callback)
+        elif DEBUG:
+            # The coordinator is unreachable
+            print >> sys.stderr,"helper: olthread_proxy_have_connect_callback: error connecting to",show_permid_short(permid),exc
+
+
+    def olthread_proxy_have_send_callback(self, exc, permid):
+        """ Callback function for error checking in network communication
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param permid: the permid of the peer that is contacted for helping
+        """
+
+        if exc is not None:
+            if DEBUG:
+                print >> sys.stderr,"helper: olthread_proxy_have_send_callback: error sending message to",show_permid_short(permid),exc
+        
+        pass
+
+
+
+
+
+    def send_resign_as_helper(self, permid):
+        """ Send a message to the coordinator that the current node will stop providing proxy services
+        
+        Called by self.got_ask_for_help() when this node declines to help
+        
+        @param permid: The permid of the coordinator
+        """
+
+        if DEBUG:
+            print "helper: send_resign_as_helper: sending a resign_as_helper message to", permid
+
+        olthread_send_resign_as_helper_lambda = lambda:self.olthread_send_resign_as_helper()
+        self.overlay_bridge.add_task(olthread_send_resign_as_helper_lambda,0)
+
+        
+    def olthread_send_resign_as_helper(self):
+        """ Creates a bridge connection for the resign_as_helper message to be sent
+        
+        Called by the overlay thread.
+        """
+        olthread_resign_as_helper_connect_callback_lambda = lambda e,d,p,s:self.olthread_resign_as_helper_connect_callback(e,d,p,s)
+        self.overlay_bridge.connect(self.coordinator_permid,olthread_resign_as_helper_connect_callback_lambda)
+
+
+    def olthread_resign_as_helper_connect_callback(self,exc,dns,permid,selversion):
+        """ Sends the resign_as_helper message on the connection with the coordinator
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param dns: (ip, port) address of the peer (unused here)
+        @param permid: the permid of the coordinator
+        @param selversion: selected overlay protocol version (unused here)
+        """
+        if exc is None:
+            # Create message according to protocol version
+            message = RESIGN_AS_HELPER + self.torrent_hash
+
+            if DEBUG:
+                print >> sys.stderr,"helper: olthread_resign_as_helper_connect_callback: Sending RESIGN_AS_HELPER to",show_permid_short(permid)
+
+            self.overlay_bridge.send(permid, message, self.olthread_resign_as_helper_send_callback)
+        elif DEBUG:
+            # The coordinator is unreachable
+            print >> sys.stderr,"helper: olthread_resign_as_helper_connect_callback: error connecting to",show_permid_short(permid),exc
+
+
+    def olthread_resign_as_helper_send_callback(self,exc,permid):
+        """ Callback function for error checking in network communication
+        
+        Called by the overlay thread.
+        
+        @param exc: Peer reachable/unreachable information. None = peer reachable
+        @param permid: the permid of the peer that is contacted for helping
+        """
+        
+        if exc is not None:
+            if DEBUG:
+                print >> sys.stderr,"helper: olthread_resign_as_helper_send_callback: error sending message to",show_permid_short(permid),exc
+        
+        pass
+
+    
+    
+    
+
+    #
+    # Got (received) messages
+    # 
+    def got_ask_for_help(self, permid, infohash, challenge):
+        """ Start helping a coordinator or reply with an resign_as_helper message
+        
+        @param permid: The permid of the node sending the help request message
+        @param infohash: the infohash of the torrent for which help is requested 
+        @param challenge: The challenge sent by the coordinator
+        """
+        if DEBUG:
+            print >>sys.stderr,"helper: got_ask_for_help: will answer to the help request from", show_permid_short(permid)
+        if self.can_help(infohash):
+            # Send JOIN_HELPERS
+            if DEBUG:
+                print >>sys.stderr,"helper: got_ask_for_help: received a help request, going to send join_helpers"
+            self.send_join_helpers(permid)
+            self.challenge = challenge  # stored for the data connection (see start_data_connection)
+        else:
+            # Send RESIGN_AS_HELPER
+            if DEBUG:
+                print >>sys.stderr,"helper: got_ask_for_help: received a help request, going to send resign_as_helper"
+            self.send_resign_as_helper(permid)
+            return False
+
+        return True
+
+
+    def can_help(self, infohash):
+        """ Decide if the current node can help a coordinator for the current torrent
+        
+        @param infohash: the infohash of the torrent for which help is requested 
+        """        
+        #TODO: test if I can help the coordinator to download this file
+        #Future support: make the decision based on my preference
+        return True
+
+
+
+
+
+    def got_stop_helping(self, permid, infohash):
+        """ Stop helping a coordinator
+        
+        @param permid: The permid of the node sending the message
+        @param infohash: the infohash of the torrent for which help is released 
+        """        
+        #TODO: decide what to do here
+        return True
+
+
+
+
+
+    def got_request_pieces(self, permid, piece):
+        """ Start downloading the pieces that the coordinator requests
+        
+        @param permid: The permid of the node requesting the pieces
+        @param piece: a piece number, that is going to be downloaded 
+        """        
+        if DEBUG:
+            print "helper: got_request_pieces: received request_pieces for piece", piece
+
+        # Mark the piece as requested in the local data structures
+        self.reserved_pieces[piece] = True
+#        if self.distr_reserved_pieces[piece] == True:
+            # if the piece was previously requested by the same coordinator, don't do anything
+            #self.distr_reserved_pieces[piece] = True
+#            print "Received duplicate proxy request for", piece
+#            return
+
+        self.distr_reserved_pieces[piece] = True
+        self.ignored_pieces[piece] = False
+        
+        self.requested_pieces_lock.acquire()
+        self.requested_pieces.append(piece)
+        self.requested_pieces_lock.release()
+
+        # Start data connection
+        self.start_data_connection()
+
+    def start_data_connection(self):
+        """ Start a data connection with the coordinator
+        
+        Uses the coordinator address (self.coordinator_ip/port) resolved in __init__.
+        """
+        # Do this always, will return quickly when connection already exists
+        dns = (self.coordinator_ip, self.coordinator_port)
+        if DEBUG:
+            print >> sys.stderr,"helper: start_data_connection: Starting data connection to coordinator at", dns
+        
+        self.encoder.start_connection(dns, id = None, coord_con = True, challenge = self.challenge)
+
+
+
+    #
+    # Util functions
+    #
+    def is_coordinator(self, permid):
+        """ Check if the permid is the current coordinator
+        
+        @param permid: The permid to be checked if it is the coordinator
+        @return: True, if the permid is the current coordinator; False, if the permid is not the current coordinator
+        """
+        # If we could not resolve the coordinator's IP (see __init__), we cannot help
+        if self.coordinator_ip is None:
+            return False
+
+        if self.coordinator_permid == permid:
+            return True
+        else:
+            return False
+
+
+    def next_request(self):
+        """ Returns the next piece in the list of coordinator-requested pieces
+        
+        Called by the PiecePicker
+        
+        @return: a piece number, if there is a requested piece pending download; None, if there is no pending piece
+        """
+        self.requested_pieces_lock.acquire()
+        if len(self.requested_pieces) == 0:
+            self.requested_pieces_lock.release()
+            if DEBUG:
+                print >>sys.stderr,"helper: next_request: no requested pieces yet. Returning None"
+            return None
+        else:
+            next_piece = self.requested_pieces.popleft()
+            self.requested_pieces_lock.release()
+            if DEBUG:
+                print >>sys.stderr,"helper: next_request: Returning", next_piece
+            return next_piece
+        
+        
+    def set_encoder(self, encoder):
+        """ Sets the current encoder.
+        
+        Called from download_bt1.py
+        
+        @param encoder: the new encoder that will be set
+        """
+        self.encoder = encoder
+        self.encoder.set_coordinator_ip(self.coordinator_ip)
+        # To support a helping user stopping and restarting a torrent
+        if self.coordinator_permid is not None:
+            self.start_data_connection()   
+
+
+    def get_coordinator_permid(self):
+        """ Returns the coordinator permid
+        
+        Called from SingleDownload.py
+        
+        @return: Coordinator permid
+        """
+        return self.coordinator_permid
+
+
+    def is_reserved(self, piece):
+        """ Check if a piece is reserved (requested) by a coordinator
+        
+        Called by the network thread (Interface for PiecePicker and Downloader)
+        
+        @param piece: the piece whose status is to be checked
+        @return: True, if the piece is reqested by a coordinator; False, otherwise.
+        """
+        if self.reserved_pieces[piece] or (self.coordinator is not None and self.is_complete()):
+            return True
+        return self.reserved_pieces[piece]  # reached only when the piece is not reserved (False)
+
+
+    def is_ignored(self, piece):
+        """ Check if a piece is ignored by a coordinator
+        
+        Called by the network thread (Interface for PiecePicker and Downloader)
+        
+        @param piece: the piece whose status is to be checked
+        @return: True, if the piece is ignored by a coordinator; False, otherwise.
+        """
+        if not self.ignored_pieces[piece] or (self.coordinator is not None and self.is_complete()):
+            return False
+        return self.ignored_pieces[piece]  # reached only when the piece is ignored (True)
+
+
+    def is_complete(self):
+        """ Check torrent is completely downloaded
+        
+        Called by the network thread (Interface for PiecePicker and Downloader)
+        
+        @return: True, all the pieces are downloaded; False, otherwise.
+        """
+        if self.completed:
+            return True
+        
+        self.round = (self.round + 1) % MAX_ROUNDS
+        
+        # Only recompute the (expensive) list comparison once every MAX_ROUNDS calls
+        if self.round != 0:
+            return False
+        if self.coordinator is not None:
+            self.completed = (self.coordinator.reserved_pieces == self.marker)
+        else:
+            self.completed = (self.distr_reserved_pieces == self.marker)
+        return self.completed
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/ProxyService/HelperMessageHandler.py b/instrumentation/next-share/BaseLib/Core/ProxyService/HelperMessageHandler.py
new file mode 100644 (file)
index 0000000..fa327e4
--- /dev/null
@@ -0,0 +1,419 @@
+# Written by Pawel Garbacki, Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# SecureOverlay message handler for a Helper
+#
+
+import sys, os
+import binascii
+from threading import Lock
+from time import sleep
+
+from BaseLib.Core.TorrentDef import *
+from BaseLib.Core.Session import *
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.CacheDB.CacheDBHandler import PeerDBHandler, TorrentDBHandler
+
+from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+
+DEBUG = False
+
+class HelperMessageHandler:
+    def __init__(self):
+        self.metadata_queue = {}  # infohash -> list of coordinator permids waiting for that .torrent
+        self.metadata_queue_lock = Lock()  # guards metadata_queue
+        self.overlay_bridge = OverlayThreadingBridge.getInstance()
+        self.received_challenges = {}  # coordinator permid -> challenge received in ASK_FOR_HELP
+
+    def register(self,session,metadata_handler,helpdir,dlconfig):
+        # Late initialization: wires in the session, metadata handler, helper cache dir and download config.
+        self.session = session
+        self.helpdir = helpdir  # directory where helper .torrent files and downloaded data are stored
+        # The default DownloadStartupConfig dict as set in the Session
+        self.dlconfig = dlconfig
+
+        self.metadata_handler = metadata_handler
+        self.torrent_db = TorrentDBHandler.getInstance()
+
+    def handleMessage(self,permid,selversion,message):
+        """ Handle the received message and dispatch it to the appropriate handler.
+        
+        As there are multiple helper instances, one for each download/upload, the right helper instance must be found prior to making a call to its methods.
+            
+        @param permid: The permid of the peer who sent the message
+        @param selversion: selected overlay protocol version (forwarded to the handlers)
+        @param message: The message received
+        """
+
+        t = message[0]  # first byte of an overlay message is its type ID
+        if DEBUG:
+            print >> sys.stderr, "helper: received the message", getMessageName(t), "from", show_permid_short(permid)
+
+        #if ProxyService is not turned on, ignore the message and return
+        session_config = self.session.get_current_startup_config_copy()
+        if session_config.get_proxyservice_status() == PROXYSERVICE_OFF:
+            if DEBUG:
+                print >> sys.stderr, "helper: ProxyService not active, ignoring message"
+
+            return
+        
+        if t == ASK_FOR_HELP:
+            return self.got_ask_for_help(permid, message, selversion)
+        elif t == STOP_HELPING:
+            return self.got_stop_helping(permid, message, selversion)
+        elif t == REQUEST_PIECES:
+            return self.got_request_pieces(permid, message, selversion)
+
+
+
+
+
+    def got_ask_for_help(self, permid, message, selversion):
+        """ Handle the ASK_FOR_HELP message.
+        
+        @param permid: The permid of the peer who sent the message
+        @param message: The message received
+        @param selversion: selected overlay protocol version (forwarded to start_helper_download)
+        """
+        try:
+            infohash = message[1:21]  # bytes 1..20 carry the 20-byte infohash
+            challenge = bdecode(message[21:])
+        except:
+            if DEBUG:
+                print >> sys.stderr, "helper: got_ask_for_help: bad data in ask_for_help"
+            return False
+
+        if len(infohash) != 20:
+            if DEBUG:
+                print >> sys.stderr, "helper: got_ask_for_help: bad infohash in ask_for_help"
+            return False
+        
+        if DEBUG:
+            print >> sys.stderr, "helper: got_ask_for_help: received a help request from",show_permid_short(permid)
+
+        
+        # Save the challenge, keyed on the coordinator's permid (read back in network_got_ask_for_help)
+        self.received_challenges[permid] = challenge
+        
+        # Find the appropriate Helper object. If no helper object is associated with the requested infohash, than start a new download for it
+        helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
+        if helper_obj is None:
+            if DEBUG:
+                print >> sys.stderr, "helper: got_ask_for_help: There is no current download for this infohash. A new download must be started."
+            
+            self.start_helper_download(permid, infohash, selversion)
+            # start_helper_download will make, indirectly, a call to the network_got_ask_for_help method of the helper,
+            # in a similar fashion as the one below
+            return  # NOTE(review): returns None here rather than True/False — confirm callers ignore the result
+            
+        # Call the helper object got_ask_for_help method
+        # If the object was created with start_helper_download, an amount of time is required
+        # before the download is fully operational, so the call to the the helper object got_ask_for_help method
+        # is made using the network thread (the network thread executes tasks sequentially, so the start_download task should
+        # be executed before the network_got_ask_for_help)
+        network_got_ask_for_help_lambda = lambda:self.network_got_ask_for_help(permid, infohash)
+        self.session.lm.rawserver.add_task(network_got_ask_for_help_lambda, 0)
+        
+        return True
+
+
+    def network_got_ask_for_help(self, permid, infohash):
+        """ Find the appropriate Helper object and call its method. If no helper object is associated with the requested
+        infohash, than start a new download for it
+        
+        Called by the network thread.
+        
+        @param permid: The permid of the peer who sent the message
+        @param infohash: The infohash sent by the remote peer
+        The coordinator's challenge is looked up in self.received_challenges (stored by got_ask_for_help).
+        """
+        
+        helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
+        if helper_obj is None:
+            if DEBUG:
+                print >> sys.stderr, "helper: network_got_ask_for_help: There is no current download for this infohash. Try again later..."
+            return
+            
+        # At this point, a previous download existed
+        # A node can not be a helper and a coordinator at the same time
+        if not helper_obj.is_coordinator(permid):
+            if DEBUG:
+                print >> sys.stderr, "helper: network_got_ask_for_help: The node asking for help is not the current coordinator"
+            return
+
+        challenge = self.received_challenges[permid]  # NOTE(review): raises KeyError if no challenge was stored for this permid — confirm this path is always preceded by got_ask_for_help
+        helper_obj.got_ask_for_help(permid, infohash, challenge)
+        # Wake up download thread
+        helper_obj.notify()
+        
+
+    def start_helper_download(self, permid, infohash, selversion):
+        """ Start a new download, as a helper, for the requested infohash
+        
+        @param permid: the coordinator permid requesting help
+        @param infohash: the infohash of the .torrent
+        @param selversion: selected overlay protocol version (forwarded to get_torrent_metadata)
+        (the coordinator's challenge was already stored in self.received_challenges by got_ask_for_help)
+        """
+        
+        # Getting .torrent information
+        torrent_data = self.find_torrent(infohash)
+        if torrent_data:
+            # The .torrent was already in the local cache
+            self.new_download(infohash, torrent_data, permid)
+        else:
+            # The .torrent needs to be downloaded
+            # new_download will be called at the end of get_torrent_metadata
+            self.get_torrent_metadata(permid, infohash, selversion)
+
+
+    # It is very important here that we create safe filenames, i.e., it should
+    # not be possible for a coordinator to send a METADATA message that causes
+    # important files to be overwritten
+    #
+    def new_download(self, infohash, torrent_data, permid):
+        """ Start a new download in order to get the pieces that will be requested by the coordinator.
+        After the download is started, find the appropriate Helper object and call its method.
+        
+        @param infohash: the infohash of the torrent for which help is requested
+        @param torrent_data: the content of the .torrent file
+        @param permid: the permid of the coordinator
+        (the coordinator's challenge was already stored by got_ask_for_help)
+        """        
+
+        # Create the name for the .torrent file in the helper cache; hexlify makes the name safe (see comment above)
+        basename = binascii.hexlify(infohash)+'.torrent' # ignore .tribe stuff, not vital
+        torrentfilename = os.path.join(self.helpdir,basename)
+
+        # Write the .torrent information in the .torrent helper cache file 
+        tfile = open(torrentfilename, "wb")
+        tfile.write(torrent_data)
+        tfile.close()
+
+        if DEBUG:
+            print >> sys.stderr, "helper: new_download: Got metadata required for helping",show_permid_short(permid)
+            print >> sys.stderr, "helper: new_download: torrent: ",torrentfilename
+
+        tdef = TorrentDef.load(torrentfilename)
+        if self.dlconfig is None:
+            dscfg = DownloadStartupConfig()
+        else:
+            dscfg = DownloadStartupConfig(self.dlconfig)
+        dscfg.set_coopdl_coordinator_permid(permid)
+        dscfg.set_dest_dir(self.helpdir)
+        dscfg.set_proxy_mode(PROXY_MODE_OFF) # a helper does not use other helpers for downloading data
+
+        # Start new download
+        if DEBUG:
+            print >> sys.stderr, "helper: new_download: Starting a new download"
+        d=self.session.start_download(tdef,dscfg)
+        d.set_state_callback(self.state_callback, getpeerlist=False)
+        
+        # Call the helper object got_ask_for_help method
+        # If the object was created with start_helper_download, an amount of time is required
+        # before the download is fully operational, so the call to the the helper object got_ask_for_help method
+        # is made using the network thread (the network thread executes tasks sequentially, so the start_download task should
+        # be executed before the network_got_ask_for_help)
+        network_got_ask_for_help_lambda = lambda:self.network_got_ask_for_help(permid, infohash)
+        self.session.lm.rawserver.add_task(network_got_ask_for_help_lambda, 0)
+
+    # Print torrent statistics to stderr (periodic download-state callback)
+    def state_callback(self, ds):
+        d = ds.get_download()
+    #    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
+        print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \
+                (d.get_def().get_name(), \
+                dlstatus_strings[ds.get_status()], \
+                ds.get_progress() * 100, \
+                ds.get_error(), \
+                ds.get_current_speed(UPLOAD), \
+                ds.get_current_speed(DOWNLOAD))
+    
+        return (1.0, False)  # presumably (poll interval in seconds, getpeerlist) — confirm against set_state_callback API
+
+
+
+
+    def get_torrent_metadata(self, permid, infohash, selversion):
+        """ Get the .torrent file from the coordinator requesting help for it
+        
+        @param permid: the permid of the coordinator
+        @param infohash: the infohash of the .torrent
+        @param selversion: selected overlay protocol version (forwarded to send_metadata_request)
+        """
+        if DEBUG:
+            print >> sys.stderr, "helper: get_torrent_metadata: Asking coordinator for the .torrent"
+        self.metadata_queue_lock.acquire()
+        try:
+            if not self.metadata_queue.has_key(infohash):
+                self.metadata_queue[infohash] = []
+            self.metadata_queue[infohash].append(permid)
+        finally:
+            self.metadata_queue_lock.release()
+        
+        self.metadata_handler.send_metadata_request(permid, infohash, selversion, caller="dlhelp")
+
+
+    def metadatahandler_received_torrent(self, infohash, torrent_data):
+        """ The coordinator sent the .torrent file.
+        """
+        # TODO: Where is this handler registered ?
+        # TODO: Is this handler actually called by the network thread ?
+        if DEBUG:
+            print >> sys.stderr, "helper: metadatahandler_received_torrent: the .torrent is in."
+        
+        self.metadata_queue_lock.acquire()
+        try:
+            if not self.metadata_queue.has_key(infohash) or not self.metadata_queue[infohash]:
+                if DEBUG:
+                    print >> sys.stderr, "helper: metadatahandler_received_torrent: a .torrent was received that we are not waiting for."
+                return
+            
+            infohash_queue = self.metadata_queue[infohash]
+            del self.metadata_queue[infohash]
+        finally:
+            self.metadata_queue_lock.release()
+        for permid in infohash_queue:
+            # only ask for metadata once
+            self.new_download(infohash, torrent_data, permid)
+
+
+    def find_torrent(self, infohash):
+        """ Find the .torrent for the required infohash.
+        
+        @param infohash: the infohash of the .torrent that must be returned 
+        """
+        torrent = self.torrent_db.getTorrent(infohash)
+        if torrent is None:
+            # The .torrent file is not in the local cache
+            if DEBUG:
+                print >> sys.stderr, "helper: find_torrent: The .torrent file is not in the local cache"
+            return None
+        elif 'torrent_dir' in torrent:
+            fn = torrent['torrent_dir']
+            if os.path.isfile(fn):
+                f = open(fn,"rb")
+                data = f.read()
+                f.close()
+                return data
+            else:
+                # The .torrent file path does not exist or the path is not for a file
+                if DEBUG:
+                    print >> sys.stderr, "helper: find_torrent: The .torrent file path does not exist or the path is not for a file" 
+                return None
+        else:
+            # The torrent dictionary does not contain a torrent_dir field 
+            if DEBUG:
+                print >> sys.stderr, "helper: find_torrent: The torrent dictionary does not contain a torrent_dir field" 
+            return None
+
+
+
+
+
+    def got_stop_helping(self, permid, message, selversion):
+        """ Handle the STOP_HELPING message.
+        
+        @param permid: The permid of the peer who sent the message
+        @param message: The message received
+        @param selversion:
+        """
+        try:
+            infohash = message[1:]
+        except:
+            if DEBUG:
+                print >> sys.stderr, "helper: got_stop_helping: bad data in STOP_HELPING"
+            return False
+
+        if len(infohash) != 20:
+            if DEBUG:
+                print >> sys.stderr, "helper: got_stop_helping: bad infohash in STOP_HELPING"
+            return False
+
+        network_got_stop_helping_lambda = lambda:self.network_got_stop_helping(permid, infohash, selversion)
+        self.session.lm.rawserver.add_task(network_got_stop_helping_lambda, 0)
+        
+        # If the request is from a unauthorized peer, we close
+        # If the request is from an authorized peer (=coordinator) we close as well. So return False
+        return False
+
+
+    def network_got_stop_helping(self, permid, infohash, selversion):
+        """ Find the appropriate Helper object and call it's method.
+        
+        Called by the network thread.
+        
+        @param permid: The permid of the peer who sent the message
+        @param infohash: The infohash sent by the remote peer
+        @param selversion:
+        """
+        helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
+        if helper_obj is None:
+            if DEBUG:
+                print >> sys.stderr, "helper: network_got_stop_helping: There is no helper object associated with this infohash"
+            return
+        
+        if not helper_obj.is_coordinator(permid): 
+            if DEBUG:
+                print >> sys.stderr, "helper: network_got_stop_helping: The node asking for help is not the current coordinator"
+            return
+        
+#        helper_obj.got_stop_helping(permid, infohash)
+#        # Wake up download thread
+#        helper_obj.notify()
+        # Find and remove download
+        dlist = self.session.get_downloads()
+        for d in dlist:
+            if d.get_def().get_infohash() == infohash:
+                self.session.remove_download(d)
+                break
+
+
+
+
+
+    def got_request_pieces(self,permid, message, selversion):
+        """ Handle the REQUEST_PIECES message.
+        
+        @param permid: The permid of the peer who sent the message
+        @param message: The message received
+        @param selversion:
+        """
+        try:
+            infohash = message[1:21]
+            pieces = bdecode(message[21:])
+        except:
+            print >> sys.stderr, "helper: got_request_pieces: bad data in REQUEST_PIECES"
+            return False
+
+        network_got_request_pieces_lambda = lambda:self.network_got_request_pieces(permid, message, selversion, infohash, pieces)
+        self.session.lm.rawserver.add_task(network_got_request_pieces_lambda, 0)
+        
+        return True
+
+    def network_got_request_pieces(self, permid, message, selversion, infohash, pieces):
+        """ Find the appropriate Helper object and call it's method.
+        
+        Called by the network thread.
+        
+        @param permid: The permid of the peer who sent the message
+        @param infohash: The infohash sent by the remote peer
+        @param selversion:
+        """
+        helper_obj = self.session.lm.get_coopdl_role_object(infohash, COOPDL_ROLE_HELPER)
+        if helper_obj is None:
+            if DEBUG:
+                print >> sys.stderr, "helper: network_got_request_pieces: There is no helper object associated with this infohash"
+            return
+
+        if not helper_obj.is_coordinator(permid): 
+            if DEBUG:
+                print >> sys.stderr, "helper: network_got_request_pieces: The node asking for help is not the current coordinator"
+            return
+
+        helper_obj.got_request_pieces(permid, pieces)
+        # Wake up download thread
+        helper_obj.notify()
diff --git a/instrumentation/next-share/BaseLib/Core/ProxyService/ProxyServiceUtil.py b/instrumentation/next-share/BaseLib/Core/ProxyService/ProxyServiceUtil.py
new file mode 100644 (file)
index 0000000..6ff3467
--- /dev/null
@@ -0,0 +1,46 @@
+# Written George Milescu
+# see LICENSE.txt for license information
+
+# This class contains all util methods related to the ProxyService
+
+import string
+import random
+
def generate_proxy_challenge():
    """ Generate the challenge (an 8 character random string) that a doe
    sends to a proxy during the handshake.

    @return: an 8 byte long string of ASCII letters and digits
    """
    # string.ascii_letters instead of the locale-dependent string.letters:
    # keeps the alphabet fixed at 62 characters, as documented (62**8
    # possible combinations), regardless of the active locale.
    chars = string.ascii_letters + string.digits  # len(chars) == 62
    return ''.join(random.choice(chars) for _ in range(8))
+
+
def decode_challenge_from_peerid(peerid):
    """ Extract the challenge that was embedded in a proxy peerid.

    Layout (see encode_challenge_in_peerid):
    proxy_peer_id = | regular_peer_id[0:12] | challenge[0:8] |

    @param peerid: the peerid of the peer whose challenge is to be retrieved
    @return: the 8 byte challenge string previously sent to that peer
    """
    challenge_start, challenge_end = 12, 20
    return peerid[challenge_start:challenge_end]
+
+
def encode_challenge_in_peerid(peerid, challenge):
    """ Embed a challenge into a regular peerid.

    Layout: proxy_peer_id = | regular_peer_id[0:12] | challenge[0:8] |

    @param peerid: the regular peerid, into which the challenge will be encoded
    @param challenge: an 8 byte long string, to be encoded in the peerid
    @return: a new peerid, with the challenge encoded in it
    """
    # keep the first 12 bytes of the regular peerid, append the challenge
    peerid_prefix = peerid[:12]
    return peerid_prefix + challenge
diff --git a/instrumentation/next-share/BaseLib/Core/ProxyService/RatePredictor.py b/instrumentation/next-share/BaseLib/Core/ProxyService/RatePredictor.py
new file mode 100644 (file)
index 0000000..ddd8673
--- /dev/null
@@ -0,0 +1,59 @@
+# Written by Pawel Garbacki, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+from BaseLib.Core.BitTornado.clock import clock
+
+MIN_CAPACITY = 0.75
+DEBUG = False
+
class RatePredictor:
    """ Base class for rate predictors: stores the measurement source and
    the probing configuration shared by all predictor implementations.

    @param raw_server: the RawServer used to schedule periodic updates
    @param rate_measure: object whose get_rate() supplies measured rates
    @param max_rate: rate cap; 0 means "unlimited"
    @param probing_period: seconds between measurements
    """

    def __init__(self, raw_server, rate_measure, max_rate, probing_period=2):
        self.raw_server = raw_server
        self.rate_measure = rate_measure
        # max_rate == 0 means "no limit": substitute 2**31 - 1 as the cap
        self.max_rate = max_rate if max_rate != 0 else 2147483647
        self.probing_period = probing_period  # in seconds
+
class ExpSmoothRatePredictor(RatePredictor):
    """ Exponentially-smoothed rate predictor: keeps a running estimate of
    the measured rate (converted to KB/s in update()) and answers capacity
    queries against max_rate. """

    def __init__(self, raw_server, rate_measure, max_rate, alpha = 0.5, max_period = 30, probing_period = 2):
        # alpha: smoothing factor, the weight given to the newest sample.
        # max_period: samples older than this many seconds are considered
        # stale and the estimate is reset to the current sample.
        RatePredictor.__init__(self, raw_server, rate_measure, max_rate, probing_period)
        if DEBUG: print >>sys.stderr, "RatePredictor:__init__"
        self.alpha = alpha
        self.max_period = max_period
        self.value = None       # smoothed rate estimate; None until first update()
        self.timestamp = None   # clock() time of the last update()

    def update(self):
        """ Take one rate sample, fold it into the smoothed estimate and
        reschedule this method to run again after probing_period seconds. """
        if DEBUG: print >>sys.stderr, "RatePredictor:update"
        # re-arm before sampling so the update keeps running periodically
        self.raw_server.add_task(self.update, self.probing_period)
        # assumes get_rate() returns bytes/s, converted here to KB/s -- TODO confirm
        current_value = self.rate_measure.get_rate() / 1000.
        current_time = clock()
        if self.value is None or current_time - self.timestamp > self.max_period:
            # first sample, or last sample too old: restart from the raw value
            self.value = current_value
        else:
            self.value = self.alpha * current_value + (1 - self.alpha) * self.value
            # clamp the estimate to the configured maximum rate
            if self.max_rate > 0 and self.value > self.max_rate:
                self.value = self.max_rate
        self.timestamp = current_time

    # exponential smoothing prediction
    def predict(self):
        """ Return the current smoothed rate estimate (0 before any sample). """
        if DEBUG: print >>sys.stderr, "RatePredictor:predict"
        # self.update()
        if self.value is None:
            return 0
        return self.value

    def has_capacity(self):
        """ True when the free fraction of max_rate exceeds MIN_CAPACITY,
        i.e. utilization is below 1 - MIN_CAPACITY; True before any sample.

        NOTE(review): self.value is in KB/s (see update()); max_rate's unit
        is not established here -- confirm both sides use the same unit.
        """
        if DEBUG: print >>sys.stderr, "RatePredictor:has_capacity"
#        return False
        # self.update()
        result = None
        if self.value is None:
            result = True
        else:
            result = (1. - float(self.value) / self.max_rate) > MIN_CAPACITY
        return result
diff --git a/instrumentation/next-share/BaseLib/Core/ProxyService/__init__.py b/instrumentation/next-share/BaseLib/Core/ProxyService/__init__.py
new file mode 100644 (file)
index 0000000..afcd0f2
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/RequestPolicy.py b/instrumentation/next-share/BaseLib/Core/RequestPolicy.py
new file mode 100644 (file)
index 0000000..58ffcb8
--- /dev/null
@@ -0,0 +1,138 @@
+# Written by Jelle Roozenburg 
+# see LICENSE.txt for license information
+""" Controls the authorization of messages received via the Tribler Overlay """
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+DEBUG = False
+
+MAX_QUERIES_FROM_RANDOM_PEER = 1000
+
+
class AbstractRequestPolicy:
    """ Superclass for all Tribler RequestPolicies. A RequestPolicy controls
    the authorization of messages received via the Tribler Overlay, such as
    distributed recommendations, remote queries, etc.
    """

    def __init__(self):
        """ Constructor """

    def allowed(self, permid, messageID):
        """ Decide whether the peer identified by permid may send us a
        message of type messageID. Subclasses must override this.

        @param permid The permid of the sending peer.
        @param messageID A integer messageID, see BaseLib.Core.BitTornado.BT1.MessageID
        @returns A boolean indicating whether the message is authorized.
        """
        raise NotYetImplementedException()
+
+
class AllowAllRequestPolicy(AbstractRequestPolicy):
    """ A RequestPolicy that authorizes every message from every peer. """

    def allowed(self, permid, messageID):
        """ Always authorized, whoever the sender is. """
        return self.allowAllRequestsAllPeers(permid, messageID)

    def allowAllRequestsAllPeers(self, permid, messageID):
        """ Authorization helper kept for interface compatibility: True. """
        return True
+
+
class CommonRequestPolicy(AbstractRequestPolicy):
    """ A base class implementing some methods that can be used as building
    blocks for RequestPolicies.
    """

    def __init__(self, session):
        """ Constructor.
        @param session The Tribler Session whose databases back this policy. """
        self.session = session
        self.friendsdb = session.open_dbhandler(NTFY_FRIENDS)
        self.peerdb = session.open_dbhandler(NTFY_PEERS)
        AbstractRequestPolicy.__init__(self)

    def isFriend(self, permid):
        """
        @param permid The permid of the sending peer.
        @return Whether or not the specified permid is a friend.
        """
        friend_state = self.friendsdb.getFriendState(permid)
        return friend_state in (FS_MUTUAL, FS_I_INVITED)

    def isSuperPeer(self, permid):
        """
        @param permid The permid of the sending peer.
        @return Whether or not the specified permid is a superpeer.
        """
        return permid in self.session.lm.superpeer_db.getSuperPeers()

    def isCrawler(self, permid):
        """
        @param permid The permid of the sending peer.
        @return Whether or not the specified permid is a crawler.
        """
        return permid in self.session.lm.crawler_db.getCrawlers()

    def benign_random_peer(self, permid):
        """
        @param permid The permid of the sending peer.
        @return Whether or not the specified permid is still within its
        quota of remote query messages.
        """
        if MAX_QUERIES_FROM_RANDOM_PEER <= 0:
            # non-positive quota disables the limit entirely
            return True
        return self.get_peer_nqueries(permid) < MAX_QUERIES_FROM_RANDOM_PEER

    def get_peer_nqueries(self, permid):
        """
        @param permid The permid of the sending peer.
        @return The number of remote query messages already received from
        this peer (0 for unknown peers).
        """
        peer = self.peerdb.getPeer(permid)
        if peer is None:
            return 0
        return peer['num_queries']
+
class AllowFriendsRequestPolicy(CommonRequestPolicy):
    """
    A RequestPolicy that allows all non-crawler messages to be sent by
    friends only. Crawler messages are allowed from Crawlers only.
    """

    def allowed(self, permid, messageID):
        """ Crawler traffic must come from crawlers; everything else only
        from friends. """
        is_crawler_message = messageID in (CRAWLER_REQUEST, CRAWLER_REPLY)
        if is_crawler_message:
            return self.isCrawler(permid)
        return self.allowAllRequestsFromFriends(permid, messageID)

    def allowAllRequestsFromFriends(self, permid, messageID):
        """ Access control: friends only. """
        return self.isFriend(permid)
+
+
class FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy(CommonRequestPolicy):
    """
    Allows friends to send all messages related to cooperative
    downloads, subjects all other peers to a remote query quotum of
    MAX_QUERIES_FROM_RANDOM_PEER (currently 1000), and allows all peers
    to send all other non-crawler messages. Crawler request messages are
    allowed from Crawlers only.
    """

    def allowed(self, permid, messageID):
        """ Returns whether or not the peer identified by permid is allowed to  
        send us a message of type messageID.
        @return Boolean. """
        # NOTE(review): only CRAWLER_REQUEST is restricted here; CRAWLER_REPLY
        # falls through to the final "return True", unlike
        # AllowFriendsRequestPolicy which restricts both -- confirm intended.
        if messageID == CRAWLER_REQUEST:
            return self.isCrawler(permid)
        elif (messageID in HelpCoordinatorMessages or messageID in HelpHelperMessages) and not self.isFriend(permid):
            # cooperative-download traffic is reserved for friends
            return False
        elif messageID == QUERY and not (self.isFriend(permid) or self.benign_random_peer(permid)):
            # non-friends are subject to the remote query quota
            return False
        else:
            return True
+
diff --git a/instrumentation/next-share/BaseLib/Core/Search/KeywordSearch.py b/instrumentation/next-share/BaseLib/Core/Search/KeywordSearch.py
new file mode 100644 (file)
index 0000000..3421a84
--- /dev/null
@@ -0,0 +1,107 @@
+# written by Jelle Roozenburg
+# see LICENSE.txt for license information
+
+import re
+import sys
+
+DEBUG = False
+
class KeywordSearch:
    """
    Tribler keywordsearch now has the following features:
    1. All items with one of the keywords in the 'name' field are returned (self.simpleSearch() )
    2. The sorting of the results is based on:
      a) The number of matching keywords
      b) The length of the matching keywords
      c) If the keywords matched a whole word (search for 'cat' find 'category')
      (done in self.search() )
    3. Searching is case insensitive
    """
    def search(self, haystack, needles, haystackismatching=False):
        # haystack: list of dicts with a 'name' field; needles: raw keyword
        # strings. Returns the matching items sorted by descending score.
        if DEBUG:
            print >>sys.stderr,'kws: unprocessed keywords: %s' % needles
        needles = self.unRegExpifySearchwords(needles)
        if DEBUG:
            print >>sys.stderr,'kws: Searching for %s in %d items' % (repr(needles), len(haystack))
            
        if not haystackismatching:
            # pre-filter: keep only items containing every needle (AND search)
            searchspace = self.simpleSearch(haystack, needles)
            if DEBUG:
                print >>sys.stderr,'kws: Found %s items using simple search' % len(searchspace)
        else:
            searchspace = haystack
        results = []
        wbsearch = []
        
        # one whole-word regex per needle (\b anchors on word boundaries)
        for needle in needles:
            wbsearch.append(re.compile(r'\b%s\b' % needle))
                                              
        for item in searchspace:
            title = item['name'].lower()
            score = 0
            for i in xrange(len(needles)):
                # whole-word matches score double, weighted by needle length
                wb = wbsearch[i].findall(title)
                score += len(wb) * 2 * len(needles[i])
                if len(wb) == 0:
                    # NOTE(review): needles were regex-escaped above, so this
                    # literal find() misses titles whose needle contained regex
                    # metacharacters (the escaping backslashes are searched too).
                    if title.find(needles[i].lower()) != -1:
                        score += len(needles[i])

            results.append((score, item))
        
        # Py2 tuple sort: ties on score fall back to comparing the item dicts
        results.sort(reverse=True)
        if DEBUG:
            print >>sys.stderr,'kws: Found %d items eventually' % len(results)
            #for r in results:
            #    print r
        return [r[1] for r in results]

    
    def unRegExpifySearchwords(self, needles):
        """ Strip, lowercase and regex-escape each needle; empty needles are
        dropped.

        NOTE(review): metacharacters are escaped by hand and '^' and '$' are
        not covered -- a needle like '^cat' becomes an anchored pattern
        (consider re.escape).
        """
        replaceRegExpChars = re.compile(r'(\\|\*|\.|\+|\?|\||\(|\)|\[|\]|\{|\})')
        new_needles = []
        for needle in needles:
            needle = needle.strip()
            if len(needle)== 0:
                continue
            new_needle = re.sub(replaceRegExpChars, r'\\\1', needle.lower())
            new_needles.append(new_needle)
        return new_needles
            
    def simpleSearch(self, haystack, needles, searchtype='AND'):
        "Can do both OR or AND search"
        hits = []
        if searchtype == 'OR':
            # build one alternation regex out of all needles
            # NOTE(review): with an empty needle list the pattern is '' which
            # matches every title.
            searchRegexp = r''
            for needle in needles:
                searchRegexp+= needle+'|'
            searchRegexp = re.compile(searchRegexp[:-1])
            for item in haystack:
                title = item['name'].lower()
                if len(searchRegexp.findall(title)) > 0:
                    hits.append(item)
        elif searchtype == 'AND':
            # keep only the items whose title contains every needle literally
            for item in haystack:
                title = item['name'].lower()
                foundAll = True
                for needle in needles:
                    if title.find(needle) == -1:
                        foundAll = False
                        break
                if foundAll:
                    hits.append(item)
        return hits
+
+
+def test():
+    data = [{'name':'Fedoras 3.10'},
+            {'name':'Fedora 2.10'},
+            {'name':'Movie 3.10'},
+            {'name':'fedora_2'},
+            {'name':'movie_theater.avi'}
+            ]
+    words = ['fedora', '1']
+    #print KeywordSearch().simpleSearch(data, words)
+    print KeywordSearch().search(data, words)
+if __name__ == '__main__':
+    test()
+         
diff --git a/instrumentation/next-share/BaseLib/Core/Search/Reranking.py b/instrumentation/next-share/BaseLib/Core/Search/Reranking.py
new file mode 100644 (file)
index 0000000..2df031e
--- /dev/null
@@ -0,0 +1,97 @@
+# written by Nicolas Neubauer\r
+# see LICENSE.txt for license information\r
+\r
+import sys, time\r
+\r
+DEBUG = False\r
+\r
class Reranker:
    """ Base class for search-result rerankers: the default strategy keeps
    the hits exactly as they are. """

    def getID(self):
        """ The ID stored in the clicklog 'reranking_strategy' field for
        later comparison. """
        return 0

    def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db):
        """ Return the hits, possibly reordered for the current keywords;
        the base class leaves them untouched. """
        return hits
+    \r
class DefaultTorrentReranker(Reranker):
    """ The identity strategy: hand the hits back untouched. """

    def getID(self):
        return 1

    def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db):
        return hits
+    \r
class TestReranker(Reranker):
    """ For testing purposes only: swaps the first two hits. """

    def getID(self):
        # NOTE(review): this ID (2) collides with SwapFirstTwoReranker.getID();
        # clicklog entries cannot distinguish the two strategies -- confirm intended.
        return 2

    def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db):
        if len(hits) > 1:
            # tuple assignment replaces the temporary-variable swap
            hits[0], hits[1] = hits[1], hits[0]
        return hits
+    \r
class SwapFirstTwoReranker(Reranker):
    """ Swaps first and second place if second place has been frequently
    selected from bad position. """

    def __init__(self):
        # minimum click samples per torrent before reranking is considered
        self.MAX_SEEN_BEFORE_RERANK = 5
        # if the top hit is this many times more popular, never swap
        self.MAX_POPULAR_RATIO = 5

    def getID(self):
        # NOTE(review): same ID as TestReranker.getID() -- clicklog entries
        # cannot distinguish the two strategies; confirm intended.
        return 2

    def rerank(self, hits, keywords, torrent_db, pref_db, mypref_db, search_db):
        """ Swap hits[0] and hits[1] in place when click statistics say the
        second hit is picked from worse list positions than the first.

        @param hits: list of hit dicts carrying a 'torrent_id' key
        @param keywords: the current search keywords
        @param pref_db: handler providing getPositionScore(torrent_id, keywords)
        @return: the (possibly reordered) hits list
        """
        if len(hits) < 2:
            return hits

        torrent_id_0 = hits[0].get('torrent_id', 0)
        torrent_id_1 = hits[1].get('torrent_id', 0)
        if torrent_id_0 == 0 or torrent_id_1 == 0:
            if DEBUG:
                print >> sys.stderr, "reranking: torrent_id=0 in hits, exiting"
            # we got some problems elsewhere, don't add to it
            return hits

        (num_hits_0, position_score_0) = pref_db.getPositionScore(torrent_id_0, keywords)
        (num_hits_1, position_score_1) = pref_db.getPositionScore(torrent_id_1, keywords)
        if DEBUG:
            print >> sys.stderr, "reranking:  first torrent (%d): (num, score)= (%s, %s)" % (torrent_id_0, num_hits_0, position_score_0)
            print >> sys.stderr, "reranking: second torrent (%d): (num, score)= (%s, %s)" % (torrent_id_1, num_hits_1, position_score_1)

        if (num_hits_0 < self.MAX_SEEN_BEFORE_RERANK or num_hits_1 < self.MAX_SEEN_BEFORE_RERANK):
            # only start thinking about reranking if we have seen enough samples
            if DEBUG:
                print >> sys.stderr, "reranking: not enough samples, not reranking"
            return hits

        # NOTE(review): Py2 integer division -- the ratio is floored before
        # the comparison; presumably intended as an approximate popularity
        # ratio, confirm.
        if (num_hits_0 / num_hits_1 > self.MAX_POPULAR_RATIO):
            # if number one is much more popular, keep everything as it is
            if DEBUG:
                print >> sys.stderr, "reranking: first torrent is too popular, not reranking"
            return hits

        # if all these tests are successful, we may swap first and second if
        # second has gotten hits from worse positions than first
        if position_score_0 < position_score_1:
            if DEBUG:
                print >> sys.stderr, "reranking: second torrent has better position score, reranking!"
            hits[0], hits[1] = hits[1], hits[0]
        else:
            # fixed misleading debug message: this branch does NOT rerank
            if DEBUG:
                print >> sys.stderr, "reranking: second torrent does not have better position score, not reranking"

        return hits
+    \r
# strategies eligible for live use (TestReranker deliberately excluded)
_rerankers = [DefaultTorrentReranker(), SwapFirstTwoReranker()]


def getTorrentReranker():
    """ Pick a reranking strategy based on the current hour (simple A/B
    rotation over _rerankers: even hours DefaultTorrentReranker, odd hours
    SwapFirstTwoReranker).

    @return: one of the Reranker instances in _rerankers
    """
    # read-only access to a module-level name: no 'global' declaration needed
    index = int(time.strftime("%H")) % len(_rerankers)
    return _rerankers[index]
\r
diff --git a/instrumentation/next-share/BaseLib/Core/Search/SearchManager.py b/instrumentation/next-share/BaseLib/Core/Search/SearchManager.py
new file mode 100644 (file)
index 0000000..06792f4
--- /dev/null
@@ -0,0 +1,51 @@
+# Written by Jelle Roozenburg, Arno Bakker
+# see LICENSE.txt for license information
+
+# ARNOCOMMENT: remove this now it doesn't use KeywordSearch anymore?
+
+import re
+import sys
+
+#from BaseLib.Core.Search.KeywordSearch import KeywordSearch
+
+DEBUG = False
+
re_keywordsplit = re.compile(r"[\W_]", re.UNICODE)

def split_into_keywords(string):
    """
    Takes a (unicode) string and returns a list of (unicode) lowercase
    strings.  No empty strings are returned.

    We currently split on non-alphanumeric characters and the
    underscore.  This ensures that the keywords are SQL insertion
    proof.
    """
    fragments = re_keywordsplit.split(string.lower())
    return [fragment for fragment in fragments if fragment]
+
+
class SearchManager:
    """ Arno: This is DB neutral. All it assumes is a DBHandler with
    a searchNames() method that returns records with at least a 'name' field
    in them.
    """

    def __init__(self, dbhandler):
        self.dbhandler = dbhandler
        # self.keywordsearch = KeywordSearch()

    def search(self, kws, maxhits=None):
        """ Called by any thread.

        @param kws: the keyword list handed to the DB handler
        @param maxhits: optional cap on the number of returned records
        @return: list of records with at least a 'name' field
        """
        if DEBUG:
            print >>sys.stderr,"SearchManager: search",kws

        hits = self.dbhandler.searchNames(kws)
        if maxhits is not None:
            hits = hits[:maxhits]
        return hits

    def searchChannels(self, query):
        """ Forward a channel query to the DB handler. """
        return self.dbhandler.searchChannels(query)
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/Search/__init__.py b/instrumentation/next-share/BaseLib/Core/Search/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/Session.py b/instrumentation/next-share/BaseLib/Core/Session.py
new file mode 100644 (file)
index 0000000..840507b
--- /dev/null
@@ -0,0 +1,914 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+""" A Session is a running instance of the Tribler Core and the Core's central class. """
+
+import os
+import sys
+import copy
+import binascii
+from traceback import print_exc
+from threading import RLock
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.defaults import sessdefaults
+from BaseLib.Core.Base import *
+from BaseLib.Core.SessionConfig import *
+from BaseLib.Core.DownloadConfig import get_default_dest_dir
+from BaseLib.Core.Utilities.utilities import find_prog_in_PATH
+from BaseLib.Core.APIImplementation.SessionRuntimeConfig import SessionRuntimeConfig
+from BaseLib.Core.APIImplementation.LaunchManyCore import TriblerLaunchMany
+from BaseLib.Core.APIImplementation.UserCallbackHandler import UserCallbackHandler
+from BaseLib.Core.osutils import get_appstate_dir
+GOTM2CRYPTO=False
+try:
+    import M2Crypto
+    import BaseLib.Core.Overlay.permid as permidmod
+    GOTM2CRYPTO=True
+except ImportError:
+    pass
+
+DEBUG = False
+
+class Session(SessionRuntimeConfig):
+    """
+    
+    A Session is a running instance of the Tribler Core and the Core's central
+    class. It implements the SessionConfigInterface which can be used to change
+    session parameters at runtime (for selected parameters).
+    
+    cf. libtorrent session
+    """
+    __single = None
+
+    
+    def __init__(self,scfg=None,ignore_singleton=False):
+        """
+        A Session object is created which is configured following a copy of the
+        SessionStartupConfig scfg. (copy constructor used internally)
+        
+        @param scfg SessionStartupConfig object or None, in which case we
+        look for a saved session in the default location (state dir). If
+        we can't find it, we create a new SessionStartupConfig() object to 
+        serve as startup config. Next, the config is saved in the directory
+        indicated by its 'state_dir' attribute.
+        
+        In the current implementation only a single session instance can exist
+        at a time in a process. The ignore_singleton flag is used for testing.
+        """
+        if not ignore_singleton:
+            if Session.__single:
+                raise RuntimeError, "Session is singleton"
+            Session.__single = self
+        
+        self.sesslock = RLock()
+
+        # Determine startup config to use
+        if scfg is None: # If no override
+            try:
+                # Then try to read from default location
+                state_dir = Session.get_default_state_dir()
+                cfgfilename = Session.get_default_config_filename(state_dir)
+                scfg = SessionStartupConfig.load(cfgfilename)
+            except:
+                # If that fails, create a fresh config with factory defaults
+                print_exc()
+                scfg = SessionStartupConfig()
+            self.sessconfig = scfg.sessconfig
+        else: # overrides any saved config
+            # Work from copy
+            self.sessconfig = copy.copy(scfg.sessconfig)
+        
+        # Create dir for session state, if not exist    
+        state_dir = self.sessconfig['state_dir']
+        if state_dir is None:
+            state_dir = Session.get_default_state_dir()
+            self.sessconfig['state_dir'] = state_dir
+            
+        if not os.path.isdir(state_dir):
+            os.makedirs(state_dir)
+
+        collected_torrent_dir = self.sessconfig['torrent_collecting_dir']
+        if not collected_torrent_dir:
+            collected_torrent_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_TORRENTCOLL_DIR)
+            self.sessconfig['torrent_collecting_dir'] = collected_torrent_dir
+            
+        collected_subtitles_dir = self.sessconfig.get('subtitles_collecting_dir',None)
+        if not collected_subtitles_dir:
+            collected_subtitles_dir = os.path.join(self.sessconfig['state_dir'], STATEDIR_SUBSCOLL_DIR)
+            self.sessconfig['subtitles_collecting_dir'] = collected_subtitles_dir
+            
+        if not os.path.exists(collected_torrent_dir):
+            os.makedirs(collected_torrent_dir)
+            
+        if not self.sessconfig['peer_icon_path']:
+            self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'], STATEDIR_PEERICON_DIR)
+            
+        # PERHAPS: load default TorrentDef and DownloadStartupConfig from state dir
+        # Let user handle that, he's got default_state_dir, etc.
+
+        # Core init
+        #print >>sys.stderr,'Session: __init__ config is', self.sessconfig
+
+        if GOTM2CRYPTO:
+            permidmod.init()
+
+            #
+            # Set params that depend on state_dir
+            #
+            # 1. keypair
+            #
+            pairfilename = os.path.join(self.sessconfig['state_dir'],'ec.pem')
+            if self.sessconfig['eckeypairfilename'] is None:
+                self.sessconfig['eckeypairfilename'] = pairfilename
+            
+            if os.access(self.sessconfig['eckeypairfilename'],os.F_OK):
+                # May throw exceptions
+                self.keypair = permidmod.read_keypair(self.sessconfig['eckeypairfilename'])
+            else:
+                self.keypair = permidmod.generate_keypair()
+
+                # Save keypair
+                pubfilename = os.path.join(self.sessconfig['state_dir'],'ecpub.pem')
+                permidmod.save_keypair(self.keypair,pairfilename)
+                permidmod.save_pub_key(self.keypair,pubfilename)
+        
+        # 2. Downloads persistent state dir
+        dlpstatedir = os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR)
+        if not os.path.isdir(dlpstatedir):
+            os.mkdir(dlpstatedir)
+        
+        # 3. tracker
+        trackerdir = self.get_internal_tracker_dir()
+        if not os.path.isdir(trackerdir):
+            os.mkdir(trackerdir)
+
+        if self.sessconfig['tracker_dfile'] is None:
+            self.sessconfig['tracker_dfile'] = os.path.join(trackerdir,'tracker.db')    
+
+        if self.sessconfig['tracker_allowed_dir'] is None:
+            self.sessconfig['tracker_allowed_dir'] = trackerdir    
+        
+        if self.sessconfig['tracker_logfile'] is None:
+            if sys.platform == "win32":
+                # Not "Nul:" but "nul" is /dev/null on Win32
+                sink = 'nul'
+            else:
+                sink = '/dev/null'
+            self.sessconfig['tracker_logfile'] = sink
+
+        # 4. superpeer.txt and crawler.txt
+        if self.sessconfig['superpeer_file'] is None:
+            self.sessconfig['superpeer_file'] = os.path.join(self.sessconfig['install_dir'],LIBRARYNAME,'Core','superpeer.txt')
+        if 'crawler_file' not in self.sessconfig or self.sessconfig['crawler_file'] is None:
+            self.sessconfig['crawler_file'] = os.path.join(self.sessconfig['install_dir'], LIBRARYNAME,'Core','Statistics','crawler.txt')
+
+        # 5. download_help_dir
+        if self.sessconfig['overlay'] and self.sessconfig['download_help']:
+            if self.sessconfig['download_help_dir'] is None:
+                self.sessconfig['download_help_dir'] = os.path.join(get_default_dest_dir(),DESTDIR_COOPDOWNLOAD)
+            # Jelle: under linux, default_dest_dir can be /tmp. Then download_help_dir can be deleted in between
+            # sessions.
+            if not os.path.isdir(self.sessconfig['download_help_dir']):
+                os.makedirs(self.sessconfig['download_help_dir'])
+
+        # 6. peer_icon_path
+        if self.sessconfig['peer_icon_path'] is None:
+            self.sessconfig['peer_icon_path'] = os.path.join(self.sessconfig['state_dir'],STATEDIR_PEERICON_DIR)
+            if not os.path.isdir(self.sessconfig['peer_icon_path']):
+                os.mkdir(self.sessconfig['peer_icon_path'])
+
+        # 7. Poor man's versioning of SessionConfig, add missing
+        # default values. Really should use PERSISTENTSTATE_CURRENTVERSION 
+        # and do conversions.
+        for key,defvalue in sessdefaults.iteritems():
+            if key not in self.sessconfig:
+                self.sessconfig[key] = defvalue
+
+        if not 'live_aux_seeders' in self.sessconfig:
+            # Poor man's versioning, really should update PERSISTENTSTATE_CURRENTVERSION
+            self.sessconfig['live_aux_seeders'] = sessdefaults['live_aux_seeders']
+
+        if not 'nat_detect' in self.sessconfig:
+            self.sessconfig['nat_detect'] = sessdefaults['nat_detect']
+        if not 'puncturing_internal_port' in self.sessconfig:
+            self.sessconfig['puncturing_internal_port'] = sessdefaults['puncturing_internal_port']
+        if not 'stun_servers' in self.sessconfig:
+            self.sessconfig['stun_servers'] = sessdefaults['stun_servers']
+        if not 'pingback_servers' in self.sessconfig:
+            self.sessconfig['pingback_servers'] = sessdefaults['pingback_servers']
+        if not 'mainline_dht' in self.sessconfig:
+            self.sessconfig['mainline_dht'] = sessdefaults['mainline_dht']
+            
+        # Checkpoint startup config
+        self.save_pstate_sessconfig()
+
+        # Create handler for calling back the user via separate threads
+        self.uch = UserCallbackHandler(self)
+
+        # Create engine with network thread
+        self.lm = TriblerLaunchMany()
+        self.lm.register(self,self.sesslock)
+        self.lm.start()
+
+
+    #
+    # Class methods
+    #
+    def get_instance(*args, **kw):
+        """ Returns the Session singleton if it exists or otherwise
+            creates it first, in which case you need to pass the constructor 
+            params. 
+            @return Session."""
+        if Session.__single is None:
+            Session(*args, **kw)
+        return Session.__single
+    get_instance = staticmethod(get_instance)
+
+    def get_default_state_dir(homedirpostfix='.Tribler'):
+        """ Returns the factory default directory for storing session state
+        on the current platform (Win32,Mac,Unix).
+        @return An absolute path name. """
+
+        # Allow override
+        statedirvar = '${TSTATEDIR}'
+        statedir = os.path.expandvars(statedirvar)
+        if statedir and statedir != statedirvar:
+            return statedir
+        
+        appdir = get_appstate_dir() 
+        statedir = os.path.join(appdir, homedirpostfix)
+        return statedir
+
+    get_default_state_dir = staticmethod(get_default_state_dir)
+
+
+    #
+    # Public methods
+    #
+    def start_download(self,tdef,dcfg=None,initialdlstatus=None):
+        """ 
+        Creates a Download object and adds it to the session. The passed 
+        TorrentDef and DownloadStartupConfig are copied into the new Download 
+        object. The Download is then started and checkpointed.
+
+        If a checkpointed version of the Download is found, that is restarted
+        overriding the saved DownloadStartupConfig if "dcfg" is not None.
+        
+        @param tdef  A finalized TorrentDef
+        @param dcfg DownloadStartupConfig or None, in which case 
+        a new DownloadStartupConfig() is created with its default settings
+        and the result becomes the runtime config of this Download.
+        @param initialdlstatus The initial download status of this Download 
+        or None. This enables the caller to create a Download in e.g. 
+        DLSTATUS_REPEXING state instead.
+        @return Download
+        """
+        # locking by lm
+        return self.lm.add(tdef,dcfg,initialdlstatus=initialdlstatus)
+
+    def resume_download_from_file(self,filename):
+        """
+        Recreates Download from resume file
+        
+        @return a Download object.
+        
+        Note: this cannot be made into a method of Download, as the Download 
+        needs to be bound to a session, it cannot exist independently.
+        """
+        raise NotYetImplementedException()
+
+    def get_downloads(self):
+        """
+        Returns a copy of the list of Downloads.
+        @return A list of Download objects.
+        """
+        # locking by lm
+        return self.lm.get_downloads()
+    
+    
+    def remove_download(self,d,removecontent=False):  
+        """
+        Stops the download and removes it from the session.
+        @param d The Download to remove
+        @param removecontent Whether to delete the already downloaded content
+        from disk.
+        """
+        # locking by lm
+        self.lm.remove(d,removecontent=removecontent)
+
+
+    def set_download_states_callback(self,usercallback,getpeerlist=False):
+        """
+        See Download.set_state_callback. Calls usercallback with a list of
+        DownloadStates, one for each Download in the Session as first argument.
+        The usercallback must return a tuple (when,getpeerlist) that indicates
+        when to reinvoke the callback again (as a number of seconds from now,
+        or < 0.0 if not at all) and whether to also include the details of
+        the connected peers in the DownloadStates on that next call.
+        
+        The callback will be called by a popup thread which can be used
+        indefinitely (within reason) by the higher level code.
+        
+        @param usercallback A function adhering to the above spec. 
+        """
+        self.lm.set_download_states_callback(usercallback,getpeerlist)
+
+
+    #
+    # Config parameters that only exist at runtime
+    #
+    def get_permid(self):
+        """ Returns the PermID of the Session, as determined by the
+        SessionConfig.set_permid() parameter. A PermID is a public key 
+        @return The PermID encoded in a string in DER format. """
+        self.sesslock.acquire()
+        try:
+            return str(self.keypair.pub().get_der())
+        finally:
+            self.sesslock.release()
+
+    def get_external_ip(self):
+        """ Returns the external IP address of this Session, i.e., by which
+        it is reachable from the Internet. This address is determined via
+        various mechanisms such as the UPnP protocol, our dialback mechanism,
+        and an inspection of the local network configuration.
+        @return A string. """
+        # locking done by lm
+        return self.lm.get_ext_ip()
+        
+
+    def get_externally_reachable(self):
+        """ Returns whether the Session is externally reachable, i.e., its 
+          listen port is not firewalled. Use add_observer() with NTFY_REACHABLE
+          to register to the event of detecting reachability. Note that due to
+          the use of UPnP a Session may become reachable some time after 
+          startup and due to the Dialback mechanism, this method may return 
+          False while the Session is actually already reachable. Note that True
+          doesn't mean the Session is reachable from the open Internet, could just
+          be from the local (otherwise firewalled) LAN.
+          @return A boolean. """
+
+        # Arno, LICHT: make it throw an exception when used in the LITE version.
+        from BaseLib.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
+        
+        return DialbackMsgHandler.getInstance().isConnectable()
+
+
+    def get_current_startup_config_copy(self):
+        """ Returns a SessionStartupConfig that is a copy of the current runtime 
+        SessionConfig.
+        @return SessionStartupConfig
+        """
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            sessconfig = copy.copy(self.sessconfig)
+            return SessionStartupConfig(sessconfig=sessconfig)
+        finally:
+            self.sesslock.release()
+            
+    #
+    # Internal tracker 
+    #
+    def get_internal_tracker_url(self):
+        """ Returns the announce URL for the internal tracker. 
+        @return URL """
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            url = None
+            if 'tracker_url' in self.sessconfig:
+                url = self.sessconfig['tracker_url'] # user defined override, e.g. specific hostname
+            if url is None:
+                ip = self.lm.get_ext_ip()
+                port = self.get_listen_port()
+                url = 'http://'+ip+':'+str(port)+'/announce/'
+            return url
+        finally:
+            self.sesslock.release()
+
+
+    def get_internal_tracker_dir(self):
+        """ Returns the directory containing the torrents tracked by the internal 
+        tracker (and associated databases).
+        @return An absolute path. """
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            if self.sessconfig['state_dir'] is None:
+                return None
+            else:
+                return os.path.join(self.sessconfig['state_dir'],STATEDIR_ITRACKER_DIR)
+        finally:
+            self.sesslock.release()
+
+
+    def add_to_internal_tracker(self,tdef):
+        """ Add a torrent def to the list of torrents tracked by the internal
+        tracker. Use this method to use the Session as a standalone tracker. 
+        @param tdef A finalized TorrentDef. 
+        """
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            infohash = tdef.get_infohash()
+            filename = self.get_internal_tracker_torrentfilename(infohash)
+            tdef.save(filename)
+            
+            print >>sys.stderr,"Session: add_to_int_tracker: saving to",filename,"url-compat",tdef.get_url_compat()
+            
+            # Bring to attention of Tracker thread
+            self.lm.tracker_rescan_dir()
+        finally:
+            self.sesslock.release()
+        
+    def remove_from_internal_tracker(self,tdef):
+        """ Remove a torrent def from the list of torrents tracked by the 
+        internal tracker. Use this method to use the Session as a standalone 
+        tracker. 
+        @param tdef A finalized TorrentDef.
+        """
+        infohash = tdef.get_infohash()
+        self.remove_from_internal_tracker_by_infohash(infohash)
+        
+    def remove_from_internal_tracker_by_infohash(self,infohash):
+        """ Remove a torrent def from the list of torrents tracked by the 
+        internal tracker. Use this method to use the Session as a standalone 
+        tracker. 
+        @param infohash Identifier of the torrent def to remove.
+        """
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            filename = self.get_internal_tracker_torrentfilename(infohash)
+            if DEBUG:
+                print >>sys.stderr,"Session: removing itracker entry",filename
+            if os.access(filename,os.F_OK):
+                os.remove(filename)
+            # Bring to attention of Tracker thread
+            self.lm.tracker_rescan_dir()
+        finally:
+            self.sesslock.release()
+
+    #
+    # Notification of events in the Session
+    #
+    def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], objectID = None):
+        """ Add an observer function to the Session. The observer 
+        function will be called when one of the specified events (changeTypes)
+        occurs on the specified subject.
+        
+        The function will be called by a popup thread which can be used
+        indefinitely (within reason) by the higher level code.
+        
+        @param func The observer function. It should accept as its first argument
+        the subject, as second argument the changeType, as third argument an
+        objectID (e.g. the primary key in the observed database) and an 
+        optional list of arguments.
+        @param subject The subject to observe, one of NTFY_* subjects (see 
+        simpledefs).
+        @param changeTypes The list of events to be notified of one of NTFY_* 
+        events.
+        @param objectID The specific object in the subject to monitor (e.g. a
+        specific primary key in a database to monitor for updates.)
+        
+        
+        TODO: Jelle will add per-subject/event description here ;o)
+        
+        """
+        #Called by any thread
+        self.uch.notifier.add_observer(func, subject, changeTypes, objectID) # already threadsafe
+        
+    def remove_observer(self, func):
+        """ Remove observer function. No more callbacks will be made.
+        @param func The observer function to remove. """
+        #Called by any thread
+        self.uch.notifier.remove_observer(func) # already threadsafe
+
+    def open_dbhandler(self,subject):
+        """ Opens a connection to the specified database. Only the thread 
+        calling this method may use this connection. The connection must be 
+        closed with close_dbhandler() when this thread exits.
+        
+        @param subject The database to open. Must be one of the subjects
+        specified here.
+        @return A reference to a DBHandler class for the specified subject or 
+        None when the Session was not started with megacaches enabled. 
+        <pre> NTFY_PEERS -> PeerDBHandler
+        NTFY_TORRENTS -> TorrentDBHandler
+        NTFY_PREFERENCES -> PreferenceDBHandler
+        NTFY_SUPERPEERS -> SuperpeerDBHandler
+        NTFY_FRIENDS -> FriendsDBHandler
+        NTFY_MYPREFERENCES -> MyPreferenceDBHandler
+        NTFY_BARTERCAST -> BartercastDBHandler
+        NTFY_SEARCH -> SearchDBHandler
+        NTFY_TERM -> TermDBHandler
+        NTFY_VOTECAST -> VotecastDBHandler
+        NTFY_CHANNELCAST -> ChannelCastDBHandler
+        NTFY_RICH_METADATA -> MetadataDBHandler
+        </pre>
+        """ 
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            if subject == NTFY_PEERS:
+                return self.lm.peer_db
+            elif subject == NTFY_TORRENTS:
+                return self.lm.torrent_db
+            elif subject == NTFY_PREFERENCES:
+                return self.lm.pref_db
+            elif subject == NTFY_SUPERPEERS:
+                return self.lm.superpeer_db
+            elif subject == NTFY_FRIENDS:
+                return self.lm.friend_db
+            elif subject == NTFY_MYPREFERENCES:
+                return self.lm.mypref_db
+            elif subject == NTFY_BARTERCAST:
+                return self.lm.bartercast_db
+            elif subject == NTFY_SEEDINGSTATS:
+                return self.lm.seedingstats_db
+            elif subject == NTFY_SEEDINGSTATSSETTINGS:
+                return self.lm.seedingstatssettings_db
+            elif subject == NTFY_VOTECAST:
+                return self.lm.votecast_db
+            elif subject == NTFY_SEARCH:
+                return self.lm.search_db
+            elif subject == NTFY_TERM:
+                return self.lm.term_db
+            elif subject == NTFY_CHANNELCAST:
+                return self.lm.channelcast_db
+            elif subject == NTFY_RICH_METADATA:
+                return self.lm.richmetadataDbHandler
+            else:
+                raise ValueError('Cannot open DB subject: '+subject)
+        finally:
+            self.sesslock.release()
+        
+        
+    def close_dbhandler(self,dbhandler):
+        """ Closes the given database connection """
+        dbhandler.close()
+    
+
+    #
+    # Access control
+    #
+    def set_overlay_request_policy(self, reqpol):
+        """
+        Set a function which defines which overlay requests (e.g. dl_helper, rquery msg) 
+        will be answered or will be denied.
+        
+        The function will be called by a network thread and must return 
+        as soon as possible to prevent performance problems.
+        
+        @param reqpol is a BaseLib.Core.RequestPolicy.AbstractRequestPolicy 
+        object.
+        """
+        # Called by any thread
+        # to protect self.sessconfig
+        self.sesslock.acquire()
+        try:
+            overlay_loaded = self.sessconfig['overlay']
+        finally:
+            self.sesslock.release()
+        if overlay_loaded:
+            self.lm.overlay_apps.setRequestPolicy(reqpol) # already threadsafe
+        elif DEBUG:
+            print >>sys.stderr,"Session: overlay is disabled, so no overlay request policy needed"
+
+
+    #
+    # Persistence and shutdown 
+    #
+    def load_checkpoint(self,initialdlstatus=None):
+        """ Restart Downloads from checkpoint, if any.
+        
+        This method allows the API user to manage restoring downloads. 
+        E.g. a video player that wants to start the torrent the user clicked 
+        on first, and only then restart any sleeping torrents (e.g. seeding).
+        The optional initialdlstatus parameter can be set to DLSTATUS_STOPPED
+        to restore all the Downloads in DLSTATUS_STOPPED state.
+        """
+        self.lm.load_checkpoint(initialdlstatus)
+    
+    
+    def checkpoint(self):
+        """ Saves the internal session state to the Session's state dir. """
+        #Called by any thread
+        self.checkpoint_shutdown(stop=False,checkpoint=True,gracetime=None,hacksessconfcheckpoint=False)
+    
+    def shutdown(self,checkpoint=True,gracetime=2.0,hacksessconfcheckpoint=True):
+        """ Checkpoints the session and closes it, stopping the download engine.
+        @param checkpoint Whether to checkpoint the Session state on shutdown.
+        @param gracetime Time to allow for graceful shutdown + signoff (seconds).
+        """ 
+        # Called by any thread
+        self.lm.early_shutdown()
+        self.checkpoint_shutdown(stop=True,checkpoint=checkpoint,gracetime=gracetime,hacksessconfcheckpoint=hacksessconfcheckpoint)
+        # Arno, 2010-08-09: now shutdown after gracetime
+        #self.uch.shutdown()
+    
+    def has_shutdown(self):
+        """ Whether the Session has completely shutdown, i.e., its internal
+        threads are finished and it is safe to quit the process the Session
+        is running in.
+        @return A Boolean.
+        """
+        return self.lm.sessdoneflag.isSet()
+        
+    def get_downloads_pstate_dir(self):
+        """ Returns the directory in which to checkpoint the Downloads in this
+        Session. """
+        # Called by network thread
+        self.sesslock.acquire()
+        try:
+            return os.path.join(self.sessconfig['state_dir'],STATEDIR_DLPSTATE_DIR)
+        finally:
+            self.sesslock.release()
+
+    #
+    # Tribler Core special features
+    #
+    def query_connected_peers(self,query,usercallback,max_peers_to_query=None):
+        """ Ask all Tribler peers we're currently connected to resolve the
+        specified query and return the hits. For each peer that returns
+        hits the usercallback method is called with first parameter the
+        permid of the peer, as second parameter the query string and
+        as third parameter a dictionary of hits. The number of times the 
+        usercallback method will be called is undefined.
+
+        The callback will be called by a popup thread which can be used
+        indefinitely (within reason) by the higher level code.
+
+        At the moment we support three types of query, which are all queries for
+        torrent files that match a set of keywords. The format of the
+        query string is "SIMPLE kw1 kw2 kw3" (type 1) or "SIMPLE+METADATA kw1 kw2 
+        kw3" (type 2). In the future we plan to support full SQL queries.
+        
+        For SIMPLE queries the dictionary of hits consists of 
+        (infohash,torrentrecord) pairs. The torrentrecord is a 
+        dictionary that contains the following keys:
+        <pre>
+        * 'content_name': The 'name' field of the torrent as Unicode string.
+        * 'length': The total size of the content in the torrent.
+        * 'leecher': The currently known number of downloaders.
+        * 'seeder': The currently known number of seeders.
+        * 'category': A list of category strings the torrent was classified into
+          by the remote peer.
+        </pre>
+        
+        From Session API version 1.0.2 the following keys were added
+        to the torrentrecord:
+        <pre>
+        * 'torrent_size': The size of the .torrent file.
+        </pre>
+
+        From Session API version 1.0.4 the following keys were added
+        to the torrentrecord:
+        <pre>
+        * 'channel_permid': PermID of the channel this torrent belongs to (or '')
+        * 'channel_name': channel name as Unicode string (or '').
+        </pre>
+
+        For SIMPLE+METADATA queries there is an extra field
+        <pre>
+        * 'torrent_file': Bencoded contents of the .torrent file. 
+        </pre>
+        The torrents will *not* be automatically added to the TorrentDBHandler 
+        (if enabled) at the time of the call.
+
+        
+        The third type of query: search for channels. It is used to query for 
+        channels: either a particular channel matching the permid in the query, 
+        or a list of channels whose names match the keywords in the query
+        by sending the query to connected peers. 
+        
+        The format of the query in the corresponding scenarios should be: 
+        a. keyword-based query: "CHANNEL k bbc"     
+            ('k' stands for keyword-based and ' '{space} is a separator followed by 
+            the keywords)
+        b. permid-based query: "CHANNEL p f34wrf2345wfer2345wefd3r34r54" 
+            ('p' stands for permid-based and ' '{space} is a separator followed by
+            the permid)
+        
+        In each of the above 2 cases, the format of the hits that is returned 
+        by the queried peer is a dictionary of hits of (signature,channelrecord). 
+        The channelrecord is a dictionary that contains the following keys: 
+        <pre>
+        * 'publisher_id': a PermID
+        * 'publisher_name': as Unicode string
+        * 'infohash': 20-byte SHA1 hash
+        * 'torrenthash': 20-byte SHA1 hash
+        * 'torrentname': as Unicode string
+        * 'time_stamp': as integer
+        </pre>
+
+        
+        @param query A Unicode query string adhering to the above spec.
+        @param usercallback A function adhering to the above spec.
+        """
+        self.sesslock.acquire()
+        try:
+            if self.sessconfig['overlay']:
+                if not (query.startswith('SIMPLE ') or query.startswith('SIMPLE+METADATA ')) and not query.startswith('CHANNEL '):
+                    raise ValueError('Query does not start with SIMPLE or SIMPLE+METADATA or CHANNEL')
+                
+                from BaseLib.Core.SocialNetwork.RemoteQueryMsgHandler import RemoteQueryMsgHandler
+                
+                rqmh = RemoteQueryMsgHandler.getInstance()
+                rqmh.send_query(query,usercallback,max_peers_to_query=max_peers_to_query)
+            else:
+                raise OperationNotEnabledByConfigurationException("Overlay not enabled")
+        finally:
+            self.sesslock.release()
+            
+    
+    def download_torrentfile_from_peer(self,permid,infohash,usercallback):
+        """ Ask the designated peer to send us the torrentfile for the torrent
+        identified by the passed infohash. If the torrent is successfully 
+        received, the usercallback method is called with the infohash as first
+        and the contents of the torrentfile (bencoded dict) as second parameter.
+        If the torrent could not be obtained, the callback is not called.
+        The torrent will have been added to the TorrentDBHandler (if enabled)
+        at the time of the call.
+        
+        @param permid The PermID of the peer to query.
+        @param infohash The infohash of the torrent.
+        @param usercallback A function adhering to the above spec.
+        """
+        # ARNOCOMMENT: Perhaps make save to database optional.
+        self.sesslock.acquire()
+        try:
+            if self.sessconfig['overlay']:
+                from BaseLib.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler
+                
+                rtorrent_handler = RemoteTorrentHandler.getInstance()
+                rtorrent_handler.download_torrent(permid,infohash,usercallback)
+            else:
+                raise OperationNotEnabledByConfigurationException("Overlay not enabled")
+        finally:
+            self.sesslock.release()
+        
+
+    #
+    # Internal persistence methods
+    #
+    def checkpoint_shutdown(self,stop,checkpoint,gracetime,hacksessconfcheckpoint):
+        """ Checkpoints the Session and optionally shuts down the Session.
+        @param stop Whether to shutdown the Session as well.
+        @param checkpoint Whether to checkpoint at all, or just to stop.
+        @param gracetime Time to allow for graceful shutdown + signoff (seconds). 
+        @param hacksessconfcheckpoint Whether to also save the runtime
+        SessionConfig to disk first (workaround, see comment below).
+        """
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            # Arno: Make checkpoint optional on shutdown. At the moment setting 
+            # the config at runtime is not possible (see SessionRuntimeConfig)
+            # so this has little use, and interferes with our way of
+            # changing the startup config, which is to write a new
+            # config to disk that will be read at start up.
+            if hacksessconfcheckpoint:
+                try:
+                    self.save_pstate_sessconfig()
+                except Exception,e:
+                    # Non-fatal: report via rawserver error callback and
+                    # continue the checkpoint/shutdown anyway.
+                    self.lm.rawserver_nonfatalerrorfunc(e)
+
+            # Checkpoint all Downloads and stop NetworkThread
+            if DEBUG:
+                print >>sys.stderr,"Session: checkpoint_shutdown"
+            self.lm.checkpoint(stop=stop,checkpoint=checkpoint,gracetime=gracetime)
+        finally:
+            self.sesslock.release()
+
+    def save_pstate_sessconfig(self):
+        """ Save the runtime SessionConfig to disk.
+        Takes a copy of the current startup config and writes it to the
+        default config filename inside the Session's state dir. """
+        # Called by any thread
+        sscfg = self.get_current_startup_config_copy()
+        cfgfilename = Session.get_default_config_filename(sscfg.get_state_dir())
+        sscfg.save(cfgfilename)
+
+
+    def get_default_config_filename(state_dir):
+        """ Return the name of the file where a session config is saved by default. 
+        @param state_dir The Session state directory.
+        @return A filename 
+        """
+        return os.path.join(state_dir,STATEDIR_SESSCONFIG)
+    # Pre-decorator staticmethod form, consistent with the file's older
+    # Python 2 style (see 'except Exception,e' elsewhere in this module).
+    get_default_config_filename = staticmethod(get_default_config_filename)
+
+
+    def get_internal_tracker_torrentfilename(self,infohash):
+        """ Return the absolute pathname of the torrent file used by the
+        internal tracker.
+        @param infohash The 20-byte infohash of the torrent.
+        @return A filename
+        """
+        trackerdir = self.get_internal_tracker_dir()
+        # Filename is the hex-encoded infohash + '.torrent'
+        basename = binascii.hexlify(infohash)+'.torrent' # ignore .tribe stuff, not vital
+        return os.path.join(trackerdir,basename)
+
+    def get_nat_type(self, callback=None):
+        """ Return the type of Network Address Translator (NAT) detected.
+
+        When a callback parameter is supplied it will always be
+        called. When the NAT-type is already known the callback will
+        be made instantly. Otherwise, the callback will be made when
+        the NAT discovery has finished.
+
+        The callback will be called by a popup thread which can be used
+        indefinitely (within reason) by the higher level code.
+
+        Return values:
+        "Blocked"
+        "Open Internet"
+        "Restricted Cone Firewall"
+        "Port Restricted Cone Firewall"
+        "Full Cone NAT"
+        "Restricted Cone NAT"
+        "Port Restricted Cone NAT"
+        "Symmetric NAT"
+        "Unknown NAT/Firewall"
+
+        @param callback Optional callback used to notify the NAT type
+        @return String 
+        """
+        # TODO: define constants in simpledefs for these
+        # Called by any thread
+        self.sesslock.acquire()
+        try:
+            from BaseLib.Core.NATFirewall.ConnectionCheck import ConnectionCheck
+            
+            # ConnectionCheck is a singleton; it performs the actual discovery.
+            return ConnectionCheck.getInstance(self).get_nat_type(callback=callback)
+        finally:
+            self.sesslock.release()
+
+    #
+    # Friendship functions
+    #
+    def send_friendship_message(self,permid,mtype,approved=None):
+        """ Send friendship msg to the specified peer.
+        
+        @param permid The PermID of the peer to send the message to.
+        @param mtype F_REQUEST_MSG or F_RESPONSE_MSG; F_FORWARD_MSG cannot
+        be sent directly by the user (raises ValueError).
+        @param approved For F_RESPONSE_MSG: whether you want him as friend
+        or not.
+        """
+        self.sesslock.acquire()
+        try:
+            if self.sessconfig['overlay']:
+                if mtype == F_FORWARD_MSG:
+                    raise ValueError("User cannot send FORWARD messages directly")
+                
+                from BaseLib.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler
+                
+                fmh = FriendshipMsgHandler.getInstance()
+                params = {}
+                if approved is not None:
+                    # Response flag travels as an int in the msg params dict
+                    params['response'] = int(approved)
+                fmh.anythread_send_friendship_msg(permid,mtype,params)
+            else:
+                raise OperationNotEnabledByConfigurationException("Overlay not enabled")
+        finally:
+            self.sesslock.release()
+
+
+    def set_friendship_callback(self,usercallback):
+        """ When a new friendship request is received the given
+        callback function is called with as first parameter the
+        requester's permid and as second parameter a dictionary of
+        request arguments:
+            callback(requester_permid,params)
+
+        The callback is called by a popup thread which can be used
+        indefinitely (within reason) by the higher level code.
+        
+        @param usercallback A callback function adhering to the above spec.
+        """
+        self.sesslock.acquire()
+        try:
+            if self.sessconfig['overlay']:
+                from BaseLib.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler
+                
+                # Register on the singleton handler; replaces any previous callback.
+                fmh = FriendshipMsgHandler.getInstance()
+                fmh.register_usercallback(usercallback)
+            else:
+                raise OperationNotEnabledByConfigurationException("Overlay not enabled")
+        finally:
+            self.sesslock.release()
+            
+            
+    # 02-06-2010 Andrea: returns a reference to SubtitleSupport instance, which
+    # is the facade (i.e. acts as the entry point) of the Subtitles subsystem
+    def get_subtitles_support_facade(self):
+        '''
+        Returns an instance of SubtitleSupport, which is intended to be used
+        by clients to interact with the subtitles subsystem.
+        
+        Subsequent calls to this method should always return the same instance.
+        
+        If the instance is not available, None will be returned
+        '''
+        try:
+            return self.lm.overlay_apps.subtitle_support
+        except:
+            return None
diff --git a/instrumentation/next-share/BaseLib/Core/SessionConfig.py b/instrumentation/next-share/BaseLib/Core/SessionConfig.py
new file mode 100644 (file)
index 0000000..ff2c368
--- /dev/null
@@ -0,0 +1,1319 @@
+# Written by Arno Bakker, George Milescu 
+# see LICENSE.txt for license information
+""" Controls the operation of a Session """
+
+#
+# WARNING: When extending this class:
+#
+# 1. Add a JavaDoc description for each method you add.
+# 2. Also add the methods to APIImplementation/SessionRuntimeConfig.py  
+# 3. Document your changes in API.py
+#
+#
+
+import sys
+import copy
+import pickle
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.defaults import sessdefaults
+from BaseLib.Core.Base import *
+from BaseLib.Core.BitTornado.RawServer import autodetect_socket_style
+from BaseLib.Core.Utilities.utilities import find_prog_in_PATH
+
+
+class SessionConfigInterface:
+    """ 
+    (key,value) pair config of global parameters, 
+    e.g. PermID keypair, listen port, max upload speed, etc.
+    
+    Use SessionStartupConfig for creating and manipulating configurations
+    before session startup time. This is just a parent class.
+    """
+    def __init__(self,sessconfig=None):
+        """ Constructor. 
+        @param sessconfig Optional dictionary used internally 
+        to make this a copy constructor.
+        """
+
+        if sessconfig is not None: # copy constructor
+            self.sessconfig = sessconfig
+            return
+        
+        # sessconfig: the (key,value) dict this interface wraps
+        self.sessconfig = {}
+        
+        # Define the built-in default here
+        self.sessconfig.update(sessdefaults)
+        
+        # Set video_analyser_path
+        if sys.platform == 'win32':
+            ffmpegname = "ffmpeg.exe"
+        else:
+            ffmpegname = "ffmpeg"
+    
+        ffmpegpath = find_prog_in_PATH(ffmpegname)
+        if ffmpegpath is None:
+            # Not found in PATH: fall back to a platform-specific default,
+            # resolved relative to the working directory at runtime.
+            if sys.platform == 'win32':
+                self.sessconfig['videoanalyserpath'] = ffmpegname
+            elif sys.platform == 'darwin':
+                self.sessconfig['videoanalyserpath'] = "macbinaries/ffmpeg"
+            else:
+                self.sessconfig['videoanalyserpath'] = ffmpegname
+        else:
+            self.sessconfig['videoanalyserpath'] = ffmpegpath
+
+        # Whether IPv6 sockets also accept IPv4 connections on this platform
+        self.sessconfig['ipv6_binds_v4'] = autodetect_socket_style()
+
+
+
+    def set_state_dir(self,statedir):
+        """ Set the directory to store the Session's state in.
+        @param statedir  A preferably absolute path name. If the directory
+        does not yet exist it will be created at Session create time.
+        """
+        self.sessconfig['state_dir'] = statedir
+    
+    def get_state_dir(self):
+        """ Returns the directory the Session stores its state in. 
+        @return An absolute path name. """
+        return self.sessconfig['state_dir']
+    
+    def set_install_dir(self,installdir):
+        """ Set the directory in which the Tribler Core software is installed. 
+        @param installdir An absolute path name
+        """
+        self.sessconfig['install_dir'] = installdir
+    
+    def get_install_dir(self):
+        """ Returns the directory the Tribler Core software is installed in.
+        @return An absolute path name. """
+        return self.sessconfig['install_dir']
+    
+    
+    def set_permid_keypair_filename(self,keypairfilename):
+        """ Set the filename containing the Elliptic Curve keypair to use for 
+        PermID-based authentication in this Session. 
+        
+        Note: if a Session is started with a SessionStartupConfig that
+        points to an existing state dir and that state dir contains a saved
+        keypair, that keypair will be used unless a different keypair is
+        explicitly configured via this method.
+        @param keypairfilename An absolute path name.
+        """
+        self.sessconfig['eckeypairfilename'] = keypairfilename
+
+    def get_permid_keypair_filename(self):
+        """ Returns the filename of the Session's keypair.
+        @return An absolute path name. """
+        return self.sessconfig['eckeypairfilename']
+    
+
+    def set_listen_port(self,port):
+        """ Set the UDP and TCP listen port for this Session.
+        @param port A port number.
+        """
+        # A single fixed port is used: minport == maxport.
+        self.sessconfig['minport'] = port
+        self.sessconfig['maxport'] = port
+
+    def get_listen_port(self):
+        """ Returns the current UDP/TCP listen port.
+        @return Port number. """
+        return self.sessconfig['minport']
+        
+    #
+    # Advanced network settings
+    #
+    def set_ip_for_tracker(self,value):
+        """ IP address to report to the tracker (default = set automatically).
+        @param value  An IP address as string. """
+        self.sessconfig['ip'] = value
+
+    def get_ip_for_tracker(self):
+        """ Returns the IP address being reported to the tracker.
+        @return String """
+        return self.sessconfig['ip']
+
+    def set_bind_to_addresses(self,value):
+        """ Set the list of IP addresses/hostnames to bind to locally.
+        @param value A list of IP addresses as strings. """
+        self.sessconfig['bind'] = value
+
+    def get_bind_to_addresses(self):
+        """ Returns the list of IP addresses bound to.
+        @return list """
+        return self.sessconfig['bind']
+
+    def set_upnp_mode(self,value):
+        """ Use to autoconfigure a UPnP router to forward the UDP/TCP listen 
+        port to this host:
+        <pre>
+         * UPNPMODE_DISABLED: Autoconfigure turned off.
+         * UPNPMODE_WIN32_HNetCfg_NATUPnP: Use Windows COM interface (slow)
+         * UPNPMODE_WIN32_UPnP_UPnPDeviceFinder: Use Windows COM interface (faster)
+         * UPNPMODE_UNIVERSAL_DIRECT: Talk UPnP directly to the network (best)
+        </pre>
+        @param value UPNPMODE_* 
+        """
+        self.sessconfig['upnp_nat_access'] = value
+
+    def get_upnp_mode(self):
+        """ Returns the UPnP mode set. 
+        @return UPNPMODE_* """
+        return self.sessconfig['upnp_nat_access']
+
+    def set_autoclose_timeout(self,value):
+        """ Time to wait before closing sockets on which nothing has been
+        received.
+        @param value A number of seconds.
+        """
+        self.sessconfig['timeout'] = value
+
+    def get_autoclose_timeout(self):
+        """ Returns the autoclose timeout.
+        @return A number of seconds. """
+        return self.sessconfig['timeout']
+
+    def set_autoclose_check_interval(self,value):
+        """ Time to wait between checking if any connections have timed out.
+        @param value A number of seconds.
+        """
+        self.sessconfig['timeout_check_interval'] = value
+
+    def get_autoclose_check_interval(self):
+        """ Returns the autoclose check interval.
+        @return A number of seconds. """
+        return self.sessconfig['timeout_check_interval']
+
+    #
+    # Enable/disable Tribler features 
+    #
+    def set_megacache(self,value):
+        """ Enable megacache databases to cache peers, torrent files and 
+        preferences (default = True).
+        @param value Boolean. """
+        self.sessconfig['megacache'] = value
+
+    def get_megacache(self):
+        """ Returns whether Megacache is enabled.
+        @return Boolean. """
+        return self.sessconfig['megacache']
+
+    #
+    # Secure Overlay
+    #
+    def set_overlay(self,value):
+        """ Enable overlay swarm to enable Tribler's special features 
+        (default = True).
+        @param value Boolean. 
+        """
+        # NOTE: several Session methods raise
+        # OperationNotEnabledByConfigurationException when this is False.
+        self.sessconfig['overlay'] = value
+
+    def get_overlay(self):
+        """ Returns whether overlay-swarm extension is enabled. The overlay
+        swarm allows strong authentication of peers and is used for all
+        Tribler-specific messages.
+        @return Boolean. """
+        return self.sessconfig['overlay']
+
+    def set_overlay_max_message_length(self,value):
+        """ Maximal message length for messages sent over the secure overlay.
+        @param value A number of bytes.
+        """
+        self.sessconfig['overlay_max_message_length'] = value
+
+    def get_overlay_max_message_length(self):
+        """ Returns the maximum overlay-message length.
+        @return A number of bytes. """
+        return self.sessconfig['overlay_max_message_length']
+
+
+    #
+    # Buddycast
+    #
+    def set_buddycast(self,value):
+        """ Enable buddycast recommendation system at startup (default = True)
+        @param value Boolean.
+        """
+        self.sessconfig['buddycast'] = value
+
+    def get_buddycast(self):
+        """ Returns whether buddycast is enabled at startup. 
+        @return Boolean."""
+        return self.sessconfig['buddycast']
+
+    def set_start_recommender(self,value):
+        """ Buddycast can be temporarily disabled via this parameter 
+        (default = True). Must have been enabled at startup, see
+        set_buddycast().
+        @param value Boolean. 
+        """
+        self.sessconfig['start_recommender'] = value
+
+    def get_start_recommender(self):
+        """ Returns whether Buddycast is currently enabled (see
+        set_start_recommender()).
+        @return Boolean."""
+        return self.sessconfig['start_recommender']
+
+    def set_buddycast_interval(self,value):
+        """ Number of seconds to pause between exchanging preference with a 
+        peer in Buddycast.
+        @param value A number of seconds.
+        """
+        self.sessconfig['buddycast_interval'] = value
+
+    def get_buddycast_interval(self):
+        """ Returns the number of seconds between Buddycast pref. exchanges. 
+        @return A number of seconds. """
+        return self.sessconfig['buddycast_interval']
+
+    def set_buddycast_collecting_solution(self,value):
+        """ Set the Buddycast collecting solution. Only one policy implemented
+        at the moment:
+        <pre>
+         * BCCOLPOLICY_SIMPLE: Simplest solution: per torrent/buddycasted peer/4 hours,
+         </pre>
+        @param value BCCOLPOLICY_* 
+        """
+        self.sessconfig['buddycast_collecting_solution'] = value
+
+    def get_buddycast_collecting_solution(self):
+        """ Returns the Buddycast collecting solution. 
+        @return BCCOLPOLICY_* """
+        return self.sessconfig['buddycast_collecting_solution']
+
+    def set_buddycast_max_peers(self,value):
+        """ Set max number of peers to use for Buddycast recommendations.
+        @param value A number of peers. """
+        self.sessconfig['buddycast_max_peers'] = value
+
+    def get_buddycast_max_peers(self):
+        """ Return the max number of peers to use for Buddycast recommendations.
+        @return A number of peers.
+        """
+        return self.sessconfig['buddycast_max_peers']
+
+    #
+    # ProxyService_ parameters
+    #
+    def set_download_help(self,value):
+        """ Enable download helping/cooperative download (default = True).
+        @param value Boolean. """
+        self.sessconfig['download_help'] = value
+
+    def get_download_help(self):
+        """ Returns whether download help is enabled. 
+        @return Boolean. """
+        return self.sessconfig['download_help']
+
+    def set_download_help_dir(self,value):
+        """ Set the directory for storing state and content for download
+        helping (default = default destination dir (see get_default_dest_dir())
+        + 'downloadhelp').
+        @param value An absolute path. """
+        self.sessconfig['download_help_dir'] = value
+
+    def get_download_help_dir(self):
+        """ Returns the directory for download helping storage. 
+        @return An absolute path name. """
+        return self.sessconfig['download_help_dir']
+
+    def set_proxyservice_status(self,value):
+        """ Set the status of the proxyservice (on or off).
+        
+        ProxyService off means the current node could not be used as a proxy. ProxyService on means other nodes will be able to use it as a proxy.
+        
+        @param value: one of the possible two values: PROXYSERVICE_OFF, PROXYSERVICE_ON
+        """
+        if value == PROXYSERVICE_OFF or value == PROXYSERVICE_ON:
+            self.sessconfig['proxyservice_status'] = value
+        else:
+            # If the method is called with an incorrect value, turn off the ProxyService
+            self.sessconfig['proxyservice_status'] = PROXYSERVICE_OFF
+
+    def get_proxyservice_status(self):
+        """ Returns the status of the proxyservice (on or off).
+        @return: one of the possible two values: PROXYSERVICE_OFF, PROXYSERVICE_ON
+        """
+        return self.sessconfig['proxyservice_status']
+    #
+    # _ProxyService
+    #
+
+    #
+    # Torrent file collecting
+    #
+    def set_torrent_collecting(self,value):
+        """ Automatically collect torrents from peers in the network (default = 
+        True).
+        @param value Boolean. 
+        """
+        self.sessconfig['torrent_collecting'] = value
+
+    def get_torrent_collecting(self):
+        """ Returns whether to automatically collect torrents.
+        @return Boolean. """
+        return self.sessconfig['torrent_collecting']
+
+    def set_torrent_collecting_max_torrents(self,value):
+        """ Set the maximum number of torrents to collect from other peers.
+        @param value A number of torrents.
+        """
+        self.sessconfig['torrent_collecting_max_torrents'] = value
+
+    def get_torrent_collecting_max_torrents(self):
+        """ Returns the maximum number of torrents to collect.
+        @return A number of torrents. """
+        return self.sessconfig['torrent_collecting_max_torrents']
+
+    def set_torrent_collecting_dir(self,value):
+        """ Where to place collected torrents? (default is state_dir + 'collected_torrent_files')
+        @param value An absolute path.
+        """
+        self.sessconfig['torrent_collecting_dir'] = value
+
+    def get_torrent_collecting_dir(self):
+        """ Returns the directory to save collected torrents.
+        @return An absolute path name. """
+        return self.sessconfig['torrent_collecting_dir']
+    
+    def set_torrent_collecting_rate(self,value):
+        """ Maximum download rate to use for torrent collecting.
+        @param value A rate in KB/s. """
+        self.sessconfig['torrent_collecting_rate'] = value
+
+    def get_torrent_collecting_rate(self):
+        """ Returns the download rate to use for torrent collecting.
+        @return A rate in KB/s. """
+        return self.sessconfig['torrent_collecting_rate']
+
+    def set_torrent_checking(self,value):
+        """ Whether to automatically check the health of collected torrents by
+        contacting their trackers (default = True).
+        @param value Boolean 
+        """
+        self.sessconfig['torrent_checking'] = value
+
+    def get_torrent_checking(self):
+        """ Returns whether to check health of collected torrents.
+        @return Boolean. """
+        return self.sessconfig['torrent_checking']
+
+    def set_torrent_checking_period(self,value):
+        """ Interval between automatic torrent health checks.
+        @param value An interval in seconds.
+        """
+        self.sessconfig['torrent_checking_period'] = value
+
+    def get_torrent_checking_period(self):
+        """ Returns the check interval.
+        @return A number of seconds. """
+        return self.sessconfig['torrent_checking_period']
+
+    def set_stop_collecting_threshold(self,value):
+        """ Stop collecting more torrents if the disk has less free space
+        than this limit.
+        @param value A limit in MB.
+        """
+        self.sessconfig['stop_collecting_threshold'] = value
+
+    def get_stop_collecting_threshold(self):
+        """ Returns the disk-space limit when to stop collecting torrents. 
+        @return A number of megabytes. """
+        return self.sessconfig['stop_collecting_threshold']
+
+    #
+    # The Tribler dialback mechanism is used to test whether a Session is
+    # reachable from the outside and what its external IP address is.
+    #
+    def set_dialback(self,value):
+        """ Use other peers to determine external IP address (default = True)
+        @param value Boolean 
+        """
+        self.sessconfig['dialback'] = value
+
+    def get_dialback(self):
+        """ Returns whether to use the dialback mechanism. 
+        @return Boolean. """
+        return self.sessconfig['dialback']
+
+    #
+    # Tribler's social networking feature transmits a nickname and picture
+    # to all Tribler peers it meets.
+    #
+    def set_social_networking(self,value):
+        """ Enable social networking. If enabled, a message containing the
+        user's nickname and icon is sent to each Tribler peer met
+        (default = True).
+        @param value Boolean 
+        """
+        # Stored under the legacy config key 'socnet'
+        self.sessconfig['socnet'] = value
+
+    def get_social_networking(self):
+        """ Returns whether social network is enabled.
+        @return Boolean. """
+        return self.sessconfig['socnet']
+
+    def set_nickname(self,value):
+        """ The nickname you want to show to others.
+        @param value A Unicode string.
+        """
+        self.sessconfig['nickname'] = value
+
+    def get_nickname(self):
+        """ Returns the set nickname.
+        @return A Unicode string. """
+        return self.sessconfig['nickname']
+
+    def set_mugshot(self,value, mime = 'image/jpeg'):
+        """ The picture of yourself you want to show to others.
+        @param value A string of binary data of your image.
+        @param mime A string of the mimetype of the data
+        """
+        # Stored as a (mime, data) tuple
+        self.sessconfig['mugshot'] = (mime, value)
+
+    def get_mugshot(self):
+        """ Returns the mime-type and binary image data of your picture.
+        @return (String, String) mimetype and value, or (None, None) if no
+        mugshot was set. """
+        if self.sessconfig['mugshot'] is None:
+            return None, None
+        else:
+            return self.sessconfig['mugshot']
+    
+    def set_peer_icon_path(self,value):
+        """ Directory to store received peer icons (Default is statedir +
+        STATEDIR_PEERICON_DIR).
+        @param value An absolute path. """
+        self.sessconfig['peer_icon_path'] = value
+
+    def get_peer_icon_path(self):
+        """ Returns the directory to store peer icons.
+        @return An absolute path name. """
+        return self.sessconfig['peer_icon_path']
+
+    #
+    # Tribler remote query: ask other peers when looking for a torrent file 
+    # or peer
+    #
+    def set_remote_query(self,value):
+        """ Enable queries from other peers. At the moment peers can ask
+        whether this Session has collected or opened a torrent that matches
+        a specified keyword query. (default = True)
+        @param value Boolean"""
+        self.sessconfig['rquery'] = value
+
+    def get_remote_query(self):
+        """ Returns whether remote query is enabled. 
+        @return Boolean. """
+        return self.sessconfig['rquery']
+
+    #
+    # BarterCast
+    #
+    def set_bartercast(self,value):
+        """ Exchange upload/download statistics with peers (default = True)
+        @param value Boolean
+        """
+        self.sessconfig['bartercast'] = value
+
+    def get_bartercast(self):
+        """ Returns whether to exchange statistics with peers.
+        @return Boolean. """
+        return self.sessconfig['bartercast']
+
+
+    #
+    # For Tribler Video-On-Demand
+    #
+    def set_video_analyser_path(self,value):
+        """ Path to video analyser FFMPEG. The analyser is used to guess the
+        bitrate of a video if that information is not present in the torrent
+        definition. (default = look for it in $PATH)
+        @param value An absolute path name.
+        """
+        self.sessconfig['videoanalyserpath'] = value
+    
+    def get_video_analyser_path(self):
+        """ Returns the path of the FFMPEG video analyser.
+        @return An absolute path name. """
+        return self.sessconfig['videoanalyserpath'] # strings immutable, safe to return directly
+    
+
+    #
+    # Tribler's internal tracker
+    #
+    def set_internal_tracker(self,value):
+        """ Enable internal tracker (default = True)
+        @param value Boolean.
+        """
+        self.sessconfig['internaltracker'] = value
+
+    def get_internal_tracker(self):
+        """ Returns whether the internal tracker is enabled.
+        @return Boolean. """
+        return self.sessconfig['internaltracker']
+
+    def set_internal_tracker_url(self,value):
+        """ Set the internal tracker URL (default = determined dynamically
+        from Session's IP+port)
+        @param value URL.
+        """
+        self.sessconfig['tracker_url'] = value
+
+    def get_internal_tracker_url(self):
+        """ Returns the URL of the tracker as set by set_internal_tracker_url().
+        Overridden at runtime by Session class.
+        @return URL. """
+        return self.sessconfig['tracker_url']
+
+
+    def set_mainline_dht(self,value):
+        """ Enable mainline DHT support (default = True)
+        @param value Boolean.
+        """
+        self.sessconfig['mainline_dht'] = value
+
+    def get_mainline_dht(self):
+        """ Returns whether mainline DHT support is enabled.
+        @return Boolean. """
+        return self.sessconfig['mainline_dht']
+
+
+    #
+    # Internal tracker access control settings
+    #
+    def set_tracker_allowed_dir(self,value):
+        """ Only accept tracking requests for torrent in this dir (default is
+        Session state-dir + STATEDIR_ITRACKER_DIR
+        @param value An absolute path name.
+        """
+        self.sessconfig['tracker_allowed_dir'] = value
+
+    def get_tracker_allowed_dir(self):
+        """ Returns the internal tracker's directory of allowed torrents.
+        @return An absolute path name. """
+        return self.sessconfig['tracker_allowed_dir']
+
+    def set_tracker_allowed_list(self,value):
+        """ Only allow peers to register for torrents that appear in the
+        specified file. Cannot be used in combination with set_tracker_allowed_dir()
+        @param value An absolute filename containing a list of torrent infohashes in hex format, one per 
+        line. """
+        self.sessconfig['tracker_allowed_list'] = value
+
+    def get_tracker_allowed_list(self):
+        """ Returns the filename of the list of allowed torrents.
+        @return An absolute path name. """
+        return self.sessconfig['tracker_allowed_list']
+
+    def set_tracker_allowed_controls(self,value):
+        """ Allow special keys in torrents in the allowed_dir to affect tracker
+        access.
+        @param value Boolean
+        """
+        self.sessconfig['tracker_allowed_controls'] = value
+
+    def get_tracker_allowed_controls(self):
+        """ Returns whether to allow allowed torrents to control tracker access.
+        @return Boolean. """
+        return self.sessconfig['tracker_allowed_controls']
+
+    def set_tracker_allowed_ips(self,value):
+        """ Only allow connections from IPs specified in the given file; file 
+        contains subnet data in the format: aa.bb.cc.dd/len.
+        @param value An absolute path name.
+        """
+        self.sessconfig['tracker_allowed_ips'] = value
+
+    def get_tracker_allowed_ips(self):
+        """ Returns the filename containing allowed IP addresses. 
+        @return An absolute path name."""
+        return self.sessconfig['tracker_allowed_ips']
+
+    def set_tracker_banned_ips(self,value):
+        """ Don't allow connections from IPs specified in the given file; file
+        contains IP range data in the format: xxx:xxx:ip1-ip2
+        @param value An absolute path name.
+        """
+        self.sessconfig['tracker_banned_ips'] = value
+
+    def get_tracker_banned_ips(self):
+        """ Returns the filename containing banned IP addresses. 
+        @return An absolute path name. """
+        return self.sessconfig['tracker_banned_ips']
+
+    def set_tracker_only_local_override_ip(self,value):
+        """ Ignore the 'ip' parameter in the GET announce from machines which 
+        aren't on local network IPs.
+        <pre>
+         * ITRACK_IGNORE_ANNOUNCEIP_NEVER
+         * ITRACK_IGNORE_ANNOUNCEIP_ALWAYS
+         * ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK
+        </pre>
+        @param value ITRACK_IGNORE_ANNOUNCEIP*
+        """
+        self.sessconfig['tracker_only_local_override_ip'] = value
+
+    def get_tracker_only_local_override_ip(self):
+        """ Returns the ignore policy for 'ip' parameters in announces. 
+        @return ITRACK_IGNORE_ANNOUNCEIP_* """
+        return self.sessconfig['tracker_only_local_override_ip']
+
+    def set_tracker_parse_dir_interval(self,value):
+        """ Seconds between reloading of allowed_dir or allowed_file and 
+        allowed_ips and banned_ips lists.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_parse_dir_interval'] = value
+
+    def get_tracker_parse_dir_interval(self):
+        """ Returns the number of seconds between refreshes of access control
+        info.
+        @return A number of seconds. """
+        return self.sessconfig['tracker_parse_dir_interval']
+
+    def set_tracker_scrape_allowed(self,value):
+        """ Allow scrape access on the internal tracker (with a scrape request
+        a BitTorrent client can retrieve information about how many peers are
+        downloading the content).
+        <pre>
+        * ITRACKSCRAPE_ALLOW_NONE: Don't allow scrape requests.
+        * ITRACKSCRAPE_ALLOW_SPECIFIC: Allow scrape requests for a specific torrent.
+        * ITRACKSCRAPE_ALLOW_FULL: Allow scrape of all torrents at once.
+        </pre>
+        @param value ITRACKSCRAPE_* 
+        """
+        self.sessconfig['tracker_scrape_allowed'] = value
+
+    def get_tracker_scrape_allowed(self):
+        """ Returns the scrape access policy.
+        @return ITRACKSCRAPE_ALLOW_* """
+        return self.sessconfig['tracker_scrape_allowed']
+
+    def set_tracker_allow_get(self,value):
+        """ Setting this parameter adds /file?hash={hash} links to the
+        overview page that the internal tracker makes available via HTTP
+        at hostname:listenport. These links allow users to download the 
+        torrent file from the internal tracker. Use with 'allowed_dir' parameter.
+        @param value Boolean.
+        """
+        self.sessconfig['tracker_allow_get'] = value
+
+    def get_tracker_allow_get(self):
+        """ Returns whether to allow HTTP torrent-file downloads from the
+        internal tracker.
+        @return Boolean. """
+        return self.sessconfig['tracker_allow_get']
+
+
+    #
+    # Controls for internal tracker's output as Web server
+    #
+    def set_tracker_favicon(self,value):
+        """ File containing image/x-icon data to return when browser requests 
+        favicon.ico from the internal tracker. (Default = Tribler/Images/tribler.ico)
+        @param value An absolute filename. 
+        """
+        self.sessconfig['tracker_favicon'] = value
+
+    def get_tracker_favicon(self):
+        """ Returns the filename of the internal tracker favicon. 
+        @return An absolute path name. """
+        return self.sessconfig['tracker_favicon']
+
+    def set_tracker_show_infopage(self,value):
+        """ Whether to display an info page when the tracker's root dir is 
+        requested via HTTP.
+        @param value Boolean
+        """
+        self.sessconfig['tracker_show_infopage'] = value
+
+    def get_tracker_show_infopage(self):
+        """ Returns whether to show an info page on the internal tracker. 
+        @return Boolean. """
+        return self.sessconfig['tracker_show_infopage']
+
+    def set_tracker_infopage_redirect(self,value):
+        """ A URL to redirect the request for an info page to.
+        @param value URL.
+        """
+        self.sessconfig['tracker_infopage_redirect'] = value
+
+    def get_tracker_infopage_redirect(self):
+        """ Returns the URL to redirect request for info pages to. 
+        @return URL """
+        return self.sessconfig['tracker_infopage_redirect']
+
+    def set_tracker_show_names(self,value):
+        """ Whether to display names from the 'allowed dir'.
+        @param value Boolean.
+        """
+        self.sessconfig['tracker_show_names'] = value
+
+    def get_tracker_show_names(self):
+        """ Returns whether the tracker displays names from the 'allowed dir'. 
+        @return Boolean. """
+        return self.sessconfig['tracker_show_names']
+
+    def set_tracker_keep_dead(self,value):
+        """ Keep dead torrents after they expire (so they still show up on your
+        /scrape and web page)
+        @param value Boolean.
+        """
+        self.sessconfig['tracker_keep_dead'] = value
+
+    def get_tracker_keep_dead(self):
+        """ Returns whether to keep dead torrents for statistics. 
+        @return Boolean. """
+        return self.sessconfig['tracker_keep_dead']
+
+    #
+    # Controls for internal tracker replies
+    #
+    def set_tracker_reannounce_interval(self,value):
+        """ Seconds downloaders should wait between reannouncing themselves
+        to the internal tracker.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_reannounce_interval'] = value
+
+    def get_tracker_reannounce_interval(self):
+        """ Returns the reannounce interval for the internal tracker. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_reannounce_interval']
+
+    def set_tracker_response_size(self,value):
+        """ Number of peers to send to a peer in a reply to its announce
+        at the internal tracker (i.e., in the info message)
+        @param value A number of peers.
+        """
+        self.sessconfig['tracker_response_size'] = value
+
+    def get_tracker_response_size(self):
+        """ Returns the number of peers to send in a tracker reply. 
+        @return A number of peers. """
+        return self.sessconfig['tracker_response_size']
+
+    def set_tracker_nat_check(self,value):
+        """ How many times the internal tracker should attempt to check if a 
+        downloader is behind a Network Address Translator (NAT) or firewall.
+        If it is, the downloader won't be registered at the tracker, as other
+        peers can probably not contact it. 
+        @param value A number of times, 0 = don't check.
+        """
+        self.sessconfig['tracker_nat_check'] = value
+
+    def get_tracker_nat_check(self):
+        """ Returns the number of times to check for a firewall.
+        @return A number of times. """
+        return self.sessconfig['tracker_nat_check']
+
+
+    #
+    # Internal tracker persistence
+    #
+    def set_tracker_dfile(self,value):
+        """ File to store recent downloader info in (default = Session state 
+        dir + STATEDIR_ITRACKER_DIR + tracker.db
+        @param value An absolute path name.
+        """
+        self.sessconfig['tracker_dfile'] = value
+
+    def get_tracker_dfile(self):
+        """ Returns the tracker database file. 
+        @return An absolute path name. """
+        return self.sessconfig['tracker_dfile']
+
+    def set_tracker_dfile_format(self,value):
+        """ Format of the tracker database file. *_PICKLE is needed when Unicode
+        filenames may appear in the tracker's state (=default).
+        <pre>
+         * ITRACKDBFORMAT_BENCODE: Use BitTorrent bencoding to store records.
+         * ITRACKDBFORMAT_PICKLE: Use Python pickling to store records.
+        </pre>
+        @param value ITRACKDBFORMAT_* 
+        """
+        self.sessconfig['tracker_dfile_format'] = value
+
+    def get_tracker_dfile_format(self):
+        """ Returns the format of the tracker database file. 
+        @return ITRACKDBFORMAT_* """
+        return self.sessconfig['tracker_dfile_format']
+
+    def set_tracker_save_dfile_interval(self,value):
+        """ The interval between saving the internal tracker's state to
+        the tracker database (see set_tracker_dfile()).
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_save_dfile_interval'] = value
+
+    def get_tracker_save_dfile_interval(self):
+        """ Returns the tracker-database save interval. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_save_dfile_interval']
+
+    def set_tracker_logfile(self,value):
+        """ File to write the tracker logs to (default is NIL: or /dev/null).
+        @param value A device name.
+        """
+        self.sessconfig['tracker_logfile'] = value
+
+    def get_tracker_logfile(self):
+        """ Returns the device name to write log messages to. 
+        @return A device name. """
+        return self.sessconfig['tracker_logfile']
+
+    def set_tracker_min_time_between_log_flushes(self,value):
+        """ Minimum time between flushes of the tracker log.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_min_time_between_log_flushes'] = value
+
+    def get_tracker_min_time_between_log_flushes(self):
+        """ Returns time between tracker log flushes. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_min_time_between_log_flushes']
+
+    def set_tracker_log_nat_checks(self,value):
+        """ Whether to add entries to the tracker log for NAT-check results.
+        @param value Boolean
+        """
+        self.sessconfig['tracker_log_nat_checks'] = value
+
+    def get_tracker_log_nat_checks(self):
+        """ Returns whether to log NAT-check attempts to the tracker log. 
+        @return Boolean. """
+        return self.sessconfig['tracker_log_nat_checks']
+
+    def set_tracker_hupmonitor(self,value):
+        """ Whether to reopen the tracker log file upon receipt of a SIGHUP 
+        signal (Mac/UNIX only).
+        @param value Boolean.
+        """
+        self.sessconfig['tracker_hupmonitor'] = value
+
+    def get_tracker_hupmonitor(self):
+        """ Returns whether to reopen the tracker log file upon receipt of a 
+        SIGHUP signal. 
+        @return Boolean. """
+        return self.sessconfig['tracker_hupmonitor']
+
+
+    #
+    # Esoteric tracker config parameters 
+    #
+    def set_tracker_socket_timeout(self,value):
+        """ Set timeout for closing connections to trackers.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_socket_timeout'] = value
+
+    def get_tracker_socket_timeout(self):
+        """ Returns the tracker socket timeout. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_socket_timeout']
+
+    def set_tracker_timeout_downloaders_interval(self,value):
+        """ Interval between checks for expired downloaders, i.e., peers
+        no longer in the swarm because they did not reannounce themselves.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_timeout_downloaders_interval'] = value
+
+    def get_tracker_timeout_downloaders_interval(self):
+        """ Returns the number of seconds between checks for expired peers. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_timeout_downloaders_interval']
+
+    def set_tracker_timeout_check_interval(self,value):
+        """ Time to wait between checking if any connections to the internal
+        tracker have timed out.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_timeout_check_interval'] = value
+
+    def get_tracker_timeout_check_interval(self):
+        """ Returns timeout for connections to the internal tracker. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_timeout_check_interval']
+
+    def set_tracker_min_time_between_cache_refreshes(self,value):
+        """ Minimum time before a cache is considered stale and is 
+        flushed.
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_min_time_between_cache_refreshes'] = value
+
+    def get_tracker_min_time_between_cache_refreshes(self):
+        """ Return the minimum time between cache refreshes.
+        @return A number of seconds. """
+        return self.sessconfig['tracker_min_time_between_cache_refreshes']
+
+
+    #
+    # BitTornado's Multitracker feature
+    #
+    def set_tracker_multitracker_enabled(self,value):
+        """ Whether to enable multitracker operation in which multiple
+        trackers are used to register the peers for a specific torrent.
+        @param value Boolean.
+        """
+        self.sessconfig['tracker_multitracker_enabled'] = value
+
+    def get_tracker_multitracker_enabled(self):
+        """ Returns whether multitracking is enabled. 
+        @return Boolean. """
+        return self.sessconfig['tracker_multitracker_enabled']
+
+    def set_tracker_multitracker_allowed(self,value):
+        """ Whether to allow incoming tracker announces.
+        <pre>
+         * ITRACKMULTI_ALLOW_NONE: Don't allow.
+         * ITRACKMULTI_ALLOW_AUTODETECT: Allow for allowed torrents (see set_tracker_allowed_dir())
+         * ITRACKMULTI_ALLOW_ALL: Allow for all. 
+        </pre>
+        @param value ITRACKMULTI_ALLOW_*
+        """
+        self.sessconfig['tracker_multitracker_allowed'] = value
+
+    def get_tracker_multitracker_allowed(self):
+        """ Returns the multitracker allow policy of the internal tracker. 
+        @return ITRACKMULTI_ALLOW_* """
+        return self.sessconfig['tracker_multitracker_allowed']
+
+    def set_tracker_multitracker_reannounce_interval(self,value):
+        """ Seconds between outgoing tracker announces to the other trackers in
+        a multi-tracker setup.
+        @param value A number of seconds. 
+        """
+        self.sessconfig['tracker_multitracker_reannounce_interval'] = value
+
+    def get_tracker_multitracker_reannounce_interval(self):
+        """ Returns the multitracker reannounce interval. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_multitracker_reannounce_interval']
+
+    def set_tracker_multitracker_maxpeers(self,value):
+        """ Number of peers to retrieve from the other trackers in a tracker
+         announce in a multi-tracker setup. 
+         @param value A number of peers.
+         """
+        self.sessconfig['tracker_multitracker_maxpeers'] = value
+
+    def get_tracker_multitracker_maxpeers(self):
+        """ Returns the number of peers to retrieve from another tracker. 
+        @return A number of peers. """
+        return self.sessconfig['tracker_multitracker_maxpeers']
+
+    def set_tracker_aggregate_forward(self,value):
+        """ Set an URL to which, if set, all non-multitracker requests are
+        forwarded, with a password added (optional).
+        @param value A 2-item list with format: [<url>,<password>|None]
+        """
+        self.sessconfig['tracker_aggregate_forward'] = value
+
+    def get_tracker_aggregate_forward(self):
+        """ Returns the aggregate forward URL and optional password as a 2-item 
+        list. 
+        @return A 2-item list with format: [<url>,<password>|None] """
+        return self.sessconfig['tracker_aggregate_forward']
+
+    def set_tracker_aggregator(self,value):
+        """ Whether to act as a data aggregator rather than a tracker. 
+        To enable, set to True or <password>; if password is set, then an 
+        incoming password is required for access.
+        @param value Boolean or string.
+        """
+        self.sessconfig['tracker_aggregator'] = value
+
+    def get_tracker_aggregator(self):
+        """ Returns the tracker aggregator parameter. 
+        @return Boolean or string. """
+        return self.sessconfig['tracker_aggregator']
+
+    def set_tracker_multitracker_http_timeout(self,value):
+        """ Time to wait before assuming that an HTTP connection
+        to another tracker in a multi-tracker setup has timed out. 
+        @param value A number of seconds.
+        """
+        self.sessconfig['tracker_multitracker_http_timeout'] = value
+
+    def get_tracker_multitracker_http_timeout(self):
+        """ Returns timeout for inter-multi-tracker HTTP connections. 
+        @return A number of seconds. """
+        return self.sessconfig['tracker_multitracker_http_timeout']
+
+
+    #
+    # For Tribler superpeer servers
+    #
+    def set_superpeer(self,value):
+        """ Run Session in super peer mode (default = disabled).
+        @param value Boolean.
+        """
+        self.sessconfig['superpeer'] = value
+
+    def get_superpeer(self):
+        """ Returns whether the Session runs in superpeer mode. 
+        @return Boolean. """
+        return self.sessconfig['superpeer']
+
+    def set_superpeer_file(self,value):
+        """ File with addresses of superpeers (default = install_dir+
+        Tribler/Core/superpeer.txt).
+        @param value An absolute path name.
+        """
+        self.sessconfig['superpeer_file'] = value
+
+    def get_superpeer_file(self):
+        """ Returns the superpeer file.
+        @return An absolute path name. """
+        return self.sessconfig['superpeer_file']
+
+    def set_overlay_log(self,value):
+        """ File to log message to in super peer mode (default = No logging)
+        @param value An absolute path name.
+        """
+        self.sessconfig['overlay_log'] = value
+
+    def get_overlay_log(self):
+        """ Returns the file to log messages to or None.
+        @return An absolute path name. """
+        return self.sessconfig['overlay_log']
+
+    def set_coopdlconfig(self,dscfg):
+        """ Sets the DownloadStartupConfig with which to start Downloads
+        when you are asked to help in a cooperative download.
+        """
+        c = dscfg.copy()
+        self.sessconfig['coopdlconfig'] = c.dlconfig # copy internal dict
+        
+    def get_coopdlconfig(self):
+        """ Return the DownloadStartupConfig that is used when helping others
+        in a cooperative download.
+        @return DownloadStartupConfig
+        """
+        dlconfig = self.sessconfig['coopdlconfig']
+        if dlconfig is None:
+            return None
+        else:
+            from BaseLib.Core.DownloadConfig import DownloadStartupConfig 
+            return DownloadStartupConfig(dlconfig)
+        
+
+    #
+    # NAT Puncturing servers information setting
+    #
+    def set_nat_detect(self,value):
+        """ Whether to try to detect the type of Network Address Translator
+        in place.
+        @param value Boolean.
+        """
+        self.sessconfig['nat_detect'] = value
+    
+    def set_puncturing_internal_port(self, puncturing_internal_port):
+        """ The listening port of the puncturing module.
+        @param puncturing_internal_port integer. """
+        self.sessconfig['puncturing_internal_port'] = puncturing_internal_port
+
+    def set_stun_servers(self, stun_servers):
+        """ The addresses of the STUN servers (at least 2)
+        @param stun_servers List of (hostname/ip,port) tuples. """
+        self.sessconfig['stun_servers'] = stun_servers
+
+    def set_pingback_servers(self, pingback_servers):
+        """ The addresses of the pingback servers (at least 1)
+        @param pingback_servers List of (hostname/ip,port) tuples. """
+        self.sessconfig['pingback_servers'] = pingback_servers
+
+    # Puncturing servers information retrieval
+    def get_nat_detect(self):
+        """ Whether to try to detect the type of Network Address Translator
+        in place.
+        @return Boolean
+        """
+        return self.sessconfig['nat_detect']
+    
+    def get_puncturing_internal_port(self):
+        """ Returns the listening port of the puncturing module.
+        @return integer. """
+        return self.sessconfig['puncturing_internal_port']
+
+    def get_stun_servers(self):
+        """ Returns the addresses of the STUN servers.
+        @return List of (hostname/ip,port) tuples. """
+        return self.sessconfig['stun_servers']
+
+    def get_pingback_servers(self):
+        """ Returns the addresses of the pingback servers.
+        @return List of (hostname/ip,port) tuples. """
+        return self.sessconfig['pingback_servers']
+
+    #
+    # Crawler
+    #
+    def set_crawler(self, value):
+        """ Handle crawler messages when received (default = True)
+        @param value Boolean
+        """
+        self.sessconfig['crawler'] = value
+
+    def get_crawler(self):
+        """ Whether crawler messages are processed
+        @return Boolean. """
+        return self.sessconfig['crawler']
+    
+    # 
+    # Local Peer Discovery using IP Multicast
+    #
+    def set_multicast_local_peer_discovery(self,value):
+        """ Set whether the Session tries to detect local peers
+        using a local IP multicast. Overlay swarm (set_overlay()) must
+        be enabled as well.
+        @param value Boolean
+        """
+        self.sessconfig['multicast_local_peer_discovery'] = value
+        
+    def get_multicast_local_peer_discovery(self):
+        """
+        Returns whether local peer discovery is enabled.
+        @return Boolean
+        """
+        return self.sessconfig['multicast_local_peer_discovery']
+
+    #
+    # VoteCast
+    #
+    def set_votecast_recent_votes(self, value):
+        """ Sets the maximum limit for the recent votes by the user, 
+        that will be forwarded to connected peers 
+        @param value int 
+        """
+        self.sessconfig['votecast_recent_votes'] = value 
+
+    def get_votecast_recent_votes(self):
+        """ Returns the maximum limit for the recent votes by the user, 
+        that will be forwarded to connected peers 
+        @return int 
+        """
+        return self.sessconfig['votecast_recent_votes']
+    
+    def set_votecast_random_votes(self, value):
+        """ Sets the maximum limit for the user's votes that are different from recent ones
+        but selected randomly; these votes will be forwarded to connected peers along with recent votes 
+        @param value int 
+        """
+        self.sessconfig['votecast_random_votes'] = value
+
+    def get_votecast_random_votes(self):
+        """ Returns the maximum limit for the user's votes that are different from recent ones
+        but selected randomly; these votes will be forwarded to connected peers along with recent votes 
+        @return int 
+        """        
+        return self.sessconfig['votecast_random_votes']
+
+    #
+    # ChannelCast
+    #
+    def set_channelcast_recent_own_subscriptions(self, value):
+        """ Sets the maximum limit for the recent subscriptions by the user, 
+        that will be forwarded to connected peers 
+        @param value int 
+        """
+        self.sessconfig['channelcast_recent_own_subscriptions'] = value
+
+    def get_channelcast_recent_own_subscriptions(self):
+        """ Returns the maximum limit for the recent subscriptions by the user, 
+        that will be forwarded to connected peers 
+        @return int 
+        """
+        return self.sessconfig['channelcast_recent_own_subscriptions']
+    
+    def set_channelcast_random_own_subscriptions(self, value):
+        """ Sets the maximum limit for the user's subscriptions that are different from recent ones
+        but selected randomly; these subscriptions will be forwarded to connected peers 
+        @param value int 
+        """
+        self.sessconfig['channelcast_random_own_subscriptions'] = value
+
+    def get_channelcast_random_own_subscriptions(self):
+        """ Returns the maximum limit for the user's subscriptions that are different from recent ones
+        but selected randomly; these subscriptions will be forwarded to connected peers 
+        @return int 
+        """
+        return self.sessconfig['channelcast_random_own_subscriptions']
+    
+    #
+    # Subtitle collection via Andrea Reale's extension
+    #
+    def set_subtitles_collecting(self,value):
+        """ Automatically collect subtitles from peers in the network (default = 
+        False).
+        @param value Boolean. 
+        """
+        self.sessconfig['subtitles_collecting'] = value
+
+    def get_subtitles_collecting(self):
+        """ Returns whether to automatically collect subtitles.
+        @return Boolean. """
+        return self.sessconfig['subtitles_collecting']
+
+    def set_subtitles_collecting_dir(self,value):
+        """
+        Where to place collected subtitles? (default is state_dir + 'collected_subtitles_files')
+        @param value An absolute path.
+        """
+        self.sessconfig['subtitles_collecting_dir'] = value
+        
+    def get_subtitles_collecting_dir(self):
+        """ Returns the directory to save collected subtitles.
+        @return An absolute path name. """
+        return self.sessconfig['subtitles_collecting_dir']
+        
+    def set_subtitles_upload_rate(self,value):
+        """ Maximum upload rate to use for subtitles collecting.
+        @param value A rate in KB/s. """
+        self.sessconfig['subtitles_upload_rate'] = value
+    
+    def get_subtitles_upload_rate(self):
+        """ Returns the upload rate to use for subtitle collecting.
+        @return A rate in KB/s. """
+        return self.sessconfig['subtitles_upload_rate']
+
+
+
+class SessionStartupConfig(SessionConfigInterface,Copyable,Serializable):  
+    """ Class to configure a Session """
+    
+    def __init__(self,sessconfig=None):
+        SessionConfigInterface.__init__(self,sessconfig)
+
+    #
+    # Class method
+    #
+    def load(filename):
+        """
+        Load a saved SessionStartupConfig from disk.
+        
+        @param filename  An absolute Unicode filename
+        @return SessionStartupConfig object
+        """
+        # Class method, no locking required
+        f = open(filename,"rb")
+        sessconfig = pickle.load(f)
+        sscfg = SessionStartupConfig(sessconfig)
+        f.close()
+        return sscfg
+    load = staticmethod(load)
+
+    def save(self,filename):
+        """ Save the SessionStartupConfig to disk.
+        @param filename  An absolute Unicode filename
+        """
+        # Called by any thread
+        f = open(filename,"wb")
+        pickle.dump(self.sessconfig,f)
+        f.close()
+
+    #
+    # Copyable interface
+    # 
+    def copy(self):
+        config = copy.copy(self.sessconfig)
+        return SessionStartupConfig(config)
diff --git a/instrumentation/next-share/BaseLib/Core/SocialNetwork/FriendshipMsgHandler.py b/instrumentation/next-share/BaseLib/Core/SocialNetwork/FriendshipMsgHandler.py
new file mode 100644 (file)
index 0000000..672a831
--- /dev/null
@@ -0,0 +1,874 @@
+# Written by Ali Abbas, Arno Bakker
+# see LICENSE.txt for license information
+
+# TODO: either maintain connections to friends always or supplement the
+# list of friends with a number of on-line taste buddies.
+#
# TODO: at least add fifo order to msgs, otherwise clicking 
# "make friend", "delete friend", "make friend" could arrive in wrong order
# due to forwarding.
+#
+
+import threading
+import sys
+import os
+import random
+import cPickle
+from time import time
+from types import DictType
+from traceback import print_exc
+from sets import Set
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.CacheDB.CacheDBHandler import PeerDBHandler, FriendDBHandler
+from BaseLib.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str
+from BaseLib.Core.Utilities.utilities import *
+
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_SEVENTH
+
+DEBUG = False
+
+"""
+State diagram:
+
+NOFRIEND -> I_INVITED or HE_INVITED
+I_INVITED -> APPROVED or HE_DENIED
+HE_INVITED -> APPROVED
+HE_INVITED -> I_DENIED
+
In theory it could happen that he sends a response=1 RESP, in which case
he approved us. I consider that an HE_INVITED.
"""
+
+RESCHEDULE_INTERVAL = 60
+RESEND_INTERVAL = 5*60
+
+
class FriendshipMsgHandler:
    """ Overlay handler for FRIENDSHIP messages: friendship requests,
    responses, and forwarding of undelivered requests via helper peers.
    Singleton; obtain via getInstance(). """
    # Lazily-created singleton instance, protected by the class-level lock.
    __singleton = None
    __lock = threading.Lock()
+
+    @classmethod
+    def getInstance(cls, *args, **kargs):
+        if not cls.__singleton:
+            cls.__lock.acquire()
+            try:
+                if not cls.__singleton:
+                    cls.__singleton = cls(*args, **kargs)
+            finally:
+                cls.__lock.release()
+        return cls.__singleton
+    
    def __init__(self):
        # Enforce singleton usage: construct only via getInstance().
        if FriendshipMsgHandler.__singleton:
            raise RuntimeError, "FriendshipMsgHandler is singleton"
        self.overlay_bridge = None
        # permid -> {msgid -> msgrec} of messages queued for (re)delivery;
        # see save_msg() for the msgrec layout.
        self.currmsgs = {}
        self.online_fsext_peers = Set() # online peers that speak FRIENDSHIP ext
        self.peerdb = PeerDBHandler.getInstance()
        self.frienddb = FriendDBHandler.getInstance()
        self.friendshipStatistics_db = FriendshipStatisticsDBHandler.getInstance()
        self.list_no_of_conn_attempts_per_target= {}
        # Invoked when a friendship request arrives; see register_usercallback().
        self.usercallback = None
+    
    def register(self, overlay_bridge, session):
        """ Attach to the overlay bridge and session, restore queued
        messages from disk and start the periodic reconnect task. """
        if DEBUG:
            print >> sys.stderr, "friendship: register"
        self.overlay_bridge = overlay_bridge
        self.session = session
        try:
            self.load_checkpoint()
        except:
            # Best effort: a missing/corrupt checkpoint must not stop startup.
            print_exc()
        self.overlay_bridge.add_task(self.reschedule_connects,RESCHEDULE_INTERVAL)
+     
+     
    def shutdown(self):
        """
        Delegate all outstanding messages to others
        """
        # Called by OverlayThread
        # Hand unsent messages to helper peers, then persist what remains.
        self.delegate_friendship_making()
        self.checkpoint()
+        
+
    def register_usercallback(self,usercallback):
        """ Set the callback invoked (via the session's user-callback
        mechanism, see process_request()) when a friendship request arrives. """
        self.usercallback = usercallback
+    
    def anythread_send_friendship_msg(self,permid,type,params):
        """ Called when user adds someone from the person found, or by 
        explicitly adding someone with her credentials.
        It establishes overlay connection with the target peer """
        # Called by any thread
        
        # Hop onto the overlay thread; all queue/DB state is touched there.
        olthread_func = lambda:self.send_friendship_msg(permid,type,params,submit=True)
        self.overlay_bridge.add_task(olthread_func,0)
+        
+        
+    def send_friendship_msg(self,permid,type,params,submit=False):
+        # Called by overlay thread 
+        
+        if submit:
+            if DEBUG:
+                print >>sys.stderr,"friendship: send_friendship_msg: Saving msg",show_permid_short(permid)
+            self.save_msg(permid,type,params)
+            
+            if type == F_REQUEST_MSG:
+                # Make him my friend, pending his approval
+                self.frienddb.setFriendState(permid, commit=True,state=FS_I_INVITED)
+            elif type == F_RESPONSE_MSG:
+                # Mark response in DB
+                if params['response']:
+                    state = FS_MUTUAL
+                else:
+                    state = FS_I_DENIED
+                self.frienddb.setFriendState(permid, commit=True,state=state)
+
+        func = lambda exc,dns,permid,selversion:self.fmsg_connect_callback(exc, dns, permid, selversion, type)
+        self.overlay_bridge.connect(permid,self.fmsg_connect_callback)
+        
+        
    def fmsg_connect_callback(self,exc,dns,permid,selversion, type = None):
        """ Callback function for the overlay connect function """
        # Called by OverlayThread

        if exc is None:
            if selversion < OLPROTO_VER_SEVENTH:
                # Peer too old to understand FRIENDSHIP msgs: drop its queue.
                self.remove_msgs_for_ltv7_peer(permid)
                return
            
            # Reached him
            sendlist = self.get_msgs_as_sendlist(targetpermid=permid)
            if DEBUG:
                print >> sys.stderr, 'friendship: fmsg_connect_callback: sendlist len',len(sendlist)
                #print_stack()
            
            for i in range(0,len(sendlist)):
                tuple = sendlist[i]
                
                permid,msgid,msg = tuple
                # NOTE(review): this lambda closes over the loop variable
                # 'msgid'; if overlay_bridge.send() fires the callback
                # asynchronously, all callbacks may observe the msgid of the
                # last iteration — confirm send() callback timing.
                send_callback = lambda exc,permid:self.fmsg_send_callback(exc,permid,msgid)
                
                if DEBUG:
                    print >>sys.stderr,"friendship: fmsg_connect_callback: Sending",`msg`,msgid
                
                mypermid = self.session.get_permid()
                
                # Commit the statistics DB only on the last queued message.
                commit = (i == len(sendlist)-1)
                isForwarder = 0
                no_of_helpers = 0
#                if type == F_REQUEST_MSG:
#                    print
#                elif type == F_RESPONSE_MSG:
#                    print
                #Set forwarder to True and also no of helpers to 10
                if type == F_FORWARD_MSG:
                    isForwarder = 1
                    no_of_helpers = 10
                    
                  
                no_of_attempts = 0
                if permid in self.currmsgs:
                    msgid2rec = self.currmsgs[permid]
                    if msgid in msgid2rec:
                        msgrec = msgid2rec[msgid]
                        no_of_attempts = msgrec['attempt']
                
#                insertFriendshipStatistics(self, my_permid, target_permid, current_time, isForwarder = 0, no_of_attempts = 0, no_of_helpers = 0, commit = True):
                
                self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), 
                                                                         bin2str(permid), 
                                                                         int(time()),
                                                                         isForwarder, 
                                                                         no_of_attempts ,
                                                                         no_of_helpers, 
                                                                         commit=commit)
                
                self.overlay_bridge.send(permid, FRIENDSHIP + bencode(msg), send_callback)
                
                
        else:
            # Connect failed: only record the attempt in the statistics DB;
            # reschedule_connects() retries later.
            if DEBUG:
                peer = self.peerdb.getPeer(permid)
                if peer is None:
                    print >>sys.stderr, 'friendship: Could not connect to peer', show_permid_short(permid),peer
                else:
                    print >>sys.stderr, 'friendship: Could not connect to peer', show_permid_short(permid),peer['name']
                print >>sys.stderr,exc
            
            mypermid = self.session.get_permid()
            
            isForwarder = 0
            no_of_helpers = 0
            if type == F_FORWARD_MSG:
                isForwarder = 1
                no_of_helpers = 10
                    
                 
            no_of_attempts = 0
            if permid in self.currmsgs:
                msgid2rec = self.currmsgs[permid]
                for msgid in msgid2rec:
                    msgrec = msgid2rec[msgid]
                    no_of_attempts = msgrec['attempt']
                
                
            self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), 
                                                                         bin2str(permid), 
                                                                         int(time()),
                                                                         isForwarder, 
                                                                         no_of_attempts ,
                                                                         no_of_helpers)
+
+
    def fmsg_send_callback(self,exc,permid,msgid):
        """ Callback for overlay_bridge.send(): on success remove the
        delivered message from the queue, then record statistics. """
        # If an exception arises
        if exc is None:
            self.delete_msg(permid,msgid)
        else:
            if DEBUG:
                print >> sys.stderr, 'friendship: Could not send to ',show_permid_short(permid)  
                print_exc()
            
        mypermid = self.session.get_permid()
        
        no_of_attempts = 0
        no_of_helpers = 10
        isForwarder = False
        if permid in self.currmsgs:
            msgid2rec = self.currmsgs[permid]
            # NOTE(review): this loop variable shadows the 'msgid' parameter;
            # attempt/forwarder info ends up taken from whichever queued
            # message is iterated last, not necessarily the one just sent.
            for msgid in msgid2rec:
                msgrec = msgid2rec[msgid]
                no_of_attempts = msgrec['attempt']
                if msgrec['forwarded'] == True:
                    isForwarder = 1
            
            
        self.friendshipStatistics_db.insertOrUpdateFriendshipStatistics( bin2str(mypermid), 
                                                                         bin2str(permid), 
                                                                         int(time()),
                                                                         isForwarder, 
                                                                         no_of_attempts ,
                                                                         no_of_helpers)
+                    
+
+    def remove_msgs_for_ltv7_peer(self,permid):
+        """ Remove messages destined for a peer that does not speak >= v7 of
+        the overlay protocol
+        """
+        sendlist = self.get_msgs_as_sendlist(targetpermid=permid)
+        if DEBUG:
+            print >> sys.stderr, 'friendship: remove_msgs_for_ltv7_peer: sendlist len',len(sendlist)
+        
+        for i in range(0,len(sendlist)):
+            tuple = sendlist[i]
+            
+            permid,msgid,msg = tuple
+            self.delete_msg(permid,msgid)
+
+
+    #
+    # Incoming connections
+    #
    def handleConnection(self, exc, permid, selversion, locally_initiated):
        """ Track which online peers speak the FRIENDSHIP extension and try
        to deliver queued messages when one of them (dis)connects.
        @return Always True. """
        if selversion < OLPROTO_VER_SEVENTH:
            return True

        if exc is None:
            self.online_fsext_peers.add(permid)

            # if we meet peer otherwise, dequeue messages
            if DEBUG:
                print >> sys.stderr,"friendship: Met peer, attempting to deliver msgs",show_permid_short(permid)
            
            # If we're initiating the connection from this handler, the 
            # fmsg_connect_callback will get called twice:
            # 1. here
            # 2. just a bit later when the callback for a successful connect()
            #    is called.
            # Solution: we delay this call, which should give 2. the time to
            # run and remove msgs from the queue.
            #
            # Better: remove msgs from queue when sent and reinsert if send fails
            #
            friendship_delay_func = lambda:self.fmsg_connect_callback(None,None,permid,selversion)
            self.overlay_bridge.add_task(friendship_delay_func,4)
        else:
            # Connection closed/failed: peer is no longer online.
            try:
                self.online_fsext_peers.remove(permid)
            except:
                pass
            
        return True        
+
+
+    #
+    # Incoming messages
+    # 
    def handleMessage(self, permid, selversion, message):
        """ Handle incoming Friend Request, and their response"""

        if selversion < OLPROTO_VER_SEVENTH:
            if DEBUG:
                print >> sys.stderr,"friendship: Got FRIENDSHIP msg from peer with old protocol",show_permid_short(permid)
            return False
        
        try:
            # message[0] is the FRIENDSHIP message id; the rest is bencoded.
            d = bdecode(message[1:])
        except:
            print_exc()
            return False
        
        return self.process_message(permid,selversion,d)
+    
+    
    def process_message(self,permid,selversion,d):
        """ Validate a decoded FRIENDSHIP payload and dispatch it to the
        request/response/forward handler.
        @return False when the message is invalid or of unknown type. """
        if self.isValidFriendMsg(d):

            if DEBUG:
                print >> sys.stderr,"friendship: Got FRIENDSHIP msg",d['msg type']
        
            # If the message is to become a friend, i.e., a friendship request
            if d['msg type'] == F_REQUEST_MSG:
                self.process_request(permid,d)                  
                        
            # If the message is to have a response on friend request    
            elif d['msg type'] == F_RESPONSE_MSG: 
                self.process_response(permid,d)
                    
            # If the receiving message is to delegate the Friendship request to the target peer
            elif d['msg type'] == F_FORWARD_MSG:
                return self.process_forward(permid,selversion,d)
            else:
                if DEBUG:
                    print >>sys.stderr,"friendship: Got unknown msg type",d['msg type']
                return False
            
            return True
        else:
            if DEBUG:
                print >>sys.stderr,"friendship: Got bad FRIENDSHIP message"
            return False
+                
    def process_request(self,permid,d):
        """ Handle an incoming friendship request, advancing the friendship
        state machine (see the module docstring) and notifying the user. """
        # to see that the following peer is already a friend, or not
        fs = self.frienddb.getFriendState(permid) 

        if DEBUG:
            print >>sys.stderr,"friendship: process_request: Got request, fs",show_permid_short(permid),fs

        
        if fs == FS_NOFRIEND or fs == FS_HE_DENIED:
            # not on HE_INVITED, to filter out duplicates
            
            # And if that peer is not already added as a friend, either approved, or unapproved
            # call friend dialog
            self.frienddb.setFriendState(permid, commit=True, state = FS_HE_INVITED)
            
            # FUTURE: always do callback, such that we also know about failed
            # attempts
            if self.usercallback is not None:
                friendship_usercallback = lambda:self.usercallback(permid,[])
                self.session.uch.perform_usercallback(friendship_usercallback)
        elif fs == FS_I_INVITED: 
            # In case, requestee is already added as friend, just make this 
            # requestee as an approved friend

            if DEBUG:
                print >>sys.stderr,"friendship: process_request: Got request but I already invited him"
            
            self.frienddb.setFriendState(permid, commit=True, state = FS_MUTUAL)

            if DEBUG:
                print >>sys.stderr,"friendship: process_request: Got request but I already invited him: sending reply"

            self.send_friendship_msg(permid,F_RESPONSE_MSG,{'response':1},submit=True)
        elif fs == FS_MUTUAL:
            if DEBUG:
                print >>sys.stderr,"friendship: process_request: Got request but already approved"
        elif fs == FS_I_DENIED:
            if DEBUG:
                print >>sys.stderr,"friendship: process_request: Got request but I already denied"
        elif DEBUG:
            print >>sys.stderr,"friendship: process_request: Got request, but fs is",fs
+
    def process_response(self,permid,d):
        """ Handle a response to one of our friendship requests: record the
        response time and update the friendship state accordingly. """
        mypermid = self.session.get_permid()
                     
                
        self.friendshipStatistics_db.updateFriendshipResponseTime( bin2str(mypermid), 
                                                                         bin2str(permid), 
                                                                         int(time()))

        
        fs = self.frienddb.getFriendState(permid)
         
        # If the request to add has been approved
        if d['response'] == 1:
            if fs == FS_I_INVITED:
                self.frienddb.setFriendState(permid, commit=True, state = FS_MUTUAL)
            elif fs != FS_MUTUAL:
                # Unsollicited response, consider this an invite, if not already friend
                self.frienddb.setFriendState(permid, commit=True, state = FS_HE_INVITED)
        else:
            # He denied our friendship
            self.frienddb.setFriendState(permid, commit=True, state = FS_HE_DENIED)
+
+                
    def process_forward(self,permid,selversion,d):
        """ Handle a forwarded friendship message: process the wrapped msg if
        we are the destination, otherwise queue it onward to the real target.
        @return False when the forward is illegitimate. """
        mypermid = self.session.get_permid()
        if d['dest']['permid'] == mypermid:
            # This is a forward containing a message meant for me
            
            # First add original sender to DB so we can connect back to it
            self.addPeerToDB(d['source'])
            
            self.process_message(d['source']['permid'],selversion,d['msg'])
            
            return True
        
            
        else:
            # Queue and forward
            if DEBUG:
                print >>sys.stderr,"friendship: process_fwd: Forwarding immediately to",show_permid_short(d['dest']['permid'])

            # Only the original source may ask us to forward.
            if permid != d['source']['permid']:
                if DEBUG:
                    print >>sys.stderr,"friendship: process_fwd: Forwarding: Illegal, source is not sender, and dest is not me"
                return False
            # First add dest to DB so we can connect to it
            
            # FUTURE: don't let just any peer overwrite the IP+port of a peer
            # if self.peer_db.hasPeer(d['dest']['permid']):
            self.addPeerToDB(d['dest'])
            
            self.send_friendship_msg(d['dest']['permid'],d['msg type'],d,submit=True)
            return True
+
+    def addPeerToDB(self,mpeer):
+        peer = {}
+        peer['permid'] = mpeer['permid']
+        peer['ip'] = mpeer['ip']
+        peer['port'] = mpeer['port']
+        peer['last_seen'] = 0
+        self.peerdb.addPeer(mpeer['permid'],peer,update_dns=True,commit=True)
+        
+
    def create_friendship_msg(self,type,params):
        """ Build the wire dict for a friendship message.
        @param type F_REQUEST_MSG, F_RESPONSE_MSG or F_FORWARD_MSG.
        @param params For RESPONSE: {'response':0|1}; for FORWARD:
            {'destpermid':..., 'msg':...}.
        @return The message dict, or None for a FORWARD whose destination
            peer's IP+port is unknown (callers must handle None). """
        if DEBUG:
            print >>sys.stderr,"friendship: create_fs_msg:",type,`params`
        
        mypermid = self.session.get_permid()
        myip = self.session.get_external_ip()
        myport = self.session.get_listen_port()
        
        d ={'msg type':type}     
        if type == F_RESPONSE_MSG:
            d['response'] = params['response']
        elif type == F_FORWARD_MSG:
            
            if DEBUG:
                print >>sys.stderr,"friendship: create: fwd: params",`params`
            peer = self.peerdb.getPeer(params['destpermid']) # ,keys=['ip', 'port']) 
            if peer is None:
                if DEBUG:
                    print >> sys.stderr, "friendship: create msg: Don't know IP + port of peer", show_permid_short(params['destpermid'])
                return
            #if DEBUG:
            #    print >> sys.stderr, "friendship: create msg: Peer at",peer
            
            # FUTURE: add signatures on ip+port
            src = {'permid':mypermid,'ip':myip,'port':myport}
            dst = {'permid':params['destpermid'],'ip':str(peer['ip']),'port':peer['port']}
            d.update({'source':src,'dest':dst,'msg':params['msg']})
        return d
+
+
+
+    def isValidFriendMsg(self,d):
+
+        if DEBUG:
+            print >>sys.stderr,"friendship: msg: payload is",`d`    
+
+        
+        if type(d) != DictType:
+            if DEBUG:
+                print >>sys.stderr,"friendship: msg: payload is not bencoded dict"    
+            return False
+        if not 'msg type' in d:
+            if DEBUG:
+                print >>sys.stderr,"friendship: msg: dict misses key",'msg type'    
+            return False
+        
+        if d['msg type'] == F_REQUEST_MSG:
+            keys = d.keys()[:]
+            if len(keys)-1 != 0:
+                if DEBUG:
+                    print >>sys.stderr,"friendship: msg: REQ: contains superfluous keys",keys    
+                return False
+            return True
+            
+        if d['msg type'] == F_RESPONSE_MSG:
+            if (d.has_key('response') and (d['response'] == 1 or d['response'] == 0)):
+                return True
+            else:
+                if DEBUG:
+                    print >>sys.stderr,"friendship: msg: RESP: something wrong",`d`    
+                return False
+            
+        if d['msg type'] == F_FORWARD_MSG:
+            if not self.isValidPeer(d['source']):
+                if DEBUG:
+                    print >>sys.stderr,"friendship: msg: FWD: source bad",`d`    
+                return False
+            if not self.isValidPeer(d['dest']):
+                if DEBUG:
+                    print >>sys.stderr,"friendship: msg: FWD: dest bad",`d`    
+                return False
+            if not 'msg' in d:
+                if DEBUG:
+                    print >>sys.stderr,"friendship: msg: FWD: no msg",`d`    
+                return False
+            if not self.isValidFriendMsg(d['msg']):
+                if DEBUG:
+                    print >>sys.stderr,"friendship: msg: FWD: bad msg",`d`    
+                return False
+            if d['msg']['msg type'] == F_FORWARD_MSG:
+                if DEBUG:
+                    print >>sys.stderr,"friendship: msg: FWD: cannot contain fwd",`d`    
+                return False
+            return True
+        
+        return False
+
+
+    def isValidPeer(self,d):
+        if (d.has_key('ip') and d.has_key('port') and d.has_key('permid') 
+            and validPermid(d['permid'])
+            and validIP(d['ip'])and validPort(d['port'])):
+            return True
+        else:
+            return False                 
+
+
    def save_msg(self,permid,type,params):
        """ Queue a friendship message for delivery/retry; keyed by a fresh
        random message id. """
        if not permid in self.currmsgs:
            self.currmsgs[permid] = {}
        
        mypermid = self.session.get_permid()    
        now = time()
        attempt = 1
        
        # Unique, unguessable msg id.
        # NOTE(review): 'sha' is not imported explicitly here; presumably it
        # comes in via the utilities wildcard import — confirm.
        base = mypermid+permid+str(now)+str(random.random())
        msgid = sha(base).hexdigest()
        # msgrec layout consumed by calc_eta()/get_msgs_as_sendlist() et al.
        msgrec = {'permid':permid,'type':type,'params':params,'attempt':attempt,'t':now,'forwarded':False}
        
        msgid2rec = self.currmsgs[permid]
        msgid2rec[msgid] = msgrec
+        
    def delete_msg(self,permid,msgid):
        """ Remove a queued message; silently ignores unknown ids (a message
        may already have been delivered or timed out). """
        try: 
            if DEBUG:
                print >>sys.stderr,"friendship: Deleting msg",show_permid_short(permid),msgid
            msgid2rec = self.currmsgs[permid]
            del msgid2rec[msgid]
        except:
            # Intentional best-effort: missing entries are not an error.
            #print_exc()
            pass
+
    def set_msg_forwarded(self,permid,msgid):
        """ Mark a queued message as handed to a helper peer, so it is not
        delegated again (see get_msgs_as_fwd_sendlist()). """
        try: 
            msgid2rec = self.currmsgs[permid]
            msgid2rec[msgid]['forwarded'] = True
        except:
            print_exc()
+
    def reschedule_connects(self):
        """ This function is run periodically and reconnects to peers when
        messages meant for it are due to be retried
        """
        now = time()
        delmsgids = []
        reconnectpermids = Set()
        for permid in self.currmsgs:
            msgid2rec = self.currmsgs[permid]
            for msgid in msgid2rec:
                msgrec = msgid2rec[msgid]

                # eta is the next-retry time, or None when the msg expired.
                eta = self.calc_eta(msgrec)
                
                if DEBUG:
                    diff = None
                    if eta is not None:
                        diff = eta - now
                        
                    # NOTE(review): this nested 'if DEBUG' is redundant — we
                    # are already inside an 'if DEBUG' block.
                    if DEBUG:
                        peer = self.peerdb.getPeer(permid)
                        if peer is None:
                            print >>sys.stderr,"friendship: reschedule: ETA: wtf, peer not in DB!",show_permid_short(permid)
                        else:
                            print >>sys.stderr,"friendship: reschedule: ETA",show_permid_short(permid),peer['name'],diff
                
                if eta is None: 
                    delmsgids.append((permid,msgid))
                elif now > eta-1.0: # -1 for round off
                    # reconnect
                    reconnectpermids.add(permid)
                    msgrec['attempt'] = msgrec['attempt'] + 1
                    
                    # Delegate 
                    if msgrec['type'] == F_REQUEST_MSG and msgrec['attempt'] == 2:
                        self.delegate_friendship_making(targetpermid=permid,targetmsgid=msgid)
        
        # Remove timed out messages
        for permid,msgid in delmsgids:
            if DEBUG:
                print >>sys.stderr,"friendship: reschedule: Deleting",show_permid_short(permid),msgid
            self.delete_msg(permid,msgid)
        
        # Initiate connections to peers for which we have due messages    
        for permid in reconnectpermids:
            if DEBUG:
                print >>sys.stderr,"friendship: reschedule: Reconnect to",show_permid_short(permid)

            self.overlay_bridge.connect(permid,self.fmsg_connect_callback)
        
        # Reschedule this periodic task
        self.overlay_bridge.add_task(self.reschedule_connects,RESCHEDULE_INTERVAL)
+        
+        
+    def calc_eta(self,msgrec):
+        if msgrec['type'] == F_FORWARD_MSG:
+            if msgrec['attempt'] >= 10:
+                # Stop trying to forward after a given period 
+                return None
+            # exponential backoff, on 10th attempt we would wait 24hrs
+            eta = msgrec['t'] + pow(3.116,msgrec['attempt'])
+        else:
+            if msgrec['attempt'] >= int(7*24*3600/RESEND_INTERVAL):
+                # Stop trying to forward after a given period = 1 week 
+                return None
+
+            eta = msgrec['t'] + msgrec['attempt']*RESEND_INTERVAL 
+        return eta
+        
+
    def get_msgs_as_sendlist(self,targetpermid=None):
        """ Materialize queued messages as (permid,msgid,wire-msg) tuples.
        @param targetpermid Restrict to one peer, or None for all peers. """
        sendlist = []
        if targetpermid is None:
            permids = self.currmsgs.keys()
        else:
            permids = [targetpermid] 
        
        for permid in permids:
            msgid2rec = self.currmsgs.get(permid,{})
            for msgid in msgid2rec:
                msgrec = msgid2rec[msgid]
                
                if DEBUG:
                    print >>sys.stderr,"friendship: get_msgs: Creating",msgrec['type'],`msgrec['params']`,msgid
                # Forwards are stored ready-to-send; others are built now.
                if msgrec['type'] == F_FORWARD_MSG:
                    msg = msgrec['params']
                else:
                    msg = self.create_friendship_msg(msgrec['type'],msgrec['params'])
                tuple = (permid,msgid,msg)
                sendlist.append(tuple)
        return sendlist
+
+
    def get_msgs_as_fwd_sendlist(self,targetpermid=None,targetmsgid=None):
        """ Like get_msgs_as_sendlist(), but each message is wrapped in a
        F_FORWARD_MSG for delegation to helper peers.
        @param targetpermid Restrict to one peer, or None for all.
        @param targetmsgid  Restrict to one message id, or None for all. """
        sendlist = []
        if targetpermid is None:
            permids = self.currmsgs.keys()
        else:
            permids = [targetpermid] 
        
        for permid in permids:
            msgid2rec = self.currmsgs.get(permid,{})
            for msgid in msgid2rec:
                if targetmsgid is None or msgid == targetmsgid:
                    msgrec = msgid2rec[msgid]
                    if msgrec['type'] != F_FORWARD_MSG and msgrec['forwarded'] == False:
                        # Don't forward forwards, or messages already forwarded
                    
                        # Create forward message for original
                        params = {}
                        params['destpermid'] = permid
                        params['msg'] = self.create_friendship_msg(msgrec['type'],msgrec['params'])
                    
                        msg = self.create_friendship_msg(F_FORWARD_MSG,params)
                        tuple = (permid,msgid,msg)
                        sendlist.append(tuple)
        return sendlist
+        
+
+                
+    def delegate_friendship_making(self,targetpermid=None,targetmsgid=None):
+        if DEBUG:
+            print >>sys.stderr,"friendship: delegate:",show_permid_short(targetpermid),targetmsgid
+
+        # 1. See if there are undelivered msgs
+        sendlist = self.get_msgs_as_fwd_sendlist(targetpermid=targetpermid,targetmsgid=targetmsgid)
+        if DEBUG:
+            print >>sys.stderr,"friendship: delegate: Number of messages queued",len(sendlist)
+        
+        if len(sendlist) == 0:
+            return
+  
+        # 2. Get friends, not necess. online
+        friend_permids = self.frienddb.getFriends()
+        
+        if DEBUG:
+            l = len(friend_permids)
+            print >>sys.stderr,"friendship: delegate: friend helpers",l
+            for permid in friend_permids:
+                print >>sys.stderr,"friendship: delegate: friend helper",show_permid_short(permid)
+        
+        # 3. Sort online peers on similarity, highly similar should be tastebuddies
+        if DEBUG:
+            print >>sys.stderr,"friendship: delegate: Number of online v7 peers",len(self.online_fsext_peers)
+        tastebuddies = self.peerdb.getPeers(list(self.online_fsext_peers),['similarity','name']) 
+        tastebuddies.sort(sim_desc_cmp)
+
+        if DEBUG:
+            print >>sys.stderr,"friendship: delegate: Sorted tastebuddies",`tastebuddies`
+
+        tastebuddies_permids = []
+        size = min(10,len(tastebuddies))
+        for i in xrange(0,size):
+            peer = tastebuddies[i]
+            if DEBUG:
+                print >>sys.stderr,"friendship: delegate: buddy helper",show_permid_short(peer['permid'])
+            tastebuddies_permids.append(peer['permid'])
+
+        # 4. Create list of helpers:
+        #
+        # Policy: Helpers are a mix of friends and online tastebuddies
+        # with 70% friends (if avail) and 30% tastebuddies
+        #
+        # I chose this policy because friends are not guaranteed to be online
+        # and waiting to see if we can connect to them before switching to
+        # the online taste buddies is complex code-wise and time-consuming.
+        # We don't have a lot of time when this thing is called by Session.shutdown()
+        #
+        nwant = 10
+        nfriends = int(nwant * .7)
+        nbuddies = int(nwant * .3)
+        
+        part1 = sampleorlist(friend_permids,nfriends)
+        fill = nfriends-len(part1) # if no friends, use tastebuddies
+        part2 = sampleorlist(tastebuddies_permids,nbuddies+fill)
+        helpers = part1 + part2
+
+        if DEBUG:
+            l = len(helpers)
+            print >>sys.stderr,"friendship: delegate: end helpers",l
+            for permid in helpers:
+                print >>sys.stderr,"friendship: delegate: end helper",show_permid_short(permid),self.frienddb.getFriendState(permid),self.peerdb.getPeers([permid],['similarity','name'])
+
+
+        for tuple in sendlist:
+            destpermid,msgid,msg = tuple
+            for helperpermid in helpers:
+                if destpermid != helperpermid:
+                    connect_callback = lambda exc,dns,permid,selversion:self.forward_connect_callback(exc,dns,permid,selversion,destpermid,msgid,msg)
+                    
+                    if DEBUG:
+                        print >>sys.stderr,"friendship: delegate: Connecting to",show_permid_short(helperpermid)
+                     
+                    self.overlay_bridge.connect(helperpermid, connect_callback)
+
+
    def forward_connect_callback(self,exc,dns,permid,selversion,destpermid,msgid,msg):
        """ Connect-callback for a helper peer: on success send it the
        F_FORWARD_MSG; helpers on an old protocol are skipped silently. """
        if exc is None:
            
            if selversion < OLPROTO_VER_SEVENTH:
                return
            
            send_callback = lambda exc,permid:self.forward_send_callback(exc,permid,destpermid,msgid)
            if DEBUG:
                print >>sys.stderr,"friendship: forward_connect_callback: Sending",`msg`
            self.overlay_bridge.send(permid, FRIENDSHIP + bencode(msg), send_callback)
        elif DEBUG:
            print >>sys.stderr,"friendship: forward: Could not connect to helper",show_permid_short(permid)
+
+
+    def forward_send_callback(self,exc,permid,destpermid,msgid):
+        if DEBUG:
+            if exc is None:
+                if DEBUG:
+                    print >>sys.stderr,"friendship: forward: Success forwarding to helper",show_permid_short(permid)
+                self.set_msg_forwarded(destpermid,msgid)
+            else:
+                if DEBUG:
+                    print >>sys.stderr,"friendship: forward: Failed to forward to helper",show_permid_short(permid)
+        
    def checkpoint(self):
        """ Atomically persist the queued messages: write to a temp file in
        the state dir, then rename over the final file. """
        statedir = self.session.get_state_dir()
        newfilename = os.path.join(statedir,'new-friendship-msgs.pickle')
        finalfilename = os.path.join(statedir,'friendship-msgs.pickle')
        try:
            f = open(newfilename,"wb")
            cPickle.dump(self.currmsgs,f)
            f.close()
            # Remove target first: os.rename fails on Windows when the
            # destination already exists.
            try:
                os.remove(finalfilename)
            except:
                # If first time, it doesn't exist
                print_exc()
            os.rename(newfilename,finalfilename)
        except:
            print_exc()
+        
+    def load_checkpoint(self):
+        statedir = self.session.get_state_dir()
+        finalfilename = os.path.join(statedir,'friendship-msgs.pickle')
+        try:
+            f = open(finalfilename,"rb")
+            self.currmsgs = cPickle.load(f)
+        except:
+            print >>sys.stderr, "friendship: could not read previous messages from", finalfilename
+
+        # Increase # attempts till current time
+        now = time()
+        for permid in self.currmsgs:
+            msgid2rec = self.currmsgs[permid]
+            for msgid in msgid2rec:
+                msgrec = msgid2rec[msgid]
+                diff = now - msgrec['t']
+                a = int(diff/RESEND_INTERVAL)
+                a += 1
+                if DEBUG:
+                    print >>sys.stderr,"friendship: load_checkp: Changing #attempts from",msgrec['attempt'],a
+                msgrec['attempt'] = a
+
+        
def sim_desc_cmp(peera,peerb):
    """ cmp()-style comparator ordering peer dicts by 'similarity',
    highest first (descending sort). """
    sa = peera['similarity']
    sb = peerb['similarity']
    # Returns 1 when a should sort after b, -1 before, 0 when equal.
    return (sa < sb) - (sa > sb)
+    
def sampleorlist(z,k):
    """ Return k randomly chosen elements of z, or z itself when it has
    fewer than k elements.
    @param z A sequence of candidates.
    @param k Number of elements wanted.
    @return A list of k elements, or z unchanged when len(z) < k. """
    if len(z) < k:
        return z
    else:
        # The original called random.sample(k) without the population
        # argument, which raised TypeError whenever this branch ran.
        return random.sample(z,k)
diff --git a/instrumentation/next-share/BaseLib/Core/SocialNetwork/OverlapMsgHandler.py b/instrumentation/next-share/BaseLib/Core/SocialNetwork/OverlapMsgHandler.py
new file mode 100644 (file)
index 0000000..615a63f
--- /dev/null
@@ -0,0 +1,277 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+from time import time
+from traceback import print_exc
+
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.Utilities.utilities import *
+from BaseLib.Core.Utilities.unicode import str2unicode
+
+DEBUG = False
+
+MIN_OVERLAP_WAIT = 12.0*3600.0 # half a day in seconds
+
+ICON_MAX_SIZE = 10*1024
+
class OverlapMsgHandler:
    """ Implements the SOCIAL_OVERLAP overlay exchange: when an overlay
    connection is established, peers swap personal info (nickname plus an
    optional mugshot icon) so each side can update its peer database.
    An exchange happens at most once per MIN_OVERLAP_WAIT per peer. """
    
    def __init__(self):
        
        # permid -> time of last overlap exchange; pruned by clean_recentpeers()
        self.recentpeers = {}

    def register(self, overlay_bridge, launchmany):
        """ Late initialization: grab session and database handles from
        launchmany; must be called before any message handling. """
        if DEBUG:
            print >> sys.stderr,"socnet: bootstrap: overlap"
        self.mypermid = launchmany.session.get_permid()
        self.session = launchmany.session
        self.peer_db = launchmany.peer_db 
        self.superpeer_db = launchmany.superpeer_db
        self.overlay_bridge = overlay_bridge

    #
    # Incoming SOCIAL_OVERLAP
    # 
    def recv_overlap(self,permid,message,selversion):
        """ Handle an incoming SOCIAL_OVERLAP message. Returns False on a
        malformed payload (signals a bad connection), True otherwise. """
        # 1. Check syntax
        try:
            oldict = bdecode(message[1:])
        except:
            print_exc()
            if DEBUG:
                print >> sys.stderr,"socnet: SOCIAL_OVERLAP: error becoding"
            return False

        if not isValidDict(oldict,permid):
            return False

        # 2. Process
        self.process_overlap(permid,oldict)
        return True

    def process_overlap(self,permid,oldict):
        """ Store the received personal info for a known peer and, if we
        have not talked to this peer recently, reply with our own info.
        NOTE(review): step numbering (1,3,6) suggests steps were removed
        from an earlier version of this protocol. """
        #self.print_hashdict(oldict['hashnetwork'])

        # 1. Clean recently contacted admin
        self.clean_recentpeers()

        # 3. Save persinfo + hrwidinfo + ipinfo
        if self.peer_db.hasPeer(permid):
            save_ssocnet_peer(self,permid,oldict,False,False,False)
        elif DEBUG:
            print >> sys.stderr,"socnet: overlap: peer unknown?! Weird, we just established connection"

        # 6. Reply
        if not (permid in self.recentpeers.keys()):
            self.recentpeers[permid] = time()
            self.reply_to_overlap(permid)

    def clean_recentpeers(self):
        """ Drop recentpeers entries older than MIN_OVERLAP_WAIT. """
        newdict = {}
        for permid2,t in self.recentpeers.iteritems():
            if (t+MIN_OVERLAP_WAIT) > time():
                newdict[permid2] = t
            #elif DEBUG:
            #    print >> sys.stderr,"socnet: overlap: clean recent: not keeping",show_permid_short(permid2)
                
        self.recentpeers = newdict

    def reply_to_overlap(self,permid):
        """ Send our own persinfo to permid. """
        oldict = self.create_oldict()
        self.send_overlap(permid,oldict)

    #
    # At overlay-connection establishment time.
    #
    def initiate_overlap(self,permid,locally_initiated):
        """ Called when an overlay connection comes up. Only the locally
        initiating side sends the first SOCIAL_OVERLAP, so the exchange
        happens exactly once; superpeers and recently contacted peers are
        skipped. """
        self.clean_recentpeers()
        if not (permid in self.recentpeers.keys() or permid in self.superpeer_db.getSuperPeers()):
            if locally_initiated:
                # Make sure only one sends it
                self.recentpeers[permid] = time()
                self.reply_to_overlap(permid)
            elif DEBUG:
                print >> sys.stderr,"socnet: overlap: active: he should initiate"
        elif DEBUG:
            print >> sys.stderr,"socnet: overlap: active: peer recently contacted already"

    #
    # General
    #
    def create_oldict(self):
        """
        Send:
        * Personal info: name, picture, rwidhashes
        * IP info: IP + port
        Both are individually signed by us so dest can safely 
        propagate. We distinguish between what a peer said
        is his IP+port and the information obtained from the network
        or from other peers (i.e. BUDDYCAST)

        NOTE(review): despite the text above, this version only includes
        'persinfo' (name + optional icon); no IP info or signatures here.
        """

        nickname = self.session.get_nickname().encode("UTF-8")
        persinfo = {'name':nickname}
        # See if we can find icon
        iconmime, icondata = self.session.get_mugshot()
        if icondata:
            persinfo.update({'icontype':iconmime, 'icondata':icondata})
        
        oldict = {}
        oldict['persinfo'] = persinfo

        #print >> sys.stderr, 'Overlap: Sending oldict: %s' % `oldict`
                            
        #if DEBUG:
        #    print >> sys.stderr,"socnet: overlap: active: sending hashdict"
        #    self.print_hashdict(oldict['hashnetwork'])

        return oldict


    def send_overlap(self,permid,oldict):
        """ Bencode oldict and send it as SOCIAL_OVERLAP over the already
        established overlay connection; failures are only logged. """
        try:
            body = bencode(oldict)
            ## Optimization: we know we're currently connected
            self.overlay_bridge.send(permid, SOCIAL_OVERLAP + body,self.send_callback)
        except:
            if DEBUG:
                print_exc(file=sys.stderr)

    
    def send_callback(self,exc,permid):
        """ Completion callback for send_overlap; just logs failures. """
        if exc is not None:
            if DEBUG:
                print >> sys.stderr,"socnet: SOCIAL_OVERLAP: error sending to",show_permid_short(permid),exc
+
+    #
+    # Internal methods
+    #
+
+
def isValidDict(oldict,source_permid):
    """ Validate the outer SOCIAL_OVERLAP payload: must be a dict whose only
    key is 'persinfo', holding a valid (unsigned) persinfo dict.
    source_permid is accepted for interface compatibility but unused. """
    if not isinstance(oldict, dict):
        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_OVERLAP: not a dict"
        return False

    keys = oldict.keys()
    if DEBUG:
        print >> sys.stderr,"socnet: SOCIAL_OVERLAP: keys",keys

    # 'persinfo' is mandatory and must itself validate (unsigned variant).
    if 'persinfo' not in keys or not isValidPersinfo(oldict['persinfo'],False):
        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_OVERLAP: key 'persinfo' missing or value wrong type in dict"
        return False

    # Any key other than 'persinfo' rejects the whole payload.
    for key in keys:
        if key != 'persinfo':
            if DEBUG:
                print >> sys.stderr,"socnet: SOCIAL_OVERLAP: unknown key",key,"in dict"
            return False

    return True
+
+
+
def isValidPersinfo(persinfo,signed):
    """ Validate a 'persinfo' dict: mandatory 'name' (str), an optional
    icontype/icondata pair (both present or both absent), and - when
    signed is True - a mandatory integer 'insert_time'. Any unknown key
    rejects the whole dict. """
    if not isinstance(persinfo,dict):
        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_*: persinfo: not a dict"
        return False

    k = persinfo.keys()
    #print >> sys.stderr,"socnet: SOCIAL_*: persinfo: keys are",k
    if not ('name' in k) or not isinstance(persinfo['name'],str):
        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_*: persinfo: key 'name' missing or value wrong type"
        return False

    if 'icontype' in k and not isValidIconType(persinfo['icontype']):
        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_*: persinfo: key 'icontype' value wrong type"
        return False

    if 'icondata' in k and not isValidIconData(persinfo['icondata']):
        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_*: persinfo: key 'icondata' value wrong type"
        return False

    # icontype and icondata must appear together or not at all.
    if ('icontype' in k and not ('icondata' in k)) or ('icondata' in k and not ('icontype' in k)):
        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_*: persinfo: key 'icontype' without 'icondata' or vice versa"
        return False

    if signed:
        if not ('insert_time' in k) or not isinstance(persinfo['insert_time'],int):
            if DEBUG:
                print >> sys.stderr,"socnet: SOCIAL_*: persinfo: key 'insert_time' missing or value wrong type"
            return False

    # Whitelist check. NOTE(review): 'insert_time' is allowed even in the
    # unsigned variant - presumably intentional; verify against senders.
    for key in k:
        if key not in ['name','icontype','icondata','insert_time']:
            if DEBUG:
                print >> sys.stderr,"socnet: SOCIAL_*: persinfo: unknown key",key,"in dict"
            return False

    return True
+
+
def isValidIconType(type):
    """ MIME-type := type "/" subtype ...
    Accept only strings containing exactly one '/' separator. """
    # Exactly one '/' is equivalent to find('/') == rfind('/') != -1.
    return isinstance(type,str) and type.count('/') == 1
+
def isValidIconData(data):
    """ Icon payload must be a raw string of at most ICON_MAX_SIZE bytes;
    the size cap keeps overlap messages small. """
    return isinstance(data,str) and len(data) <= ICON_MAX_SIZE
+
+
+
def save_ssocnet_peer(self,permid,record,persinfo_ignore,hrwidinfo_ignore,ipinfo_ignore):
    """ This function is used by both BootstrapMsgHandler and 
        OverlapMsgHandler, and uses their database pointers. Hence the self 
        parameter. persinfo_ignore and ipinfo_ignore are booleans that
        indicate whether to ignore the personal info, resp. ip info in
        this record, because they were unsigned in the message and
        we already received signed versions before.

        NOTE(review): in this version only the personal info is processed;
        hrwidinfo_ignore and ipinfo_ignore are accepted but unused here.
    """
    # Never store a record about ourselves.
    if permid == self.mypermid:
        return
    
    # 1. Save persinfo
    if not persinfo_ignore:
        persinfo = record['persinfo']
        
        if DEBUG:
            print >>sys.stderr,"socnet: Got persinfo",persinfo.keys()
            if len(persinfo.keys()) > 1:
                print >>sys.stderr,"socnet: Got persinfo THUMB THUMB THUMB THUMB"
        
        # Arno, 2008-08-22: to avoid UnicodeDecode errors when commiting 
        # on sqlite
        name = str2unicode(persinfo['name'])

        if DEBUG:
            print >> sys.stderr,"socnet: SOCIAL_OVERLAP",show_permid_short(permid),`name`
        
        # Insert-or-update the peer's name.
        if self.peer_db.hasPeer(permid):
            self.peer_db.updatePeer(permid, name=name)
        else:
            self.peer_db.addPeer(permid,{'name':name})
    
        # b. Save icon
        if 'icontype' in persinfo and 'icondata' in persinfo: 
            if DEBUG:
                print >> sys.stderr,"socnet: saving icon for",show_permid_short(permid),`name`
            self.peer_db.updatePeerIcon(permid, persinfo['icontype'],persinfo['icondata'])    
diff --git a/instrumentation/next-share/BaseLib/Core/SocialNetwork/RemoteQueryMsgHandler.py b/instrumentation/next-share/BaseLib/Core/SocialNetwork/RemoteQueryMsgHandler.py
new file mode 100644 (file)
index 0000000..c56e1f5
--- /dev/null
@@ -0,0 +1,728 @@
+# Written by Arno Bakker, Jie Yang
+# see LICENSE.txt for license information
+#
+# Send free-form queries to all the peers you are connected to.
+#
+# TODO: make sure we return also items from download history, but need to verify if 
+# their status is still checked.
+#
+#
+
+import os
+import sys
+import re
+from time import time
+from sets import Set
+from traceback import print_stack, print_exc
+import datetime
+import time as T
+
+from M2Crypto import Rand
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str, str2bin
+from BaseLib.Core.CacheDB.CacheDBHandler import ChannelCastDBHandler,PeerDBHandler
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BuddyCast.moderationcast_util import *
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_SIXTH, OLPROTO_VER_NINETH, OLPROTO_VER_ELEVENTH, OLPROTO_VER_TWELFTH, OLPROTO_VER_THIRTEENTH
+from BaseLib.Core.Utilities.utilities import show_permid_short,show_permid
+from BaseLib.Core.Statistics.Logger import OverlayLogger
+from BaseLib.Core.Utilities.unicode import dunno2unicode
+from BaseLib.Core.Search.SearchManager import split_into_keywords
+
+MAX_RESULTS = 20
+QUERY_ID_SIZE = 20
+MAX_QUERY_REPLY_LEN = 100*1024    # 100K
+MAX_PEERS_TO_QUERY = 20
+
+DEBUG = False
+
class FakeUtility:
    """ Minimal stand-in for the GUI Utility object; only provides the
    config-path accessor needed by this module's collaborators. """
    
    def __init__(self,config_path):
        # Directory holding the session's configuration.
        self.config_path = config_path
        
    def getConfigPath(self):
        """ Return the path given to __init__, unchanged. """
        return self.config_path
+
+
+class RemoteQueryMsgHandler:
+    
+    __single = None
+    
    def __init__(self):
        """ Singleton constructor; use getInstance() instead of calling
        this directly. """
        if RemoteQueryMsgHandler.__single:
            raise RuntimeError, "RemoteQueryMsgHandler is singleton"
        RemoteQueryMsgHandler.__single = self

        self.connections = {}    # only connected remote_search_peers -> selversion
        self.query_ids2rec = {}    # ARNOCOMMENT: TODO: purge old entries...
        self.overlay_log = None
        self.registered = False    # set True by register()
        self.logfile = None 
+
    def getInstance(*args, **kw):
        """ Return the process-wide singleton, creating it on first use. """
        if RemoteQueryMsgHandler.__single is None:
            RemoteQueryMsgHandler(*args, **kw)
        return RemoteQueryMsgHandler.__single
    getInstance = staticmethod(getInstance)
+        
+
    def register(self,overlay_bridge,launchmany,config,bc_fac,log=''):
        """ Late initialization: wire up the overlay bridge, session,
        databases and (when log is non-empty) the overlay logger. Must be
        called before any message handling. """
        if DEBUG:
            print >> sys.stderr,"rquery: register"
        self.overlay_bridge = overlay_bridge
        self.session =  launchmany.session
        self.torrent_db = launchmany.torrent_db
        self.peer_db = launchmany.peer_db
        self.channelcast_db = launchmany.channelcast_db
        # debug
        # self.superpeer_db = launchmany.superpeer_db
        
        self.config = config
        self.bc_fac = bc_fac # May be None
        if log:
            self.overlay_log = OverlayLogger.getInstance(log)
        self.torrent_dir = os.path.abspath(self.config['torrent_collecting_dir'])
        self.registered = True
        
        # 14-04-2010, Andrea: limit the size of channel query results.
        # see create_channel_query_reply (here) and process_query_reply
        # for other details. (The whole thing is done to avoid freezes in the GUI
        # when there are too many results)
        self.max_channel_query_results = self.config['max_channel_query_results']
+        
+        
+
+    #
+    # Incoming messages
+    # 
    def handleMessage(self,permid,selversion,message):
        """ Overlay-message dispatch for QUERY / QUERY_REPLY.
        Returns False to signal a bad message (closes the connection). """
        if not self.registered:
            return True
        
        t = message[0]
        if t == QUERY:
            if DEBUG:
                print >> sys.stderr,"rquery: Got QUERY",len(message)
            return self.recv_query(permid,message,selversion)
        if t == QUERY_REPLY:
            if DEBUG:
                print >> sys.stderr,"rquery: Got QUERY_REPLY",len(message)
            return self.recv_query_reply(permid,message,selversion)
        else:
            # This 'else' pairs with the QUERY_REPLY 'if'; the QUERY branch
            # above has already returned.
            if DEBUG:
                print >> sys.stderr,"rquery: UNKNOWN OVERLAY MESSAGE", ord(t)
            return False
+
+    #
+    # Incoming connections
+    #
    def handleConnection(self,exc,permid,selversion,locally_initiated):
        """ Track overlay connections that can do remote search (protocol
        version >= OLPROTO_VER_SIXTH). exc is None on connect and an
        exception on disconnect. Always returns True (keep connection). """
        if not self.registered:
            return True
        
        if DEBUG:
            print >> sys.stderr,"rquery: handleConnection",exc,"v",selversion,"local",locally_initiated, ";#conn:", len(self.connections)
        
        if selversion < OLPROTO_VER_SIXTH:
            return True

        if exc is None:
            self.connections[permid] = selversion
            #superpeers = self.superpeer_db.getSuperPeers()
            #if permid in superpeers:
             #   print >> sys.stderr,"rquery: handleConnection: Connect to superpeer"
        else:
            # Connection lost: forget the peer (ignore if never tracked).
            try:
                del self.connections[permid]
            except:
                pass
                #print_exc()

        return True
+
+    #
+    # Send query
+    # 
    def send_query(self,query,usercallback,max_peers_to_query=MAX_PEERS_TO_QUERY):
        """ Called by GUI Thread """
        # Clamp the fan-out to the protocol maximum, then hop onto the
        # overlay thread to do the actual network work.
        if max_peers_to_query is None or max_peers_to_query > MAX_PEERS_TO_QUERY:
            max_peers_to_query = MAX_PEERS_TO_QUERY
        if DEBUG:
            print >>sys.stderr,"rquery: send_query",`query`,max_peers_to_query
        if max_peers_to_query > 0:
            send_query_func = lambda:self.network_send_query_callback(query,usercallback,max_peers_to_query)
            self.overlay_bridge.add_task(send_query_func,0)
+
+
    def network_send_query_callback(self,query,usercallback,max_peers_to_query):
        """ Called by overlay thread """
        # Build the QUERY message once; the same payload is sent to every
        # candidate peer via the connect callback.
        p = self.create_query(query,usercallback)
        m = QUERY+p
        query_conn_callback_lambda = lambda exc,dns,permid,selversion:self.conn_callback(exc,dns,permid,selversion,m)
        
        # Minimum protocol version depends on the query flavour.
        if query.startswith("CHANNEL"):
            wantminoversion = OLPROTO_VER_THIRTEENTH  # channel queries and replies only for the latest version (13) 
        elif query.startswith("SIMPLE+METADATA"):
            wantminoversion = OLPROTO_VER_TWELFTH
        else:
            wantminoversion =  OLPROTO_VER_SIXTH
            
        if DEBUG:
            print >>sys.stderr,"rquery: send_query: Connected",len(self.connections),"peers; minoversion=", wantminoversion
        
        #print "******** send query net cb:", query, len(self.connections), self.connections
        
        # 1. See how many peers we already know about from direct connections
        # NOTE(review): this loop is not capped by max_peers_to_query; only
        # step 2 honours the limit.
        peers_to_query = 0
        for permid,selversion in self.connections.iteritems():
            if selversion >= wantminoversion:
                self.overlay_bridge.connect(permid,query_conn_callback_lambda)
                peers_to_query += 1
        
        # 2. If not enough, get some remote-search capable peers from BC
        if peers_to_query < max_peers_to_query and self.bc_fac and self.bc_fac.buddycast_core:
            query_cand = self.bc_fac.buddycast_core.getRemoteSearchPeers(max_peers_to_query-peers_to_query,wantminoversion)
            for permid in query_cand:
                if permid not in self.connections:    # don't call twice
                    self.overlay_bridge.connect(permid,query_conn_callback_lambda)
                    peers_to_query += 1
        
        if DEBUG:
            print >>sys.stderr,"rquery: send_query: Sent to",peers_to_query,"peers; query=", query
+        
    def create_query(self,query,usercallback):
        """ Build the bencoded QUERY payload: {'q': UTF-8 query string,
        'id': random id registered for routing the reply back}. """
        d = {}
        d['q'] = query.strip().encode("UTF-8")
        d['id'] = self.create_and_register_query_id(query,usercallback)
        return bencode(d)
+        
    def create_and_register_query_id(self,query,usercallback):
        """ Generate a random QUERY_ID_SIZE-byte id and remember the query
        and user callback under it, so the matching QUERY_REPLY can be
        routed back to the caller. """
        # 'id' shadows the builtin; kept as-is.
        id = Rand.rand_bytes(QUERY_ID_SIZE)
        queryrec = {'query':query,'usercallback':usercallback}
        self.query_ids2rec[id] = queryrec
        return id
+        
+    def is_registered_query_id(self,id):
+        if id in self.query_ids2rec:
+            return self.query_ids2rec[id]
+        else:
+            return None
+        
    def conn_callback(self,exc,dns,permid,selversion,message):
        """ Connect callback: once the overlay connection is up and the
        peer speaks a recent enough protocol, push the QUERY message. """
        if exc is None and selversion >= OLPROTO_VER_SIXTH:
            self.overlay_bridge.send(permid,message,self.send_callback)
+            
    def send_callback(self,exc,permid):
        """ Send-completion callback; failures are deliberately ignored. """
        #print "******* query was sent to", show_permid_short(permid), exc
        pass
+    
+    
+    #
+    # Receive query
+    # 
+    
    def recv_query(self,permid,message,selversion):
        """ Validate an incoming QUERY message (protocol version, bencode
        syntax, schema) and hand it to process_query.
        Returns False on any validation failure. """
        if selversion < OLPROTO_VER_SIXTH:
            return False

        # Unpack
        try:
            d = bdecode(message[1:])
        except:
            if DEBUG:
                print >>sys.stderr,"rquery: Cannot bdecode QUERY message"
            #print_exc()
            return False
        
        if not isValidQuery(d,selversion):
            if DEBUG:
                print >>sys.stderr,"rquery: QUERY invalid",`d`
            return False

        # ACCESS CONTROL, INCLUDING CHECKING IF PEER HAS NOT EXCEEDED
        # QUERY QUOTUM IS DONE in Tribler/Core/RequestPolicy.py
        #

        # Process
        self.process_query(permid, d, selversion)
        
        return True
+
    def set_log_file(self, logfile):
        """ Open (append mode) the file that log() writes incoming queries
        to. Logging stays disabled until this is called. """
        self.logfile = open(logfile, "a") 
+   
+   
    def log(self, permid, decoded_message):        
        """ Append a timestamped record of an incoming query to the
        logfile. Format: timestamp TAB permid TAB ip TAB message. """
        lt = T.localtime(T.time())
        timestamp = "%04d-%02d-%02d %02d:%02d:%02d" % (lt[0], lt[1], lt[2], lt[3], lt[4], lt[5])
        ip = self.peer_db.getPeer(permid, "ip")
        #ip = "x.y.z.1"
        s = "%s\t%s\t%s\t%s\n"% (timestamp, bin2str(permid), ip, decoded_message)
        
        # NOTE(review): also echoes every record to stdout.
        print dunno2unicode(s)
        self.logfile.write(dunno2unicode(s)) # bin2str(
        self.logfile.flush()
+    
+    
+    #
+    # Send query reply
+    #
+    def process_query(self, permid, d, selversion):
+        hits = None
+        p = None
+        sendtorrents = False
+
+        netwq = d['q']
+        if netwq.startswith("SIMPLE"): # remote query
+            # Format: 'SIMPLE '+string of space separated keywords or
+            #         'SIMPLE+METADATA' +string of space separated keywords
+            #
+            # In the future we could support full SQL queries:
+            # SELECT infohash,torrent_name FROM torrent_db WHERE status = ALIVE
+            
+            if netwq.startswith('SIMPLE+METADATA'):
+                q = d['q'][len('SIMPLE+METADATA '):]
+                sendtorrents = True
+            else:
+                q = d['q'][len('SIMPLE '):]
+                    
+            uq = self.clean_netwq(q)
+            kws = split_into_keywords(uq)
+            hits = self.search_torrents(kws, maxhits=MAX_RESULTS,sendtorrents=sendtorrents)
+            p = self.create_remote_query_reply(d['id'],hits,selversion)
+            
+        elif netwq.startswith("CHANNEL"): # channel query
+            if DEBUG:
+                print>>sys.stderr, "Incoming channel query", d['q']
+            q = d['q'][len('CHANNEL '):]
+            uq = self.clean_netwq(q,channelquery=True)
+            hits = self.channelcast_db.searchChannels(uq)
+            p = self.create_channel_query_reply(d['id'],hits,selversion)
+
+        # log incoming query, if logfile is set
+        if self.logfile:
+            self.log(permid, q)        
+     
+        m = QUERY_REPLY+p
+
+        if self.overlay_log:
+            nqueries = self.get_peer_nqueries(permid)
+            # RECV_MSG PERMID OVERSION NUM_QUERIES MSG
+            self.overlay_log('RECV_QRY', show_permid(permid), selversion, nqueries, repr(d))
+
+            # RPLY_QRY PERMID NUM_HITS MSG
+            self.overlay_log('RPLY_QRY', show_permid(permid), len(hits), repr(p))
+
+        self.overlay_bridge.send(permid, m, self.send_callback)
+        
+        self.inc_peer_nqueries(permid)
+
+    # This method need not be used, since its job is handled quite well by split_into_keywords.
+    def clean_netwq(self,q,channelquery=False):
+        # Filter against bad input
+        uq = q.decode("UTF-8")
+        newq = u''
+        for i in range(0,len(uq)):
+            if uq[i].isalnum() or uq[i] == ' ' or (channelquery and uq[i] == '+') or (channelquery and uq[i] == '/'):
+                newq += uq[i]
+        return newq
+            
+        
    def create_remote_query_reply(self,id,hits,selversion):
        """ Build the bencoded QUERY_REPLY payload for a SIMPLE query:
        {'id': query id, 'a': {infohash: hit record}}. Newer protocol
        versions add torrent_size (v9+), channel info (v11+) and, for
        SIMPLE+METADATA, the torrent metadata itself (v12+). """
        getsize = os.path.getsize
        join = os.path.join
        d = {}
        d['id'] = id
        d2 = {}
        for torrent in hits:
            r = {}
            # NEWDBSTANDARD. Do not rename r's fields: they are part of the 
            # rquery protocol spec.
            # Arno, 2010-01-28: name DB record contains the Unicode object
            r['content_name'] = torrent['name'].encode("UTF-8")  
            r['length'] = torrent['length']
            r['leecher'] = torrent['num_leechers']
            r['seeder'] = torrent['num_seeders']
            # Arno: TODO: sending category doesn't make sense as that's user-defined
            # leaving it now because of time constraints
            r['category'] = torrent['category']
            if selversion >= OLPROTO_VER_NINETH:
                r['torrent_size'] = getsize(join(self.torrent_dir, torrent['torrent_file_name']))
            if selversion >= OLPROTO_VER_ELEVENTH:
                r['channel_permid'] = torrent['channel_permid']
                # Arno, 2010-01-28: name DB record contains the Unicode object
                r['channel_name'] = torrent['channel_name'].encode("UTF-8")
            if selversion >= OLPROTO_VER_TWELFTH and 'metadata' in torrent:
                if DEBUG:
                    print >>sys.stderr,"rqmh: create_query_reply: Adding torrent file"
                r['metatype'] = torrent['metatype']
                r['metadata'] = torrent['metadata']
                
            d2[torrent['infohash']] = r
        d['a'] = d2
        return bencode(d)
+
    def create_channel_query_reply(self,id,hits,selversion):
        """ Build the bencoded QUERY_REPLY payload for a CHANNEL query,
        capping the number of hits at max_channel_query_results. The hit
        records are built by channelcast (or empty if it is unavailable). """
        d = {}
        d['id'] = id
        
        # 14-04-2010, Andrea: sometimes apparently trivial queries like 'a' can
        # produce enormous amounts of hits that will keep the receiver busy
        # processing them. I made a hack in 'gotMessage' in
        # ChannelSearchGridManager that drops results when there are more than a
        # threshold. At this point it is better to limit the results at the
        # source, to use less network bandwidth.
        hitslen = len(hits)
        if hitslen > self.max_channel_query_results:
            if DEBUG:
                print >> sys.stderr, "Too many results for query (%d). Dropping to %d." % \
                    (hitslen,self.max_channel_query_results) 
            hits = hits[:self.max_channel_query_results] #hits are ordered by timestamp descending
       
        # 09-04-2010 Andrea: this code was exactly a duplicate copy of some
        # code in the channelcast module. Refactoring performed.
#        d2 = {}
#        for hit in hits:
#            r = {}
#            r['publisher_id'] = str(hit[0]) # ARNOUNICODE: must be str
#            r['publisher_name'] = hit[1].encode("UTF-8")  # ARNOUNICODE: must be explicitly UTF-8 encoded
#            r['infohash'] = str(hit[2])     # ARNOUNICODE: must be str
#            r['torrenthash'] = str(hit[3])  # ARNOUNICODE: must be str
#            r['torrentname'] = hit[4].encode("UTF-8") # ARNOUNICODE: must be explicitly UTF-8 encoded
#            r['time_stamp'] = int(hit[5])
#            # hit[6]: signature, which is unique for any torrent published by a user
#            signature = hit[6]
#            d2[signature] = r
        if self.bc_fac.channelcast_core is not None:
            d2 = self.bc_fac.channelcast_core.buildChannelcastMessageFromHits(hits,selversion,fromQuery=True)
            d['a'] = d2
        else:
            d['a'] = {}
        return bencode(d)
+    
+    #
+    # Receive query reply
+    #
    def recv_query_reply(self,permid,message,selversion):
        """ Validate an incoming QUERY_REPLY (bencode syntax, schema, known
        query id and - for SIMPLE+METADATA - the attached torrent metadata)
        and hand it to process_query_reply.
        Returns False on any validation failure. """
        
        #print "****** recv query reply", len(message)
        
        if selversion < OLPROTO_VER_SIXTH:
            return False
        
        #if len(message) > MAX_QUERY_REPLY_LEN:
        #    return True    # don't close

        # Unpack
        try:
            d = bdecode(message[1:])
        except:
            if DEBUG:
                print >>sys.stderr,"rquery: Cannot bdecode QUERY_REPLY message", selversion
            return False
        
        if not isValidQueryReply(d,selversion):
            if DEBUG:
                print >>sys.stderr,"rquery: not valid QUERY_REPLY message", selversion
            return False

        
        # Check auth: the reply must carry an id we registered ourselves.
        queryrec = self.is_registered_query_id(d['id'])
        if not queryrec:
            if DEBUG:
                print >>sys.stderr,"rquery: QUERY_REPLY has unknown query ID", selversion
            return False

        if selversion >= OLPROTO_VER_TWELFTH:
            if queryrec['query'].startswith('SIMPLE+METADATA'):
                # Every hit must carry parseable torrent metadata of the
                # advertised size; otherwise the whole reply is rejected.
                for infohash,torrentrec in d['a'].iteritems():
                    if not 'metatype' in torrentrec:
                        if DEBUG:
                            print >>sys.stderr,"rquery: QUERY_REPLY has no metatype field", selversion
                        return False

                    if not 'metadata' in torrentrec:
                        if DEBUG:
                            print >>sys.stderr,"rquery: QUERY_REPLY has no metadata field", selversion
                        return False
                    if torrentrec['torrent_size'] != len(torrentrec['metadata']):
                        if DEBUG:
                            print >>sys.stderr,"rquery: QUERY_REPLY torrent_size != len metadata", selversion
                        return False
                    try:
                        # Validity test
                        if torrentrec['metatype'] == URL_MIME_TYPE:
                            tdef = TorrentDef.load_from_url(torrentrec['metadata'])
                        else:
                            metainfo = bdecode(torrentrec['metadata'])
                            tdef = TorrentDef.load_from_dict(metainfo)
                    except:
                        if DEBUG:
                            print_exc()
                        return False
                        

        # Process
        self.process_query_reply(permid,queryrec['query'],queryrec['usercallback'],d)
        return True
+
+
    def process_query_reply(self,permid,query,usercallback,d):
        """ Deliver the reply hits to the user callback on the GUI thread.
        For CHANNEL queries, first trim oversized result sets and update
        the channelcast database. Empty replies are dropped. """
        
        if DEBUG:
            print >>sys.stderr,"rquery: process_query_reply:",show_permid_short(permid),query,d
        
        if len(d['a']) > 0:
            self.unidecode_hits(query,d)
            if query.startswith("CHANNEL"):
                # 13-04-2010 Andrea: The gotRemoteHits in SearchGridManager is too slow.
                # Since it is run by the GUIThread when there are too many hits the GUI
                # gets freezed.
                # dropping some random results if they are too many. 
                # It is just an hack, a better method to improve performance should be found.
                
                if len(d['a']) > self.max_channel_query_results:
                    if DEBUG:
                        print >> sys.stderr, "DROPPING some answers: they where %d" % len(d['a'])
                    newAnswers = {}
                    newKeys = d['a'].keys()[:self.max_channel_query_results] 
                    for key in newKeys:
                        newAnswers[key] = d['a'][key]
                    d['a'] = newAnswers
                    
                # Andrea 05-06-2010: updates the database through channelcast. Before this was
                # done by the GUIThread in SearchGridManager    
                self.bc_fac.channelcast_core.updateChannel(permid,query,d['a'])

                # Inform user of remote channel hits
                remote_query_usercallback_lambda = lambda:usercallback(permid,query,d['a'])
            else:
                remote_query_usercallback_lambda = lambda:usercallback(permid,query,d['a'])
            
            self.session.uch.perform_usercallback(remote_query_usercallback_lambda)
        elif DEBUG:
            print >>sys.stderr,"rquery: QUERY_REPLY: no results found"
+
+
+    def unidecode_hits(self,query,d):
+        if query.startswith("SIMPLE"):
+            for infohash,r in d['a'].iteritems():
+                r['content_name'] = r['content_name'].decode("UTF-8")
+        elif query.startswith("CHANNEL"):
+            for signature,r in d['a'].iteritems():
+                r['publisher_name'] = r['publisher_name'].decode("UTF-8")
+                r['torrentname'] = r['publisher_name'].decode("UTF-8")
+            
+
    def inc_peer_nqueries(self, permid):
        """ Bump the persisted per-peer query counter (best effort; DB
        errors are logged and swallowed). """
        peer = self.peer_db.getPeer(permid)
        try:
            if peer is not None:
                nqueries = peer['num_queries']
                # Treat a NULL counter as zero.
                if nqueries is None:
                    nqueries = 0
                self.peer_db.updatePeer(permid, num_queries=nqueries+1)
        except:
            print_exc()
+
+    def get_peer_nqueries(self, permid):
+        peer = self.peer_db.getPeer(permid)
+        if peer is None:
+            return 0
+        else:
+            return peer['num_queries']
+
+
+    def search_torrents(self,kws,maxhits=None,sendtorrents=False):
+        
+        if DEBUG:
+            print >>sys.stderr,"rquery: search for torrents matching",`kws`
+        
+        allhits = self.torrent_db.searchNames(kws,local=False)
+        if maxhits is None:
+            hits = allhits
+        else:
+            hits = allhits[:maxhits]
+            
+        colltorrdir = self.session.get_torrent_collecting_dir()
+        if sendtorrents:
+            
+            print >>sys.stderr,"rqmh: search_torrents: adding torrents"
+            for hit in hits:
+                filename = os.path.join(colltorrdir,hit['torrent_file_name'])
+                try:
+                    tdef = TorrentDef.load(filename)
+                    if tdef.get_url_compat():
+                        metatype = URL_MIME_TYPE
+                        metadata = tdef.get_url()
+                    else:
+                        metatype = TSTREAM_MIME_TYPE
+                        metadata = bencode(tdef.get_metainfo())
+                except:
+                    print_exc()
+                    metadata = None
+                hit['metatype'] = metatype
+                hit['metadata'] = metadata
+                
+            # Filter out hits for which we could not read torrent file (rare)
+            newhits = []
+            for hit in hits:
+                if hit['metadata'] is not None:
+                    newhits.append(hit)
+            hits = newhits
+            
+        return hits
+
+
+def isValidQuery(d,selversion):
+    """ Validate a bdecoded QUERY message.
+
+    d must be a dict with exactly two keys: 'q' (non-empty query string of
+    the form "<type> <keywords>") and 'id', both str. Keywords must be
+    alphanumeric, except that '+' and '/' are tolerated so BASE64-encoded
+    permids can appear in CHANNEL queries.
+    @param selversion Overlay-protocol version of the sending peer.
+    @return True iff the message is well-formed.
+    """
+    if not isinstance(d,dict):
+        if DEBUG:
+            print >> sys.stderr, "rqmh: not dict"
+        return False
+    if not ('q' in d and 'id' in d):
+        if DEBUG:
+            print >> sys.stderr, "rqmh: some keys are missing", d.keys()
+        return False
+    if not (isinstance(d['q'],str) and isinstance(d['id'],str)):
+        if DEBUG:
+            print >> sys.stderr, "rqmh: d['q'] or d['id'] are not of string format", d['q'], d['id']
+        return False
+    if len(d['q']) == 0:
+        if DEBUG:
+            print >> sys.stderr, "rqmh: len(d['q']) == 0"
+        return False
+    # SIMPLE+METADATA queries require overlay-protocol version 12 or later
+    if selversion < OLPROTO_VER_TWELFTH and d['q'].startswith('SIMPLE+METADATA'):
+        if DEBUG:
+            print >>sys.stderr,"rqmh: SIMPLE+METADATA but old olversion",`d['q']`
+        return False
+    # The query must consist of a type prefix, a space, and the keywords
+    idx = d['q'].find(' ')
+    if idx == -1:
+        if DEBUG:
+            print >>sys.stderr,"rqmh: no space in q",`d['q']`
+        return False
+    try:
+        keyws = d['q'][idx+1:]
+        ukeyws = keyws.decode("UTF-8").strip().split()
+        for ukeyw in ukeyws:
+            if not ukeyw.isalnum():
+                # Arno, 2010-02-09: Allow for BASE64-encoded permid in CHANNEL queries
+                rep = ukeyw.replace("+","p").replace("/","s")
+                if not rep.isalnum():
+                    if DEBUG:
+                        print >>sys.stderr,"rqmh: not alnum",`ukeyw`
+                    return False
+    except:
+        print_exc()
+        if DEBUG:
+            print >>sys.stderr,"rqmh: not alnum query",`d['q']`
+        return False
+    if len(d) > 2: # no other keys
+        if DEBUG:
+            print >> sys.stderr, "rqmh: d has more than 2 keys"
+        return False
+    return True
+
+
+def isValidQueryReply(d,selversion):
+    """ Validate a bdecoded QUERY_REPLY message: a dict with exactly the
+    keys 'a' (dict of hits, validated via isValidHits) and 'id' (str).
+    @param selversion Overlay-protocol version of the sending peer.
+    @return True iff the reply is well-formed.
+    """
+    if not isinstance(d,dict):
+        if DEBUG:
+            print >>sys.stderr,"rqmh: reply: not dict"
+        return False
+    if not ('a' in d and 'id' in d):
+        if DEBUG:
+            print >>sys.stderr,"rqmh: reply: a or id key missing"
+        return False
+    if not (isinstance(d['a'],dict) and isinstance(d['id'],str)):
+        if DEBUG:
+            print >>sys.stderr,"rqmh: reply: a or id key not dict/str"
+        return False
+    if not isValidHits(d['a'],selversion):
+        return False
+    if len(d) > 2: # no other keys
+        if DEBUG:
+            print >>sys.stderr,"rqmh: reply: too many keys, got",d.keys()
+        return False
+    return True
+
+def isValidHits(d,selversion):
+    """ Validate the hits dict of a QUERY_REPLY. The hit type is sniffed
+    from the first value: a 'publisher_id' key marks channel-search
+    results (checked as a whole via validChannelCastMsg), a 'content_name'
+    key marks remote torrent-search results (each value checked via
+    isValidRemoteVal). An empty hits dict is considered valid.
+    """
+    if not isinstance(d,dict):
+        return False
+    ls = d.values()
+    if len(ls)>0:
+        l = ls[0]
+        if 'publisher_id' in l: # channel search result
+            if not validChannelCastMsg(d):
+                return False
+        elif 'content_name' in l: # remote search
+            for key in d.keys():
+        #        if len(key) != 20:
+        #            return False
+                val = d[key]
+                if not isValidRemoteVal(val,selversion):
+                    return False
+    return True
+
+def isValidChannelVal(d, selversion):
+    """ Validate a single channel-search hit: a dict that must contain at
+    least the keys publisher_id, publisher_name, infohash, torrenthash,
+    torrentname and time_stamp.
+    """
+    if not isinstance(d,dict):
+        if DEBUG:
+            print >>sys.stderr,"rqmh: reply: torrentrec: value not dict"
+        return False
+    if not ('publisher_id' in d and 'publisher_name' in d and 'infohash' in d and 'torrenthash' in d and 'torrentname' in d and 'time_stamp' in d):
+        if DEBUG:
+            print >>sys.stderr,"rqmh: reply: a: key missing, got",d.keys()
+        return False
+    return True
+
+def isValidRemoteVal(d,selversion):
+    """ Validate a single remote torrent-search hit dict. The set of
+    required keys grows with the peer's overlay-protocol version; from
+    v12 on an optional metatype/metadata pair is accepted, which must
+    parse as a valid torrent (URL-compat or bencoded metainfo).
+    """
+    if not isinstance(d,dict):
+        if DEBUG:
+            print >>sys.stderr,"rqmh: reply: a: value not dict"
+        return False
+    # v12+: channel info required, optional embedded torrent metadata
+    if selversion >= OLPROTO_VER_TWELFTH:
+        if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d and 'torrent_size' in d and 'channel_permid' in d and 'channel_name' in d):
+            if DEBUG:
+                print >>sys.stderr,"rqmh: reply: torrentrec12: key missing, got",d.keys()
+            return False
+        if 'metatype' in d and 'metadata' in d:
+            try:
+                metatype = d['metatype']
+                metadata = d['metadata']
+                if metatype == URL_MIME_TYPE:
+                    tdef = TorrentDef.load_from_url(metadata)
+                else:
+                    metainfo = bdecode(metadata)
+                    tdef = TorrentDef.load_from_dict(metainfo)
+            except:
+                if DEBUG:
+                    print >>sys.stderr,"rqmh: reply: torrentrec12: metadata invalid"
+                    print_exc()
+                return False
+
+    # v11: channel info required, no embedded metadata
+    elif selversion >= OLPROTO_VER_ELEVENTH:
+        if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d and 'torrent_size' in d and 'channel_permid' in d and 'channel_name' in d):
+            if DEBUG:
+                print >>sys.stderr,"rqmh: reply: torrentrec11: key missing, got",d.keys()
+            return False
+        
+    # v9-v10: torrent_size required, no channel info
+    elif selversion >= OLPROTO_VER_NINETH:
+        if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d and 'torrent_size' in d):
+            if DEBUG:
+                print >>sys.stderr,"rqmh: reply: torrentrec9: key missing, got",d.keys()
+            return False
+    # older versions: basic fields only
+    else:
+        if not ('content_name' in d and 'length' in d and 'leecher' in d and 'seeder' in d and 'category' in d):
+            if DEBUG:
+                print >>sys.stderr,"rqmh: reply: torrentrec6: key missing, got",d.keys()
+            return False
+        
+#    if not (isinstance(d['content_name'],str) and isinstance(d['length'],int) and isinstance(d['leecher'],int) and isinstance(d['seeder'],int)):
+#        return False
+#    if len(d) > 4: # no other keys
+#        return False
+    return True
+
+        
diff --git a/instrumentation/next-share/BaseLib/Core/SocialNetwork/RemoteTorrentHandler.py b/instrumentation/next-share/BaseLib/Core/SocialNetwork/RemoteTorrentHandler.py
new file mode 100644 (file)
index 0000000..5fbb053
--- /dev/null
@@ -0,0 +1,80 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# Handles the case where the user did a remote query and now selected one of the
+# returned torrents for download. 
+#
+
+import sys
+
+from BaseLib.Core.simpledefs import INFOHASH_LENGTH
+from BaseLib.Core.CacheDB.CacheDBHandler import TorrentDBHandler
+
+DEBUG = False
+
+class RemoteTorrentHandler:
+    """ Obtains the actual .torrent file from a remote peer after the user
+    selected a remote-query hit for download. Singleton; maps requested
+    infohashes to the user callbacks to invoke when the torrent arrives.
+    """
+    
+    __single = None
+    
+    def __init__(self):
+        if RemoteTorrentHandler.__single:
+            raise RuntimeError, "RemoteTorrentHandler is singleton"
+        RemoteTorrentHandler.__single = self
+        self.torrent_db = TorrentDBHandler.getInstance()
+        # infohash -> usercallback for torrents we have requested
+        self.requestedtorrents = {}
+
+    def getInstance(*args, **kw):
+        if RemoteTorrentHandler.__single is None:
+            RemoteTorrentHandler(*args, **kw)
+        return RemoteTorrentHandler.__single
+    getInstance = staticmethod(getInstance)
+
+
+    def register(self,overlay_bridge,metadatahandler,session):
+        """ Called once at startup to supply dependencies. """
+        self.overlay_bridge = overlay_bridge
+        self.metadatahandler = metadatahandler
+        self.session = session
+    
+    def download_torrent(self,permid,infohash,usercallback):
+        """ The user has selected a torrent referred to by a peer in a query 
+        reply. Try to obtain the actual .torrent file from the peer and then 
+        start the actual download. 
+        """
+        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+        # Called by GUI thread; hand the real work to the overlay thread
+
+        olthread_remote_torrent_download_lambda = lambda:self.olthread_download_torrent_callback(permid,infohash,usercallback)
+        self.overlay_bridge.add_task(olthread_remote_torrent_download_lambda,0)
+        
+    def olthread_download_torrent_callback(self,permid,infohash,usercallback):
+        """ Called by overlay thread """
+        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+    
+        #if infohash in self.requestedtorrents:
+        #    return    # TODO RS:the previous request could have failed
+              
+        # Note: a second request for the same infohash overwrites the
+        # earlier callback.
+        self.requestedtorrents[infohash] = usercallback
+        
+        self.metadatahandler.send_metadata_request(permid,infohash,caller="rquery")
+        if DEBUG:
+            print >>sys.stderr,'rtorrent: download: Requested torrent: %s' % `infohash`
+       
+    def metadatahandler_got_torrent(self,infohash,metadata,filename):
+        """ Called by MetadataHandler when the requested torrent comes in """
+        assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+
+        #Called by overlay thread
+
+        if DEBUG:
+            print >>sys.stderr,"rtorrent: got requested torrent from peer, wanted", infohash in self.requestedtorrents, `self.requestedtorrents`
+        # Ignore unsolicited torrents
+        if infohash not in self.requestedtorrents:
+           return
+
+        usercallback = self.requestedtorrents[infohash]
+        del self.requestedtorrents[infohash]
+        
+        # Deliver the torrent to the user on the session callback thread
+        remote_torrent_usercallback_lambda = lambda:usercallback(infohash,metadata,filename)
+        self.session.uch.perform_usercallback(remote_torrent_usercallback_lambda)
diff --git a/instrumentation/next-share/BaseLib/Core/SocialNetwork/SocialNetworkMsgHandler.py b/instrumentation/next-share/BaseLib/Core/SocialNetwork/SocialNetworkMsgHandler.py
new file mode 100644 (file)
index 0000000..02f8825
--- /dev/null
@@ -0,0 +1,79 @@
+# Written by Arno Bakker, Jie Yang
+# see LICENSE.txt for license information
+
+
+import sys
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FIFTH
+from BaseLib.Core.SocialNetwork.OverlapMsgHandler import OverlapMsgHandler
+
+DEBUG = False
+
+class SocialNetworkMsgHandler:
+    """ Dispatches social-network overlay messages (currently only
+    SOCIAL_OVERLAP) and initiates overlap exchanges on new overlay
+    connections. Singleton. """
+    
+    __single = None
+    
+    def __init__(self):
+        if SocialNetworkMsgHandler.__single:
+            raise RuntimeError, "SocialNetworkMsgHandler is singleton"
+        SocialNetworkMsgHandler.__single = self
+        self.overlap = OverlapMsgHandler()
+
+    def getInstance(*args, **kw):
+        if SocialNetworkMsgHandler.__single is None:
+            SocialNetworkMsgHandler(*args, **kw)
+        return SocialNetworkMsgHandler.__single
+    getInstance = staticmethod(getInstance)
+        
+
+    def register(self,overlay_bridge,launchmany,config):
+        """ Called once at startup to supply dependencies before any
+        messages or connections are handled. """
+        if DEBUG:
+            print >> sys.stderr,"socnet: register"
+        self.overlay_bridge = overlay_bridge
+        self.config = config
+        self.overlap.register(overlay_bridge,launchmany)
+
+    #
+    # Incoming messages
+    # 
+    def handleMessage(self,permid,selversion,message):
+        """ Handle an incoming overlay message; message[0] is the type.
+        Superpeers ignore SOCIAL_OVERLAP. Returns False on unknown
+        message types (causing the connection to be dropped). """
+        t = message[0]
+        if t == SOCIAL_OVERLAP:
+            if DEBUG:
+                print >> sys.stderr,"socnet: Got SOCIAL_OVERLAP",len(message)
+            if self.config['superpeer']:
+                if DEBUG:
+                    print >> sys.stderr,"socnet: overlap: Ignoring, we are superpeer"
+                return True
+            else:
+                return self.overlap.recv_overlap(permid,message,selversion)
+
+        else:
+            if DEBUG:
+                print >> sys.stderr,"socnet: UNKNOWN OVERLAY MESSAGE", ord(t)
+            return False
+
+    #
+    # Incoming connections
+    #
+    def handleConnection(self,exc,permid,selversion,locally_initiated):
+        """ Called for each overlay connection event; exc is not None on
+        failure. Initiates an overlap exchange with peers speaking
+        protocol version >= OLPROTO_VER_FIFTH, unless we are superpeer. """
+        if DEBUG:
+            print >> sys.stderr,"socnet: handleConnection",exc,"v",selversion,"local",locally_initiated
+        if exc is not None:
+            return
+        
+        if selversion < OLPROTO_VER_FIFTH:
+            return True
+
+        if self.config['superpeer']:
+            if DEBUG:
+                print >> sys.stderr,"socnet: overlap: Ignoring connection, we are superpeer"
+            return True
+
+        self.overlap.initiate_overlap(permid,locally_initiated)
+        return True
+
+    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/SocialNetwork/__init__.py b/instrumentation/next-share/BaseLib/Core/SocialNetwork/__init__.py
new file mode 100644 (file)
index 0000000..86ac17b
--- /dev/null
@@ -0,0 +1,3 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/ChannelCrawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/ChannelCrawler.py
new file mode 100644 (file)
index 0000000..4be1183
--- /dev/null
@@ -0,0 +1,112 @@
+# Written by Boudewijn Schoon\r
+# see LICENSE.txt for license information\r
+\r
+import sys\r
+import cPickle\r
+from time import strftime\r
+\r
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_THIRTEENTH\r
+# OLPROTO_VER_SEVENTH --> Sixth public release, >= 4.5.0, supports CRAWLER_REQUEST and CRAWLER_REPLY messages\r
+# OLPROTO_VER_EIGHTH  --> Seventh public release, >= 5.0, supporting BuddyCast with clicklog info.\r
+\r
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_CHANNEL_QUERY\r
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB\r
+from BaseLib.Core.Utilities.utilities import show_permid, show_permid_short\r
+from BaseLib.Core.Statistics.Crawler import Crawler\r
+\r
+DEBUG = False\r
+\r
+class ChannelCrawler:\r
+    """Crawler plugin that queries remote peers' channel and vote\r
+    tables (CRAWLER_CHANNEL_QUERY) and, when running as a crawler,\r
+    logs requests and replies to channelcrawler.txt. Singleton."""\r
+    __single = None\r
+\r
+    @classmethod\r
+    def get_instance(cls, *args, **kargs):\r
+        if not cls.__single:\r
+            cls.__single = cls(*args, **kargs)\r
+        return cls.__single\r
+\r
+    def __init__(self):\r
+        self._sqlite_cache_db = SQLiteCacheDB.getInstance()\r
+\r
+        # Only an actual crawler logs to file; client peers keep _file None\r
+        crawler = Crawler.get_instance()\r
+        if crawler.am_crawler():\r
+            self._file = open("channelcrawler.txt", "a")\r
+            self._file.write("".join(("# ", "*" * 80, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n")))\r
+            self._file.flush()\r
+        else:\r
+            self._file = None\r
+\r
+    def query_initiator(self, permid, selversion, request_callback):\r
+        """\r
+        Established a new connection. Send a CRAWLER_CHANNEL_QUERY request.\r
+        @param permid The Tribler peer permid\r
+        @param selversion The overlay protocol version\r
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)\r
+        """\r
+        if DEBUG: print >>sys.stderr, "channelcrawler: query_initiator", show_permid_short(permid)\r
+        sql = []\r
+        if selversion >= OLPROTO_VER_THIRTEENTH:\r
+            sql.extend(("SELECT 'channel_files', publisher_id, count(*) FROM ChannelCast group by publisher_id",\r
+                        "SELECT 'my_votes', mod_id, voter_id, vote, time_stamp FROM VoteCast where voter_id='" + show_permid(permid) + "' order by time_stamp desc limit 100",\r
+                        "SELECT 'other_votes', mod_id, voter_id, vote, time_stamp FROM VoteCast where voter_id<>'" + show_permid(permid) + "' order by time_stamp desc limit 100"))\r
+\r
+        request_callback(CRAWLER_CHANNEL_QUERY, ";".join(sql), callback=self._after_request_callback)\r
+\r
+    def _after_request_callback(self, exc, permid):\r
+        """\r
+        Called by the Crawler with the result of the request_callback\r
+        call in the query_initiator method. Logs the request on success.\r
+        """\r
+        if not exc:\r
+            if DEBUG: print >>sys.stderr, "channelcrawler: request send to", show_permid_short(permid)\r
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "REQUEST", show_permid(permid), "\n")))\r
+            self._file.flush()\r
+\r
+    def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):\r
+        """\r
+        Received a CRAWLER_CHANNEL_QUERY request.\r
+        @param permid The Crawler permid\r
+        @param selversion The overlay protocol version\r
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair\r
+        @param message The message payload\r
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])\r
+        """\r
+        if DEBUG:\r
+            print >> sys.stderr, "channelcrawler: handle_crawler_request", show_permid_short(permid), message\r
+\r
+        # execute the sql\r
+        try:\r
+            cursor = self._sqlite_cache_db.execute_read(message)\r
+\r
+        except Exception, e:\r
+            reply_callback(str(e), error=1)\r
+        else:\r
+            if cursor:\r
+                reply_callback(cPickle.dumps(list(cursor), 2))\r
+            else:\r
+                reply_callback("error", error=2)\r
+\r
+    def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):\r
+        """\r
+        Received a CRAWLER_CHANNEL_QUERY reply.\r
+        @param permid The Crawler permid\r
+        @param selversion The overlay protocol version\r
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair\r
+        @param channel_data Optional data associated with this channel\r
+        @param error The error value. 0 indicates success.\r
+        @param message The message payload\r
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)\r
+        """\r
+        if error:\r
+            if DEBUG:\r
+                print >> sys.stderr, "channelcrawler: handle_crawler_reply", error, message\r
+\r
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  REPLY", show_permid(permid), str(error), message, "\n")))\r
+            self._file.flush()\r
+\r
+        else:\r
+            if DEBUG:\r
+                print >> sys.stderr, "channelcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message)\r
+\r
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  REPLY", show_permid(permid), str(error), str(cPickle.loads(message)), "\n")))\r
+            self._file.flush()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/Crawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/Crawler.py
new file mode 100644 (file)
index 0000000..cf3458f
--- /dev/null
@@ -0,0 +1,570 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+# todo
+# - try to connect first, than start the initiator. now we start the
+#   initiator and we often fail to connect
+
+import sys
+import time
+import random
+from traceback import print_exc,print_stack
+
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_REQUEST, CRAWLER_REPLY, getMessageName
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import CrawlerDBHandler
+from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_SEVENTH
+from BaseLib.Core.Utilities.utilities import show_permid_short
+
+DEBUG = False
+
+# when a message payload exceedes 32KB it is divided into multiple
+# messages
+MAX_PAYLOAD_LENGTH = 32 * 1024
+
+# after 1 hour the channels for any outstanding CRAWLER_REQUEST
+# messages will be closed
+CHANNEL_TIMEOUT = 60 * 60
+
+# the FREQUENCY_FLEXIBILITY tels the client how strict it must adhere
+# to the frequency. the value indicates how many seconds a request
+# will be allowed before the actual frequency deadline
+FREQUENCY_FLEXIBILITY = 5
+
+# Do not attempt to re-initiate communication after more than x
+# connection failures
+MAX_ALLOWED_FAILURES = 26
+
+class Crawler:
+    __singleton = None
+
+    @classmethod
+    def get_instance(cls, *args, **kargs):
+        if not cls.__singleton:
+            cls.__singleton = cls(*args, **kargs)
+        return cls.__singleton
+
+    def __init__(self, session):
+        """ Use Crawler.get_instance(session); instantiating a second
+        instance raises RuntimeError (singleton). Sets up the handler and
+        initiator registries and starts the periodic deadline and
+        channel-timeout checks.
+        """
+        if self.__singleton:
+            raise RuntimeError, "Crawler is Singleton"
+        self._overlay_bridge = OverlayThreadingBridge.getInstance()
+        self._session = session
+        self._crawler_db = CrawlerDBHandler.getInstance()
+
+        # _message_handlers contains message-id:(request-callback, reply-callback, last-request-timestamp)
+        # the handlers are called when either a CRAWL_REQUEST or CRAWL_REPLY message is received
+        self._message_handlers = {}
+
+        # _crawl_initiators is a list with (initiator-callback,
+        # frequency, accept_frequency) tuples the initiators are called
+        # when a new connection is received
+        self._crawl_initiators = []
+
+        # _initiator_deadlines contains [deadline, frequency,
+        # accept_frequency, initiator-callback, permid, selversion,
+        # failure-counter] deadlines register information on when to
+        # call the crawl initiators again for a specific permid
+        self._initiator_deadlines = []
+        
+        # _dialback_deadlines contains message_id:(deadline, permid) pairs
+        # client peers should connect back to -a- crawler indicated by
+        # permid after deadline expired
+        self._dialback_deadlines = {}
+
+        # _channels contains permid:buffer-dict pairs. Where
+        # buffer_dict contains channel-id:(timestamp, buffer,
+        # channel_data) pairs. Where buffer is the payload from
+        # multipart messages that are received so far. Channels are
+        # used to match outstanding replies to given requests
+        self._channels = {}
+
+        # start checking for expired deadlines
+        self._check_deadlines(True)
+
+        # start checking for ancient channels
+        self._check_channels()
+
+    def register_crawl_initiator(self, initiator_callback, frequency=3600, accept_frequency=None):
+        """
+        Register a callback that is called each time a new connection
+        is made and subsequently each FREQUENCY seconds.
+
+        INITIATOR_CALLBACK is called as initiator_callback(permid,
+        selversion, request_callback).
+
+        ACCEPT_FREQUENCY defaults to FREQUENCY and indicates the
+        minimum seconds that must expire before a crawler request
+        message is accepted.
+
+        Giving FREQUENCY = 10 and ACCEPT_FREQUENCY = 0 will call
+        INITIATOR_CALLBACK every 10 seconds and will let the receiving
+        peers accept always.
+
+        Giving FREQUENCY = 10 and ACCEPT_FREQUENCY = 20 will call
+        INITIATOR_CALLBACK every 10 seconds and will cause frequency
+        errors 50% of the time.
+        """
+        if accept_frequency is None:
+            accept_frequency = frequency
+        self._crawl_initiators.append((initiator_callback, frequency, accept_frequency))
+
+    def register_message_handler(self, id_, request_callback, reply_callback):
+        """ Register callbacks for crawler message id_. request_callback
+        handles incoming CRAWLER_REQUESTs, reply_callback incoming
+        CRAWLER_REPLYs. The trailing 0 is the initial last-request
+        timestamp used for frequency checking in handle_request. """
+        self._message_handlers[id_] = (request_callback, reply_callback, 0)
+
+    def am_crawler(self):
+        """
+        Returns True if this running Tribler is a Crawler, i.e. if our
+        own permid occurs in the crawler list from the crawler database.
+        """
+        return self._session.get_permid() in self._crawler_db.getCrawlers()
+
+    def _acquire_channel_id(self, permid, channel_data):
+        """
+        Claim a unique one-byte id to match a request to a reply.
+
+        PERMID the peer to communicate with
+        CHANNEL_DATA optional data associated with this channel
+        Returns a channel-id in 1..255, or 0 when all 255 ids for this
+        peer are in use.
+        """
+        if permid in self._channels:
+            channels = self._channels[permid]
+        else:
+            channels = {}
+            self._channels[permid] = channels
+
+        # find a free channel-id randomly
+        channel_id = random.randint(1, 255)
+        attempt = 0
+        while channel_id in channels:
+            attempt += 1
+            if attempt > 64:
+                channel_id = 0
+                break
+            channel_id = random.randint(1, 255)
+
+        if channel_id == 0:
+            # find a free channel-id sequentially
+            channel_id = 255
+            while channel_id in channels and channel_id != 0:
+                channel_id -= 1
+
+        if channel_id:
+            # create a buffer to receive the reply
+            channels[channel_id] = [time.time() + CHANNEL_TIMEOUT, "", channel_data]
+
+        # print >>sys.stderr, "crawler: _acquire_channel_id:", show_permid_short(permid), len(channels), "channels used"
+
+        # a valid channel-id or 0 when no channel-id is left
+        return channel_id
+
+    def _release_channel_id(self, permid, channel_id):
+        """ Return channel_id for permid to the pool and drop the peer's
+        channel table when it becomes empty. Unknown ids are ignored. """
+        if permid in self._channels:
+            if channel_id in self._channels[permid]:
+                del self._channels[permid][channel_id]
+            if not self._channels[permid]:
+                del self._channels[permid]
+
+    def _post_connection_attempt(self, permid, success):
+        """
+        This method is called after a successful or failed connection
+        attempt. On success all failure counters for permid are reset;
+        on failure they are incremented, and entries exceeding
+        MAX_ALLOWED_FAILURES are dropped from the initiator deadlines.
+        """
+        if success:
+            # reset all failure counters for this permid
+            for tup in (tup for tup in self._initiator_deadlines if tup[4] == permid):
+                tup[6] = 0
+
+        else:
+            def increase_failure_counter(tup):
+                if tup[4] == permid:
+                    if tup[6] > MAX_ALLOWED_FAILURES:
+                        # remove from self._initiator_deadlines
+                        return False
+                    else:
+                        # increase counter but leave in self._initiator_deadlines
+                        tup[6] += 1
+                        return True
+                else:
+                    return True
+
+            self._initiator_deadlines = filter(increase_failure_counter, self._initiator_deadlines)
+
+    def send_request(self, permid, message_id, payload, frequency=3600, callback=None, channel_data=None):
+        """
+        This method ensures that a connection to PERMID exists before
+        sending the message
+
+        Returns the channel-id, or 0 when no free channel-id is left for
+        PERMID (in that case nothing is sent and CALLBACK is not called).
+
+        MESSAGE_ID is a one character crawler specific ID (defined in MessageID.py).
+        PAYLOAD is message specific string.
+        FREQUENCY is an integer defining the time, in seconds, until a next message with MESSAGE_ID is accepted by the client-side crawler.
+        CALLBACK is either None or callable. Called with parameters EXC and PERMID. EXC is None for success or an Exception for failure.
+        CHANNEL_DATA can be anything related to this specific request. It is supplied with the handle-reply callback.
+        """
+        # reserve a new channel-id
+        channel_id = self._acquire_channel_id(permid, channel_data)
+
+        def _after_connect(exc, dns, permid, selversion):
+            self._post_connection_attempt(permid, not exc)
+            if exc:
+                # could not connect.
+                if DEBUG: print >>sys.stderr, "crawler: could not connect", dns, show_permid_short(permid), exc
+                self._release_channel_id(permid, channel_id)
+                if callback:
+                    callback(exc, permid)
+            else:
+                self._send_request(permid, message_id, channel_id, payload, frequency=frequency, callback=callback)
+
+#         if DEBUG: print >>sys.stderr, "crawler: connecting (send_request)...", show_permid_short(permid)
+        if channel_id == 0:
+            if DEBUG: print >>sys.stderr, "crawler: send_request: Can not acquire channel-id", show_permid_short(permid)
+        else:
+            self._overlay_bridge.connect(permid, _after_connect)
+        return channel_id
+
+    def _send_request(self, permid, message_id, channel_id, payload, frequency=3600, callback=None):
+        """
+        Send a CRAWLER_REQUEST message to permid. This method assumes
+        that connection exists to the permid.
+
+        @param permid The destination peer
+        @param message_id The message id
+        @param payload The message content
+        @param frequency Destination peer will return a frequency-error when this message_id has been received within the last frequency seconds
+        @param callback Callable function/method is called when request is send with 2 parameters (exc, permid)
+        @return The message channel-id > 0 on success, and 0 on failure
+        """
+        # Sending a request from a Crawler to a Tribler peer
+        #     SIZE    INDEX
+        #     1 byte: 0      CRAWLER_REQUEST (from BaseLib.Core.BitTornado.BT1.MessageID)
+        #     1 byte: 1      --MESSAGE-SPECIFIC-ID--
+        #     1 byte: 2      Channel id
+        #     2 byte: 3+4    Frequency
+        #     n byte: 5...   Request payload
+        def _after_send_request(exc, permid):
+            if DEBUG:
+                if exc:
+                    print >> sys.stderr, "crawler: could not send request to", show_permid_short(permid), exc
+            if exc:
+                # the channel will never receive a reply, so release it
+                self._release_channel_id(permid, channel_id)
+
+            # call the optional callback supplied with send_request
+            if callback:
+                callback(exc, permid)
+
+        if DEBUG: print >> sys.stderr, "crawler: sending", getMessageName(CRAWLER_REQUEST+message_id), "with", len(payload), "bytes payload to", show_permid_short(permid)
+        # frequency is encoded as 16-bit big-endian; values above 65535 are truncated by the masking
+        self._overlay_bridge.send(permid, "".join((CRAWLER_REQUEST,
+                                                   message_id,
+                                                   chr(channel_id & 0xFF),
+                                                   chr((frequency >> 8) & 0xFF) + chr(frequency & 0xFF),
+                                                   str(payload))), _after_send_request)
+        return channel_id
+
+    def handle_request(self, permid, selversion, message):
+        """
+        Received CRAWLER_REQUEST message from OverlayApps
+
+        Wire layout: [0] CRAWLER_REQUEST, [1] message-specific id,
+        [2] channel-id, [3:5] frequency (16-bit big-endian), [5:] payload.
+        Dispatches to the registered request handler; replies with
+        error 254 (frequency error) when the request arrives too soon
+        and error 253 when the message id is unknown.
+        @return False on protocol-version conflict or truncated message.
+        """
+        if selversion >= OLPROTO_VER_SEVENTH and len(message) >= 5:
+
+            message_id = message[1]
+            channel_id = ord(message[2])
+            frequency = ord(message[3]) << 8 | ord(message[4])
+
+            if message_id in self._message_handlers:
+                now = time.time()
+                request_callback, reply_callback, last_request_timestamp = self._message_handlers[message_id]
+
+                # frequency: we will report a frequency error when we have
+                # received this request within FREQUENCY seconds
+                if last_request_timestamp + frequency < now + FREQUENCY_FLEXIBILITY:
+
+                    if not permid in self._channels:
+                        self._channels[permid] = {}
+                    self._channels[permid][channel_id] = [time.time() + CHANNEL_TIMEOUT, "", None]
+
+                    # store the new timestamp
+                    self._message_handlers[message_id] = (request_callback, reply_callback, now)
+
+                    def send_reply_helper(payload="", error=0, callback=None):
+                        return self.send_reply(permid, message_id, channel_id, payload, error=error, callback=callback)
+
+                    # 20/10/08. Boudewijn: We will no longer disconnect
+                    # based on the return value from the message handler
+                    try:
+                        request_callback(permid, selversion, channel_id, message[5:], send_reply_helper)
+                    except:
+                        print_exc()
+
+                    # 11/11/08. Boudewijn: Because the client peers may
+                    # not always be connectable, the client peers will
+                    # actively seek to connect to -a- crawler after
+                    # frequency expires. 
+                    self._dialback_deadlines[message_id] = (now + frequency, permid)
+
+                    return True
+
+                else:
+                    # frequency error
+                    self.send_reply(permid, message_id, channel_id, "frequency error", error=254)
+                    return True
+            else:
+                # invalid / unknown message. may be caused by a
+                # crawler sending newly introduced messages
+                self.send_reply(permid, message_id, channel_id, "unknown message", error=253)
+                return True
+        else:
+            # protocol version conflict or invalid message
+            return False
+
+    def send_reply(self, permid, message_id, channel_id, payload, error=0, callback=None):
+        """
+        This method ensures that a connection to PERMID exists before sending the message
+        """
+        def _connect_done(exc, dns, permid, selversion):
+            # record the outcome of the connection attempt either way
+            self._post_connection_attempt(permid, not exc)
+            if not exc:
+                # connected: hand off to the actual send
+                self._send_reply(permid, message_id, channel_id, payload, error=error, callback=callback)
+                return
+            # could not connect.
+            if DEBUG: print >>sys.stderr, "crawler: could not connect", dns, show_permid_short(permid), exc
+            if callback:
+                callback(exc, permid)
+
+        self._overlay_bridge.connect(permid, _connect_done)
+
+    def _send_reply(self, permid, message_id, channel_id, payload, error=0, callback=None):
+        """
+        Send a CRAWLER_REPLY message to permid. This method assumes
+        that a connection to the permid exists.
+
+        Payloads longer than MAX_PAYLOAD_LENGTH are split: this call
+        sends the first part and, from the send-completion callback,
+        calls send_reply again for the remainder.
+        
+        @param permid The destination peer
+        @param message_id The message id
+        @param channel_id The channel id. Used to match replies to requests
+        @param payload The message content
+        @param error The error code. (0: no-error, 253: unknown-message, 254: frequency-error, 255: reserved)
+        @param callback Callable function/method is called when the reply is sent, with 2 parameters (exc, permid)
+        @return The message channel-id > 0 on success, and 0 on failure
+        """
+        # Sending a reply from a Tribler peer to a Crawler
+        #     SIZE    INDEX
+        #     1 byte: 0      CRAWLER_REPLY (from BaseLib.Core.BitTornado.BT1.MessageID)
+        #     1 byte: 1      --MESSAGE-SPECIFIC-ID--
+        #     1 byte: 2      Channel id
+        #     1 byte: 3      Parts left
+        #     1 byte: 4      Indicating success (0) or failure (non 0)
+        #     n byte: 5...   Reply payload
+        if len(payload) > MAX_PAYLOAD_LENGTH:
+            # multi-part reply: ship MAX_PAYLOAD_LENGTH bytes now and
+            # queue the remainder from the completion callback
+            remaining_payload = payload[MAX_PAYLOAD_LENGTH:]
+
+            def _after_send_reply(exc, permid):
+                """
+                Called after the overlay attempted to send a reply message
+                """
+                if DEBUG:
+                    print >> sys.stderr, "crawler: _after_send_reply", show_permid_short(permid), exc
+                if not exc:
+                    # send the next part; send_reply re-splits as needed
+                    self.send_reply(permid, message_id, channel_id, remaining_payload, error=error)
+                # call the optional callback supplied with send_request
+                if callback:
+                    callback(exc, permid)
+
+            # 03/06/09 boudewijn: parts_left may be no larger than 255
+            # because we only use one byte to store the 'parts
+            # left'. This does not mean that there can't be more than
+            # 255 parts!
+            parts_left = min(255, int(len(payload) / MAX_PAYLOAD_LENGTH))
+            payload = payload[:MAX_PAYLOAD_LENGTH]
+
+        else:
+            # final (or only) part of the reply
+            def _after_send_reply(exc, permid):
+                if DEBUG:
+                    if exc:
+                        print >> sys.stderr, "crawler: could not send request", show_permid_short(permid), exc
+                # call the optional callback supplied with send_request
+                if callback:
+                    callback(exc, permid)
+
+            parts_left = 0
+
+            # remove from self._channels if it is still there (could
+            # have been removed during the periodic timeout check)
+            if permid in self._channels and channel_id in self._channels[permid]:
+                del self._channels[permid][channel_id]
+                if not self._channels[permid]:
+                    del self._channels[permid]
+
+        if DEBUG: print >> sys.stderr, "crawler: sending", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload to", show_permid_short(permid)
+        self._overlay_bridge.send(permid, "".join((CRAWLER_REPLY,
+                                                   message_id,
+                                                   chr(channel_id & 0xFF),
+                                                   chr(parts_left & 0xFF),
+                                                   chr(error & 0xFF),
+                                                   str(payload))), _after_send_reply)
+        return channel_id
+
+    def handle_reply(self, permid, selversion, message):
+        """
+        Received CRAWLER_REPLY message from OverlayApps
+        """
+        if selversion >= OLPROTO_VER_SEVENTH and len(message) >= 5 and message[1] in self._message_handlers:
+            
+            message_id = message[1]
+            channel_id = ord(message[2])
+            parts_left = ord(message[3])
+            error = ord(message[4])
+
+            # A request must exist in self._channels, otherwise we did
+            # not request this reply
+            if permid in self._channels and channel_id in self._channels[permid]:
+
+                # add part to buffer
+                self._channels[permid][channel_id][1] += message[5:]
+
+                if parts_left:
+                    # todo: register some event to remove the buffer
+                    # after a time (in case connection is lost before
+                    # all parts are received)
+
+                    if DEBUG: print >> sys.stderr, "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes payload from", show_permid_short(permid), "with", parts_left, "parts left"
+                    # Can't do anything until all parts have been received
+                    return True
+                else:
+                    timestamp, payload, channel_data = self._channels[permid].pop(channel_id)
+                    if DEBUG:
+                        if error == 253:
+                            # unknown message error (probably because
+                            # the crawler is newer than the peer)
+                            print >> sys.stderr, "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes payload from", show_permid_short(permid), "indicating an unknown message error"
+                        if error == 254:
+                            # frequency error (we did this request recently)
+                            print >> sys.stderr, "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes payload from", show_permid_short(permid), "indicating a frequency error"
+                        else:
+                            print >> sys.stderr, "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid)
+                    if not self._channels[permid]:
+                        del self._channels[permid]
+
+                    def send_request_helper(message_id, payload, frequency=3600, callback=None, channel_data=None):
+                        return self.send_request(permid, message_id, payload, frequency=frequency, callback=callback, channel_data=channel_data)
+
+                    # 20/10/08. Boudewijn: We will no longer
+                    # disconnect based on the return value from the
+                    # message handler
+                    try:
+                        self._message_handlers[message_id][1](permid, selversion, channel_id, channel_data, error, payload, send_request_helper)
+                    except:
+                        print_exc()
+                    return True
+            else:
+                # reply from unknown permid or channel
+                if DEBUG: print >> sys.stderr, "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(payload), "bytes payload from", show_permid_short(permid), "from unknown peer or unused channel"
+                
+        if DEBUG:
+            if len(message) >= 2:
+                message_id = message[1]
+            else:
+                message_id = ""
+            print >> sys.stderr, "crawler: received", getMessageName(CRAWLER_REPLY+message_id), "with", len(message), "bytes from", show_permid_short(permid), "from unknown peer or unused channel"
+        return False
+
+    def handle_connection(self, exc, permid, selversion, locally_initiated):
+        """
+        Called when overlay received a connection. Note that this
+        method is only registered with OverlayApps when running as a
+        crawler (determined by our public key).
+
+        @param exc None for a new connection, an exception when a
+            connection was lost
+        @param permid The remote peer permid
+        @param selversion The negotiated overlay protocol version
+        @param locally_initiated True when we initiated the connection
+        """
+        if exc:
+            # connection lost
+            if DEBUG:
+                print >>sys.stderr, "crawler: overlay connection lost", show_permid_short(permid), exc
+                print >>sys.stderr, repr(permid)
+
+        elif selversion >= OLPROTO_VER_SEVENTH:
+            # verify that we do not already have deadlines for this permid.
+            # each entry is [deadline, frequency, accept_frequency,
+            # initiator_callback, permid, selversion, failure_counter]
+            already_known = False
+            for tup in self._initiator_deadlines:
+                if tup[4] == permid:
+                    already_known = True
+                    break
+
+            if not already_known:
+                if DEBUG:
+                    print >>sys.stderr, "crawler: new overlay connection", show_permid_short(permid)
+                    print >>sys.stderr, repr(permid)
+                # deadline 0 makes every registered initiator eligible to
+                # crawl this peer on the very next deadline check
+                for initiator_callback, frequency, accept_frequency in self._crawl_initiators:
+                    self._initiator_deadlines.append([0, frequency, accept_frequency, initiator_callback, permid, selversion, 0])
+
+                self._initiator_deadlines.sort()
+
+                # Start sending crawler requests
+                self._check_deadlines(False)
+        else:
+            if DEBUG:
+                print >>sys.stderr, "crawler: new overlay connection (can not use version %d)" % selversion, show_permid_short(permid)
+                print >>sys.stderr, repr(permid)
+            
+    def _check_deadlines(self, resubmit):
+        """
+        Send requests to permid and re-register to be called again
+        after frequency seconds
+
+        @param resubmit When True, schedule this method to run again in
+            5 seconds through the overlay bridge
+        """
+        now = time.time()
+
+        # crawler side deadlines...
+        if self._initiator_deadlines:
+            # entries are [deadline, frequency, accept_frequency,
+            # initiator_callback, permid, selversion, failure_counter],
+            # kept sorted by deadline so iteration can stop at the first
+            # entry that is not yet due
+            for tup in self._initiator_deadlines:
+                deadline, frequency, accept_frequency, initiator_callback, permid, selversion, failure_counter = tup
+                if now > deadline + FREQUENCY_FLEXIBILITY:
+                    # default argument pins accept_frequency per iteration;
+                    # NOTE(review): permid/selversion are late-bound in the
+                    # closure body -- safe only if the helper is used before
+                    # the loop advances, confirm handlers do not retain it
+                    def send_request_helper(message_id, payload, frequency=accept_frequency, callback=None, channel_data=None):
+                        return self.send_request(permid, message_id, payload, frequency=frequency, callback=callback, channel_data=channel_data)
+                    # 20/10/08. Boudewijn: We will no longer disconnect
+                    # based on the return value from the message handler
+                    try:
+                        initiator_callback(permid, selversion, send_request_helper)
+                    except Exception:
+                        print_exc()
+
+                    # set new deadline
+                    tup[0] = now + frequency
+                else:
+                    break
+
+            # resort
+            self._initiator_deadlines.sort()
+
+        # client side deadlines...
+        if self._dialback_deadlines:
+
+            def _after_connect(exc, dns, permid, selversion):
+                if DEBUG:
+                    if exc:
+                        print >>sys.stderr, "crawler: dialback to crawler failed", dns, show_permid_short(permid), exc
+                    else:
+                        print >>sys.stderr, "crawler: dialback to crawler established", dns, show_permid_short(permid)
+
+            # .items() returns a list copy in Python 2, so deleting
+            # entries while iterating is safe here
+            for message_id, (deadline, permid) in self._dialback_deadlines.items():
+                if now > deadline + FREQUENCY_FLEXIBILITY:
+                    self._overlay_bridge.connect(permid, _after_connect)
+                    del self._dialback_deadlines[message_id]
+
+        if resubmit:
+            self._overlay_bridge.add_task(lambda:self._check_deadlines(True), 5)
+
+    def _check_channels(self):
+        """
+        Periodically removes permids after no connection was
+        established for a long time
+        """
+        now = time.time()
+        to_remove_permids = []
+        for permid in self._channels:
+            to_remove_channel_ids = []
+            for channel_id, (deadline, _, _) in self._channels[permid].iteritems():
+                if now > deadline:
+                    to_remove_channel_ids.append(channel_id)
+            for channel_id in to_remove_channel_ids:
+                del self._channels[permid][channel_id]
+            if not self._channels[permid]:
+                to_remove_permids.append(permid)
+        for permid in to_remove_permids:
+            del self._channels[permid]
+
+        # resubmit
+        self._overlay_bridge.add_task(self._check_channels, 60)
+
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/DatabaseCrawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/DatabaseCrawler.py
new file mode 100644 (file)
index 0000000..decc109
--- /dev/null
@@ -0,0 +1,125 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+import sys
+import cPickle
+from time import strftime
+
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_SEVENTH, OLPROTO_VER_EIGHTH, OLPROTO_VER_ELEVENTH
+# OLPROTO_VER_SEVENTH --> Sixth public release, >= 4.5.0, supports CRAWLER_REQUEST and CRAWLER_REPLY messages
+# OLPROTO_VER_EIGHTH  --> Seventh public release, >= 5.0, supporting BuddyCast with clicklog info.
+
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_DATABASE_QUERY
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
+from BaseLib.Core.Utilities.utilities import show_permid, show_permid_short
+from BaseLib.Core.Statistics.Crawler import Crawler
+
+DEBUG = False
+
+class DatabaseCrawler:
+    """
+    Handles CRAWLER_DATABASE_QUERY messages. On the crawler side it
+    sends SQL queries and appends the (unpickled) results to
+    databasecrawler.txt; on the peer side it executes the received SQL
+    against the local sqlite cache database and replies with pickled
+    result rows.
+    """
+    __single = None
+
+    @classmethod
+    def get_instance(cls, *args, **kargs):
+        # lazy singleton; no locking (assumed single-threaded creation)
+        if not cls.__single:
+            cls.__single = cls(*args, **kargs)
+        return cls.__single
+
+    def __init__(self):
+        self._sqlite_cache_db = SQLiteCacheDB.getInstance()
+
+        crawler = Crawler.get_instance()
+        if crawler.am_crawler():
+            # only an actual crawler logs to file; ordinary peers keep
+            # self._file None (the callbacks below that write to it are
+            # presumably only invoked on the crawler side -- confirm)
+            self._file = open("databasecrawler.txt", "a")
+            self._file.write("".join(("# ", "*" * 80, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n")))
+            self._file.flush()
+        else:
+            self._file = None
+
+    def query_initiator(self, permid, selversion, request_callback):
+        """
+        Established a new connection. Send a CRAWLER_DATABASE_QUERY request.
+        @param permid The Tribler peer permid
+        @param selversion The overlay protocol version
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if DEBUG: print >>sys.stderr, "databasecrawler: query_initiator", show_permid_short(permid)
+        sql = []
+        if selversion >= OLPROTO_VER_SEVENTH:
+            sql.extend(("SELECT 'peer_count', count(*) FROM Peer",
+                        "SELECT 'torrent_count', count(*) FROM Torrent"))
+
+        # NOTE(review): permid is spliced into the SQL via string
+        # concatenation. show_permid returns base64 text so injection is
+        # unlikely, but parameterized queries would be safer -- confirm.
+        if selversion >= OLPROTO_VER_ELEVENTH:
+            sql.extend(("SELECT 'my_subscriptions', count(*) FROM VoteCast where voter_id='" + show_permid(permid) + "' and vote=2",
+                        "SELECT 'my_negative_votes', count(*) FROM VoteCast where voter_id='" + show_permid(permid) + "' and vote=-1",
+                        "SELECT 'my_channel_files', count(*) FROM ChannelCast where publisher_id='" + show_permid(permid) + "'",
+                        "SELECT 'all_subscriptions', count(*) FROM VoteCast where vote=2",
+                        "SELECT 'all_negative_votes', count(*) FROM VoteCast where vote=-1"))
+
+        # if OLPROTO_VER_EIGHTH <= selversion <= 11:
+        #     sql.extend(("SELECT 'moderations_count', count(*) FROM ModerationCast"))
+
+        # if selversion >= OLPROTO_VER_EIGHTH:
+        #     sql.extend(("SELECT 'positive_votes_count', count(*) FROM Moderators where status=1",
+        #                 "SELECT 'negative_votes_count', count(*) FROM Moderators where status=-1"))
+
+        # queries are joined with ';' and executed as one batch remotely
+        request_callback(CRAWLER_DATABASE_QUERY, ";".join(sql), callback=self._after_request_callback)
+
+    def _after_request_callback(self, exc, permid):
+        """
+        Called by the Crawler with the result of the request_callback
+        call in the query_initiator method. Logs successful sends.
+        """
+        if not exc:
+            if DEBUG: print >>sys.stderr, "databasecrawler: request send to", show_permid_short(permid)
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "REQUEST", show_permid(permid), "\n")))
+            self._file.flush()
+
+    def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
+        """
+        Received a CRAWLER_DATABASE_QUERY request.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param message The message payload (SQL text to execute)
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
+        """
+        if DEBUG:
+            print >> sys.stderr, "databasecrawler: handle_crawler_request", show_permid_short(permid), message
+
+        # execute the sql
+        try:
+            cursor = self._sqlite_cache_db.execute_read(message)
+
+        except Exception, e:
+            # error 1: the query failed; ship the exception text back
+            reply_callback(str(e), error=1)
+        else:
+            if cursor:
+                # rows are pickled (protocol 2) for compact transport
+                reply_callback(cPickle.dumps(list(cursor), 2))
+            else:
+                # error 2: query produced no cursor
+                reply_callback("error", error=2)
+
+    def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
+        """
+        Received a CRAWLER_DATABASE_QUERY reply.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param error The error value. 0 indicates success.
+        @param message The message payload (pickled rows on success, error text otherwise)
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if error:
+            if DEBUG:
+                print >> sys.stderr, "databasecrawler: handle_crawler_reply", error, message
+
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  REPLY", show_permid(permid), str(error), message, "\n")))
+            self._file.flush()
+
+        else:
+            if DEBUG:
+                print >> sys.stderr, "databasecrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message)
+
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  REPLY", show_permid(permid), str(error), str(cPickle.loads(message)), "\n")))
+            self._file.flush()
+
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/FriendshipCrawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/FriendshipCrawler.py
new file mode 100644 (file)
index 0000000..3e3a3d1
--- /dev/null
@@ -0,0 +1,136 @@
+# Written by Ali Abbas
+# see LICENSE.txt for license information
+
+import sys
+import time
+from traceback import print_exc
+
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_FRIENDSHIP_STATS
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str
+
+DEBUG = False
+
+class FriendshipCrawler:
+    __single = None
+
+    @classmethod
+    def get_instance(cls, *args, **kargs):
+        if not cls.__single:
+            cls.__single = cls(*args, **kargs)
+        return cls.__single
+
+    def __init__(self,session):
+        self.session = session
+        self.friendshipStatistics_db = FriendshipStatisticsDBHandler.getInstance()
+
+    def query_initiator(self, permid, selversion, request_callback):
+        """
+        Established a new connection. Send a CRAWLER_DATABASE_QUERY request.
+        @param permid The Tribler peer permid
+        @param selversion The oberlay protocol version
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if DEBUG: 
+            print >>sys.stderr, "FriendshipCrawler: friendship_query_initiator"
+
+        get_last_updated_time = self.friendshipStatistics_db.getLastUpdateTimeOfThePeer(permid)
+         
+        msg_dict = {'current time':get_last_updated_time}
+        msg = bencode(msg_dict)
+        return request_callback(CRAWLER_FRIENDSHIP_STATS,msg)
+
+    def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
+        """
+        Received a CRAWLER_FRIENDSHIP_QUERY request.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param message The message payload
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
+        """
+        if DEBUG:
+            print >> sys.stderr, "FriendshipCrawler: handle_friendship_crawler_database_query_request", message
+
+        try:
+            d = bdecode(message)
+            
+            stats = self.getStaticsFromFriendshipStatisticsTable(self.session.get_permid(),d['current time'])
+            msg_dict = {'current time':d['current time'],'stats':stats}
+            msg = bencode(msg_dict)
+            reply_callback(msg)
+
+        except Exception, e:
+            print_exc()
+            reply_callback(str(e), 1)
+
+        return True
+
+    def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
+        """
+        Received a CRAWLER_FRIENDSHIP_STATS request.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param error The error value. 0 indicates success.
+        @param message The message payload
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+
+        if error:
+            if DEBUG:
+                print >> sys.stderr, "friendshipcrawler: handle_crawler_reply"
+                print >> sys.stderr, "friendshipcrawler: error", error, message
+
+        else:
+            try:
+                d = bdecode(message)
+            except Exception:
+                print_exc()
+            else:
+                if DEBUG:
+                    print >> sys.stderr, "friendshipcrawler: handle_crawler_reply"
+                    print >> sys.stderr, "friendshipcrawler: friendship: Got",`d`
+
+                self.saveFriendshipStatistics(permid,d['current time'],d['stats'])
+
+        return True 
+        
+    def getStaticsFromFriendshipStatisticsTable(self, mypermid, last_update_time):
+        ulist = self.friendshipStatistics_db.getAllFriendshipStatistics(mypermid, last_update_time)
+        # Arno, 2010-02-04: Make sure Unicode B64 permids are converted to str, 
+        # bencode can't do that anymore.
+        elist = []
+        for utuple in ulist:
+            etuple = []
+            for uelem in utuple:
+                if isinstance(uelem,unicode):
+                    eelem = uelem.encode("UTF-8")
+                else:
+                    eelem = uelem
+                etuple.append(eelem)
+            elist.append(etuple)
+                
+        return elist
+        
+    
+    def saveFriendshipStatistics(self,permid,currentTime,stats):
+        if stats:
+            # 20/10/08. Boudewijn: A mistake in the code results in
+            # only 7 items in the list instead of 8. We add one here
+            # to get things working.
+            for stat in stats:
+                if len(stat) == 7:
+                    stat.append(0)
+                if len(stat) == 7 or len(stat) == 8:
+                    stat.append(bin2str(permid))
+
+            self.friendshipStatistics_db.saveFriendshipStatisticData(stats)
+    
+    def getLastUpdateTime(self, permid):
+        
+        mypermid = self.session.get_permid()
+        
+        return self.friendshipStatistics_db.getLastUpdateTimeOfThePeer(permid)
+        
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/Logger.py b/instrumentation/next-share/BaseLib/Core/Statistics/Logger.py
new file mode 100644 (file)
index 0000000..198b67c
--- /dev/null
@@ -0,0 +1,220 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+#
+# Log version 3 = BuddyCast message V8
+
+import sys
+import os
+import time
+import socket
+import threading
+from traceback import print_exc
+            
+DEBUG = False
+
+log_separator = ' '
+logger = None
+
+# To be compatible with Logger from http://linux.duke.edu/projects/mini/logger/ 
+# for 2fastbt (revision <=825). 
+def create_logger(file_name):
+    # create the module-wide Logger with threshold 3 (log version 3)
+    global logger
+
+    logger = Logger(3, file_name) 
+
+
+def get_logger():    
+    # return the module-wide Logger, creating a default one ("global.log")
+    # on first use
+    global logger
+
+    if logger is None:
+        create_logger("global.log")
+        
+    return logger
+
+def get_today():    # UTC based
+    return time.gmtime(time.time())[:3]
+
+class Logger:
+    """
+    Atrributes (defulat value):
+      threshold (): message will not be logged if its output_level is bigger 
+                     than this threshould
+      file_name (): log file name
+      file_dir ('.'): directory of log file. It can be absolute or relative path.
+      prefix (''): prefix of log file
+      prefix_date (False): if it is True, insert 'YYYYMMDD-' between prefix 
+                     and file_name, e.g., sp-20060302-buddycast.log given 
+                     prefix = 'sp-' and file_name = 'buddycast.log'
+      open_mode ('a+b'): mode for open.
+    """
+    
+    def __init__(self, threshold, file_name, file_dir = '.', prefix = '', 
+                 prefix_date = False, open_mode = 'a+b'):
+        
+        self.threshold = threshold            
+        self.Log = self.log
+        if file_name == '':
+            self.logfile = sys.stderr
+        else:
+            try:
+                if not os.access(file_dir, os.F_OK):
+                    try: 
+                        os.mkdir(file_dir)
+                    except os.error, msg:
+                        raise "logger: mkdir error: " + msg
+                file_path = self.get_file_path(file_dir, prefix,
+                                               prefix_date, file_name)
+                self.logfile = open(file_path, open_mode)
+            except Exception, msg:
+                self.logfile = None
+                print >> sys.stderr, "logger: cannot open log file", \
+                         file_name, file_dir, prefix, prefix_date, msg
+                print_exc() 
+                
+    def __del__(self):
+        self.close()
+        
+    def get_file_path(self, file_dir, prefix, prefix_date, file_name):
+        if prefix_date is True:    # create a new file for each day
+            today = get_today()
+            date = "%04d%02d%02d" % today
+        else:
+            date = ''
+        return os.path.join(file_dir, prefix + date + file_name)
+    
+    def log(self, level, msg, showtime=True):
+        if level <= self.threshold:
+            if self.logfile is None: 
+                return
+            if showtime:
+                time_stamp = "%.01f"%time.time()
+                self.logfile.write(time_stamp + log_separator)
+            if isinstance(msg, str):
+                self.logfile.write(msg)
+            else:
+                self.logfile.write(repr(msg))
+            self.logfile.write('\n')
+            self.logfile.flush()
+            
+    def close(self):
+        if self.logfile is not None:
+            self.logfile.close()
+            
+
+class OverlayLogger:
+    """
+    Singleton logger for overlay (BuddyCast) events. Writes one log
+    file per UTC day through the Logger class; files are named
+    <hostname><YYYYMMDD>-<file_name> inside file_dir.
+    """
+    __single = None
+    __lock = threading.RLock()
+    
+    def __init__(self, file_name, file_dir = '.'):
+        if OverlayLogger.__single:
+            raise RuntimeError, "OverlayLogger is singleton2"
+        
+        self.file_name = file_name
+        self.file_dir = file_dir
+        OverlayLogger.__single = self
+        self.Log = self.log    # legacy alias kept for old callers
+        # NOTE(review): an instance-level __call__ makes the instance
+        # callable only for old-style classes -- confirm callers use it
+        self.__call__ = self.log
+
+    def getInstance(*args, **kw):
+        # thread-safe lazy singleton accessor
+        OverlayLogger.__lock.acquire()
+        try:
+            if OverlayLogger.__single is None:
+                OverlayLogger(*args, **kw)
+            return OverlayLogger.__single
+        finally:
+            OverlayLogger.__lock.release()
+    getInstance = staticmethod(getInstance)
+    
+    def log(self, *msgs):
+        """
+        # MSG must be the last one. Permid should be in the rear to be readable
+        BuddyCast log for superpeer format: (V2)    
+          CONN_TRY IP PORT PERMID
+          CONN_ADD IP PORT PERMID SELVERSION
+          CONN_DEL IP PORT PERMID REASON
+          SEND_MSG IP PORT PERMID SELVERSION MSG_ID MSG
+          RECV_MSG IP PORT PERMID SELVERSION MSG_ID MSG
+          
+          #BUCA_CON Permid1, Permid2, ...
+          
+          BUCA_STA xx xx xx ...    # BuddyCast status
+              1  Pr    # nPeer
+              2  Pf    # nPref
+              3  Tr    # nTorrent
+              
+              #4  Cc    # nConntionCandidates (this one was missed before v4.1, and will not be included either in this version)
+              4  Bs    # nBlockSendList
+              5  Br    # nBlockRecvList
+              
+              6  SO    # nConnectionsInSecureOver
+              7  Co    # nConnectionsInBuddyCast
+              
+              8  Ct    # nTasteConnectionList
+              9  Cr   # nRandomConnectionList
+              10  Cu   # nUnconnectableConnectionList
+        """
+        
+        log_msg = ''
+        nmsgs = len(msgs)
+        if nmsgs < 2:
+            print >> sys.stderr, "Error message for log", msgs
+            return
+        
+        else:
+            for i in range(nmsgs):
+                # tuples/lists are flattened one level; every element is
+                # stringified and followed by the separator
+                if isinstance(msgs[i], tuple) or isinstance(msgs[i], list):
+                    log_msg += log_separator
+                    for msg in msgs[i]:
+                        try:
+                            log_msg += str(msg)
+                        except:
+                            log_msg += repr(msg)
+                        log_msg += log_separator
+                else:
+                    try:
+                        log_msg += str(msgs[i])
+                    except:
+                        log_msg += repr(msgs[i])
+                    log_msg += log_separator
+                
+        if log_msg:
+            self._write_log(log_msg)
+        
+    def _write_log(self, msg):
+        # one logfile per day. 
+        today = get_today()
+        if not hasattr(self, 'today'):
+            # first write ever: create the initial day-logger
+            self.logger = self._make_logger(today)
+        elif today != self.today:    # make a new log if a new day comes
+            self.logger.close()
+            self.logger = self._make_logger(today)
+        self.logger.log(3, msg)
+            
+    def _make_logger(self, today):
+        # remember the day this logger was created for, for rollover checks
+        self.today = today
+        hostname = socket.gethostname()
+        logger = Logger(3, self.file_name, self.file_dir, hostname, True)
+        logger.log(3, '# Tribler Overlay Log Version 3', showtime=False)    # mention the log version at the first line
+        logger.log(3, '# BUCA_STA: nRound   nPeer nPref nTorrent   ' + \
+                   'nBlockSendList nBlockRecvList   ' + \
+                   'nConnectionsInSecureOver nConnectionsInBuddyCast  ' + \
+                   'nTasteConnectionList nRandomConnectionList nUnconnectableConnectionList', 
+                   showtime=False)
+        logger.log(3, '# BUCA_STA: Rd  Pr Pf Tr  Bs Br  SO Co  Ct Cr Cu', showtime=False)
+        return logger
+        
+if __name__ == '__main__':
+    # ad-hoc smoke test: exercise the module-level logger and the
+    # OverlayLogger with one message of each supported kind
+    create_logger('test.log')
+    get_logger().log(1, 'abc' + ' ' + str(['abc', 1, (2,3)]))
+    get_logger().log(0, [1,'a',{(2,3):'asfadf'}])
+    #get_logger().log(1, open('log').read())
+    
+    ol = OverlayLogger('overlay.log')
+    ol.log('CONN_TRY', '123.34.3.45', 34, 'asdfasdfasdfasdfsadf')
+    ol.log('CONN_ADD', '123.34.3.45', 36, 'asdfasdfasdfasdfsadf', 3)
+    ol.log('CONN_DEL', '123.34.3.45', 38, 'asdfasdfasdfasdfsadf', 'asbc')
+    ol.log('SEND_MSG', '123.34.3.45', 39, 'asdfasdfasdfasdfsadf', 2, 'BC', 'abadsfasdfasf')
+    ol.log('RECV_MSG', '123.34.3.45', 30, 'asdfasdfasdfasdfsadf', 3, 'BC', 'bbbbbbbbbbbbb')
+    ol.log('BUCA_STA', (1,2,3), (4,5,6), (7,8), (9,10,11))
+    ol.log('BUCA_CON', ['asfd','bsdf','wevs','wwrewv'])
+    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/PunctureCrawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/PunctureCrawler.py
new file mode 100644 (file)
index 0000000..d776e54
--- /dev/null
@@ -0,0 +1,165 @@
+# Written by Gertjan Halkes
+# see LICENSE.txt for license information
+
+# Crawler and logging module for UDPPuncture testing
+
+from BaseLib.Core.Session import Session
+from BaseLib.Core.Statistics.Crawler import Crawler
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_PUNCTURE_QUERY
+from BaseLib.Core.Utilities.utilities import show_permid, show_permid_short
+import os
+import time
+import sys
+import zlib
+import thread
+
+DEBUG = False
+
+def get_reporter_instance():
+    # Module-level convenience accessor for the singleton puncture-log writer.
+    return SimpleFileReporter.get_instance()
+
+class SimpleFileReporter:
+    """
+    Thread-safe singleton that appends timestamped event lines to
+    'udppuncture.log' in the session state directory.  The file is opened
+    lazily on the first add_event() call and then kept open; the class-level
+    lock guards both singleton creation and all file writes (PunctureCrawler
+    takes the same lock when it reads/truncates the file).
+    """
+    __single = None
+    lock = thread.allocate_lock()
+
+    @classmethod
+    def get_instance(cls, *args, **kw):
+        # Locked singleton accessor; *args/**kw are accepted but unused
+        # because the constructor takes no arguments.
+        cls.lock.acquire()
+        try:
+            if not cls.__single:
+                cls.__single = cls()
+        finally:
+            cls.lock.release()
+        return cls.__single
+    
+    def __init__(self):
+        # File handle is created lazily so construction never fails on I/O.
+        self.file = None
+        self.path = os.path.join(Session.get_instance().get_state_dir(), "udppuncture.log")
+
+    def add_event(self, ignore, msg):
+        # Append "<unix-time> <msg>" to the log.  'ignore' is accepted only
+        # for interface compatibility with other reporters and is unused.
+        # Write errors are swallowed (logging must never break the caller);
+        # they are printed to stderr only when DEBUG is set.
+        SimpleFileReporter.lock.acquire()
+        try:
+            try:
+                if not self.file:
+                    self.file = open(self.path, 'a+b')
+                self.file.write('%.2f %s\n' %(time.time(), msg))
+                self.file.flush()
+            except:
+                if DEBUG:
+                    print >>sys.stderr, 'Error writing puncture log'
+        finally:
+            SimpleFileReporter.lock.release()
+
+class PunctureCrawler:
+    """
+    Two-sided crawler module for UDP puncture statistics.  On the crawler
+    node it sends CRAWLER_PUNCTURE_QUERY requests and appends replies to
+    'puncturecrawler.txt'; on a regular peer it serves those requests by
+    shipping (and then truncating) the SimpleFileReporter log.
+    """
+    __single = None
+
+    @classmethod
+    def get_instance(cls, *args, **kargs):
+        # Singleton accessor.  NOTE(review): unlike SimpleFileReporter this
+        # is not lock-protected — presumably only called from one thread;
+        # confirm against the overlay thread model.
+        if not cls.__single:
+            cls.__single = cls(*args, **kargs)
+        return cls.__single
+
+    def __init__(self):
+        # Crawler role: open the aggregate output file.
+        # Peer role: hold a reference to the local SimpleFileReporter.
+        crawler = Crawler.get_instance()
+        if crawler.am_crawler():
+            self._file = open("puncturecrawler.txt", "a")
+            self._file.write("# Crawler started at %.2f\n" % time.time())
+            self._file.flush()
+            # NOTE(review): _repexlog looks like a copy-paste leftover from
+            # RepexCrawler; it is never used in this class.
+            self._repexlog = None
+        else:
+            self.reporter = get_reporter_instance()
+
+    def query_initiator(self, permid, selversion, request_callback):
+        """
+        Established a new connection. Send a CRAWLER_PUNCTURE_QUERY request.
+        @param permid The Tribler peer permid
+        @param selversion The overlay protocol version
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        request_callback(CRAWLER_PUNCTURE_QUERY, '', callback=self._after_request_callback)
+
+    def _after_request_callback(self, exc, permid):
+        """
+        Called by the Crawler with the result of the request_callback
+        call in the query_initiator method.
+        """
+        # Only log successful sends; crawler role only (uses self._file).
+        if not exc:
+            if DEBUG: print >>sys.stderr, "puncturecrawler: request sent to", show_permid_short(permid)
+            self._file.write("REQUEST %s %.2f\n" % (show_permid(permid), time.time()))
+            self._file.flush()
+
+    def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
+        """
+        Received a CRAWLER_PUNCTURE_QUERY request (peer side).  Replies with
+        the zlib-compressed contents of the local puncture log, prefixed by a
+        CRAWL timestamp line, and truncates the log on success.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param message The message payload
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
+        """
+        if DEBUG:
+            print >> sys.stderr, "puncturecrawler: handle_crawler_request", show_permid_short(permid), message
+
+        # Take the reporter's lock so no add_event() interleaves with the
+        # read-and-truncate below.
+        SimpleFileReporter.lock.acquire()
+        try:
+            if not self.reporter.file:
+                try:
+                    self.reporter.file = open(self.reporter.path, 'a+b')
+                except Exception, e:
+                    reply_callback(str(e), error=1)
+                    return
+
+            file = self.reporter.file
+            try:
+                file.seek(0)
+                result = ("%.2f CRAWL\n" % time.time()) + file.read()
+                result = zlib.compress(result)
+                reply_callback(result)
+                # Log shipped successfully: discard it so data is not resent.
+                file.truncate(0)
+            except Exception, e:
+                reply_callback(str(e), error=1)
+            # Regardless of whether the whole operation succeeds, make sure that we continue writing at end of file
+            try:
+                file.seek(0, os.SEEK_END)
+            except:
+                pass
+        finally:
+            SimpleFileReporter.lock.release()
+
+    def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
+        """
+        Received a CRAWLER_PUNCTURE_QUERY reply (crawler side).  Appends the
+        decompressed peer log — or an ERROR record — to puncturecrawler.txt.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param error The error value. 0 indicates success.
+        @param message The message payload
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        try:
+            if error:
+                if DEBUG:
+                    print >> sys.stderr, "puncturecrawler: handle_crawler_reply", error, message
+
+                self._file.write("ERROR %s %.2f %d %s\n" % (show_permid(permid), time.time(), error, message))
+                self._file.flush()
+
+            else:
+                if DEBUG:
+                    print >> sys.stderr, "puncturecrawler: handle_crawler_reply", show_permid_short(permid)
+
+                # 25/05/10 Boudewijn: We found that, for unknown
+                # reasons, the decompressed(message) contains many
+                # gigabytes worth of \0 characters.  For now we filter
+                # them out until Gert Jan can determine the actual
+                # cause.
+                data = zlib.decompress(message)
+                filtered = filter(lambda char: char != "\0", data)
+
+                self._file.write("REPLY %s %.2f\n" % (show_permid(permid), time.time()))
+                self._file.write("# reply sizes: on-the-wire=%d, decompressed=%d, filtered=%d\n" % (len(message), len(data), len(filtered)))
+                self._file.write(filtered)
+                self._file.flush()
+        except:
+            # Best-effort logging: never let a write failure propagate into
+            # the crawler message loop.
+            if DEBUG:
+                print >>sys.stderr, "puncturecrawler: error writing to file"
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/RepexCrawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/RepexCrawler.py
new file mode 100644 (file)
index 0000000..a3cef81
--- /dev/null
@@ -0,0 +1,115 @@
+# Based on DatabaseCrawler.py written by Boudewijn Schoon\r
+# Modified by Raynor Vliegendhart\r
+# see LICENSE.txt for license information\r
+\r
+import sys\r
+import cPickle\r
+import base64\r
+from time import strftime\r
+\r
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_REPEX_QUERY\r
+from BaseLib.Core.Utilities.utilities import show_permid, show_permid_short\r
+from BaseLib.Core.Statistics.Crawler import Crawler\r
+\r
+from BaseLib.Core.DecentralizedTracking.repex import RePEXLogDB\r
+\r
+DEBUG = False\r
+\r
+"""\r
+repexcrawler.txt:\r
+\r
+# ******************************************************************************\r
+# 2009/10/14 10:12:46 Crawler started\r
+2009/10/14 10:14:03; REQUEST; permid;\r
+2009/10/14 10:17:42;   REPLY; permid; 0; base64_pickle_peerhistory;\r
+2009/10/14 10:19:54;   REPLY; permid; 1; exception_msg;\r
+"""\r
+\r
+class RepexCrawler:\r
+    """\r
+    Two-sided crawler module for RePEX (re-exchange) statistics.  On the\r
+    crawler node it sends CRAWLER_REPEX_QUERY requests and appends replies\r
+    to 'repexcrawler.txt' (format documented at module level); on a regular\r
+    peer it serves those requests from the local RePEXLogDB.\r
+    """\r
+    __single = None\r
+\r
+    @classmethod\r
+    def get_instance(cls, *args, **kargs):\r
+        # Unlocked singleton accessor; constructor args are forwarded on\r
+        # first call only.\r
+        if not cls.__single:\r
+            cls.__single = cls(*args, **kargs)\r
+        return cls.__single\r
+\r
+    def __init__(self,session):\r
+        # Crawler role: open the aggregate output file and write a banner.\r
+        # Peer role: hold the RePEX history database for this session.\r
+        crawler = Crawler.get_instance()\r
+        if crawler.am_crawler():\r
+            self._file = open("repexcrawler.txt", "a")\r
+            self._file.write("".join(("# ", "*" * 78, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n")))\r
+            self._file.flush()\r
+            self._repexlog = None\r
+        else:\r
+            self._file = None\r
+            self._repexlog = RePEXLogDB.getInstance(session)\r
+\r
+    def query_initiator(self, permid, selversion, request_callback):\r
+        """\r
+        Established a new connection. Send a CRAWLER_REPEX_QUERY request.\r
+        @param permid The Tribler peer permid\r
+        @param selversion The overlay protocol version\r
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)\r
+        """\r
+        if DEBUG: print >>sys.stderr, "repexcrawler: query_initiator", show_permid_short(permid)\r
+        \r
+        request_callback(CRAWLER_REPEX_QUERY, '', callback=self._after_request_callback)\r
+\r
+    def _after_request_callback(self, exc, permid):\r
+        """\r
+        Called by the Crawler with the result of the request_callback\r
+        call in the query_initiator method.\r
+        """\r
+        # Only log successful sends; crawler role only (uses self._file).\r
+        if not exc:\r
+            if DEBUG: print >>sys.stderr, "repexcrawler: request sent to", show_permid_short(permid)\r
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "REQUEST", show_permid(permid), "\n")))\r
+            self._file.flush()\r
+\r
+    def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):\r
+        """\r
+        Received a CRAWLER_REPEX_QUERY request (peer side).  Replies with the\r
+        cPickled RePEX history from the local database.\r
+        @param permid The Crawler permid\r
+        @param selversion The overlay protocol version\r
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair\r
+        @param message The message payload\r
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])\r
+        """\r
+        if DEBUG:\r
+            print >> sys.stderr, "repexcrawler: handle_crawler_request", show_permid_short(permid), message\r
+\r
+        # retrieve repex history\r
+        # NOTE(review): getHistoryAndCleanup presumably also clears the\r
+        # shipped entries — verify against RePEXLogDB.\r
+        try:\r
+            repexhistory = self._repexlog.getHistoryAndCleanup()\r
+\r
+        except Exception, e:\r
+            reply_callback(str(e), error=1)\r
+        else:\r
+            reply_callback(cPickle.dumps(repexhistory, 2))\r
+\r
+    def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):\r
+        """\r
+        Received a CRAWLER_REPEX_QUERY reply (crawler side).  Appends a REPLY\r
+        record to repexcrawler.txt; successful payloads are base64-encoded\r
+        to keep the log line-parseable.\r
+        @param permid The Crawler permid\r
+        @param selversion The overlay protocol version\r
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair\r
+        @param error The error value. 0 indicates success.\r
+        @param message The message payload\r
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)\r
+        """\r
+        if error:\r
+            if DEBUG:\r
+                print >> sys.stderr, "repexcrawler: handle_crawler_reply", error, message\r
+\r
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  REPLY", show_permid(permid), str(error), message, "\n")))\r
+            self._file.flush()\r
+\r
+        else:\r
+            if DEBUG:\r
+                print >> sys.stderr, "repexcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message)\r
+            \r
+            # The message is pickled, which we will just write to file.\r
+            # To make later parsing easier, we base64 encode it\r
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  REPLY", show_permid(permid), str(error), base64.b64encode(message), "\n")))\r
+            self._file.flush()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/SeedingStatsCrawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/SeedingStatsCrawler.py
new file mode 100644 (file)
index 0000000..46c091e
--- /dev/null
@@ -0,0 +1,178 @@
+# Written by Boxun Zhang, Boudewijn Schoon
+# see LICENSE.txt for license information
+
+import sys
+import cPickle
+
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_SEEDINGSTATS_QUERY
+from BaseLib.Core.CacheDB.SqliteSeedingStatsCacheDB import *
+
+DEBUG = False
+
+class SeedingStatsCrawler:
+    """
+    Crawler module for seeding statistics.  The crawler side sends
+    CRAWLER_SEEDINGSTATS_QUERY messages carrying pickled SQL; the peer side
+    executes that SQL against its local SeedingStats database and returns
+    the pickled results.
+    """
+    __single = None
+
+    @classmethod
+    def get_instance(cls, *args, **kargs):
+        # Unlocked singleton accessor; constructor args are forwarded on
+        # first call only.
+        if not cls.__single:
+            cls.__single = cls(*args, **kargs)
+        return cls.__single
+
+    def __init__(self):
+        self._sqlite_cache_db = SQLiteSeedingStatsCacheDB.getInstance()
+    
+    def query_initiator(self, permid, selversion, request_callback):
+        """
+        Established a new connection. Send a CRAWLER_SEEDINGSTATS_QUERY
+        request containing a pickled [(action, query), ...] list that reads
+        the un-crawled rows and then marks them as crawled.
+        @param permid The Tribler peer permid
+        @param selversion The overlay protocol version
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if DEBUG: 
+            # NOTE(review): debug label says update_settings_initiator —
+            # looks copy-pasted from the method below.
+            print >>sys.stderr, "crawler: SeedingStatsDB_update_settings_initiator"
+        read_query = "SELECT * FROM SeedingStats WHERE crawled = 0"
+        write_query = "UPDATE SeedingStats SET crawled = 1 WHERE crawled = 0"
+        return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps([("read", read_query), ("write", write_query)], 2))
+    
+    def update_settings_initiator(self, permid, selversion, request_callback):
+        """
+        Established a new connection. Send a CRAWLER_SEEDINGSTATS_QUERY
+        request that updates the remote peer's crawling settings.
+        @param permid The Tribler peer permid
+        @param selversion The overlay protocol version
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if DEBUG: 
+            print >>sys.stderr, "crawler: SeedingStatsDB_update_settings_initiator"
+        
+        # NOTE(review): the try block only formats a constant string and
+        # cannot realistically fail; print_exc is not imported in this file's
+        # visible imports — presumably provided by the wildcard import above.
+        try:
+            sql_update = "UPDATE SeedingStatsSettings SET crawling_interval=%s WHERE crawling_enabled=%s"%(1800, 1)
+        except:
+            print_exc()
+        else:
+            return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps(sql_update, 2))
+               
+    
+    def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
+        """
+        Received a CRAWLER_SEEDINGSTATS_QUERY request (peer side).
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param message The message payload
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
+
+        MESSAGE contains a cPickled list. Each list element is a
+        tuple. Each tuple consists of a string (either 'read' or
+        'write') and a string (the query)
+        """
+        if DEBUG:
+            print >> sys.stderr, "crawler: handle_crawler_request", len(message)
+
+        # Execute each (action, query) pair in order; collect one result
+        # entry per query (a row list, or None when the cursor is empty).
+        results = []
+        try:
+            items = cPickle.loads(message)
+            if DEBUG:
+                print >> sys.stderr, "crawler: handle_crawler_request", items
+
+            for action, query in items:
+                if action == "read":
+                    cursor = self._sqlite_cache_db.execute_read(query)
+                elif action == "write":
+                    cursor = self._sqlite_cache_db.execute_write(query)
+                else:
+                    raise Exception("invalid payload")
+
+                if cursor:
+                    results.append(list(cursor))
+                else:
+                    results.append(None)
+        except Exception, e:
+            if DEBUG:
+                print >> sys.stderr, "crawler: handle_crawler_request", e
+            # On failure, reply with the partial results plus the error text
+            # and a non-zero error flag.
+            results.append(str(e))
+            reply_callback(cPickle.dumps(results, 2), 1)
+        else:
+            reply_callback(cPickle.dumps(results, 2))
+
+        return True
+
+
+    def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, reply_callback):
+        """
+        Received a CRAWLER_SEEDINGSTATS_QUERY reply (crawler side).  On
+        success, inserts the returned rows into the local SeedingStats table.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param error The error value. 0 indicates success.
+        @param message The message payload
+        @param reply_callback (unused here beyond the Crawler interface)
+        """
+        if error:
+            if DEBUG:
+                print >> sys.stderr, "seedingstatscrawler: handle_crawler_reply"
+                print >> sys.stderr, "seedingstatscrawler: error", error
+
+        else:
+            try:
+                results = cPickle.loads(message)
+
+                if DEBUG:
+                    print >> sys.stderr, "seedingstatscrawler: handle_crawler_reply"
+                    print >> sys.stderr, "seedingstatscrawler:", results
+
+                # the first item in the list contains the results from the select query
+                if results[0]:
+                    values = map(tuple, results[0])
+                    self._sqlite_cache_db.insertMany("SeedingStats", values)
+            except Exception, e:
+
+                # 04/11/08 boudewijn: cPickle.loads(...) sometimes
+                # results in EOFError. This may be caused by message
+                # being interpreted as non-binary.
+                f = open("seedingstats-EOFError.data", "ab")
+                f.write("--\n%s\n--\n" % message)
+                f.close()
+
+                print_exc()
+                return False
+
+        return True
+
+    
+    def handle_crawler_update_settings_request(self, permid, selversion, channel_id, message, reply_callback):
+        """
+        Received a CRAWLER_SEEDINGSTATS_QUERY settings-update request (peer
+        side): unpickle a single SQL statement and execute it.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param message The message payload
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
+        """
+        if DEBUG:
+            print >> sys.stderr, "crawler: handle_crawler_SeedingStats_request", message
+
+        # execute the sql
+        sql_update = cPickle.loads(message)
+        
+        try:
+            self._sqlite_cache_db.execute_write(sql_update)
+        except Exception, e:
+            reply_callback(str(e), 1)
+        else:
+            reply_callback(cPickle.dumps('Update succeeded.', 2))
+        
+        return True
+
+    def handle_crawler_update_setings_reply(self, permid, selversion, channel_id, message, reply_callback):
+        """
+        Received a settings-update reply (crawler side); the reply is ignored.
+        NOTE(review): method name misspells 'settings' — kept as-is because
+        external callers may reference this exact name.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param message The message payload
+        @param reply_callback (unused)
+        """
+        if DEBUG:
+            print >> sys.stderr, "olapps: handle_crawler_SeedingStats_reply"
+
+        return True
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/Status/LivingLabReporter.py b/instrumentation/next-share/BaseLib/Core/Statistics/Status/LivingLabReporter.py
new file mode 100644 (file)
index 0000000..50f201d
--- /dev/null
@@ -0,0 +1,220 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+#
+# Arno TODO: Merge with Core/Statistics/Status/*
+#
+
+import time
+import sys
+
+import httplib
+
+import XmlPrinter
+import xml.dom.minidom
+
+import Status
+from BaseLib.Core.Utilities.timeouturlopen import find_proxy
+
+STRESSTEST = False
+DEBUG = False
+
+
+class LivingLabPeriodicReporter(Status.PeriodicStatusReporter):
+    """
+    This reporter creates an XML report of the status elements
+    that are registered and sends them using an HTTP Post at
+    the given interval.  Made to work with the P2P-Next lab.
+    """
+    
+    host = "p2pnext-statistics.comp.lancs.ac.uk"
+    #path = "/testpost/"
+    path = "/post/"
+    
+    def __init__(self, name, frequency, id, error_handler=None,
+                 print_post=False):
+        """
+        Periodically report to the P2P-Next living lab status service
+
+        name: The name of this reporter (ignored)
+        frequency: How often (in seconds) to report
+        id: The ID of this device (e.g. permid)
+        error_handler: Optional error handler that will be called if the
+        port fails
+        print_post: Print post to stderr when posting to the lab (largely
+        useful for debugging)
+        
+        """
+        Status.PeriodicStatusReporter.__init__(self,
+                                               name,
+                                               frequency,
+                                               error_handler)
+        self.device_id = id
+        self.print_post = print_post 
+        self.num_reports = 0
+
+    def new_element(self, doc, name, value):
+        """
+        Helper function to save some lines of code
+        """
+        # Create <name>str(value)</name> under the given DOM document.
+        element = doc.createElement(name)
+        value = doc.createTextNode(str(value))
+        element.appendChild(value)
+        
+        return element
+        
+    def report(self):
+        """
+        Create the report in XML and send it
+        """
+
+        # Create the report
+        doc = xml.dom.minidom.Document()
+        root = doc.createElement("nextsharedata")
+        doc.appendChild(root)
+        
+        # Create the header
+        header = doc.createElement("header")
+        root.appendChild(header)
+        header.appendChild(self.new_element(doc, "deviceid", self.device_id))
+        header.appendChild(self.new_element(doc, "timestamp",
+                                           long(round(time.time()))))
+        
+        version = "cs_v2a"
+        header.appendChild(self.new_element(doc, "swversion", version))
+        
+
+        elements = self.get_elements()
+        if len(elements) > 0:
+        
+            # Now add the status elements
+            # NOTE(review): this inner check duplicates the one directly
+            # above and is always true here.
+            if len(elements) > 0:
+                report = doc.createElement("event")
+                root.appendChild(report)
+
+                report.appendChild(self.new_element(doc, "attribute",
+                                                   "statusreport"))
+                report.appendChild(self.new_element(doc, "timestamp",
+                                                   long(round(time.time()))))
+                for element in elements:
+                    # NOTE(review): stray debug print to stdout — probably
+                    # left over from development.
+                    print element.__class__
+                    report.appendChild(self.new_element(doc,
+                                                       element.get_name(),
+                                                       element.get_value()))
+
+        events = self.get_events()
+        if len(events) > 0:
+            for event in events:
+                report = doc.createElement(event.get_type())
+                root.appendChild(report)
+                report.appendChild(self.new_element(doc, "attribute",
+                                                   event.get_name()))
+                # Point events carry one timestamp; range events carry a
+                # start/end pair.
+                if event.__class__ == Status.EventElement:
+                    report.appendChild(self.new_element(doc, "timestamp",
+                                                       event.get_time()))
+                elif event.__class__ == Status.RangeElement:
+                    report.appendChild(self.new_element(doc, "starttimestamp",
+                                                       event.get_start_time()))
+                    
+                    report.appendChild(self.new_element(doc, "endtimestamp",
+                                                       event.get_end_time()))
+                for value in event.get_values():
+                    report.appendChild(self.new_element(doc, "value", value))
+                    
+        if len(elements) == 0 and len(events) == 0:
+            return # Was nothing here for us
+        
+        # all done
+        xml_printer = XmlPrinter.XmlPrinter(root)
+        if self.print_post:
+            print >> sys.stderr, xml_printer.to_pretty_xml()
+        xml_str = xml_printer.to_xml()
+
+        # Now we send this to the service using a HTTP POST
+        self.post(xml_str)
+
+    def post(self, xml_str):
+        """
+        Post a status report to the living lab using multipart/form-data
+        This is a bit on the messy side, but it does work
+        """
+
+        #print >>sys.stderr, xml_str
+        
+        self.num_reports += 1
+        
+        # Hand-rolled multipart/form-data body with a single XML part named
+        # "NextShareData".
+        boundary = "------------------ThE_bOuNdArY_iS_hErE_$"
+        headers = {"Host":self.host,
+                   "User-Agent":"NextShare status reporter 2009.4",
+                   "Content-Type":"multipart/form-data; boundary=" + boundary}
+
+        base = ["--" + boundary]
+        base.append('Content-Disposition: form-data; name="NextShareData"; filename="NextShareData"')
+        base.append("Content-Type: text/xml")
+        base.append("")
+        base.append(xml_str)
+        base.append("--" + boundary + "--")
+        base.append("")
+        base.append("")
+        body = "\r\n".join(base)
+
+        # Arno, 2010-03-09: Make proxy aware and use modern httplib classes
+        wanturl = 'http://'+self.host+self.path
+        proxyhost = find_proxy(wanturl)
+        if proxyhost is None:
+            desthost = self.host
+            desturl = self.path
+        else:
+            # Via proxy: connect to the proxy and request the absolute URL.
+            desthost = proxyhost
+            desturl = wanturl
+
+        h = httplib.HTTPConnection(desthost)
+        h.putrequest("POST", desturl)
+        h.putheader("Host",self.host)
+        h.putheader("User-Agent","NextShare status reporter 2010.3")
+        h.putheader("Content-Type", "multipart/form-data; boundary=" + boundary)
+        h.putheader("Content-Length",str(len(body)))
+        h.endheaders()
+        h.send(body)
+
+        resp = h.getresponse()
+        if DEBUG:
+            # print >>sys.stderr, "LivingLabReporter:\n", xml_str
+            print >>sys.stderr, "LivingLabReporter:", `resp.status`, `resp.reason`, "\n", resp.getheaders(), "\n", resp.read().replace("\\n", "\n")
+
+        if resp.status != 200:
+            if self.error_handler:
+                try:
+                    self.error_handler(resp.status, resp.read())
+                except Exception, e:
+                    pass
+            else:
+                # NOTE(review): 'errcode' and 'h.file' are undefined here —
+                # this branch would raise NameError; probably meant
+                # resp.status and resp.read().
+                print >> sys.stderr, "Error posting but no error handler:", \
+                      errcode, h.file.read()
+        
+
+if __name__ == "__main__":
+    """
+    Small test routine to check an actual post (unittest checks locally)
+    """
+
+    status = Status.get_status_holder("UnitTest")
+    def test_error_handler(code, message):
+        """
+        Test error-handler
+        """
+        print "Error:", code, message
+        
+    # NOTE(review): the constructor signature is (name, frequency, id,
+    # error_handler=None); this call passes the handler as the third
+    # positional argument, so it lands in 'id' and error_handler stays None.
+    reporter = LivingLabPeriodicReporter("Living lab test reporter",
+                                         1.0, test_error_handler)
+    status.add_reporter(reporter)
+    s = status.create_status_element("TestString", "A test string")
+    s.set_value("Hi from Njaal")
+
+    # Give the 1-second periodic reporter time to fire at least once.
+    time.sleep(2)
+
+    print "Stopping reporter"
+    reporter.stop()
+
+    print "Sent %d reports"% reporter.num_reports
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/Status/Status.py b/instrumentation/next-share/BaseLib/Core/Statistics/Status/Status.py
new file mode 100644 (file)
index 0000000..596fcbc
--- /dev/null
@@ -0,0 +1,560 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+import threading
+import time
+
+# Factory vars
+global status_holders
+status_holders = {}
+global status_lock
+status_lock = threading.Lock()
+
+def get_status_holder(name):
+    global status_lock
+    global status_holders
+    status_lock.acquire()
+    try:
+        if not name in status_holders:
+            status_holders[name] = StatusHolder(name)
+
+        return status_holders[name]
+    finally:
+        status_lock.release()
+
+# Exception hierarchy: catch StatusException to handle any status error.
+class StatusException(Exception):
+    """
+    Parent exception for all status based exceptions
+    """
+    pass
+
+class NoSuchElementException(StatusException):
+    """
+    No such element found
+    """
+    pass
+
+class NoSuchReporterException(StatusException):
+    """
+    Unknown reporter
+    """
+    pass
+
+# Policies
+#ON_CHANGE = 1
+#PERIODIC = 2
+
+class StatusHolder:
+
+    """
+    A class to hold (and report) status information for an application.
+    A status holder can have multiple reporters, that will report status
+    information on change or periodically.
+
+    """
+
+
+    def __init__(self, name):
+        """
+        Do not create new status objects if you don't know what you're doing.
+        Use the getStatusHolder() function to retrieve status objects.
+        """
+        self.name = name
+        self.elements = {}
+        self.reporters = {}
+        self.lock = threading.Lock()
+        self.events = []
+
+    def reset(self):
+        """
+        Reset everything to blanks!
+        """
+        self.elements = {}
+        self.reporters = {}
+        self.events = []
+
+    def get_name(self):
+        """
+        Return the name of this status holder
+        """
+        return self.name
+
+    def get_reporter(self, name):
+        """
+        Get a given reporter from the status holder, using the name of the
+        reporter.
+        """
+        assert name
+        
+        self.lock.acquire()
+        try:
+            if not name in self.reporters:
+                raise Exception("No such reporter '%s'"%name)
+            return self.reporters[name]
+        finally:
+            self.lock.release()
+            
+    def add_reporter(self, reporter):
+        """
+        Add a reporter to this status object.
+        """
+        assert reporter
+        
+        self.lock.acquire()
+        try:
+            if reporter.name in self.reporters:
+                raise Exception("Already have reporter '%s' registered"% \
+                                reporter.name)
+            self.reporters[reporter.name] = reporter
+
+            # The reporter must contact me later
+            reporter.add_status_holder(self)
+            
+            # If we have any other reporters, copy the elements
+            # to the new one
+            for element in self.elements.values():
+                reporter.add_element(element)
+        finally:
+            self.lock.release()
+            
+
+    def _add_element(self, new_element):
+        for reporter in self.reporters.values():
+            reporter.add_element(new_element)
+        
+
+    def create_status_element(self, name, initial_value=None):
+        assert name
+        
+        new_element = StatusElement(name, initial_value)
+
+        self.lock.acquire()
+        try:
+            if name in self.elements:
+                raise Exception("Already have a status element with the given name")
+            self.elements[name] = new_element
+            self._add_element(new_element)
+        finally:
+            self.lock.release()
+
+        return new_element
+            
+    def get_status_element(self, name):
+        """
+        Get a status element from the Status Holder by name
+        """
+        assert name
+        
+        self.lock.acquire()
+        try:
+            if not name in self.elements:
+                raise NoSuchElementException(name)
+            return self.elements[name]
+        finally:
+            self.lock.release()
+        
+    def get_or_create_status_element(self, name, initial_value=None):
+        self.lock.acquire()
+        if not name in self.elements:
+            self.lock.release()
+            return self.create_status_element(name, initial_value)
+        try:
+            return self.elements[name]
+        finally:
+            self.lock.release()
+                                     
+    def remove_status_element(self, element):
+        """
+        Remove a status element
+        """
+        assert element
+        
+        self.lock.acquire()
+        try:
+            if not element.name in self.elements:
+                raise NoSuchElementException(element.name)
+            del self.elements[element.name]
+
+            # Also remove this element to the policy
+            for reporter in self.reporters.values():
+                # TODO: More elegant here
+                try:
+                    reporter.remove_element(element)
+                except:
+                    pass
+
+        finally:
+            self.lock.release()
+            
+    def create_event(self, name, values=[]):
+        return EventElement(name, values)
+
+    def add_event(self, event):
+        self.lock.acquire()
+        try:
+            self.events.append(event)
+            self._add_element(event)
+        finally:
+            self.lock.release()
+
+    def remove_range(self, range):
+        self.remove_event(range)
+        
+    def remove_event(self, event):
+        self.lock.acquire()
+        try:
+            if event in self.events:
+                self.events.remove(event)
+        finally:
+            self.lock.release()
+        
+    def create_and_add_event(self, name, values=[]):
+        self.add_event(self.create_event(name, values))
+
+    def create_range(self, name, values=[]):
+        return RangeElement(name, values)
+
+    def add_range(self, range):
+        self.add_event(range)
+        
+    def create_and_add_range(self, name, values=[]):
+        self.add_range(self.create_range(name, values))
+
+    def get_elements(self):
+        """
+        Reporters will use this to get a copy of all
+        elements that should be reported
+        """
+        self.lock.acquire()
+        try:
+            return self.elements.values()[:]
+        finally:
+            self.lock.release()
+
+    def get_events(self):
+        """
+        Reporters will use this to get a copy of all
+        events that should be reported
+        """
+        self.lock.acquire()
+        try:
+            events = self.events
+            self.events = []
+            return events
+        finally:
+            self.lock.release()
+
+        
+        
+class BaseElement:
+    type = "BaseElement"
+
+    def __init__(self, name):
+        """
+        Create a new element.  DO NOT USE THIS - use
+        create_status_element() using a Status Holder object
+        """
+        assert name
+        self.name = name
+        self.callbacks = []
+        self.lock = threading.Lock()
+
+    def get_type(self):
+        return self.type
+
+    def add_callback(self, callback):
+        """
+        Add a callback that will be executed when this element is changed.
+        The callback function will be passed the status element itself
+        """
+        self.callbacks.append(callback)
+
+    def remove_callback(self, callback):
+        """
+        Remove an already registered callback
+        """
+        if not callback in self.callbacks:
+            raise Exception("Cannot remove unknown callback")
+
+    def get_name(self):
+        return self.name
+
+                           
+    def _updated(self):
+        """
+        When a status element is changed, this method must be called to
+        notify any reporters
+        """
+
+        # TODO: Lock or make a copy?
+        
+        for callback in self.callbacks:
+            try:
+                callback(self)
+            except Exception, e:
+                import sys
+                print >> sys.stderr, "Exception in callback", \
+                      callback,"for parameter",self.name,":",e
+
+        
+class StatusElement(BaseElement):
+    """
+    Class to hold status information
+    """
+    type = "status report"
+
+    def __init__(self, name, initial_value=None):
+        """
+        Create a new element.  DO NOT USE THIS - use
+        create_status_element() using a Status Holder object
+        """
+        BaseElement.__init__(self, name)
+        self.value = initial_value
+
+    def set_value(self, value):
+        """
+        Update the value of this status element
+        """
+        
+        self.value = value
+        self._updated()
+        
+    def get_value(self):
+        return self.value
+
+    def inc(self, value=1):
+        """
+        Will only work for numbers!
+        """
+        self.lock.acquire()
+        try:
+            self.value += value
+            self._updated()
+        except:
+            raise Exception("Can only increment numbers")
+        finally:
+            self.lock.release()
+
+    def dec(self, value=1):
+        """
+        Will only work for numbers!
+        """
+        self.lock.acquire()
+        try:
+            self.value -= value
+            self._updated()
+        except:
+            raise Exception("Can only increment numbers")
+        finally:
+            self.lock.release()
+
+        
+class EventElement(BaseElement):
+    type = "event"
+
+    def __init__(self, name, values=[]):
+        """
+        Create a new element.  DO NOT USE THIS - use
+        create_status_element() using a Status Holder object
+        """
+        self.time = long(time.time())
+        BaseElement.__init__(self, name)
+        self.values = values
+
+    def get_time(self):
+        return self.time
+
+    def add_value(self, value):
+        self.lock.acquire()
+        try:
+            self.values.append(value)
+        finally:
+            self.lock.release()
+
+    def get_values(self):
+        """
+        Return the values as a copy to ensure that there are no
+        synchronization issues
+        """
+        self.lock.acquire()
+        try:
+            return self.values[:]
+        finally:
+            self.lock.release()
+
+class RangeElement(BaseElement):
+    type = "range"
+
+    def __init__(self, name, values=[]):
+        self.start_time = self.end_time = long(time.time())
+        BaseElement.__init__(self, name, "range")
+        self.values = values
+
+    def get_start_time(self):
+        return self.start_time
+
+    def get_end_time(self):
+        return self.end_time
+        
+    def add_value(self, value):
+        self.lock()
+        try:
+            self.end_time = long(time.time())
+            self.values.append(value)
+        finally:
+            self.lock.release()
+            
+    def get_values(self):
+        """
+        Return the values as a copy to ensure that there are no
+        synchronization issues
+        """
+        self.lock()
+        try:
+            return self.values[:]
+        finally:
+            self.lock.release()
+        
+class StatusReporter:
+    """
+    This is the basic status reporter class.  It cannot be used
+    directly, but provides a base for all status reporters.
+    The status reporter is threadsafe
+    """
+    
+    def __init__(self, name):
+        self.name = name
+        self.lock = threading.Lock()
+        self.status_holders = []
+        
+    def add_status_holder(self, holder):
+        if not holder in self.status_holders:
+            self.status_holders.append(holder)
+        
+    def get_elements(self):
+        """
+        Return all elements that should be reported
+        """
+        elements = []
+        for holder in self.status_holders:
+            elements += holder.get_elements()
+        return elements
+
+    def get_events(self):
+        """
+        Return all elements that should be reported
+        """
+        events = []
+        for holder in self.status_holders:
+            events += holder.get_events()
+        return events
+
+
+class OnChangeStatusReporter(StatusReporter):
+    """
+    A basic status reporter which calls 'report(element)' whenever
+    it is changed
+    """
+    # NOTE(review): class-level list, shared by ALL instances of this
+    # class.  The methods below never touch it, but subclasses that
+    # append to self.elements (see the module docstring example) will
+    # mutate the shared list - confirm before relying on it.
+    elements = []
+    
+    def add_element(self, element):
+        """
+        Add element to this reporter
+        """
+        # Fire self.report on every change of the element.
+        element.add_callback(self.report)
+        
+    def remove_element(self, element):
+        """
+        Remove an element from this reporter
+        """
+        element.remove_callback(self.report)
+        
+    def report(self, element):
+        """
+        This function must be implemented by and extending class. Does nothing.
+        """
+        pass # To be implemented by the actual reporter
+
+class PeriodicStatusReporter(StatusReporter):
+    """
+    Base class for a periodic status reporter, calling report(self)
+    at given times.  To ensure a nice shutdown, execute stop() when
+    stopping.
+    
+    """
+    
+    def __init__(self, name, frequency, error_handler=None):
+        """
+        Frequency is a float in seconds
+        Error-handler will get an error code and a string as parameters,
+        the meaning will be up to the implemenation of the
+        PeriodicStatusReporter.
+        """
+        
+        StatusReporter.__init__(self, name)
+        self.frequency = frequency
+        self.parameters = []
+        self.error_handler = error_handler
+
+        # Set up the timer
+        self.running = True
+        self.create_timer()
+        
+    def create_timer(self):
+        self.timer = threading.Timer(self.frequency, self.on_time_event)
+        self.timer.setName("PeriodicStatusReporter")
+        self.timer.setDaemon(True)
+        self.timer.start()
+
+    def stop(self, block=False):
+        """
+        Stop this reporter.  If block=True this function will not return
+        until the reporter has actually stopped
+        """
+        self.timer.cancel()
+        
+        self.on_time_event()
+
+        self.running = False
+        self.timer.cancel()
+        self.timer.join()
+        
+    def report(self):
+        """
+        This function must be overloaded, does nothing
+        """
+        raise Exception("Not implemented")
+
+    def add_element(self, element):
+        """
+        Overload if you want your periodic reporter to only
+        report certain elements of a holder. Normally this does
+        nothing, but report fetches all elements
+        """
+        pass
+
+    def on_time_event(self):
+        """
+        Callback function for timers
+        """
+        if self.running:
+            
+            self.create_timer()
+            try:
+                self.report()
+            except Exception, e:
+                if self.error_handler:
+                    try:
+                        self.error_handler(0, str(e))
+                    except:
+                        pass
+                else:
+                    print "Error but no error handler:", e
+                    #import traceback
+                    #traceback.print_stack()
+        
+if __name__ == "__main__":
+    # Some basic testing (full unit tests are in StatusTest.py)
+    # This module has no standalone demo: direct the user to the unit
+    # tests and exit with a non-zero status.
+    print "Run unit tests"
+    raise SystemExit(-1)
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/Status/XmlPrinter.py b/instrumentation/next-share/BaseLib/Core/Statistics/Status/XmlPrinter.py
new file mode 100644 (file)
index 0000000..a39f12e
--- /dev/null
@@ -0,0 +1,186 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+def to_unicode(string):
+    """
+    Function to change a string (unicode or not) into a unicode string
+    Will try utf-8 first, then latin-1.
+    TODO: Is there a better way?  There HAS to be!!!
+    """
+
+    if string.__class__ != str:
+        return string
+    try:
+        return unicode(string, "utf-8")
+    except:
+        pass
+    print "Warning: Fallback to latin-1 for unicode conversion"
+    return unicode(string, "latin-1")
+
+
+class XmlPrinter:
+
+    """
+    An XML printer that will print XML *with namespaces*
+
+    Why minidom.toxml() does not do so really makes absolutely no sense
+    
+    """
+
+
+    def __init__(self, doc):
+        """
+        doc should be a xml.dom.minidom document
+        
+        """
+
+        self.root = doc
+        # Counter used by _new_namespace() to mint unique prefixes ns0, ns1, ...
+        self.namespace_counter=0
+        
+    def to_xml(self, encoding="UTF8"):
+        """
+        Like minidom toxml, just using namespaces too
+        """
+        return self._toxml(self.root, indent='', newl='').encode(encoding, "replace")
+    
+    def to_pretty_xml(self, indent=' ', newl='\n', encoding="UTF8"):
+        """
+        Like minidom toxml, just using namespaces too
+        """
+        return self._toxml(self.root, indent, newl).encode(encoding, "replace")
+    
+
+    def _make_header(self, encoding):
+        # Build the XML declaration line for the given encoding.
+        
+        return u'<?xml version="1.0" encoding="%s" ?>\n'%encoding
+
+    def _new_namespace(self, namespace):
+        # Make new namespace
+        ns_short = "ns%d"%self.namespace_counter
+        self.namespace_counter += 1
+        return ns_short
+        
+    def _toxml(self, element, indent=' ', newl='\n', encoding='UTF8', namespaces=None):
+        """
+        Recursive, internal function - do not use directly.
+        'namespaces' maps namespace URI -> short prefix and is shared
+        (mutated, then cleaned up below) across recursive calls.
+        """
+
+        if not element:
+            return ""
+
+        if not namespaces:
+            namespaces = {}
+        buffer = u""
+        # Namespaces first declared on this element: list of (uri, prefix).
+        define_ns_list = []
+
+        if element == self.root:
+            # Print the header
+            buffer = self._make_header(encoding)
+
+        if element.nodeType == element.TEXT_NODE:
+            buffer += indent + to_unicode(element.nodeValue) + newl
+            return buffer
+        if element.nodeType == element.ELEMENT_NODE:
+            ns = element.namespaceURI
+            name = to_unicode(element.localName)
+            if name.find(" ") > -1:
+                raise Exception("Refusing spaces in tag names")
+            
+            if namespaces.has_key(ns):
+                ns_short = namespaces[ns]
+                define_ns = False
+            else:
+                # First time we see this namespace: mint a prefix for it
+                # (unless it is the empty/default namespace).
+                if ns not in ["", None]:
+                    ns_short = self._new_namespace(ns)
+                    define_ns_list.append((ns, ns_short))
+                else:
+                    ns_short = None
+                    
+                define_ns = True
+                namespaces[ns] = ns_short
+
+            # Should we define more namespaces?  Will peek into the
+            # children and see if there are any
+            for child in element.childNodes:
+                if child.nodeType != child.ELEMENT_NODE:
+                    continue
+
+                if not namespaces.has_key(child.namespaceURI) and \
+                       child.namespaceURI not in [None, ""]:
+                    # Should define this one too!
+                    new_ns = self._new_namespace(child.namespaceURI)
+                    define_ns_list.append((child.namespaceURI, new_ns))
+                    namespaces[child.namespaceURI] = new_ns
+            buffer += indent
+            
+            # If we have no children, we will write <tag/>
+            if not element.hasChildNodes():
+                if ns != None:
+                    if define_ns:
+                        if ns_short:
+                            buffer += '<%s:%s xmlns:%s="%s"/>%s'%\
+                                      (ns_short, name,ns_short,ns,newl)
+                        else:
+                            buffer += '<%s xmlns="%s"/>%s'%(name,ns,newl)
+                    else:
+                        if ns_short:
+                            buffer += '<%s:%s/>%s'%(ns_short, name, newl)
+                        else:
+                            buffer += '<%s/>%s'%(name, newl)
+
+                else:
+                    buffer += '<%s/>%s'%(name, newl)
+
+                # Clean up - namespaces is passed as a reference, and is
+                # as such not cleaned up.  Let it be so to save some speed
+                for (n,short) in define_ns_list:
+                    del namespaces[n]
+                return buffer
+
+            # Have children
+            ns_string = ""
+            if len(define_ns_list) > 0:
+                for (url, short) in define_ns_list:
+                    ns_string += ' xmlns:%s="%s"'%(short, url)
+                        
+            if ns != None:
+                if define_ns:
+                    if ns_short:
+                        # Define all namespaces of next level children too
+                        buffer += '<%s:%s xmlns:%s="%s"%s>%s'%\
+                                  (ns_short, name, ns_short, ns, ns_string, newl)
+                    else:
+                        buffer += '<%s xmlns="%s"%s>%s'%(name,ns,ns_string,newl)
+                else:
+                    if ns_short:
+                        buffer += '<%s:%s%s>%s'%(ns_short, name, ns_string, newl)
+                    else:
+                        buffer += '<%s%s>%s'%(name, ns_string, newl)
+            elif ns_string:
+                buffer += '<%s %s>%s'%(name, ns_string, newl)
+            else:
+                buffer += '<%s>%s'%(name, newl)
+
+            # Recursively process
+            for child in element.childNodes:
+                new_indent = indent
+                if new_indent:
+                    new_indent += "  "
+                buffer += self._toxml(child, new_indent, newl, encoding, namespaces)
+            if ns_short:
+                buffer += "%s</%s:%s>%s"%(indent, ns_short, name, newl)
+            else:
+                buffer += "%s</%s>%s"%(indent, name, newl)
+
+            for (n, short) in define_ns_list:
+                del namespaces[n]
+            # NOTE(review): this try/except around a plain return is dead
+            # code - 'return buffer' cannot raise here.
+            try:
+                return buffer
+            except Exception,e:
+                print "-----------------"
+                print "Exception:",e
+                print "Buffer:",buffer
+                print "-----------------"
+                raise e
+
+        raise Exception("Could not serialize DOM")
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/Status/__init__.py b/instrumentation/next-share/BaseLib/Core/Statistics/Status/__init__.py
new file mode 100644 (file)
index 0000000..c49ce08
--- /dev/null
@@ -0,0 +1,42 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+"""
+Status gathering module with some simple reporting functionality
+
+Usage example:
+
+status = Status.get_status_holder("somename") # Get status object
+reporter = MyReporter("MyReporter")           # Create the reporter
+status.add_reporter(reporter)                 # Add the reporter to the status object
+
+# Create new element
+elem = status.create_status_element("ElementName",
+                                    initial_value=None)
+elem.set_value(somevalue)
+
+# The element will now be reported by the reporter.
+
+A reporter can be created easily like this:
+
+# Print name=value when the element is changed
+class MyOnChangeStatusReporter(Status.OnChangeStatusReporter):
+
+    def report(self, element):
+        print element.name,"=",element.value
+
+
+# Print name=value for all elements when the periodic reporter runs
+class MyPeriodicStatusReporter(Status.PeriodicStatusReporter):
+    def report(self):
+        for element in self.elements[:]:
+            print element.name,"=",element.value
+
+
+See the StatusTest.py class for more examples
+
+"""
+
+from Status import *
+from LivingLabReporter import LivingLabPeriodicReporter
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/VideoPlaybackCrawler.py b/instrumentation/next-share/BaseLib/Core/Statistics/VideoPlaybackCrawler.py
new file mode 100644 (file)
index 0000000..d1a5117
--- /dev/null
@@ -0,0 +1,211 @@
+"""
+Crawling the VideoPlayback statistics database
+"""
+
+from time import strftime
+import cPickle
+import sys
+import threading
+import zlib
+
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_VIDEOPLAYBACK_INFO_QUERY, CRAWLER_VIDEOPLAYBACK_EVENT_QUERY
+from BaseLib.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackDBHandler
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH, OLPROTO_VER_TENTH
+from BaseLib.Core.Statistics.Crawler import Crawler
+from BaseLib.Core.Utilities.utilities import show_permid, show_permid_short
+
+DEBUG = False
+
+class VideoPlaybackCrawler:
+    """
+    Collects video-playback statistics.  On the crawler side it issues
+    SQL queries to peers and appends replies to
+    'videoplaybackcrawler.txt'; on the peer side it executes received
+    queries against the local playback-stats database.
+    """
+    __single = None    # used for multi-threaded singletons pattern
+    lock = threading.Lock()
+
+    @classmethod
+    def get_instance(cls, *args, **kargs):
+        # Singleton pattern with double-checking to ensure that it can only create one object
+        if cls.__single is None:
+            cls.lock.acquire()   
+            try:
+                if cls.__single is None:
+                    cls.__single = cls(*args, **kargs)
+            finally:
+                cls.lock.release()
+        return cls.__single
+    
+    def __init__(self):
+        # Guard against direct instantiation: use get_instance().
+        if VideoPlaybackCrawler.__single is not None:
+            raise RuntimeError, "VideoPlaybackCrawler is singleton"
+
+        crawler = Crawler.get_instance()
+        if crawler.am_crawler():
+            # Crawler side: append to the log file; no local stats DB.
+            self._file = open("videoplaybackcrawler.txt", "a")
+            self._file.write("".join(("# ", "*" * 80, "\n# ", strftime("%Y/%m/%d %H:%M:%S"), " Crawler started\n")))
+            self._file.flush()
+            self._event_db = None
+
+        else:
+            # Peer side: no log file; queries run against the local DB.
+            self._file = None
+            self._event_db = VideoPlaybackDBHandler.get_instance()
+
+    def query_initiator(self, permid, selversion, request_callback):
+        """
+        <<Crawler-side>>
+        Established a new connection. Send a CRAWLER_VIDEOPLAYBACK_INFO_QUERY request.
+        @param permid The Tribler peer permid
+        @param selversion The oberlay protocol version
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if selversion >= OLPROTO_VER_TENTH:
+            if DEBUG: print >>sys.stderr, "videoplaybackcrawler: query_initiator", show_permid_short(permid), "version", selversion
+            # Overlay version 10 provided a simplification in the VOD
+            # stats collecting. We now have only one database table:
+            # playback_event that has only 3 columns: key, timestamp,
+            # and event.
+            request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, "SELECT key, timestamp, event FROM playback_event; DELETE FROM playback_event;", callback=self._after_event_request_callback)
+            
+        elif selversion >= OLPROTO_VER_EIGHTH:
+            if DEBUG: print >>sys.stderr, "videoplaybackcrawler: query_initiator", show_permid_short(permid), "version", selversion
+            # boudewijn: order the result DESC! From the resulting
+            # list we will not remove the first entries from the
+            # database because this (being the last item added) may
+            # still be actively used.
+            request_callback(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, "SELECT key, timestamp, piece_size, num_pieces, bitrate, nat FROM playback_info ORDER BY timestamp DESC LIMIT 50", callback=self._after_info_request_callback)
+
+        else:
+            # Older peers do not support playback statistics.
+            if DEBUG: print >>sys.stderr, "videoplaybackcrawler: query_info_initiator", show_permid_short(permid), "unsupported overlay version"
+
+    def _after_info_request_callback(self, exc, permid):
+        """
+        <<Crawler-side>>
+        Called by the Crawler with the result of the request_callback
+        call in the query_initiator method.
+        """
+        if not exc:
+            if DEBUG: print >>sys.stderr, "videoplaybackcrawler: request send to", show_permid_short(permid)
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "INFO REQUEST", show_permid(permid), "\n")))
+            self._file.flush()
+
+    def handle_info_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
+        # Peer-side INFO requests are handled elsewhere; nothing to do here.
+        pass
+
+    def handle_info_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
+        """
+        <<Crawler-side>>
+        Received a CRAWLER_VIDEOPLAYBACK_INFO_QUERY reply.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param error The error value. 0 indicates success.
+        @param message The message payload
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if error:
+            if DEBUG:
+                print >> sys.stderr, "videoplaybackcrawler: handle_crawler_reply", error, message
+
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "   INFO REPLY", show_permid(permid), str(error), message, "\n")))
+            self._file.flush()
+
+        else:
+            if DEBUG:
+                print >> sys.stderr, "videoplaybackcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message)
+
+            info = cPickle.loads(message)
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "   INFO REPLY", show_permid(permid), str(error), str(info), "\n")))
+            self._file.flush()
+
+            i = 0
+            for key, timestamp, piece_size, num_pieces, bitrate, nat in info:
+                i += 1
+                # do not remove the first item. the list is ordered
+                # DESC so the first item is the last that is added to
+                # the database and we can't affored to remove it, as
+                # it may cause exceptions in the running playback.
+                if i == 1:
+                    sql = """
+SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50;
+DELETE FROM playback_event WHERE key = '%s';
+""" % (key, key)
+                else:
+                    sql = """
+SELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50;
+DELETE FROM playback_event WHERE key = '%s';
+DELETE FROM playback_info WHERE key = '%s';
+""" % (key, key, key)
+                    
+                # todo: optimize to not select key for each row
+                request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, sql, channel_data=key, callback=self._after_event_request_callback, frequency=0)
+
+    def _after_event_request_callback(self, exc, permid):
+        """
+        <<Crawler-side>>
+        Called by the Crawler with the result of the request_callback
+        call in the handle_crawler_reply method.
+        """
+        if not exc:
+            if DEBUG: print >>sys.stderr, "videoplaybackcrawler: request send to", show_permid_short(permid)
+            # NOTE(review): label says " INFO REQUEST" although this is the
+            # event-query path - presumably meant "EVENT REQUEST"; confirm
+            # before changing the log format consumers rely on.
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), " INFO REQUEST", show_permid(permid), "\n")))
+            self._file.flush()
+
+    def handle_event_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
+        """
+        <<Crawler-side>>
+        Received a CRAWLER_VIDEOPLAYBACK_EVENT_QUERY reply.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param channel_data Data associated with the request
+        @param error The error value. 0 indicates success.
+        @param message The message payload
+        @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
+        """
+        if error:
+            if DEBUG:
+                print >> sys.stderr, "videoplaybackcrawler: handle_crawler_reply", error, message
+
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  EVENT REPLY", show_permid(permid), str(error), str(channel_data), message, "\n")))
+            self._file.flush()
+
+        elif selversion >= OLPROTO_VER_TENTH:
+            # Overlay version 10 sends the reply pickled and zipped
+            if DEBUG:
+                print >> sys.stderr, "videoplaybackcrawler: handle_crawler_reply", show_permid_short(permid), len(message), "bytes zipped"
+
+            info = cPickle.loads(zlib.decompress(message))
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  EVENT REPLY", show_permid(permid), str(error), str(channel_data), str(info), "\n")))
+            self._file.flush()
+            
+        elif selversion >= OLPROTO_VER_EIGHTH:
+            # Version 8/9 replies are plain pickles (not compressed).
+            if DEBUG:
+                print >> sys.stderr, "videoplaybackcrawler: handle_crawler_reply", show_permid_short(permid), cPickle.loads(message)
+
+            info = cPickle.loads(message)
+            self._file.write("; ".join((strftime("%Y/%m/%d %H:%M:%S"), "  EVENT REPLY", show_permid(permid), str(error), str(channel_data), str(info), "\n")))
+            self._file.flush()
+
+    def handle_event_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
+        """
+        <<Peer-side>>
+        Received a CRAWLER_VIDEOPLAYBACK_EVENT_QUERY request.
+        @param permid The Crawler permid
+        @param selversion The overlay protocol version
+        @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
+        @param message The message payload
+        @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
+        """
+        if DEBUG:
+            print >> sys.stderr, "videoplaybackcrawler: handle_event_crawler_request", show_permid_short(permid), message
+
+        # execute the sql
+        # NOTE(review): this runs SQL received over the overlay; safety
+        # relies on crawler permid authorization upstream - verify.
+        try:
+            cursor = self._event_db._db.execute_read(message)
+
+        except Exception, e:
+            reply_callback(str(e), error=1)
+        else:
+            if cursor:
+                # Reply is a zlib-compressed pickle of the result rows.
+                reply_callback(zlib.compress(cPickle.dumps(list(cursor), 2), 9))
+            else:
+                reply_callback("error", error=2)
+    
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/__init__.py b/instrumentation/next-share/BaseLib/Core/Statistics/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/crawler.txt b/instrumentation/next-share/BaseLib/Core/Statistics/crawler.txt
new file mode 100644 (file)
index 0000000..fec1ba1
--- /dev/null
@@ -0,0 +1,31 @@
+#
+#    Anonymous performance data gathering
+#
+# For improvements to our algorithms the scientists behind this
+# software need to have some insight how the P2P network operates.
+# The PermIDs listed in this file have some access to internal P2P
+# statistics when conducting a network crawl.
+# Collected data will never be shared with third parties or used
+# for non-scientific purposes.
+# 
+# Please delete the PermIDs to disable this feature.
+#
+# permid
+# lucia's old crawler
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKa2aWZv65UoFv0OR8BbVSnlmTPrYKcwwpGHEhK3AO2PpxiGlv/Y2mTP2kg+VXLaBBmfpdYWPA4eSdpq
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAe6fNHWhKsReFj8/RIN6rBHWRzT4VkLddvhJZ5jmAQf5c7ZmqkdFQ/F21DKbC8V1Otmf6YO00ufe5D/o
+
+# General crawler PermIDs: DO NOT CHANGE OR ADD WITHOUT CONSULTATION!
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOydlMAfRpmhT+jKr0gI8EanNLyt+Y/FEFcjTAoFAKCmNGMGrBl22ZICZBi+oPo0p6FpWECrf2oGg2WM
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK1cQH+R2B6oOPNgCcgiAruKlWAYZGzryZm6P0B3AMzocJszITiPPIsGujeg0saYZ6+VmzuncOCvVOWY
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPKqwAWYmpi3yjhnQTV1kOHU3y8gbNVyFGbAJaQMAAQjDYrSOHJTeKIAaYZFieGU6K8FnmJKlC4qLHxh
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMOfw9qZb/9Eqqy+75FWQGOi8vAkt7P32S+EEjVbAN67PY2fTjHNdFlZlhjqotTzJdYc1299OWCV3Nf+
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2CbiMwdGFLFQK93Je7H1a+Gi2QWV8B9n+Fwdq6AdAH04s1unhfTEP6cw1UlAdg4rZEY27GsINGsmD+
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABZzmfFlN7PryBasdECMITSm8XJEQ4WU2Te99YeqARS2i2aLDxPYhFTOfBuYN4MrFLwpDxmRm7Gvdp2m
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbR4tcEbSSikh7oULmXjpl5tYKdKvR3Qn1UH913lAW2GK0k2bF8hO7RIdu971gZpgNUew33kiWE/IREP
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXvNe65EBsnBAy/s4dp1kJDa9KXnfTHAOO8OADt+Abm83AAXdeeTwyBboyioaMMlIuUyS/9GwXay2ZLA
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHXzQ+9sH0II55c3TfpFz+LZwqNpHCOHYq0iXkmFALZKYSNA3/WvyncKCh9mbpWUtbusf06/HYhHHxUg
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFuyErJwV2MBqhjLbjXA0D5PkvY1O9thUbx4QB3CAQOxYlZUtgUP09mc8K+uEuoHzOKdN2h4KoB/G8Ae
+
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/tribler_friendship_stats_sdb.sql b/instrumentation/next-share/BaseLib/Core/Statistics/tribler_friendship_stats_sdb.sql
new file mode 100644 (file)
index 0000000..f215540
--- /dev/null
@@ -0,0 +1,38 @@
+-- Tribler Friendship Statistics Database\r
+\r
+BEGIN TRANSACTION create_table;\r
+\r
+----------------------------------------\r
+\r
+CREATE TABLE FriendshipStatistics (\r
+  source_permid        text NOT NULL,\r
+  target_permid        text NOT NULL,\r
+  isForwarder          integer DEFAULT 0,\r
+  request_time         numeric,\r
+  response_time        numeric,\r
+  no_of_attempts       integer DEFAULT 0,\r
+  no_of_helpers                   integer DEFAULT 0,\r
+  modified_on             numeric,\r
+  crawled_permid       text NOT NULL DEFAULT client\r
+);\r
+\r
+----------------------------------------\r
+\r
+CREATE TABLE MyInfo (\r
+  entry  PRIMARY KEY,\r
+  value  text\r
+);\r
+\r
+----------------------------------------\r
+\r
+COMMIT TRANSACTION create_table;\r
+\r
+----------------------------------------\r
+\r
+BEGIN TRANSACTION init_values;\r
+\r
+-- Version 1: Initial version, published in Tribler 4.5.0\r
+-- Version 2: Added crawled_permid to FriendshipStatistics table.\r
+INSERT INTO MyInfo VALUES ('version', 2);\r
+\r
+COMMIT TRANSACTION init_values;
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/tribler_seedingstats_sdb.sql b/instrumentation/next-share/BaseLib/Core/Statistics/tribler_seedingstats_sdb.sql
new file mode 100644 (file)
index 0000000..b2bad53
--- /dev/null
@@ -0,0 +1,41 @@
+-- Tribler Seeding Statistics Database\r
+\r
+BEGIN TRANSACTION create_table;\r
+
+----------------------------------------
+
+CREATE TABLE SeedingStats (\r
+  timestamp    real,
+  permID               text,\r
+  info_hash            text,\r
+  seeding_time real,
+  reputation   real,\r
+  crawled              integer\r
+);
+\r
+----------------------------------------\r
+\r
+CREATE TABLE SeedingStatsSettings (\r
+  version                              integer PRIMARY KEY,\r
+  crawling_interval    integer,\r
+  crawling_enabled             integer\r
+);\r
+\r
+----------------------------------------\r
+\r
+CREATE TABLE MyInfo (\r
+  entry  PRIMARY KEY,\r
+  value  text\r
+);\r
+\r
+----------------------------------------\r\r
+COMMIT TRANSACTION create_table;\r
+
+----------------------------------------\r
+
+BEGIN TRANSACTION init_values;\r
+\r
+INSERT INTO MyInfo VALUES ('version', 1);\r
+INSERT INTO SeedingStatsSettings VALUES (1, 1800, 1);\r
+\r
+COMMIT TRANSACTION init_values;
diff --git a/instrumentation/next-share/BaseLib/Core/Statistics/tribler_videoplayback_stats.sql b/instrumentation/next-share/BaseLib/Core/Statistics/tribler_videoplayback_stats.sql
new file mode 100644 (file)
index 0000000..869b9bf
--- /dev/null
@@ -0,0 +1,35 @@
+-- Tribler Video Playback Statistics Database
+
+BEGIN TRANSACTION create_table;
+
+----------------------------------------
+
+CREATE TABLE playback_event (
+  key                   text NOT NULL,
+  timestamp             real NOT NULL,
+  event                 text NOT NULL
+);  
+
+CREATE INDEX playback_event_idx 
+  ON playback_event (key, timestamp);
+
+----------------------------------------
+
+CREATE TABLE MyInfo (
+  entry  PRIMARY KEY,
+  value  text
+);
+
+----------------------------------------
+
+COMMIT TRANSACTION create_table;
+
+----------------------------------------
+
+BEGIN TRANSACTION init_values;
+
+-- Version 1: Initial version, published in Tribler 5.0.0
+-- Version 2: Simplified the database. Now everything is an event. Published in Tribler 5.1.0
+INSERT INTO MyInfo VALUES ('version', 2);
+
+COMMIT TRANSACTION init_values;
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/Languages.py b/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/Languages.py
new file mode 100644 (file)
index 0000000..1d7a025
--- /dev/null
@@ -0,0 +1,219 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+import csv
+import codecs
+from BaseLib.Core.Utilities.utilities import binaryStringToUint, uintToBinaryString
+
+MAX_SUPPORTED_LANGS = 32
+
+DEFAULT_LANG_CONF_FILE = "res/subs_languages.csv"
+
+
+def _loadLanguages(langFilePath):
+    """
+    Read a list of languages from a csv file
+    
+    Reads a list of language codes and the relative language
+    description from a csv text file. On each line of the file
+    there must be a couple of ISO 639-2 formatted language code
+    'code' and the textual description for the language.
+    e.g. ita, Italian
+    """
+    languages = {}
+    with codecs.open(langFilePath, "r","utf-8") as csvfile:
+        csvreader = csv.reader(csvfile)
+        for row in csvreader:
+            # Must be exactly two entries code, description
+            if len(row) != 2 :
+                raise ValueError("Erroneous format in csv")
+            # Only check if the code is a three character code, not
+            # if it is really a valid ISO 639-2 Code
+            if len(row[0]) != 3 :
+                raise ValueError("Lang codes must be 3 characters length")
+                
+            languages[row[0]] = row[1]
+    
+    return languages
+
+_languages = {
+    'ara':'Arabic',
+    'ben':'Bengali',
+    'ces':'Czech',
+    'dan':'Danish',
+    'deu':'German',
+    'ell':'Greek',
+    'eng':'English',
+    'fas':'Persian',
+    'fin':'Finnish',
+    'fra':'French',
+    'hin':'Hindi',
+    'hrv':'Croatian',
+    'hun':'Hungarian',
+    'ita':'Italian',
+    'jav':'Javanese',
+    'jpn':'Japanese',
+    'kor':'Korean',
+    'lit':'Latvia',
+    'msa':'Malay',
+    'nld':'Dutch',
+    'pan':'Panjabi',
+    'pol':'Polish',
+    'por':'Portuguese',
+    'ron':'Romanian',
+    'rus':'Russian',
+    'spa':'Spanish',
+    'srp':'Serbian',
+    'swe':'Swedish',
+    'tur':'Turkish',
+    'ukr':'Ukranian',
+    'vie':'Vietnamese',
+    'zho':'Chinese'
+}
+
+
+class Languages(object):
+    '''
+    Performs the translation between supported languages and bitstrings.
+    '''
+            
+    def __init__(self, lang_dict=_languages):
+        '''
+        Constructor
+        '''
+        
+        # Contains pairs of the type { lang_code : Long language Name}
+        # its values are read from a file
+        self.supportedLanguages = {}
+        
+        # for each language code defined in supportedLanguages
+        # maps contains the bit string representing that language
+        self.langMappings = {}
+        
+        self.supportedLanguages = lang_dict
+        
+        self._supportedCodes = frozenset(self.supportedLanguages.keys())
+        
+        if len(self.supportedLanguages) > MAX_SUPPORTED_LANGS:
+            raise ValueError("Maximum number of supported languages is %d" %
+                             MAX_SUPPORTED_LANGS)
+            
+        self._initMappings()                
+
+
+    def _initMappings(self):
+        """
+        Assigns bitmasks to languages.
+        
+        Assigns bitmasks to language codes. Language codes are sorted
+        lexicographically and the first bitmask (i.e. 0x1) is given to
+        the first code in this order.
+        """
+        counter = 0
+        sortedKeys = sorted(self.supportedLanguages.keys())
+        for code in sortedKeys:
+            self.langMappings[code] = 1 << counter
+            counter += 1
+        
+        
+            
+    def getMaskLength(self):
+        """
+        Returns the length of the languages bit mask.
+        
+        Returns the necessary length to contain the language bit mask
+        for the languages represented by this instance.
+        It is always a power of two, even if less bits would actually be
+        required
+        """
+        
+        # always returns the maximum number of supported languages
+        return MAX_SUPPORTED_LANGS
+    
+
+    
+    def maskToLangCodes(self, mask):
+        """
+        Given a int bitmask returns the list of languages it represents.
+        
+        Translates the bitmask passed in as parameters into a list
+        of language codes that represent that bitmask.
+        
+        @param mask: a bitmask representing languages (integer)
+        @return: a list of language codes string
+        @precondition: mask < 2**32 -1
+
+        """
+        assert mask < 2**32 , "Mask mast be a 32 bit value"
+        assert mask >=0 , "Mask must  be positive"
+        codeslist = []
+        
+        for code, cur_mask in self.langMappings.iteritems():
+            if mask & cur_mask != 0 :
+                codeslist.append(code)
+        
+        return sorted(codeslist)
+    
+    
+    
+    def langCodesToMask(self, codes):
+        """
+        Given a list of languages returns the bitmask representing it.
+        
+        Translates a list of language codes in a bitmask representing it.
+        Converse operation of masktoLangCodes.
+        
+        @param codes: a list of language codes. That code must be one of the
+                      keys of self.supportedLanguages.keys()
+        """
+        
+        validCodes = self.supportedLanguages.keys()
+        
+        #mask is the integer value of the bitfield
+        mask = 0
+        for lang in codes:
+            #precondition: every entry in codes is contained in 
+            #self.supportedLanguages.keys
+            if lang not in validCodes:
+                raise ValueError(lang + " is not a supported language code")
+            mask = mask | self.langMappings[lang]
+        
+        return mask
+    
+    
+    def isLangCodeSupported(self, langCode):
+        """
+        Checks whether a given language code is supported.
+        
+        Returns true if the language code is one of the supported languages
+        for subtitles
+        """
+        return langCode in self._supportedCodes
+    
+    def isLangListSupported(self, listOfLangCodes):
+        """
+        Checks whether a list of language codes is fully supported.
+        
+        Returns true only if every entry in the list passed in as parameter
+        is supported as a language for subtitles.
+        """
+        givenCodes = set(listOfLangCodes)
+        return givenCodes & self._supportedCodes == givenCodes
+    
+    
+    
+            
+
+class LanguagesProvider(object):
+    
+    _langInstance = None
+        
+    @staticmethod
+    def getLanguagesInstance():
+        if LanguagesProvider._langInstance is None:
+            #lang_dict = _loadLanguages(DEFAULT_LANG_CONF_FILE)
+            LanguagesProvider._langInstance = Languages(_languages)    
+        return LanguagesProvider._langInstance
+    
+    
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/MetadataDTO.py b/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/MetadataDTO.py
new file mode 100644 (file)
index 0000000..bce956f
--- /dev/null
@@ -0,0 +1,300 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import \
+    LanguagesProvider
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import \
+    SerializationException
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+from BaseLib.Core.Overlay.permid import sign_data, verify_data
+from BaseLib.Core.Utilities.utilities import isValidInfohash, isValidPermid, \
+    uintToBinaryString, binaryStringToUint
+from math import floor
+import sys
+import time
+
+DEBUG = False
+
+_languagesUtil = LanguagesProvider.getLanguagesInstance()
+
+class MetadataDTO(object):
+    '''
+    Metadata DataTransferObject
+    '''
+
+
+    def __init__(self, publisher,infohash,timestamp = None,
+                 description=u"", subtitles=None,signature=None):
+        """
+        Create a MetadataDTO instance.
+        
+        publisher and infohash are mandatory to be not null
+        @param publisher: the permid  of the owner of the 
+                          channel this instance refers to
+        @param infohash: the infohash of the item in the channel this instance
+                         refers to 
+        @param timestamp: the timestamp of the creation of this metadata
+                          instance. This can be later reset with 
+                          resetTimestamp()
+        @param description: an optional utf-8 string description for the item. 
+                            Defaults to an empty string
+        @param subtitles: a dictionary of type {langCode : SubtitleInfo}
+        @param signature: signature of the packed version of this metadataDTO.
+                          Defaults to None. It can be later signed with sign()
+        """
+        
+        assert publisher is not None
+        assert infohash is not None
+        assert isValidPermid(publisher)
+        assert isValidInfohash(infohash)
+        
+        #stringified permid of the owner of the channel
+        self.channel = publisher
+        
+        #stringified infohash (bin2str) of the torrent
+        self.infohash = infohash
+        if timestamp is not None:
+            timestring = int(floor(timestamp))
+        else:
+            timestring = int(floor(time.time()))
+        
+        #integer timestamp of the creation of this content
+        #(the content, not the MetadataDTO instance)
+        self.timestamp = timestring
+        
+        #utf-8 string description
+        if isinstance(description, str):
+            description = unicode(description, "utf-8")
+            
+        self.description = description
+        
+        if subtitles is None:
+            subtitles = {}
+        self._subtitles = subtitles
+        self.signature = signature
+        
+        
+    def resetTimestamp(self):
+        """
+        Sets the timestamp to the current time.
+        """
+        self.timestamp = int(floor(time.time()))
+    
+    def addSubtitle(self, subtitle):
+        '''
+        Adds a subtitle instance to the metadata dto.
+        
+        subtitle must be an instance of SubtitleInfo, and its language
+        field must be correctly set to an ISO-639-2 language code
+        (see Languages).
+        
+        @param subtitle: a SubtitleInfo instance
+        @precondition: subtitle.lang is not None
+        '''
+        assert isinstance(subtitle, SubtitleInfo)
+        assert subtitle.lang is not None
+        
+        self._subtitles[subtitle.lang] = subtitle
+    
+    def removeSubtitle(self, lang):
+        '''
+        Remove a subtitle instance from the dto.
+        
+        If the subtitles with the given language does not exist, it
+        does nothing.
+        
+        @param lang: a language code for the subtitle to be removed
+        '''
+        if lang in self._subtitles.keys():
+            del self._subtitles[lang]
+        
+    def getSubtitle(self,lang):
+        '''
+        Returns a SubtitleInfo instance for the given language if it exists.
+        
+        @param lang: an ISO-639-2 3 characters language code
+        
+        @rtype: SubtitleInfo.SubtitleInfo
+        @return: a SubtitleInfo instance, or None
+        '''
+        if lang not in self._subtitles.keys():
+            return None
+        else:
+            return self._subtitles[lang]
+    
+    def getAllSubtitles(self):
+        '''
+        Returns a copy of the subtitles for this dto.
+        
+        Notice that modifying this copy does not affect the languages in the
+        metadata dto
+        '''
+        return self._subtitles.copy()
+        
+        
+        
+    def sign(self,keypair):
+        """
+        Signs the packed version of this instance.
+        
+        See _packData to see what packed version means.
+        
+        @param keypair: an ec keypair that will be used to create
+                        the signature
+        """ 
+        bencoding = self._packData()
+        signature = sign_data(bencoding, keypair)
+        self.signature = signature
+    
+    def verifySignature(self):
+        """
+        Verifies the signature field of this instance.
+        
+        The signature is verified against the packed version of this
+        instance. See _packData
+        
+        """
+        assert self.signature is not None
+        toVerify = self._packData()
+        binaryPermId = self.channel
+        return verify_data(toVerify, binaryPermId, self.signature)
+        
+      
+        
+    def _packData(self):
+        """
+        Creates a bencode binary representation of this metadata instance.
+        
+        This representation is the one that is sent with ChannelCast messages.
+        """
+        if self.description is not None:
+            assert isinstance(self.description, unicode)      
+        if self.description is None:
+            self.description = u""
+        
+        
+        
+        bitmask, checksums = self._getSubtitlesMaskAndChecksums()
+        
+        binaryMask = uintToBinaryString(bitmask)
+        # The signature is taken over the bencoding of
+        # binary representations of (channel,infohash,description,timestamp,bitmask)
+        # that is the same message that is sent with channelcast
+        tosign = (self.channel, 
+                  self.infohash, 
+                  self.description.encode("utf-8"),
+                  self.timestamp,
+                  binaryMask,
+                  checksums )
+    
+        bencoding = bencode(tosign)
+        return bencoding
+    
+    def serialize(self):
+        if self.signature is None:
+            raise SerializationException("The content must be signed")
+        pack = bdecode(self._packData())
+        pack.append(self.signature)
+        
+        return pack
+    
+
+        
+        
+        
+    
+    
+    def _getSubtitlesMaskAndChecksums(self):
+        '''
+        computes bitmask and checksums for subtitles.
+        
+        Computes the bitmask for available subtitles and produces also a tuple
+        containing the checksums for the subtitles that are in the bitmask.
+        The checksums are in the same order as the bits in the bitmask.
+        '''
+        
+        languagesList = []
+        checksumsList = []
+        
+        #cycling by sorted keys
+        sortedKeys = sorted(self._subtitles.keys())
+        
+        for key in sortedKeys:
+            sub = self._subtitles[key]
+            assert sub.lang is not None
+            assert sub.lang == key
+            
+            if sub.checksum is None:
+                if sub.subtitleExists():
+                    sub.computueCheksum()
+                else :
+                    if DEBUG:
+                        print >> sys.stderr, "Warning: Cannot get checksum for " + sub.lang \
+                                +" subtitle. Skipping it."
+                    continue
+            languagesList.append(sub.lang)
+            checksumsList.append(sub.checksum)
+            
+            
+        bitmask = _languagesUtil.langCodesToMask(languagesList)
+        checksums = tuple(checksumsList)
+        
+        return bitmask, checksums
+    
+    def __eq__(self, other):
+        if self is other:
+            return True
+        return self.channel == other.channel and \
+               self.infohash == other.infohash and \
+               self.description == other.description and \
+               self.timestamp == other.timestamp and \
+               self.getAllSubtitles() == other.getAllSubtitles()
+    
+    def __ne__(self, other):
+        return not self.__eq__(other)
+    
+    
+#-- Outside the class
+
+def deserialize(packed):
+    assert packed is not None
+        
+    message = packed
+    if(len(message) != 7):
+        raise SerializationException("Wrong number of fields in metadata")
+        
+    channel = message[0]
+    infohash = message[1]
+    description = message[2].decode("utf-8")
+    timestamp = message[3]
+    binarybitmask = message[4]
+    bitmask = binaryStringToUint(binarybitmask)
+    listOfChecksums = message[5]
+    signature = message[6]
+    subtitles = _createSubtitlesDict(bitmask,listOfChecksums)
+    
+    dto = MetadataDTO(channel, infohash, timestamp, description, subtitles, signature)
+    if not dto.verifySignature():
+        raise SerializationException("Invalid Signature!")
+    return dto
+    
+                                      
+
+def _createSubtitlesDict(bitmask, listOfChecksums):
+    langList = _languagesUtil.maskToLangCodes(bitmask)
+    if len(langList) != len(listOfChecksums):
+        raise SerializationException("Unexpected num of checksums")
+     
+    subtitles = {}
+    for i in range(0, len(langList)):
+        sub = SubtitleInfo(langList[i])
+        sub.checksum = listOfChecksums[i]
+        subtitles[langList[i]] = sub
+    return subtitles
+     
+     
+     
+     
+    
+        
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/MetadataExceptions.py b/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/MetadataExceptions.py
new file mode 100644 (file)
index 0000000..afc50ff
--- /dev/null
@@ -0,0 +1,72 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+
+class RichMetadataException(Exception):
+    '''
+    General exception of the RichMetadata subsystem
+    '''
+
+
+    def __init__(self, value):
+        self.value = value
+        
+    def __str__(self):
+        return repr(self.value)
+
+class SerializationException(RichMetadataException):
+    '''
+    Thrown when some problem occurs when trying to transform a Metadata
+    object into the external representation
+    '''
+
+
+    def __init__(self, value):
+        self.value = value
+        
+    def __str__(self):
+        return repr(self.value)
+
+class SignatureException(RichMetadataException):
+    '''
+    Thrown when some problem occurs concerning metadata signature.
+    '''
+    def __init__(self, value):
+        self.value = value
+        
+    def __str__(self):
+        return repr(self.value)
+
+class MetadataDBException(RichMetadataException):
+    '''
+    Thrown when something  violated Metadata and Subtitles DB constraints.
+    '''
+    def __init__(self, value):
+        self.value = value
+        
+    def __str__(self):
+        return repr(self.value)
+    
+class SubtitleMsgHandlerException(RichMetadataException):
+    """
+    Thrown when a problem is encountered in sending or receiving a subtitle
+    message.
+    """
+    def __init__(self, value):
+        self.value = value
+        
+    def __str__(self):
+        return repr(self.value)
+    
+class DiskManagerException(RichMetadataException):
+    '''
+    Thrown by the Disk Manager when problems dealing with disk reading
+    and writings occur
+    '''
+    def __init__(self, value):
+        self.value = value
+        
+    def __str__(self):
+        return repr(self.value)
+
+    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/SubtitleInfo.py b/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/SubtitleInfo.py
new file mode 100644 (file)
index 0000000..7b2cf36
--- /dev/null
@@ -0,0 +1,226 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import \
+    LanguagesProvider
+import base64
+import codecs
+import hashlib
+import os.path
+import sys
+
+DEBUG = False
+
+class SubtitleInfo(object):
+    '''
+    Represents a subtitle in a given language.
+    
+    It contains three fields, namely lang (an ISO 639-2 code), path that is
+    the path into the filesystem to the subtitles file, and checksum that is
+    a base64 representation of the sha1 checksum for that file.
+    It also manages the computation and verification of a sha1 checksum for 
+    a the subtitle.
+        
+    Notice that the path property can be None. This means that the actual
+    subtitle hasn't been collected and is not available on the local
+    filesystem. In that case the checksum field will be None as well.
+    
+    Also notice that this object is meant to be used as a DTO. Simply changing
+    property in this object won't by themself affect values contained in the
+    Database
+    
+    SYNCHRONIZATION: This objects act only as copies of the data in the DB.
+    If the instance is never passed between different threads
+    no synchronization is needed. 
+    '''
+
+    
+
+    def __init__(self, lang, path=None, checksum=None):
+        """
+        Create a subtitle instance.
+        
+        @param lang: an ISO 639-2 language code. Notice that not every language
+                     code described by the standard is supported, but only
+                     a small subset. See the Languages module
+        @param path: a file system path to the subtitles file
+        @param checksum: a sha1 checksum of the contents 
+                         of the subitles file
+        """
+        self._languages = LanguagesProvider.getLanguagesInstance()
+        if lang not in self._languages.supportedLanguages.keys():
+            raise ValueError("Language" + lang + " not supported")
+        
+        
+        #ISO 639-2 code. See Languages for supported languages
+        self._lang = lang #final property
+        #A string representing the path in the filesystme for this subtitle
+        self._path = path
+        #sha1 checksum
+        self._checksum = checksum
+        
+            
+    def getLang(self):
+        '''
+        Returns the language of the subtitle as a three characters code
+
+        @rtype: str
+        @return: a three characters ISO 639-2 code
+        '''
+        return self._lang
+
+    lang = property(getLang) # "final" property
+        
+    def setPath(self, path):
+        '''
+        Sets the local path for the subtitle. 
+
+        Calling this method does not change what is stored in the DB. You will
+        have to update that data separately (see L{MetadataDBHandler})
+
+        @type path: str
+        @param path: the local path were the subtitle is stored
+        '''
+        self._path = path
+
+            
+    def getPath(self):
+        '''
+        Get the path on the local host for the subtitle file, if available.
+
+        @rtype: str
+        @return: the local path if the subtitle is locally available. Otherwise
+            None.
+        '''
+        return self._path
+
+    
+    path = property(getPath, setPath)
+            
+    def setChecksum(self, checksum):
+        '''
+        Set the checksum for this subtitle instance. 
+
+        ATTENTION: This method should never be called directly; the
+        L{computeChecksum} method should be called instead.
+
+        @type checksum: str
+        @param checksum: a 160bit sha1 checksum of the subtitle
+        '''
+        self._checksum = checksum
+
+            
+    def getChecksum(self):
+        '''
+        Returns the SHA-1 checksum of the subtitle.
+
+        @rtype: str
+        @return: a 20byte string representing the SHA-1 checksum of the
+            subtitle
+        '''
+        return self._checksum
+
+        
+    checksum = property(getChecksum, setChecksum)
+        
+    def subtitleExists(self):
+        """
+        Checks whether a subtitle exists at its specified path.
+        
+        @return: True if self.path is pointing to a local existing file.
+            Otherwise false
+        """
+
+        if self.path is None:
+            return False
+        return os.path.isfile(self.path)
+
+    
+    def computeChecksum(self):
+        """
+        Computes the checksum of the file containing the subtitles
+        and sets its corresponding property.
+
+        @precondition: self.subtitleExists()
+        @postcondition: self.checksum is not None
+        """
+        assert self.subtitleExists()
+
+        self.checksum = self._doComputeChecksum()
+        
+    def _doComputeChecksum(self):
+        """
+        Computes the checksum of the file containing the subtitles
+        
+        @precondition: self.subtitleExists()
+        """
+        try:
+            with codecs.open(self.path, "rb", "utf-8", "replace") as subFile:
+                content = subFile.read()
+      
+            hasher = hashlib.sha1()
+            hasher.update(content.encode('utf-8','replace'))
+            
+            return hasher.digest()
+        
+        except IOError:
+            print >> sys.stderr, "Warning: Unable to open " + self.path + " for reading"
+        
+    
+    def verifyChecksum(self):
+        """
+        Verifies the checksum of the file containing the subtitles.
+        
+        Computes the checksum of the file pointed by self.path
+        and checks whether it is equal to the one in self.checksum
+        
+        @precondition: self.subtitleExists()
+        @precondition: self.checksum is not None
+
+        @rtype: boolean
+        @return: True if the verification is ok.
+
+        @raises AssertionError: if one of the preconditions does not hold
+        """
+
+        assert self.subtitleExists(), "Cannot compute checksum: subtitle file not found"
+        assert self.checksum is not None, "Cannot verify checksum: no checksum to compare with"
+        
+        computed = self._doComputeChecksum()
+        return computed == self.checksum
+
+    
+    def __str__(self):
+
+        if self.path is not None:
+            path = self.path
+        else:
+            path = "None"
+        return "subtitle: [lang=" + self.lang +"; path=" + path \
+                + "; sha1=" + base64.encodestring(self.checksum).rstrip() + "]"
+
+            
+    def __eq__(self,other):
+        '''
+        Test instances of SubtitleInfo for equality.
+        
+        Two subtitle instances are considered equal if they have the same
+        language and the same file checksum
+        '''
+
+        if self is other:
+            return True
+        return self.lang == other.lang and self.checksum == other.checksum
+                #and self.path == other.path
+
+                
+        
+    def __ne__(self,other):
+        return not self.__eq__(other)
+        
+        
+    
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/__init__.py b/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/__init__.py
new file mode 100644 (file)
index 0000000..bdef2ba
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/res/subs_languages.csv b/instrumentation/next-share/BaseLib/Core/Subtitles/MetadataDomainObjects/res/subs_languages.csv
new file mode 100644 (file)
index 0000000..2261bc6
--- /dev/null
@@ -0,0 +1,32 @@
+ara,Arabic 
+ben,Bengali 
+ces,Czech
+dan,Danish
+deu,German
+ell,Greek
+eng,English
+fas,Persian
+fin,Finnish
+fra,French
+hin,Hindi 
+hrv,Croatian
+hun,Hungarian
+ita,Italian
+jav,Javanese
+jpn,Japanese
+kor,Korean
+lit,Lithuanian
+msa,Malay
+nld,Dutch
+pan,Panjabi
+pol,Polish
+por,Portuguese
+ron,Romanian
+rus,Russian
+spa,Spanish
+srp,Serbian
+swe,Swedish
+tur,Turkish
+ukr,Ukrainian
+vie,Vietnamese
+zho,Chinese
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/PeerHaveManager.py b/instrumentation/next-share/BaseLib/Core/Subtitles/PeerHaveManager.py
new file mode 100644 (file)
index 0000000..6bddd0a
--- /dev/null
@@ -0,0 +1,204 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+import time
+from BaseLib.Core.Subtitles.MetadataDomainObjects import Languages
+import threading
+
+
+
+PEERS_RESULT_LIMIT = 5
+HAVE_VALIDITY_TIME = 7*86400 # one week (too big? to small?)
+
+# how often (in seconds) old have messages will be removed from the database
+# -1 means that they will be cleaned up only at Tribler's startup
+CLEANUP_PERIOD = -1
+
+class PeersHaveManager(object):
+    '''
+    Manages the insertion, retrieval and manipulation of 
+    subtitle have messages from other peers.
+    
+    The public interface consists only of the two methods:
+    
+    + getPeersHaving(channel, infohash, bitmask)
+    + newHaveReceived(channel, infohash, peer_id, havemask)
+    
+    See method descriptions for further details
+    '''
+    
+    __single = None
+    _singletonLock = threading.RLock()
+    def __init__(self):
+        
+        with PeersHaveManager._singletonLock:
+            #Singleton pattern not enforced: this makes testing easier
+            PeersHaveManager.__single = self
+            
+        self._haveDb = None
+        self._olBridge = None
+        self._cleanupPeriod = CLEANUP_PERIOD
+        self._haveValidityTime = HAVE_VALIDITY_TIME
+        self._langsUtility = Languages.LanguagesProvider.getLanguagesInstance()
+        self._firstCleanedUp = False
+        
+        self._registered = False
+        
+    @staticmethod
+    def getInstance():
+        with PeersHaveManager._singletonLock:
+            if PeersHaveManager.__single == None:
+                PeersHaveManager()
+        
+        return PeersHaveManager.__single
+        
+    def register(self, haveDb, olBridge):
+        '''
+        Inject dependencies
+        
+        @type haveDb: BaseLib.Core.CacheDB.MetadataDBHandler
+        @type olBridge: OverlayBridge
+
+        '''
+        assert haveDb is not None
+        assert olBridge is not None
+        
+        self._haveDb = haveDb
+        self._olBridge = olBridge
+        
+        self._registered = True
+        
+    def isRegistered(self):
+        return self._registered
+    
+    
+    def getPeersHaving(self, channel, infohash, bitmask, limit=PEERS_RESULT_LIMIT):
+        '''
+        Returns a list of permids of peers having all the subtitles for
+        (channel, infohash) specified in the bitmask
+        
+        Notice that if there exist a peer that has only some of the subtitles
+        specified in the bitmask, that peer will not be included
+        in the returned list.
+        
+        This implementation returns the peers sorted by most recently received
+        have message first.
+        
+        @type channel: str
+        @param channel: binary channel_id
+        
+        @type infohash: str
+        @param infohash: binary infohash
+        
+        @type bitmask: int
+        @param bitmask: a 32 bit bitmask specifieng the desired subtitles languages
+                        for returned peers to have.
+                        
+        @type limit: int
+        @param limit: an upper bound on the size of the returned list. Notice
+                      that anyway the returned list may be smaller then limit
+                      (Default 5)
+                      
+        @rtype: list
+        @return: a list of binary permids of peers that have all the subitles
+                 specified by the bitmask. If there is no suitable entry the returned
+                 list will be empty
+        '''
+        
+        # results are already ordered by timestamp due the current
+        # MetadataDBHandler implementation
+        peersTuples = self._haveDb.getHaveEntries(channel, infohash)
+        peers_length = len(peersTuples)
+        length = peers_length if peers_length < limit else limit
+        
+        results = list()
+        
+        for i in range(length):
+            peer_id, havemask, timestamp = peersTuples[i]
+            if havemask & bitmask == bitmask:
+                results.append(peer_id)
+                
+        if len(results) == 0:
+            #if no results, and if the channel owner was not in the initial
+            #list, consider him always as a valid source
+            results.append(channel)
+                
+        return results
+        
+    
+    def newHaveReceived(self, channel, infohash, peer_id, havemask):
+        '''
+        Notify the PeerHaveManager that a new SUBTITLE HAVE announcement has been
+        received.
+        
+        @type channel: str
+        @param channel: binary channel_id 
+        
+        @type infohash: str
+        @param infohash: binary infohash
+        
+        @type peer_id: str
+        @param channel: binary permid of the peer that sent
+                        this havemask
+                        
+        @type havemask: int
+        @param havemask: integer bitmask representing which combination of subtitles
+                         peer_id has for the given (channel, infohash) pair
+        '''
+        
+        
+        timestamp = int(time.time())
+        self._haveDb.insertOrUpdateHave(channel, infohash, peer_id, havemask, timestamp)
+        
+    
+    def retrieveMyHaveMask(self, channel, infohash):
+        '''
+        Creates the havemask for locally available subtitles for channel,infohash
+        
+        @type channel: str
+        @param channel: a channelid to retrieve the local availability mask for (binary)
+        @type infohash: str
+        @param infohash: the infohash of the torrent to retrieve to local availability mask
+                        for (binary)
+        
+        @rtype: int
+        @return: a bitmask reprsenting wich subtitles languages are locally available
+                 for the given (channel, infohash) pair. If no one is available, or even
+                 if no rich metadata has been ever received for that pair, a zero bitmask
+                 will be returned. (i.e. this method should never thorow an exception if the
+                 passed parametrers are formally valid)
+        '''
+        
+        localSubtitlesDict = self._haveDb.getLocalSubtitles(channel, infohash)
+        
+        havemask = self._langsUtility.langCodesToMask(localSubtitlesDict.keys())
+        
+        return havemask
+    
+    def startupCleanup(self):
+        '''
+        Cleanup old entries in the have database.
+        
+        This method is meant to be called only one time in PeersManager instance lifetime,
+        i.e. at Tribler's startup. Successive calls will have no effect.
+        
+        If CLEANUP_PERIOD is set to a positive value, period cleanups actions will be
+        scheduled.
+        '''
+        if not self._firstCleanedUp:
+            self._firstCleanedUp = True
+            self._schedulePeriodicCleanup()
+        
+    def _schedulePeriodicCleanup(self):
+        
+        minimumAllowedTS = int(time.time()) - self._haveValidityTime
+        self._haveDb.cleanupOldHave(minimumAllowedTS)
+        
+        if self._cleanupPeriod > 0:
+            self._olBridge.add_task(self._schedulePeriodicCleanup, self._cleanupPeriod)
+            
+        
+
+
+        
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/RichMetadataInterceptor.py b/instrumentation/next-share/BaseLib/Core/Subtitles/RichMetadataInterceptor.py
new file mode 100644 (file)
index 0000000..a57d157
--- /dev/null
@@ -0,0 +1,304 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+import sys
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import deserialize
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import SerializationException,\
+    RichMetadataException
+from BaseLib.Core.Utilities.utilities import isValidPermid, bin2str,\
+    show_permid_short, uintToBinaryString, binaryStringToUint
+from copy import copy
+from BaseLib.Core.simpledefs import NTFY_RICH_METADATA, NTFY_UPDATE, NTFY_INSERT
+
+
+DEBUG = False
+
+
+class RichMetadataInterceptor(object):
+    
+  
+    
+    def __init__(self, metadataDbHandler, voteCastDBHandler, myPermId,
+                 subSupport=None, peerHaveManager = None, notifier = None):
+        '''
+        Builds an instance of RichMetadataInterceptor.
+        
+        @param metadataDbHandler: an registered instance of
+            L{MetadataDBHandler}
+        @param voteCastDBHandler: a registered instance of VoteCastDBHandler
+        @param myPermId: the PermId of the client.
+        @param subSupport: a registered instance of L{SubtitlesSupport}
+        @param peerHaveManager: an instance of L{PeerHaveManager}
+        @param notifier: an instance of Notifier
+        '''
+#        assert isinstance(metadataDbHandler, MetadataDBHandler), \
+#            "Invalid RichMetadata DB Handler"
+#        assert isinstance(voteCastDBHandler, VoteCastDBHandler), \
+#            "Invalid Votecast DB Handler"
+        #hack to make a fast test DELETE THIS CONDITION
+#        if subSupp != None:
+#            assert isinstance(subSupp, SubtitlesSupport)
+        assert isValidPermid(myPermId),  "Invalid Permid"
+            
+        self.rmdDb = metadataDbHandler
+        self.votecastDB = voteCastDBHandler
+        self.my_permid = myPermId
+        self.subSupport = subSupport
+        self.peerHaveManager = peerHaveManager
+        self.notifier = notifier
+
+    
+    def _splitChannelcastAndRichMetadataContents(self,enrichedChannelcastMessage):
+        '''
+        Takes a "enriched" channelcast message (protocol v.14 - the one with
+        the 'rich_metadata' field inside), and extracts the rich metadata info
+        from it
+        
+        @param enrichedChannelcastMessage: a channelcast message from protocol 
+                                           version 14
+                                           
+        @return: a list tuples like (MetadataDTO, haveMask) instances extracted from the message. or
+                 an empty list if nothing. Along with it there is a list
+                 of the size of each entry in the message that is used to 
+                 collect stats. if the announceStatsLog is disable this list
+                 will always be empty
+        '''
+        if not isinstance(enrichedChannelcastMessage, dict):
+            if DEBUG:
+                print >> sys.stderr, "Invalid channelcast message received"
+            return None
+        
+        rmdData = list()
+        
+        sizeList = list()
+        for signature in iter(enrichedChannelcastMessage):
+            msg = enrichedChannelcastMessage[signature]
+            
+            if 'rich_metadata' in msg.keys():
+                metadataEntry = msg['rich_metadata']
+                if metadataEntry is None \
+                    or not validMetadataEntry(metadataEntry):
+                    continue
+                else:
+                    channel_id = msg['publisher_id']
+                    infohash = msg['infohash']
+
+                    # rebuilding the serialized MetadataDTO structure
+                    # that was broken in self.addRichMetadataContent
+                    binary_havemask = metadataEntry.pop(-1)
+                    havemask = binaryStringToUint(binary_havemask)
+                    
+                    metadataEntry.insert(0,infohash)
+                    metadataEntry.insert(0,channel_id)
+                    try:
+                        curMetadataDTO = deserialize(metadataEntry)
+                    except SerializationException,e:
+                        if DEBUG:
+                            print >> sys.stderr, "Invalid metadata message content: %s" % e
+                        continue
+                    
+                    rmdData.append((curMetadataDTO,havemask))
+        
+        return rmdData, sizeList
+    
+    def handleRMetadata(self, sender_permid, channelCastMessage, fromQuery = False):
+        '''
+        Handles the reception of rich metadata.
+        
+        Called when an "erniched" channelCastMessage (v14) is received.
+        @param sender_permid: the PermId of the peer who sent the message
+        @param channelCastMessage: the received message
+        @return: None
+        '''
+        metadataDTOs, sizeList = \
+          self._splitChannelcastAndRichMetadataContents(channelCastMessage)
+          
+        if DEBUG:
+            print >> sys.stderr, "Handling rich metadata from %s..." % show_permid_short(sender_permid)
+        i=0
+        for md_and_have in metadataDTOs:
+            md = md_and_have[0]
+            havemask = md_and_have[1]
+            
+            vote = self.votecastDB.getVote(bin2str(md.channel), 
+                                       bin2str(self.my_permid))
+            
+            # the next if may seem useless, but since sizeList is defined only when
+            # logging is enabled for debug, I get an error without this conditional statement
+            # because the argument for the debug() call getsEvaluated before the logging
+            # system understands that debug is disabled
+            #if announceStatsLog.isEnabledFor(logging.INFO):
+            if DEBUG:
+                id = "RQ" if fromQuery else "R"
+                print >> sys.stderr, "%c, %s, %s, %s, %d, %d" % \
+                                       (id, md.channel, md.infohash, \
+                                        show_permid_short(sender_permid), md.timestamp,
+                                        sizeList[i])
+                #format "R|S (R: received - S: sent), channel, infohash, sender|destination,metadataCreationTimestamp"
+                # 30-06-2010: "RQ" as received from query
+                i += 1
+        
+            # check if the record belongs to a channel 
+            # who we have "reported spam" (negative vote)
+            if  vote == -1:
+                # if so, ignore the incoming record
+                continue
+            
+            isUpdate =self.rmdDb.insertMetadata(md)
+            
+            self.peerHaveManager.newHaveReceived(md.channel,md.infohash,sender_permid,havemask)
+            
+            if isUpdate is not None:
+                #retrieve the metadataDTO from the database in the case it is an update
+                md = self.rmdDb.getMetadata(md.channel,md.infohash)
+                self._notifyRichMetadata(md, isUpdate)
+            
+            # if I am a subscriber send immediately a GET_SUBS to the 
+            # sender
+            if vote == 2:
+                if DEBUG:
+                    print >> sys.stderr, "Subscribed to channel %s, trying to retrieve" \
+                         "all subtitle contents" % (show_permid_short(md.channel),)
+                
+                self._getAllSubtitles(md)
+
+    def _computeSize(self,msg):
+        import BaseLib.Core.BitTornado.bencode as bencode
+        bencoded = bencode.bencode(msg)
+        return len(bencoded)
+    
+    
+    def _notifyRichMetadata(self, metadataDTO, isUpdate):
+        if self.notifier is not None:
+            eventType = NTFY_UPDATE if isUpdate else NTFY_INSERT
+            self.notifier.notify(NTFY_RICH_METADATA, eventType, (metadataDTO.channel, metadataDTO.infohash))
+            
+    
+    def _getAllSubtitles(self, md):
+        
+        subtitles = md.getAllSubtitles()
+        
+        try:
+            self.subSupport.retrieveMultipleSubtitleContents(md.channel,md.infohash,
+                                                             subtitles.values())
+        except RichMetadataException,e:
+            print >> sys.stderr, "Warning: Retrievement of all subtitles failed: " + str(e)
+        
+    
+    def addRichMetadataContent(self,channelCastMessage, destPermid = None, fromQuery = False):
+        '''
+        Takes plain channelcast message (from OLProto v.13) and adds to it
+        a 'rich_metadata' field.
+        
+        @param channelCastMessage: the old channelcast message in the format of
+                                   protocol v13
+        @param destPermid: the destination of the message. If not None it is used
+                            for logging purposes only. If None, nothing bad happens.
+        @return: the "enriched" channelcast message
+        '''
+        if not len(channelCastMessage) > 0:
+            if DEBUG:
+                print >> sys.stderr, "no entries to enrich with rmd"
+            return channelCastMessage
+        
+        if DEBUG:
+            if fromQuery: 
+                print >> sys.stderr, "Intercepted a channelcast message as answer to a query"
+            else:
+                print >> sys.stderr, "Intercepted a channelcast message as normal channelcast"
+        #otherwise I'm modifying the old one (even if there's nothing bad
+        #it's not good for the caller to see its parameters changed :)
+        newMessage = dict()
+            
+        # a channelcast message is made up of a dictionary of entries
+        # keyed the signature. Every value in the dictionary is itself
+        # a dictionary with the item informatino
+        for key in iter(channelCastMessage):
+            entryContent = copy(channelCastMessage[key])
+            newMessage[key] = entryContent
+            
+            channel_id = entryContent['publisher_id']
+            infohash = entryContent['infohash']
+            #not clean but the fastest way :(
+            # TODO: make something more elegant
+            metadataDTO = self.rmdDb.getMetadata(channel_id, infohash)
+            if metadataDTO is not None:
+                try:
+                    if DEBUG:
+                        print >> sys.stderr, "Enriching a channelcast message with subtitle contents"
+                    metadataPack = metadataDTO.serialize()
+                    
+                    # I can remove from the metadata pack the infohash, and channelId
+                    # since they are already in channelcast and they would be redundant
+                    metadataPack.pop(0)
+                    metadataPack.pop(0)
+                    
+                    #adding the haveMask at the end of the metadata pack
+                    havemask = self.peerHaveManager.retrieveMyHaveMask(channel_id, infohash)
+                    binary_havemask = uintToBinaryString(havemask)
+                    metadataPack.append(binary_havemask)
+                    
+                    
+                    entryContent['rich_metadata'] = metadataPack
+                    
+                    if DEBUG:
+                        size = self._computeSize(metadataPack)
+                        # if available records also the destination of the message
+                        dest = "NA" if destPermid is None else show_permid_short(destPermid)
+                    
+                        id = "SQ" if fromQuery else "S"
+                        # format (S (for sent) | SQ (for sent as response to a query), channel, infohash, destination, timestampe, size)
+                        print >> sys.stderr, "%c, %s, %s, %s, %d, %d" % \
+                            (id, bin2str(metadataDTO.channel), \
+                            bin2str(metadataDTO.infohash), \
+                             dest, metadataDTO.timestamp, size)
+                except Exception,e:
+                    print >> sys.stderr, "Warning: Error serializing metadata: %s", str(e)
+                    return channelCastMessage
+            else:
+                # better to put the field to None, or to avoid adding the
+                # metadata field at all?
+                ##entryContent['rich_metadata'] = None
+                pass
+            
+            
+        
+            
+        return newMessage
+    
+def validMetadataEntry(entry):
+    if entry is None or len(entry) != 6:
+        if DEBUG:
+            print >> sys.stderr, "An invalid metadata entry was found in channelcast message"
+        return False
+    
+    if not isinstance(entry[1], int) or entry[1] <= 0:
+        if DEBUG:
+            print >> sys.stderr, "Invalid rich metadata: invalid timestamp"
+        return False
+   
+    if not isinstance(entry[2], basestring) or not len(entry[2]) == 4: #32 bit subtitles mask
+        if DEBUG:
+            print >> sys.stderr, "Invalid rich metadata: subtitles mask"
+        return False
+    
+    if not isinstance(entry[3], list):
+        if DEBUG:
+            print >> sys.stderr, "Invalid rich metadata: subtitles' checsums"
+        return False
+    else:
+        for checksum in entry[3]:
+            if not isinstance(entry[2], basestring) or not len(checksum) == 20:
+                if DEBUG:
+                    print >> sys.stderr, "Invalid rich metadata: subtitles' checsums"
+                return False
+
+    
+    if not isinstance(entry[2], basestring) or not len(entry[5]) == 4: #32 bit have mask
+        if DEBUG:
+            print >> sys.stderr, "Invalid rich metadata: have mask"
+        return False
+    
+    return True
+    
+    
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/DiskManager.py b/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/DiskManager.py
new file mode 100644 (file)
index 0000000..9e1c326
--- /dev/null
@@ -0,0 +1,363 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import \
+    DiskManagerException
+from BaseLib.Core.osutils import getfreespace
+from random import random
+from traceback import print_exc
+import codecs
+import os
+import sys
+
+DISK_FULL_REJECT_WRITES = 0x0
+DISK_FULL_DELETE_SOME = 0x1
+
+DELETE_RANDOM = 0x02
+DELETE_OLDEST_FIRST = 0x0
+DELETE_NEWEST_FIRST = 0x4
+
+MINIMUM_FILE_SIZE = 4 #KBs. most common.
+
+
+DEFAULT_CONFIG = { "maxDiskUsage" :-1 , #infinity
+                   "diskPolicy" : DISK_FULL_REJECT_WRITES,
+                   "encoding" : "utf-8"}
+
+DEBUG = False
+
+class DiskManager(object):
+    """
+    Manages disk policies.
+    
+    Used for subtitle disk space handling it could be adapted
+    for any space management. The current implementation is
+    NOT THREAD-SAFE
+    
+    The disk manager is a central resource manager for disk space. 
+    A client object who wants to use the disk manager has to register to
+    it using the registerDir method. After that that client will be
+    associated to a directory where the disk manager will try to store
+    files for it.
+    
+    A DiskManager has a _minFreeSpace attribute that determines how much
+    space has to be always left free on the disk. It will perform no writes
+    if that write will make free space go under the _minFreeSpace threshold.
+    
+    When a client registers it must provided some configuration parameters.
+    This parameters comprehend maxDiskUsage that is the maximum disk quota
+    that can be used by that client, diskPolicy that is a bitmask specifying
+    the actions to do when the disk quota has been reached and a write
+    operation is asked, adn encoding that is the default encoding for every
+    file read or write.
+    
+    THIS CLASS IS NOT THREAD-SAFE!!!!
+    """
+    
+    def __init__(self, minFreeSpace=0, baseDir="."):
+        """
+        Create a new instance of DiskManager.
+        
+        @type minFreeSpace: int
+        @param minFreeSpace: the minimum amount of free space in KBs that
+            needs to be always available on disk after any
+            write operation
+        @type baseDir: str
+        @param baseDir: a path. It will be used by the manager to determine
+            which disk he has to use to calculate free space and
+            so.
+        """
+        assert os.path.isdir(baseDir)
+        self._minFreeSpace = minFreeSpace
+        self._registeredDirs = dict()
+        self._baseDir = baseDir
+        
+    def registerDir(self, directory, config=None):
+        """
+        Register a client object to use the services of the disk manager.
+        
+        When a client object wants to use a DiskManager instance it has to
+        provide a directory path, under which to store its files. This path
+        should corrispond to the same disk as the diskmanager _baseDir 
+        attribute. All subsequente write and read operations performed by the
+        disk manager will refer to files in the provided directory.
+        
+        @param directory: a directory path for which to register. This
+                          directory will be used as the base path for all
+                          subsequent file read and writes by the client that
+                          registered for it.
+        @param config: a dictionary containing configurations parameters
+            for the registrations. The keys of that dictionary
+            are:
+                - 'maxDiskUsage': maximum disk page that the client is
+                  allowed to use (in KBs) [-1 for infinity]
+                - 'diskPolicy': a 3 bit bitmask that is a combination of
+                  one of (DISK_FULL_REJECT_WRITES, 
+                  DISK_FULL_DELETE_SOME) and one of
+                  (DELETE_RANDOM, DELETE_OLDEST,
+                  ELETE_NEWEST)
+                - 'fileEncoding": encoding that will be used to read and
+                  write every file under the registered
+                  Dir
+                                    
+        """
+        assert directory is not None
+        assert os.path.isdir(directory), "%s is not a dir" % directory
+        
+        if config is None:
+            config = DEFAULT_CONFIG
+            
+        if "maxDiskUsage" not in config.keys() \
+            or "diskPolicy" not in config.keys() \
+            or "encoding" not in config.keys():
+            if DEBUG:
+                print >> sys.stderr, "Invalid config. Using default"
+            config = DEFAULT_CONFIG
+            
+        dedicatedDiskManager = BaseSingleDirDiskManager(directory, config, self)
+        self._registeredDirs[directory] = dedicatedDiskManager
+    
+    #free space in KBs
+    def getAvailableSpace(self):
+        space = max(0, self._get_free_space() - self._minFreeSpace)
+        return space
+        
+    
+    def _get_free_space(self):
+        """
+        Retrieves current free disk space.
+        """
+        try:
+            freespace = getfreespace(self._baseDir) / 1024.0
+            return freespace 
+        except:
+            print >> sys.stderr, "cannot get free space of", self._baseDir
+            print_exc()
+            return 0
+        
+    def writeContent(self, directory, filename, content):
+        """
+        Write a string into a file.
+        
+        @return: The path of the written file, if everythin is right
+        
+        @precondition: directory is registered
+        @precondition: content is a string
+        @postcondition: minimum free space, and maximum disk usage constraints
+                        are ok
+        """
+        if directory not in self._registeredDirs.keys():
+            msg = "Directory %s not registered" % directory
+            if DEBUG:
+                print >> sys.stderr, msg
+            raise DiskManagerException(msg)
+        
+        return self._registeredDirs[directory].writeContent(filename, content)
+    
+    def readContent(self, directory, filename):
+        """
+        Read the contents of a file.
+        
+        @return: a string containing the contents of the file
+        """
+        if directory not in self._registeredDirs.keys():
+            msg = "Directory %s not registered" % directory
+            if DEBUG:
+                print >> sys.stderr, msg
+                
+            raise DiskManagerException(msg)
+        
+        return self._registeredDirs[directory].readContent(filename)
+
+    def deleteContent(self, directory, filename):
+        if directory not in self._registeredDirs.keys():
+            msg = "Directory %s not registered" % directory
+            if DEBUG:
+                print >> sys.stderr, msg
+            raise DiskManagerException(msg)
+        
+        return self._registeredDirs[directory].deleteContent(filename)
+    
+    def tryReserveSpace(self, directory, amount):
+        """
+        Check if there a given amount of available space. (in KBs)
+        
+        If there is, it does nothing :)
+        (aslo if there isn't) 
+        """
+        if directory not in self._registeredDirs.keys():
+            msg = "Directory %s not registered" % directory
+            if DEBUG:
+                print >> sys.stderr, msg
+            raise DiskManagerException(msg)
+        
+        return self._registeredDirs[directory].tryReserveSpace(amount)
+    
+    def isFilenOnDisk(self, directory, filename):
+        if directory not in self._registeredDirs.keys():
+            msg = "Directory %s not registered" % directory
+            if DEBUG:
+                print >> sys.stderr, msg
+            raise DiskManagerException(msg)
+        
+        return self._registeredDirs[directory].isFileOnDisk(filename)
+        
+        
+    
+class BaseSingleDirDiskManager(object):
+    
+    def __init__(self, workingDir, config, dm):
+        self.workingDir = workingDir
+        self.fileEncoding = config["encoding"]
+        #select the last bit only
+        self.diskFullPolicy = config["diskPolicy"] & 0x1
+        #select the second and third bit from the right
+        self.deletePolicy = config["diskPolicy"] & 0x6
+        self.maxDiskUsage = config["maxDiskUsage"]
+        if self.maxDiskUsage < 0: #infinte
+            self.maxDiskUsage = (2 ** 80) #quite infinite
+        self.dm = dm
+        self.dirUsage = 0
+        self._updateDirectoryUsage()
+    
+    def writeContent(self, filename, content):
+        # assuming that a file system block is 4 KB
+        # and therefore every file has a size that is a multiple of 4 kbs
+        # if the assumption is violated nothing bad happens :)
+        approxSize = max(MINIMUM_FILE_SIZE, (len(content) / 1024.0))
+        sizeInKb = approxSize + (approxSize % MINIMUM_FILE_SIZE)
+        if self.tryReserveSpace(sizeInKb):
+            return self._doWrite(filename, content)
+        else:
+            if self.diskFullPolicy == DISK_FULL_REJECT_WRITES:
+                raise DiskManagerException("Not enough space to write content. Rejecting")
+            elif self.diskFullPolicy == DISK_FULL_DELETE_SOME:
+                if self.makeFreeSpace(sizeInKb):
+                    return self._doWrite(filename, content)
+                else:
+                    raise DiskManagerException("Unable to get enough space to write content.")
+            
+            
+    def readContent(self, filename):
+        path = os.path.join(self.workingDir, filename)
+        if not os.path.isfile(path):
+            raise IOError("Unable to read from %s" % path)
+        with codecs.open(path, "rb", self.fileEncoding,"replace") as xfile:
+            content = xfile.read()
+        
+        return content
+    
+    def deleteContent(self, filename):
+        if DEBUG:
+            print >> sys.stderr, "Deleting " + filename
+        path = os.path.join(self.workingDir, filename)
+        if not os.path.isfile(path):
+            if DEBUG:
+                print >> sys.stderr, "Noting to delete at %s" % path
+            return False
+        try:
+            os.remove(path)
+            self._updateDirectoryUsage()
+            return True
+        except OSError,e:
+            print >> sys.stderr, "Warning: Error removing %s: %s" % (path, e)
+            return False
+    
+    def makeFreeSpace(self, amount):
+        if DEBUG:
+            print >> sys.stderr, "Trying to retrieve %d KB of free space for %s" % (amount, self.workingDir)
+        if amount >= self.maxDiskUsage:
+            return False
+        if amount >= (self.dm.getAvailableSpace() + self._currentDiskUsage()):
+            return False
+        
+        maxTries = 100
+        tries = 0
+        while self._actualAvailableSpace() <= amount:
+            if tries >= maxTries:
+                print >> sys.stderr, "Unable to make up necessary free space for %s" % \
+                         self.workingDir
+                return False
+            toDelete = self._selectOneToDelete()
+            if toDelete is None:
+                return False
+            self.deleteContent(toDelete)
+            tries = +1
+            
+            
+        return True
+    
+    def isFileOnDisk(self, filename):
+        path = os.path.join(self.workingDir, filename)
+        if os.path.isfile(path):
+            return path
+        else:
+            return None
+    
+    def _doWrite(self, filename, content):
+        
+        path = os.path.join(self.workingDir, filename)
+        if os.path.exists(path):
+            if DEBUG:
+                print >> sys.stderr, "File %s exists. Overwriting it."
+            os.remove(path)
+        try:
+            if not isinstance(content,unicode):
+                content = content.decode(self.fileEncoding,'replace')
+            with codecs.open(path, "wb", self.fileEncoding,'replace') as toWrite:
+                toWrite.write(content)
+        except Exception,e:
+            #cleaning up stuff
+            if os.path.exists(path):
+                os.remove(path)
+            raise e
+        
+        self._updateDirectoryUsage()
+        return path
+    
+    def _selectOneToDelete(self):
+        pathlist = map(lambda x : os.path.join(self.workingDir, x),
+                            os.listdir(self.workingDir))
+        candidateList = [xfile for xfile in pathlist
+                         if os.path.isfile(os.path.join(self.workingDir, xfile))]
+        
+        if not len(candidateList) > 0:
+            return None
+            
+        if self.deletePolicy == DELETE_RANDOM:
+            return random.choice(candidateList)
+        else:
+            sortedByLastChange = sorted(candidateList, key=os.path.getmtime)
+            if self.deletePolicy == DELETE_NEWEST_FIRST:
+                return sortedByLastChange[-1]
+            elif self.deletePolicy == DELETE_OLDEST_FIRST:
+                return sortedByLastChange[0]
+
+
+    def tryReserveSpace(self, amount):
+        if amount >= self._actualAvailableSpace():
+            return False
+        else:
+            return True
+    
+    def _currentDiskUsage(self):
+        return self.dirUsage
+    
+    def _updateDirectoryUsage(self):
+        listOfFiles = os.listdir(self.workingDir)
+        listofPaths = \
+            map(lambda name: os.path.join(self.workingDir, name), listOfFiles)
+        
+        #does not count subdirectories
+        dirSize = sum([os.path.getsize(fpath) for fpath in listofPaths])
+        
+        self.dirUsage = dirSize / 1024.0 #Kilobytes
+    
+    def _actualAvailableSpace(self):
+        space = min(self.dm.getAvailableSpace(),
+                   self.maxDiskUsage - self._currentDiskUsage())
+        return space
+    
+        
+            
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/SimpleTokenBucket.py b/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/SimpleTokenBucket.py
new file mode 100644 (file)
index 0000000..aad7090
--- /dev/null
@@ -0,0 +1,63 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+
+from time import time
+
+class SimpleTokenBucket(object):
+    """
+    A simple implementation of a token bucket, to
+    control the rate of subtitles being uploaded.
+    
+    1 token corresponds to 1 KB
+    
+    Not threadsafe!!
+    """
+    
+    def __init__(self, fill_rate, capacity = -1):
+        """
+        Creates a token bucket initialy having 0 tokens,
+        with the given fill_rate.
+        
+        @param fill_rate: number of tokens refilled per second.
+                          a token corrisponds to 1KB
+        @param capacity: maximum number of tokens in the bucket.
+        """
+        
+        #infinite bucket! (well, really big at least)
+        if capacity == -1:
+            capacity = 2**30 # 1 TeraByte capacity
+        self.capacity = float(capacity)
+        
+        self._tokens = float(0)
+        
+        self.fill_rate = float(fill_rate)
+        self.timestamp = time()
+
+    def consume(self, tokens):
+        """Consume tokens from the bucket. Returns True if there were
+        sufficient tokens otherwise False."""
+        if tokens <= self.tokens:
+            self._tokens -= tokens
+        else:
+            return False
+        return True
+    
+    def _consume_all(self):
+        """
+        Consumes every token in the bucket
+        """
+        self._tokens = float(0)
+
+    @property
+    def tokens(self):
+        if self._tokens < self.capacity:
+            now = time()
+            delta = self.fill_rate * (now - self.timestamp)
+            self._tokens = min(self.capacity, self._tokens + delta)
+            self.timestamp = now
+        return self._tokens
+    
+    @property
+    def upload_rate(self):
+        return self.fill_rate
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/SubsMessageHandler.py b/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/SubsMessageHandler.py
new file mode 100644 (file)
index 0000000..4013e06
--- /dev/null
@@ -0,0 +1,880 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+
+
+from BaseLib.Core.BitTornado.BT1.MessageID import SUBS, GET_SUBS
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import \
+    LanguagesProvider
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import \
+    SubtitleMsgHandlerException
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FOURTEENTH
+from BaseLib.Core.Utilities import utilities
+from BaseLib.Core.Utilities.utilities import show_permid_short, validInfohash, \
+    validPermid, bin2str, uintToBinaryString, binaryStringToUint
+from time import time
+from traceback import print_exc
+import sys
+import threading
+    
+SUBS_LOG_PREFIX = "subtitles: "
+
+# pending requests older than this many seconds are considered expired
+REQUEST_VALIDITY_TIME = 10 * 60 #10 minutes
+# how often the expired-requests cleanup is supposed to run
+CLEANUP_PERIOD = 5 * 60#5 minutes
+
+DEBUG = False
+    
+class SubsMessageHandler(object):
+    
+    def __init__(self, overlayBridge, tokenBucket, maxSubsSize):
+        """
+        Sets up the subtitles message handler.
+
+        @param overlayBridge: bridge to the overlay thread, used to
+            connect to peers, send messages and schedule tasks
+        @param tokenBucket: SimpleTokenBucket used to throttle SUBS uploads
+        @param maxSubsSize: maximum accepted size of a single received
+            subtitle (larger ones are dropped)
+        """
+        self._languagesUtility = LanguagesProvider.getLanguagesInstance()
+        self._overlay_bridge = overlayBridge
+        
+        # handleMessage() is called by the OLThread
+        # registerListener is called by the OLThread
+        # no synchronization should be needed for this list :)
+        self._listenersList = list()
+        
+        self._tokenBucket = tokenBucket
+        
+        #controls the interval the uploadQueue gets checked
+        self._nextUploadTime = 0
+        
+        
+        #dictionary of type { "".join(channel_id,infohash) : _RequestedSubtitlesEntry}
+        #bits get cleaned when subtitles are received
+        #when the bitmask is 000 the entry is removed from the dictionary
+        #also entries older than REQUEST_VALIDITY_TIME get dropped
+        
+
+        
+        self.requestedSubtitles = {}
+        self._requestsLock = threading.RLock()
+        
+        self._nextCleanUpTime = int(time()) + CLEANUP_PERIOD
+
+        #subtitles to send get queued in this queue
+        #each subtitle message to send is a dictionary whose keys are:
+        #permid: destination of the message
+        #channel_id: identifier of the channel from which the subtitles to
+        #            upload are
+        #infohash: identifier of the torrent for which the subtitles to 
+        #          upload are
+        #subtitles: a dictionary of the form {langCode : path} for the
+        #           subtitles to send
+        #selversion: protocol version of the destination peer
+    
+        self._uploadQueue = []
+        
+        self._requestValidityTime = REQUEST_VALIDITY_TIME
+        
+        self._maxSubSize = maxSubsSize
+        
+    def setTokenBucket(self, tokenBucket):
+        # replaces the bucket used to throttle uploads; must not be None
+        assert tokenBucket is not None
+        self._tokenBucket = tokenBucket
+    def getTokenBucket(self):
+        return self._tokenBucket
+    
+    # exposed as a read/write property
+    tokenBucket = property(getTokenBucket,setTokenBucket)
+            
+    
+    def _getRequestedSubtitlesKey(self, channel_id, infohash):
+        #requested subtitle is a dictionary whose keys are the
+        #concatenation of (channel_id,infohash)
+
+        return "".join((channel_id, infohash))
+    
+    
+    def sendSubtitleRequest(self, dest_permid, requestDetails, 
+                            msgSentCallback = None, usrCallback = None, selversion=-1):
+        """
+        Create and send a subtitle request to dest_permid.
+        
+        Creates, encodes and sends (through the OLBridge) a GET_SUBS request
+        to the given dest_permid. Notice that even when this method returns
+        successfully the message may still not have been sent.
+        
+        @param dest_permid: the permid of the peer where the message should be 
+                            sent. Binary.
+        @param requestDetails: a dictionary containing the details of the request
+                               to be sent:
+                               a 'channel_id' entry which is the binary channel
+                               identifier (permid) of the desired subtitles
+                               a 'infohash' entry which is the binary infohash
+                               of the torrent the requested subtitles refer to
+                               a 'languages' entry which is a list of 3-characters
+                               codes identifying the needed subtitles
+        @type msgSentCallback: function
+        @param msgSentCallback: a function that will be called when the message has been
+                          sent. It must have 5 parameters: exc (bound to a possible
+                          exception), dest_permid, channel_id, infohash, bitmask)
+        @type usrCallback: function
+        @param usrCallback: a function that will be called whenever some of the requested
+                      subtitles are retrieved. Only one parameter: ie a list that will
+                      be bound to the received language codes
+        @type selversion: int
+        @param selversion: the protocol version of the destination; -1 means
+                      not currently connected (a connection will be opened first)
+                          
+        @raise SubtitleMsgHandlerException: if something fails before attempting
+                                            to send the message. 
+        """
+
+        
+        channel_id = requestDetails['channel_id']
+        infohash = requestDetails['infohash']
+        languages = requestDetails['languages']
+        
+        bitmask = self._languagesUtility.langCodesToMask(languages)
+        if bitmask != 0:
+            try:
+                # Optimization: don't connect if we're connected, although it won't 
+                # do any harm.
+                if selversion == -1: # not currently connected
+                    self._overlay_bridge.connect(dest_permid,
+                                                lambda e, d, p, s:
+                                                self._get_subs_connect_callback(e, d, p, s, channel_id,
+                                                                                infohash, bitmask,
+                                                                                msgSentCallback, usrCallback))
+                else:
+                    self._get_subs_connect_callback(None, None, dest_permid,
+                                                   selversion, channel_id, infohash,
+                                                   bitmask, msgSentCallback, usrCallback)
+                
+            except Exception,e:
+                if DEBUG:
+                    print >> sys.stderr, SUBS_LOG_PREFIX + "Unable to send: %s" % str(e)
+                raise SubtitleMsgHandlerException(e)
+        else:
+            raise SubtitleMsgHandlerException("Empty request, nothing to send")
+        
+        
+    def sendSubtitleResponse(self, destination, response_params, selversion = -1):
+        """
+        Send a subtitle response message to destination permid.
+        
+        @param destination: the permid of the destionation of the message
+        @param response_params: a tuple containing channel_id,infohash, and a 
+                                dictionary of contents, in that order
+        @type selversion: int
+        @param selversion: the protocol version of the destination (default -1)
+        """
+        
+        channel_id, infohash, contentsList = response_params
+        
+                
+        task = {
+                'permid' : destination,
+                'channel_id' : channel_id,
+                'infohash' : infohash,
+                'subtitles' : contentsList,
+                'selversion' : selversion
+                }
+        
+
+        self._uploadQueue.append(task)
+        
+        if int(time()) >= self._nextUploadTime:
+            self._checkingUploadQueue()
+            
+        return True
+        
+    
+    def handleMessage(self, permid, selversion, message):
+        """
+        Must return True or False (for what I understood a return value of
+        false closes the connection with permid, but I'm still not sure)
+        """
+        t = message[0]
+        
+        if t == GET_SUBS:   # the other peer requests a torrent
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Got GET_SUBS len: %s from %s" % \
+                      (len(message), show_permid_short(permid))
+            return self._handleGETSUBS(permid, message, selversion)
+        elif t == SUBS:     # the other peer sends me a torrent
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Got SUBS len: %s from %s" %\
+                     (len(message), show_permid_short(permid))
+
+            return self._handleSUBS(permid, message, selversion)
+        else:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Unknown Overlay Message %d" % ord(t)
+            return False
+    
+    
+    def _handleGETSUBS(self,permid, message, selversion):
+        """
+        Handles a received GET_SUBS request.
+
+        Decodes the message and, if valid, notifies every registered
+        listener via receivedSubsRequest().
+
+        @return: False if the message is invalid or the sender's protocol
+                 version is too old (closes the connection), True otherwise
+        """
+        if selversion < OLPROTO_VER_FOURTEENTH:
+            if DEBUG:
+                print >> sys.stderr, "The peer that sent the GET_SUBS request has an old" \
+                     "protcol version: this is strange. Dropping the msg"
+            return False
+        decoded = self._decodeGETSUBSMessage(message)
+        
+        if decoded is None:
+            if DEBUG:
+                print >> sys.stderr, "Error decoding a GET_SUBS message from %s" %\
+                      utilities.show_permid_short(permid)
+            return False
+    
+        if DEBUG:
+            # Format: RG, sender, channel, infohash, bitmask, size
+            channel_id, infohash, languages = decoded
+            bitmask = self._languagesUtility.langCodesToMask(languages)
+            print >> sys.stderr, "%s, %s, %s, %s, %d, %d" % ("RG", show_permid_short(permid), 
+                                                     show_permid_short(channel_id),
+                                                     bin2str(infohash), bitmask, len(message))
+        
+        # no synch on _listenersList since both this method
+        # and the registerListener method are called by
+        # the OLThread
+        for listener in self._listenersList:
+            listener.receivedSubsRequest(permid, decoded, selversion)
+        
+        return True
+    
+    
+    
+    def _handleSUBS(self, permid, message, selversion):
+        """
+        Handles a received SUBS (subtitle response) message.
+
+        Decodes the message, drops it entirely if none of its subtitles
+        were requested, filters out unrequested languages, then notifies
+        every registered listener via receivedSubsResponse().
+
+        @return: False if the message is invalid, unrequested, or the
+                 sender's protocol version is too old; True otherwise
+        """
+        if selversion < OLPROTO_VER_FOURTEENTH:
+            if DEBUG:
+                print >> sys.stderr, "The peer that sent the SUBS request has an old" \
+                     "protcol version: this is strange. Dropping the msg"
+            return False
+        
+        decoded = self._decodeSUBSMessage(message)
+        
+        if decoded is None:
+            if DEBUG:
+                print >> sys.stderr, "Error decoding a SUBS message from %s" %\
+                      utilities.show_permid_short(permid)
+            return False
+        
+        
+        channel_id, infohash, bitmask,contents = decoded
+        #if no subtitle was requested drop the whole message
+        
+        if DEBUG:
+            # Format: RS, sender, channel, infohash, bitmask, size
+            print >> sys.stderr, "%s, %s, %s, %s, %d, %d" % ("RS", show_permid_short(permid), 
+                                                     show_permid_short(channel_id),
+                                                     bin2str(infohash), bitmask, len(message))
+        
+        
+
+        requestedSubs = self._checkRequestedSubtitles(channel_id,infohash,bitmask) 
+        if requestedSubs == 0:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Received a SUBS message that was not"\
+                      " requested. Dropping"
+            return False
+        
+        requestedSubsCodes = self._languagesUtility.maskToLangCodes(requestedSubs)
+        #drop from the contents subtitles that were not requested
+        # (safe in Python 2: .keys() returns a list copy, so deleting
+        # entries while iterating is fine)
+        for lang in contents.keys():
+            if lang not in requestedSubsCodes:
+                del contents[lang]
+        
+        #remove the received subtitles from the requested 
+        callbacks = \
+            self._removeFromRequestedSubtitles(channel_id, infohash, bitmask)
+
+        
+        
+        #the receiver does not need the bitmask
+        tuple = channel_id, infohash, contents
+        
+        # no synch on _listenersList since both this method
+        # and the registerListener method are called by
+        # the OLThread
+        for listener in self._listenersList:
+            listener.receivedSubsResponse(permid, tuple, callbacks, selversion)
+        
+    
+        return True
+    
+    def registerListener(self, listenerObject):
+        '''
+        Register an object to be notified about the reception of subtitles
+        related messages.
+
+        Currently the messages that are notified are:
+            - GET_SUBS
+            - SUBS
+
+        The appropriate method on listenerObject will be called by the
+        OverlayThread upon reception of a message
+
+        @param listenerObject: an object having two methods with the following
+            signature:
+                1. receivedSubsRequest(permid, decoded, selversion)
+                2. receivedSubsResponse(permid, decoded, callbacks, selversion)
+            Following is the explanation of the parameters:
+                - permid: is the PermId of the peer who sent the request
+                  (response)
+                - decoded is a tuple containing the decoded attributes of the
+                  GET_SUBS message
+                - selversion is the protocol version of the peer who sent the
+                  request (response)
+                - callbacks is a list of pairs. Each pair is like::
+                    (mask, function)
+                  mask is a bitmask, and function is the function that should
+                  be called upon receipt of subtitles for that mask.
+
+        '''
+        #Only called by OLThread, so no locking on _listenersList is needed
+        self._listenersList.append(listenerObject)
+        
+
+    
+    
+    def _get_subs_connect_callback(self, exception, dns, permid, selversion,
+                              channel_id, infohash, bitmask, msgSentCallback, usrCallback):
+        """
+        Called by the Overlay Thread when a connection with permid is established.
+        
+        Performs the actual action of sending a GET_SUBS request to the peer
+        identified by permid. It is called by the OLThread when a connection
+        with that peer is established.
+    
+        """
+        
+        if exception is not None:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + \
+                      "GET_SUBS not sent. Unable to connect to " + \
+                      utilities.show_permid_short(permid)
+        else:
+            
+                    
+            if (selversion > 0 and selversion < OLPROTO_VER_FOURTEENTH):
+                msg = "GET_SUBS not send, the other peers had an old protocol version: %d" %\
+                    selversion
+                if DEBUG:
+                    print >> sys.stderr, msg
+                raise SubtitleMsgHandlerException(msg)
+            
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "sending GET_SUBS to " + \
+                      utilities.show_permid_short(permid)
+            try :
+                message = self._createGETSUBSMessage(channel_id, infohash,
+                                                    bitmask)
+                
+                
+                if DEBUG:
+                    # Format:
+                    # SS|SG, destination, channel, infohash, bitmask, size
+                    print >> sys.stderr, "%s, %s, %s, %s, %d, %d" % ("SG",show_permid_short(permid), 
+                                                             show_permid_short(channel_id),
+                                                             bin2str(infohash),bitmask,len(message))
+                
+                self._overlay_bridge.send(permid, message,
+                                          lambda exc, permid: \
+                                            self._sent_callback(exc,permid,
+                                                            channel_id,
+                                                            infohash,
+                                                            bitmask,
+                                                            msgSentCallback,
+                                                            usrCallback))
+        
+            except Exception,e:
+                print_exc()
+                msg = "GET_SUBS not sent: %s" % str(e)
+                raise SubtitleMsgHandlerException(e)
+            
+    def _sent_callback(self,exc,permid,channel_id,infohash,bitmask, msgSentCallback, usrCallback):
+        """
+        Called by the OverlayThread after a GET_SUBS request has been sent.
+        """
+        if exc is not None:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Unable to send GET_SUBS to: " + \
+                      utilities.show_permid_short(permid) + ": " + exc
+        else:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "GET_SUBS sent to %s" % \
+                       (utilities.show_permid_short(permid))
+            self._addToRequestedSubtitles(channel_id, infohash, bitmask, usrCallback)
+            if msgSentCallback is not None:
+                msgSentCallback(exc,permid,channel_id,infohash,bitmask)
+        
+                
+    def _createGETSUBSMessage(self, channel_id, infohash, bitmask):
+        """
+        Bencodes a GET_SUBS message and adds the appropriate header.
+        """
+        
+        binaryBitmask = uintToBinaryString(bitmask)
+        body = bencode((channel_id, infohash, binaryBitmask))
+        head = GET_SUBS
+        return head + body
+    
+    
+            
+    def _decodeGETSUBSMessage(self, message):
+        """
+        From a bencoded GET_SUBS message, returns its decoded contents.
+        
+        Decodes and checks for validity a bencoded GET_SUBS message.
+        If the message is successfully decoded returns the tuple
+        (channel_id,infohash,languages).
+        
+        channel_id is the binary identifier of the channel that published
+        the requested subtitles.
+        infohash is the binary identifier of the torrent which the subtitle
+        refers to.
+        languages is a list of 3 characters language codes, for the languages
+        of the requested subtitles
+        
+        @return: (channel_id,infohash,languages) or None if something is wrong
+        """
+        assert message[0] == GET_SUBS, SUBS_LOG_PREFIX + \
+            "Invalid GET_SUBS Message header: %s" % message[0]
+        
+        try:
+            values = bdecode(message[1:])
+        except:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Error bdecoding message"
+            return None
+        
+        if len(values) != 3:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid number of fields in GET_SUBS"
+            return None
+        channel_id, infohash, bitmask = values[0], values[1], values[2]
+        if not validPermid(channel_id):
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid channel_id in GET_SUBS"
+            return None
+        elif not validInfohash(infohash):
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid infohash in GET_SUBS"
+            return None
+        # the bitmask travels on the wire as a fixed 4-byte binary string
+        elif not isinstance(bitmask, str) or not len(bitmask)==4:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid bitmask in GET_SUBS"
+            return None
+        
+        try:
+            bitmask = binaryStringToUint(bitmask)
+            languages = self._languagesUtility.maskToLangCodes(bitmask)
+        except:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid bitmask in GET_SUBS"
+            return None
+        
+        return channel_id, infohash, languages
+    
+    
+    def _decodeSUBSMessage(self, message):
+        """
+        From a bencoded SUBS message, returns its decoded contents.
+        
+        Decodes and checks for validity a bencoded SUBS message.
+        If the message is successfully decoded returns the tuple
+        (channel_id, infohash, bitmask, contentsDictionary )
+        
+        channel_id is the binary identifier of the channel that published
+        the requested subtitles.
+        infohash is the binary identifier of the torrent which the subtitle
+        refers to.
+        contentsDictionary is a dictionary having each entry like 
+        {langCode : subtitleContents}.
+        
+        Note: subtitles bigger than the configured maximum size are
+        silently dropped, and the returned bitmask is recomputed so it
+        reflects only the subtitles actually kept.
+        
+        @return: the above described tuple, or None if something is wrong
+        """
+        assert message[0] == SUBS, SUBS_LOG_PREFIX + \
+            "Invalid SUBS Message header: %s" % message[0]
+            
+        try:
+            values = bdecode(message[1:])
+        except:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Error bdecoding SUBS message"
+            return None
+        
+        if len(values) != 4:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid number of fields in SUBS"
+            return None
+        channel_id, infohash, bitmask, contents = values[0], values[1], \
+            values[2], values[3]
+        
+        if not validPermid(channel_id):
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid channel_id in SUBS"
+            return None
+        elif not validInfohash(infohash):
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid infohash in SUBS"
+            return None
+        # the bitmask travels on the wire as a fixed 4-byte binary string
+        elif not isinstance(bitmask, str) or not len(bitmask) == 4:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid bitmask in SUBS"
+            return None
+        
+        try:
+            bitmask = binaryStringToUint(bitmask)
+            languages = self._languagesUtility.maskToLangCodes(bitmask)
+        except:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid bitmask in SUBS"
+            return None
+        
+        if not isinstance(contents, list):
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Invalid contents in SUBS"
+            return None
+        # contents are positional: the i-th subtitle corresponds to the
+        # i-th language set in the bitmask, so lengths must match
+        if len(languages) != len(contents):
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Bitmask and contents do not match in"\
+                      " SUBS"
+            return None
+        
+        numOfContents = len(languages)
+        if numOfContents == 0:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Empty message. Discarding."
+            return None
+        
+        
+        contentsDictionary = dict()
+        for i in range(numOfContents):
+            lang = languages[i]
+            subtitle = contents[i]
+            if not isinstance(subtitle,unicode):
+                try:
+                    subtitle = unicode(subtitle)
+                except:
+                    return None
+            if len(subtitle) <= self._maxSubSize:
+                contentsDictionary[lang] = subtitle
+            else:
+                #drop that subtitle
+                continue
+            
+        # recompute the bitmask to reflect only the subtitles kept
+        bitmask = self._languagesUtility.langCodesToMask(contentsDictionary.keys())
+            
+        
+        return channel_id, infohash, bitmask, contentsDictionary
+    
+
+    def _checkingUploadQueue(self):
+        """
+        Uses a token bucket to control the subtitles upload rate.
+        
+        Every time this method is called, it will check if there are enough
+        tokens in the bucket to send out a SUBS message.
+        Currently fragmentation is not implemented: all the requested subtitles
+        are sent in a single SUBS message if there are enough tokens:
+        too big responses are simply discarded.
+        
+        The method tries to consume all the available tokens of the token
+        bucket until there are no more messages to send. If there are not
+        sufficient tokens to send a message, another call to this method
+        is scheduled at a point in time sufficiently distant.
+        """
+        
+        if DEBUG:
+            print >> sys.stderr, SUBS_LOG_PREFIX + "Checking the upload queue..."
+        
+        # a non-positive upload rate means uploading is effectively disabled
+        if not self._tokenBucket.upload_rate > 0:
+            return 
+        
+        if not len(self._uploadQueue) > 0:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Upload queue is empty."
+            
+        while len(self._uploadQueue) > 0 :
+            # peek at the head of the queue; it is removed only once handled
+            responseData = self._uploadQueue[0]
+            encodedMsg = self._createSingleResponseMessage(responseData)
+            
+            if encodedMsg is None:
+                if DEBUG:
+                    print >> sys.stderr, SUBS_LOG_PREFIX + "Nothing to send"
+                del self._uploadQueue[0]
+                continue #check other messages in the queue
+            
+            # token-bucket accounting is in kilobytes: 1 token == 1 KB
+            msgSize = len(encodedMsg) / 1024.0 #in kilobytes
+            
+            if msgSize > self._tokenBucket.capacity: 
+                #message is too big, discarding
+                print >> sys.stderr, "Warning:" + SUBS_LOG_PREFIX + "SUBS message too big. Discarded!"
+                del self._uploadQueue[0]
+                continue #check other messages in the queue
+            
+            #check if there are sufficient tokens
+            if self._tokenBucket.consume(msgSize):
+                
+                if DEBUG:
+                    # Format:
+                    # S|G, destination, channel, infohash, bitmask, size
+                    keys = responseData['subtitles'].keys()
+                    bitmask = self._languagesUtility.langCodesToMask(keys)
+                    print >> sys.stderr, "%s, %s, %s, %s, %d, %d" % ("SS",show_permid_short(responseData['permid']),
+                                                             show_permid_short(responseData['channel_id']),
+                                                             bin2str(responseData['infohash']),bitmask,int(msgSize*1024))
+                    
+                self._doSendSubtitles(responseData['permid'], encodedMsg, responseData['selversion'])
+                del self._uploadQueue[0]
+            else: 
+                #tokens are insufficient: wait the necessary time and check again
+                neededCapacity = max(0, msgSize - self._tokenBucket.tokens)
+                delay = (neededCapacity / self._tokenBucket.upload_rate)
+                self._nextUploadTime = time() + delay
+                self.overlay_bridge.add_task(self._checkingUploadQueue, delay)
+                return
+        
+        #The cycle breaks only when the queue is empty
+    
+    
+    def _createSingleResponseMessage(self, responseData):
+        """
+        Create a bencoded SUBS message to send in response to a GET_SUBS
+        
+        The format of the sent message is a not encoded SUBS character and then
+        the bencoded form of
+        (channel_id,infohash,bitmask,[listOfSubtitleContents])
+        the list of subtitle contents is ordered as the bitmask
+        
+        """
+        
+        orderedKeys = sorted(responseData['subtitles'].keys())
+
+        payload = list()
+        #read subtitle contents
+        for lang in orderedKeys:
+            
+            fileContent = responseData['subtitles'][lang]
+                
+            if fileContent is not None and len(fileContent) <= self._maxSubSize:
+                payload.append(fileContent)
+            else:
+                print >> sys.stderr, "Warning: Subtitle in % for ch: %s, infohash:%s dropped. Bigger then %d" % \
+                            (lang, responseData['channel_id'], responseData['infohash'], 
+                             self._maxSubSize)
+
+                
+        
+        if not len(payload) > 0:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "No payload to send in SUBS"
+            return None
+        
+        bitmask = \
+            self._languagesUtility.langCodesToMask(orderedKeys)
+        
+        binaryBitmask = uintToBinaryString(bitmask, length=4)
+        header = (responseData['channel_id'], responseData['infohash'], binaryBitmask)
+        
+        message = bencode((
+                           header[0],
+                           header[1],
+                           header[2],
+                           payload
+                           ))
+        
+        return SUBS + message
+    
+    
+    def _doSendSubtitles(self, permid, msg, selversion):
+        """
+        Do sends the SUBS message through the overlay bridge.
+        """
+        if DEBUG:
+            print >> sys.stderr, SUBS_LOG_PREFIX + "Sending SUBS message to %s..." % \
+                  show_permid_short(permid)
+    
+        # Optimization: we know we're currently connected
+        #DOUBLE CHECK THIS. I just assuemed it was true 
+        # since it is true for MetadataHandler
+        self._overlay_bridge.send(permid, msg, self._subs_send_callback)
+    
+    def _subs_send_callback(self, exc, permid):
+        '''
+        Called by the OLThread when a SUBS message is succesfully sent
+        '''
+        if exc is not None:
+            print >> sys.stderr, "Warning: Sending of SUBS message to %s failed: %s" % \
+                (show_permid_short(permid), str(exc))
+        else:
+            if DEBUG:
+                print >> sys.stderr, "SUBS message succesfully sent to %s" % show_permid_short(permid)
+        
+        
+    def _addToRequestedSubtitles(self, channel_id, infohash, bitmask, callback=None):
+        """
+        Add (channel_id, infohash, bitmask) to the history of requested subs.
+        
+        Call this method after a request for subtitles for a torrent
+        identified by infohash in channel channel_id, has been sent for the 
+        languages identified by the bitmask.
+        """
+        
+        assert 0 <= bitmask < 2**32, "bitmask must be a 32  bit integer"
+
+        if(int(time()) >= self._nextCleanUpTime):
+            self._cleanUpRequestedSubtitles() #cleanup old unanswered requests
+        
+        key = self._getRequestedSubtitlesKey(channel_id, infohash)
+        if key in self.requestedSubtitles.keys():
+            rsEntry = self.requestedSubtitles[key]
+            rsEntry.newRequest(bitmask)
+        else :
+            rsEntry = _RequestedSubtitlesEntry()
+            rsEntry.newRequest(bitmask, callback)
+            self.requestedSubtitles[key] = rsEntry
+
+
+    
+    def _cleanUpRequestedSubtitles(self):
+        """
+        Cleans up unanswered requests.
+        
+        A request is considered unanswered when it was last updated more then
+        REQUESTE_VALIDITY_TIME seconds ago.
+        If a response arrives after a request gets deleted, it will be dropped.
+        """
+        keys = self.requestedSubtitles.keys()
+        now = int(time())
+        for key in keys:
+            rsEntry = self.requestedSubtitles[key]
+            somethingDeleted = rsEntry.cleanUpRequests(self._requestValidityTime)
+            if somethingDeleted:
+                if DEBUG:
+                    print >> sys.stderr, "Deleting subtitle request for key %s: expired.", key
+            
+            #no more requests for the (channel,infohash, pair)
+            if rsEntry.cumulativeBitmask == 0:
+                del self.requestedSubtitles[key]
+                
+        self._nextCleanUpTime = now + CLEANUP_PERIOD
+
+        
+        
+    
+    
+    def _removeFromRequestedSubtitles(self, channel_id, infohash, bitmask):
+        """
+        Remove (channel_id,infohash,bitmask) from the history of requested subs.
+        
+        Call this method after a request for subtitles for a torrent
+        identified by infohash in channel channel_id, has been recevied for the 
+        languages identified by the bitmask.
+        """
+
+        key = self._getRequestedSubtitlesKey(channel_id, infohash)
+        if key not in self.requestedSubtitles.keys():
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "asked to remove a subtitle that" + \
+                        "was never requested from the requestedList"
+            return None
+        else:
+            rsEntry = self.requestedSubtitles[key]
+            callbacks = rsEntry.removeFromRequested(bitmask)
+            
+            if rsEntry.cumulativeBitmask == 0:
+                del self.requestedSubtitles[key]
+            return callbacks
+            
+    def _checkRequestedSubtitles(self, channel_id, infohash, bitmask):
+        """
+        Given a bitmask returns a list of language from the ones in the bitmask
+        that have been actually requested
+        """
+
+        key = self._getRequestedSubtitlesKey(channel_id, infohash)
+        if key not in self.requestedSubtitles.keys():
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "asked to remove a subtitle that" + \
+                        "was never requested from the requested List"
+            return 0
+        else:
+            rsEntry = self.requestedSubtitles[key]
+            reqBitmask = rsEntry.cumulativeBitmask & bitmask
+            return reqBitmask
+
+            
+            
+class _RequestedSubtitlesEntry():
+    '''
+    Convenience class to represent entries in the requestedSubtitles map
+    from the SubtitleHandler.
+    For each (channel, infohash tuple it keeps a cumulative bitmask
+    of all the requested subtitles, and a list of the single different
+    requests. Each single request bears a timestamp that is used
+    to cleanup outdated requests
+    '''
+    
+    def __init__(self):
+        self.requestsList = list()
+        self.cumulativeBitmask = 0
+        
+    def newRequest(self, req_bitmask, callback = None):
+        assert 0 <= req_bitmask < 2**32
+        
+        self.requestsList.append([req_bitmask,callback,int(time())])
+        self.cumulativeBitmask = int(self.cumulativeBitmask | req_bitmask)
+
+            
+    
+    def removeFromRequested(self, rem_bitmask):
+
+        callbacks = list()
+        self.cumulativeBitmask = self.cumulativeBitmask & (~rem_bitmask)
+        
+        length = len(self.requestsList)
+        i=0
+        while i < length:
+            entry = self.requestsList[i]
+            receivedLangs = entry[0] & rem_bitmask
+            #if something was received for the request
+            if receivedLangs != 0:
+                callbacks.append((entry[1],receivedLangs))
+                updatedBitmask = entry[0] & (~receivedLangs)
+                # no more subtitles to receive for 
+                # thath request
+                if updatedBitmask == 0:
+                    del self.requestsList[i]
+                    i -=1
+                    length -=1
+                else:
+                    entry[0] = updatedBitmask
+            i += 1
+             
+        return callbacks
+    
+
+    
+    
+    
+    def cleanUpRequests(self, validityDelta):
+
+        somethingDeleted = False
+        now = int(time())
+        
+        length = len(self.requestsList)
+        i=0
+        while i < length:
+            entry = self.requestsList[i]
+            requestTime = entry[2]
+            #if the request is outdated
+            if requestTime + validityDelta < now :
+                #remove the entry
+                self.cumulativeBitmask = self.cumulativeBitmask & \
+                    (~entry[0])
+                del self.requestsList[i]
+                i -= 1
+                length -= 1
+                somethingDeleted = True
+            
+            i += 1
+        
+        return somethingDeleted
+    
+
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/__init__.py b/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitleHandler/__init__.py
new file mode 100644 (file)
index 0000000..284cc10
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitlesHandler.py b/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitlesHandler.py
new file mode 100644 (file)
index 0000000..0baf419
--- /dev/null
@@ -0,0 +1,611 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import \
+    LanguagesProvider
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import \
+    MetadataDBException, RichMetadataException, DiskManagerException
+from BaseLib.Core.CacheDB.Notifier import Notifier
+from BaseLib.Core.Subtitles.SubtitleHandler.DiskManager import DiskManager, \
+    DISK_FULL_DELETE_SOME, DELETE_OLDEST_FIRST
+from BaseLib.Core.Subtitles.SubtitleHandler.SimpleTokenBucket import \
+    SimpleTokenBucket
+from BaseLib.Core.Subtitles.SubtitleHandler.SubsMessageHandler import \
+    SubsMessageHandler
+from BaseLib.Core.Utilities import utilities
+from BaseLib.Core.Utilities.Crypto import sha
+from BaseLib.Core.Utilities.utilities import bin2str, show_permid_short
+from BaseLib.Core.simpledefs import NTFY_ACT_DISK_FULL, NTFY_SUBTITLE_CONTENTS, \
+    NTFY_UPDATE
+import os
+import sys
+
+
+
+
+SUBS_EXTENSION = ".srt"
+SUBS_LOG_PREFIX = "subtitles: "
+
+DEFAULT_MIN_FREE_SPACE = 0 #no treshold
+
+MAX_SUBTITLE_SIZE = 1 * 1024 * 1024    # 1MB subtitles. too big?
+MAX_SUBS_MESSAGE_SIZE = int(2 * MAX_SUBTITLE_SIZE / 1024) #in KBs
+
+
+MAX_SUBTITLE_DISK_USAGE = 200 * (2 ** 10) #200 MBs
+
+DEBUG = False
+
+class SubtitlesHandler(object):
+    
+    
+    __single = None
+    
+    def __init__(self):
+        # Creates a mostly-uninitialized handler: collaborators (overlay
+        # bridge, DB handler, session) are injected later via register(),
+        # and the instance is not usable until then.
+        # notice that singleton pattern is not enforced.
+        # This is better, since this way the code is more easy
+        # to test.
+        
+        SubtitlesHandler.__single = self
+        self.avg_subtitle_size = 100 # 100 KB, experimental avg
+        self.languagesUtility = LanguagesProvider.getLanguagesInstance()
+        
+
+        #instance of MetadataDBHandler, injected by register()
+        self.subtitlesDb = None
+        # set to True by register() once dependencies are in place
+        self.registered = False
+        # path of the subtitles collecting dir, set by register()
+        self.subs_dir = None
+        
+        
+        
+        #other useful attributes are injected by the register method
+        
+        
+
+    @staticmethod
+    def getInstance(*args, **kw):
+        # Returns the process-wide instance, lazily creating it on first
+        # use (the constructor registers itself in __single).
+        if SubtitlesHandler.__single is None:
+            SubtitlesHandler(*args, **kw)
+        return SubtitlesHandler.__single
+    
+    def register(self, overlay_bridge, metadataDBHandler, session):
+        """
+        Injects the required dependencies on the instance.
+        
+        @param overlay_bridge: a reference to a working instance
+                               of OverlayTrheadingBridge
+        @param metadataDBHandler: a reference to the current instance of
+                           L{MetadataDBHandler}
+        @param session: a reference to the running session
+        """
+        self.overlay_bridge = overlay_bridge
+        self.subtitlesDb = metadataDBHandler
+        self.config_dir = os.path.abspath(session.get_state_dir())
+        subs_path = os.path.join(self.config_dir, session.get_subtitles_collecting_dir())
+        self.subs_dir = os.path.abspath(subs_path)
+        
+        self.min_free_space = DEFAULT_MIN_FREE_SPACE
+        self._upload_rate = session.get_subtitles_upload_rate()
+        self.max_subs_message_size = MAX_SUBS_MESSAGE_SIZE
+        self._session = session
+       
+        #the upload rate is controlled by a token bucket.
+        #a token corresponds to 1 KB.
+        #The max burst size corresponds to 2 subtitles of the maximum size (2 MBs)
+        tokenBucket = SimpleTokenBucket(self._upload_rate,
+                                              self.max_subs_message_size)
+       
+        self._subsMsgHndlr = SubsMessageHandler(self.overlay_bridge, tokenBucket, 
+                                                MAX_SUBTITLE_SIZE)
+        self._subsMsgHndlr.registerListener(self)
+        
+    
+        #assure that the directory exists
+        
+        if os.path.isdir(self.config_dir) :
+            if not os.path.isdir(self.subs_dir):
+                try:
+                    os.mkdir(self.subs_dir)
+                except:
+                    msg = u"Cannot create collecting dir %s " % self.subs_dir
+                    print >> sys.stderr, "Error: %s" % msg
+                    raise IOError(msg)
+        else:
+            msg = u"Configuration dir %s does not exists" % self.subs_dir
+            print >> sys.stderr, "Error: %s" % msg
+            raise IOError(msg)
+        
+        
+        
+           
+        diskManager = DiskManager(self.min_free_space, self.config_dir)
+        self.diskManager = diskManager
+        
+        
+        dmConfig = {"maxDiskUsage" : MAX_SUBTITLE_DISK_USAGE,
+                    "diskPolicy" : DISK_FULL_DELETE_SOME | DELETE_OLDEST_FIRST,
+                     "encoding" : "utf-8"}
+        self.diskManager.registerDir(self.subs_dir, dmConfig)
+        
+        freeSpace = self.diskManager.getAvailableSpace()
+        if DEBUG:
+            print >> sys.stderr, SUBS_LOG_PREFIX + "Avaialble %d MB for subtitle collecting" % (freeSpace / (2 ** 20))
+        
+        #event notifier
+        self._notifier = Notifier.getInstance()
+        
+        self.registered = True
+    
+    
+    
+    
+    
+    def sendSubtitleRequest(self, permid, channel_id, infohash, languages,
+                            callback=None, selversion= -1):
+        """
+        Send a request for subtitle files. Only called by the OLThread
+        
+        Send a GET_SUBS request to the peer indentified by permid.
+        The request asks for several subtitles file, for a given channel_id
+        and torrent infohash. The subtitles file to request are specified
+        by the languages parameter that is a list of 3 characters language
+        codes.
+        
+        The contents of a GET_SUBS request are:
+            - channel_id: the identifier of the channel for which the subtitles
+              were added. (a permid). Binary.
+            - infohash: the infohash of the torrent, the subtitles refer to.
+              Binary.
+            - bitmask:  a 32 bit bitmask (an integer) which specifies the 
+              languages requested
+                      
+        
+        
+        @param permid: the destination of the request (binary)
+        @param channel_id: the identifier of the channel for which the subtitle
+                           was added (binary)
+        @param infohash: the infohash of a torrent the subtitles refers to (binary).
+        @param languages: a list of 3-characters language codes. It must be
+                          on of the supported language codes (see Languages)
+        @param callback: a function that will be called WHENEVER some of the
+                         requested subtitles are received. It must have exactly
+                         one parameter that will be bound to a list of 
+                         the languages that were received
+        @param selversion: the protocol version of the peer whe are sending 
+                            the request to
+        
+        @return: None when nothing is to be requested; False when there is
+                 not enough disk space to collect the subtitles.
+                 NOTE(review): the None/False mix is inherited behavior --
+                 callers appear to rely only on falsiness; confirm before
+                 unifying.
+        
+        @raise SubtitleMsgHandlerException: if the message failed its attempt to be sent.
+                                      Notice that also if the method returns without
+                                      raising any exception it doesn't mean
+                                      that the message has been sent.
+        """
+        
+        assert utilities.isValidInfohash(infohash), \
+            SUBS_LOG_PREFIX + "Invalid infohash %s" % infohash
+        assert utilities.isValidPermid(permid), \
+            SUBS_LOG_PREFIX + "Invlaid destination permid %s" % permid
+        
+        assert self.languagesUtility.isLangListSupported(languages), \
+             SUBS_LOG_PREFIX + "Some of the languages where not supported"
+            
+
+        
+        if DEBUG:
+            print >> sys.stderr, SUBS_LOG_PREFIX + "preparing to send GET_SUBS to " + \
+                  utilities.show_permid_short(permid)
+        
+
+        
+# Better to leave up to the caller the responsibility to check
+# if the subtitle is already available and as correct checsum and so on..
+#        onDisk = []
+#        for langCode in languages:
+#            
+#            filename = self.diskManager.isFilenOnDisk(self.subs_dir,
+#                    getSubtitleFileRelativeName(channel_id, infohash, langCode))
+#            
+#            
+#            # should I skip this part and just send the request anyway?
+#            # (thus leaving to the caller the responsibility to avoid useless
+#            # requests)
+#            if filename:
+#                log.debug(SUBS_LOG_PREFIX + langCode + 
+#                          " subtitle already on disk. Skipping it"\
+#                          " in the request")
+#                onDisk.append(langCode)
+#                self._notify_sub_is_in(channel_id, infohash, langCode, filename)
+#        
+#        for deleteme in onDisk:
+#            languages.remove(deleteme)
+            
+        if len(languages) == 0:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + " no subtitles to request."
+            return
+            
+        
+        # pre-reserve disk space based on an experimental average subtitle
+        # size; actual sizes are checked again when the response arrives
+        if not self.diskManager.tryReserveSpace(self.subs_dir, len(languages) * self.avg_subtitle_size):
+            self._warn_disk_full()
+            return False
+        
+        requestDetails = dict()
+        requestDetails['channel_id'] = channel_id
+        requestDetails['infohash'] = infohash
+        requestDetails['languages'] = languages
+        
+        
+        # the lambda adapts the 5-parameter send-completion signature to
+        # the local _subsRequestSent hook
+        self._subsMsgHndlr.sendSubtitleRequest(permid, requestDetails,
+                                                lambda e,d,c,i,b : \
+                                                    self._subsRequestSent(e,d,c,i,b),
+                                                    callback,
+                                                    selversion)
+            
+        
+    
+    def _subsRequestSent(self,exception,dest, channel_id, infohash, bitmask ):
+        '''
+        Gets called when a subtitle request has been succesfully sent.
+        
+        Currently a no-op placeholder; exception is None on success.
+        '''
+        pass
+
+    def receivedSubsRequest(self, permid, request, selversion):
+        """
+        Reads a received GET_SUBS message and possibly sends a response.
+        
+        @param permid: the permid of the sender of the GET_SUBS message
+        @param request: a tuple made of channel_id, infohash, language code
+        @param selversion: the protocol version of the requesting peer
+        
+        @return: False if the message had something wrong. (a return value
+                 of False makes the caller close the connection).
+                 Otherwise True
+        """
+        
+        assert self.registered, SUBS_LOG_PREFIX + "Handler not yet registered"
+
+        
+        channel_id, infohash, languages = request #happily unpacking
+        
+        
+        #diction {lang : Subtitle}
+        allSubtitles = self.subtitlesDb.getAllSubtitles(channel_id,
+                                                        infohash)
+        
+        
+        contentsList = {} #{langCode : path}
+        #for each requested language check if the corresponding subtitle
+        #is available
+        for lang in sorted(languages):
+            if lang in allSubtitles.keys():
+                if allSubtitles[lang].subtitleExists():
+                    content = self._readSubContent(allSubtitles[lang].path)
+                    if content is not None:
+                        contentsList[lang] = content 
+                    
+                else:
+                    if DEBUG:
+                        print >> sys.stderr, SUBS_LOG_PREFIX + "File not available for " + \
+                              "channel %s, infohash %s, lang %s" % \
+                              (show_permid_short(channel_id), bin2str(infohash),
+                              lang)
+                    self.subtitlesDb.updateSubtitlePath(channel_id,infohash,lang,None)
+            else:
+                if DEBUG:
+                    print >> sys.stderr, SUBS_LOG_PREFIX + "Subtitle not available for " + \
+                          "channel %s, infohash %s, lang %s" % \
+                          (show_permid_short(channel_id), bin2str(infohash),
+                          lang)
+        
+        if len(contentsList) == 0: #pathlist is empty
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "None of the requested subtitles " + \
+                      " was available. No answer will be sent to %s" % \
+                      show_permid_short(permid)
+            return True
+        
+        
+        
+        return self._subsMsgHndlr.sendSubtitleResponse(permid, 
+                                                (channel_id,infohash,contentsList), 
+                                                selversion)
+
+        
+        
+    
+    def _readSubContent(self,path):
+        # Reads a subtitle file (relative to the collecting dir) through the
+        # disk manager. Returns its contents, or None when the file cannot
+        # be read or exceeds MAX_SUBTITLE_SIZE.
+
+        try:
+            # NOTE(review): if os.path.relpath itself raised, relativeName
+            # would be unbound in the except clause -- unlikely, but worth
+            # confirming
+            relativeName = os.path.relpath(path, self.subs_dir)
+            fileContent = self.diskManager.readContent(self.subs_dir,
+                                                       relativeName)
+        except IOError,e:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Error reading from subs file %s: %s" % \
+                 (relativeName, e)
+            fileContent = None
+            
+        if fileContent is not None and len(fileContent) <= MAX_SUBTITLE_SIZE:
+            return fileContent
+        else:
+            print >> sys.stderr, "Warning: Subtitle %s dropped. Bigger then %d" % \
+                (relativeName, MAX_SUBTITLE_SIZE)
+            return None
+                
+
+            
+        
+
+    
+   
+        
+    def _subs_send_callback(self, exception, permid):
+        """
+        Called by the overlay thread when the send action is completed
+        """
+        if exception is not None:
+            if DEBUG:
+                print >> sys.stderr, SUBS_LOG_PREFIX + "Failed to send metadata to %s: %s" % \
+                      (show_permid_short(permid), str(exception))
+        
+    
+    def receivedSubsResponse(self, permid, msg, callbacks, selversion):
+        """
+        Handles the reception of a SUBS message.
+        
+        Checks against integrity of the contents received in a SUBS message.
+        If the message containes one or more subtitles that were not requested
+        they are dropped.
+        If the message is bigger in size then MAX_SUBS_MSG_SIZE it is dropped.
+        If one subtitle is bigger in size then MAX_SUBTITLE_SIZE it is dropped.
+        Otherwise the message is decoded, the subtitles saved to disk, and 
+        their path added to database.
+        
+        @param permid: the permid of the sender
+        @param msg: a triple of channel_id, infohash, and the contentsDictionary
+        @param callbacks: a list of pairs. The first element is a function to call,
+            the second a bitmask that help building back the parameters
+            of the function
+        @param selversion: the protocol version number of the other peer
+        
+        
+        @return: False if the message is dropped becuase malformed. 
+        """
+        assert self.registered == True, SUBS_LOG_PREFIX + "Subtitles Handler"\
+            " is not registered"
+        
+    
+        channel_id, infohash, contentsDictionary = \
+            msg
+        
+        
+        metadataDTO = self.subtitlesDb.getMetadata(channel_id, infohash)
+        
+        # a SUBS response is only expected for previously requested
+        # subtitles, whose metadata must already be in the local db
+        assert metadataDTO is not None, SUBS_LOG_PREFIX + "Inconsistent " \
+            "subtitles DB: a requested subtitle was not available in the db"
+        
+        filepaths = dict()
+        somethingToWrite = False
+        
+        for lang, subtitleContent in contentsDictionary.iteritems():
+            try:
+                filename = self._saveSubOnDisk(channel_id, infohash, lang,
+                                               subtitleContent)
+                filepaths[lang] = filename
+            except IOError,e:
+                # disk failure for one subtitle: skip it, keep the others
+                if DEBUG:
+                    print >> sys.stderr, SUBS_LOG_PREFIX + "Unable to save subtitle for "\
+                          "channel %s and infohash %s to file: %s" % \
+                          (show_permid_short(channel_id), str(infohash), e)
+                continue
+            except Exception,e:
+                if DEBUG:
+                    print >> sys.stderr, "Unexpected error copying subtitle On Disk: " + str(e)
+                raise e
+            
+            subToUpdate = metadataDTO.getSubtitle(lang)
+            if subToUpdate is None:
+                print >> sys.stderr, "Warning:" + SUBS_LOG_PREFIX + "Subtitles database inconsistency."
+                #is it ok to throw a runtime error or should I gracefully fail?
+                raise MetadataDBException("Subtitles database inconsistency!")
+            
+            subToUpdate.path = filename
+            # verify the received contents against the checksum announced in
+            # the metadata; on mismatch the file is deleted again
+            if not subToUpdate.verifyChecksum():
+                if DEBUG:
+                    print >> sys.stderr, "Received a subtitle having invalid checsum from %s" % \
+                         show_permid_short(permid)
+                subToUpdate.path = None
+                
+                relativeName = os.path.relpath(filename, self.subs_dir)
+                self.diskManager.deleteContent(self.subs_dir, relativeName)
+                continue
+            # batch db updates; a single commit happens below
+            self.subtitlesDb.updateSubtitlePath(channel_id, infohash, subToUpdate.lang, filename, False)
+            somethingToWrite = True
+        
+        if somethingToWrite:
+            self.subtitlesDb.commit()
+        
+        if DEBUG:    
+            print >> sys.stderr, "Subtitle written on disk and informations on database."
+        
+        self._scheduleUserCallbacks(callbacks)
+        
+        return True
+            
+                
+    def _scheduleUserCallbacks(self, callbacks):
+        
+        # callbacks is a list of tuples such as
+        # (callback_func, bitmask)
+        for entry in callbacks:
+            callback = entry[0]
+            if callback is None:
+                pass
+            else:
+                listOfLanguages = self.languagesUtility.maskToLangCodes(entry[1])
+                def callBack():
+                    to_call = callback
+                    return to_call(listOfLanguages)
+                # Commented because had a problem related to the
+                # scope of the closure
+                #toCall = lambda : callback(listOfLanguages)
+                self.overlay_bridge.add_task(callBack)
+
+        
+        
+        
+        
+    
+    def _saveSubOnDisk(self, channel_id, infohash, lang, subtitleContent):
+        assert self.registered == True, SUBS_LOG_PREFIX + "Subtitles Handler"\
+            " is not registered"
+            
+        filename = getSubtitleFileRelativeName(channel_id, infohash, lang)
+        
+        path = self.diskManager.writeContent(self.subs_dir,
+                                             filename, subtitleContent)
+            
+        return path
+
+    
+    
+    def _notify_sub_is_in(self, channel_id, infohash, langCode, filename):
+        """
+        Notify that a subtitle file is available.
+        
+        Notifies any interested receiver that a subtitle for 
+        (channel_id, infohash, langCode) is available in the file
+        located at path filename.
+        
+        Currently it just prints a cool debug message.
+        """
+        if DEBUG:
+            print >> sys.stderr, SUBS_LOG_PREFIX + "Subtitle is in at" + filename
+        
+        if self._notifier is not None:
+            self.notifier.notify(NTFY_SUBTITLE_CONTENTS, NTFY_UPDATE,
+                                 (channel_id, infohash), langCode, filename)
+    
+    
+    def _warn_disk_full(self):
+        """
+        Notifies the LaunchMany instance that the disk is full.
+        """
+        print >> sys.stderr, "Warning: " + SUBS_LOG_PREFIX + "GET_SUBS: Disk full!"
+        drive, rdir = os.path.splitdrive(os.path.abspath(self.subs_dir))
+        if not drive:
+            # no drive component (non-Windows): report the directory itself
+            drive = rdir
+        # NOTE(review): self.launchmany is never assigned in this file
+        # (register() does not set it) -- confirm it is injected elsewhere,
+        # otherwise this line raises AttributeError
+        self.launchmany.set_activity(NTFY_ACT_DISK_FULL, drive)
+        
+
+      
+    def setUploadRate(self, uploadRate):
+        """
+        Sets the subtitles uploading rate, expressed in KB/s
+        """
+        assert self.registered
+        
+        self._upload_rate = float(uploadRate)
+        self._subsMsgHndlr._tokenBucket.fill_rate = float(uploadRate)
+
+    def getUploadRate(self):
+        """
+        Returns the current setting for the subtitles upload rate, in KB/s
+
+        This is the cached value last set through setUploadRate.
+        """
+        return self._upload_rate
+    
+    def delUploadRate(self):
+        """
+        No, you can't delete the upload_rate property
+
+        Deleter for the upload_rate property: deletion is not a meaningful
+        operation for the rate limit, so it always raises.
+        """
+        raise RuntimeError("Operation not supported")
+    
+    # Managed property: reads and writes are routed through getUploadRate /
+    # setUploadRate (which also updates the token bucket); deletion raises.
+    upload_rate = property(getUploadRate, setUploadRate, delUploadRate,
+                           "Controls the subtitles uploading rate. Expressed in KB/s")
+    
+    
+    def copyToSubtitlesFolder(self,pathToMove, channel_id, infohash, langCode):
+        """
+        Given the path to an srt, moves it to the subtitle folder, also
+        changing the name to the correct one
+        
+        @return: the complete path of the file if the file was succesfully copied,
+        
+        @raise RichMetadataException: if the subtitle cannot be copied.
+        
+        """
+        
+        if not os.path.isfile(pathToMove):
+            raise RichMetadataException("File not found.")
+        
+        if os.path.getsize(pathToMove) >= MAX_SUBTITLE_SIZE :
+            raise RichMetadataException("Subtitle bigger then %d KBs" % (MAX_SUBTITLE_SIZE/1024))
+        
+        # Not really strong check: anyone can change the extension of a file :)
+        if not pathToMove.endswith(SUBS_EXTENSION):
+            raise RichMetadataException("Only .srt subtitles are supported")
+        
+        filename = getSubtitleFileRelativeName(channel_id, infohash, langCode)
+
+        
+        if self.diskManager.isFilenOnDisk(self.subs_dir, filename):
+            if DEBUG:
+                print >> sys.stderr, "Overwriting previous subtitle %s" % filename
+            try:
+                deleted = self.diskManager.deleteContent(self.subs_dir, filename)
+            except DiskManagerException,e:
+                if DEBUG:
+                    print >> sys.stderr, "Unable to remove subtitle %s" % filename
+                raise RichMetadataException("Unable to remove subtile %s to overwrite: %s"\
+                                            % (filename, str(e)))
+            
+            if not deleted:
+                if DEBUG: 
+                    print >> sys.stderr, "Unable to remove subtitle %s" % filename
+                raise RichMetadataException("Old subtitle %s is write protected"% filename)
+        
+        
+        with open(pathToMove,"rb") as toCopy:
+            encoding = toCopy.encoding
+            content = toCopy.read()
+            
+        if encoding is not None:
+            #convert the contents from their original encoding 
+            # to unicode, and replace possible unknown characters
+            #with U+FFFD
+            content = unicode(content, encoding, 'relplace')
+        else:
+            #convert using the system default encoding
+            content = unicode(content, errors="replace")
+        
+        return self.diskManager.writeContent(self.subs_dir, filename, content)
+
+
+    def getMessageHandler(self):
+        # Returns the bound handleMessage callable of the subtitles message
+        # handler, for registration with the overlay message dispatcher.
+        return self._subsMsgHndlr.handleMessage
+        
+
+        
+    
+    
+        
+def getSubtitleFileRelativeName(channel_id, infohash, langCode):
+    #subtitles filenames are build from the sha1 hash
+    #of the triple (channel_id, infohash, langCode)
+    
+    # channel_id and infohash are binary versions
+    
+    assert utilities.validPermid(channel_id), \
+        "Invalid channel_id %s" % utilities.show_permid_short(channel_id)
+    assert utilities.validInfohash(infohash), \
+        "Invalid infohash %s" % bin2str(infohash)
+    assert LanguagesProvider.getLanguagesInstance().isLangCodeSupported(langCode), \
+        "Unsupported language code %s" % langCode
+        
+    hasher = sha()
+    for data in (channel_id, infohash, langCode):
+        hasher.update(data)
+    subtitleName = hasher.hexdigest() + SUBS_EXTENSION
+        
+    return subtitleName
+
+
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitlesSupport.py b/instrumentation/next-share/BaseLib/Core/Subtitles/SubtitlesSupport.py
new file mode 100644 (file)
index 0000000..7255f4e
--- /dev/null
@@ -0,0 +1,458 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import \
+    LanguagesProvider
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import \
+    RichMetadataException
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+from BaseLib.Core.Utilities import utilities
+from BaseLib.Core.Utilities.utilities import isValidPermid, bin2str
+import sys
+import threading
+
+DEBUG = False
+
+
+class SubtitlesSupport(object):
+    '''
+    Subtitle dissemination system facade.
+    
+    Acts as the only faced between the subtitle dissemination system and 
+    the GUI (or whoever needs to subtitles).
+    
+    Provides methods to query the subtitles database. Allows publishers to
+    add their own subtitles, and if necessary permits to retrieve the subtitle
+    remotely if not available.
+    '''
+
+    __single = None
+    _singletonLock = threading.RLock()
+    
+    def __init__(self):
+        
+        #singleton pattern not really enforced if someone just calls 
+        # the normal constructor. But this way I can test the instance easier
+        try:
+            SubtitlesSupport._singletonLock.acquire()
+            SubtitlesSupport.__single = self
+        finally:
+            SubtitlesSupport._singletonLock.release()
+            
+        self.richMetadata_db = None
+        self.subtitlesHandler = None
+        self.channelcast_db = None
+        self.langUtility = LanguagesProvider.getLanguagesInstance()
+        self._registered = False
+    
+    @staticmethod
+    def getInstance(*args, **kw):
+        try:
+            SubtitlesSupport._singletonLock.acquire()
+            if SubtitlesSupport.__single == None:
+                SubtitlesSupport(*args, **kw)
+        finally:
+            SubtitlesSupport._singletonLock.release()
+        
+        return SubtitlesSupport.__single
+        
+    def _register(self, richMetadataDBHandler, subtitlesHandler,
+                 channelcast_db, my_permid, my_keypair, peersHaveManger,
+                 ol_bridge):
+        assert richMetadataDBHandler is not None
+        assert subtitlesHandler is not None
+        assert channelcast_db is not None
+        assert peersHaveManger is not None
+        assert ol_bridge is not None
+        assert isValidPermid(my_permid)
+        
+        self.richMetadata_db = richMetadataDBHandler
+        self.subtitlesHandler = subtitlesHandler
+        self.channelcast_db = channelcast_db
+        self.my_permid = my_permid
+        self.my_keypair = my_keypair
+        self._peersHaveManager = peersHaveManger
+        #used to decouple calls to SubtitleHandler
+        self._ol_bridge = ol_bridge
+        self._registered = True
+        
+    
+    def getSubtileInfosForInfohash(self, infohash):
+        '''
+        Retrieve available information about subtitles for the given infohash.
+        
+        Given the infohash of a .torrent, retrieves every
+        information about subtitles published for that .torrent that is
+        currently available in the DB. 
+        
+        @param infohash: a .torrent infohash (binary)
+        @return: a dictionary. The dictionary looks like this::
+                { 
+                  channel_id1 : {langCode : L{SubtitleInfo}, ...} ,
+                  channel_id2 : {langCode : L{SubtitleInfo}, ... },
+                  ...
+                } 
+            Each entry in the dictionary has the following semantics:
+                - channel_id is the permid identifiying the channel (binary).
+                - langCode is an ISO 693-2 three characters language code
+        '''
+        assert utilities.isValidInfohash(infohash)
+        assert self._registered, "Instance is not registered"
+        
+        returnDictionary = dict()
+        
+        #a metadataDTO corrisponds to all metadata for a pair channel, infohash
+        metadataDTOs = self.richMetadata_db.getAllMetadataForInfohash(infohash)
+        
+        for metadataDTO in metadataDTOs:
+            channel = metadataDTO.channel
+            subtitles = metadataDTO.getAllSubtitles()
+            if len(subtitles) > 0 :
+                returnDictionary[channel] = subtitles
+        
+        return returnDictionary
+        
+    
+    
+    def getSubtitleInfos(self, channel, infohash):
+        '''
+        Retrieve subtitles information for the given channel-infohash pair.
+        
+        Searches in the local database for information about subtitles that
+        are currently availabe.
+        
+        @param channel: the channel_id (perm_id) of a channel (binary)
+        @param infohash: a .torrent infohash (binary)
+        @return: a dictionary of SubtitleInfo instances. The keys are the 
+                language codes of the subtitles
+        '''
+        assert self._registered, "Instance is not registered"
+        metadataDTO = self.richMetadata_db.getMetadata(channel,infohash)
+        if metadataDTO is None:
+            #no results
+            return {}
+        else:
+            return metadataDTO.getAllSubtitles()
+        
+    
+    def publishSubtitle(self, infohash, lang, pathToSrtSubtitle):
+        '''
+        Allows an user to publish an srt subtitle file in his channel.
+        
+        Called by a channel owner this method inserts a new subtitle for
+        a torrent published in his channel. 
+        The method assumes that the torrent identified by the infohash
+        parameter is already in the channel, and that the parameter 
+        pathToSrtSubtitle points to an existing srt file on the local
+        filesystem.
+        If a subtitle for the same language was already associated to the 
+        specified infohash and channel, it will be overwritten.
+        After calling this method the newly inserted subtitle will be 
+        disseminated via Channelcast.
+        
+        @param infohash: the infohash of the torrent to associate the subtitle
+                         with, binary
+        @param lang: a 3 characters code for the language of the subtitle as
+                     specified in ISO 639-2. Currently just 32 language codes
+                     will be supported.
+        @param pathToSrtSubtitle: a path in the local filesystem to a subtitle
+                                  in srt format.
+        
+        @raise RichMetadataException: if something "general" goes wrong while
+                                      adding new metadata
+        @raise IOError: if disk related problems occur
+        '''
+        assert utilities.isValidInfohash(infohash), "Invalid Infohash"
+        assert lang is not None and self.langUtility.isLangCodeSupported(lang)
+        assert self._registered, "Instance is not registered"
+  
+        channelid = bin2str(self.my_permid)
+        base64infohash = bin2str(infohash)
+        # consisnstency check: I want to assure that this method is called
+        # for an item that is actually in my channel
+        consinstent = self.channelcast_db.isItemInChannel(channelid,base64infohash)
+        
+        if not consinstent:
+            msg = "Infohash %s not found in my channel. Rejecting subtitle" \
+                    % base64infohash
+            if DEBUG:
+                print >> sys.stderr, msg
+            raise RichMetadataException(msg)
+        
+        try:
+        
+            filepath = \
+                self.subtitlesHandler.copyToSubtitlesFolder(pathToSrtSubtitle,
+                                                            self.my_permid,infohash,
+                                                            lang)   
+        except Exception,e:
+            if DEBUG:
+                print >> sys.stderr, "Failed to read and copy subtitle to appropriate folder: %s" % str(e)
+
+
+        
+        # retrieve existing metadata from my channel, infoahash
+        metadataDTO = self.richMetadata_db.getMetadata(self.my_permid, infohash)
+        # can be none if no metadata was available
+        if metadataDTO is None:
+            metadataDTO = MetadataDTO(self.my_permid, infohash)
+        else:
+            #update the timestamp
+            metadataDTO.resetTimestamp()
+        
+        newSubtitle = SubtitleInfo(lang, filepath)
+        
+        # this check should be redundant, since i should be sure that subtitle
+        # exists at this point
+        if newSubtitle.subtitleExists():
+            newSubtitle.computeChecksum()
+        else:
+            msg = "Inconsistency found. The subtitle was"\
+                                        "not published"
+            if DEBUG:
+                print >> sys.stderr, msg
+            raise RichMetadataException(msg)
+        
+        metadataDTO.addSubtitle(newSubtitle)
+        metadataDTO.sign(self.my_keypair)
+        
+        #channelid is my permid. I received the metadata from myself
+        self.richMetadata_db.insertMetadata(metadataDTO)
+        
+
+
+    def retrieveSubtitleContent(self, channel, infohash, subtitleInfo, callback = None):
+        '''
+        Retrieves the actual subtitle file from a remote peer.
+        
+        If not already locally available this function tries to retrieve the
+        actual subtitle content from remote peers. The parameter subtitleInfo
+        describes the subtitle to retrieve the content for.
+        
+        A callback can be provided. It will be called by the
+        OLThread once the actual subtitle is available, or never
+        in case of failure. 
+        The callback function should have exactly one parameter that will
+        be bound to a new SubtitleInfo instance, with the path field updated
+        to the path where the downloaded subtitle resides.
+        
+        Usually this method should be called when the value of 
+        subtitleInfo.path is None, meaning that the subtitle of the content
+        is not available locally. If subtitleInfo.path is not None, tha path
+        will be checked for validity and in case it is not valid the method
+        will try to fetch a new subtitle. If it points to a valid subtitle
+        with the correct checksum, nothing will be done and the user callback
+        will be immediately scheduled.
+        
+        The current implementation queries for subtitle up to 5 peers
+        ithat manifested the availability for that subtitle through channelcast.
+        The requests are sent in parallel but only the first response is 
+        considered.
+        
+        @param channel: the channel where the subtitle was published. (binary channel_id)
+        
+        @param infohash: the infohash of the item we want to retrieve the
+                         subtitle for. (binary)
+        
+        @param subtitleInfo: an intance of SubtitleInfo describing the
+                             subtitle to be downloaded
+                             
+        @param callback: a function that will be called when the subtitle is 
+                         succesfully retrieved. See the description for
+                         further details. If None nothing will be called.
+        '''
+        assert self._registered, "Instance is not registered"
+        assert subtitleInfo.checksum is not None , "Cannot retrieve a subtitle"\
+            "whose checksum is not known"
+        
+        if subtitleInfo.subtitleExists():
+            if subtitleInfo.verifyChecksum():
+                #subtitle is available call the callback
+                callback(subtitleInfo)
+                return
+            else:
+                #delete the existing subtitle and ask for a new
+                #one
+                if DEBUG:
+                    print >> sys.stderr, "Subtitle is locally available but has invalid" \
+                          "checksum. Issuing another download"
+                subtitleInfo.path = None
+        
+
+        languages = [subtitleInfo.lang]
+        
+        def call_me_when_subtitle_arrives(listOfLanguages):
+            if callback is not None:
+                #since this was a request for a single subtitle
+                assert len(listOfLanguages) == 1
+                
+                #retrieve the updated info from the db
+                sub = self.richMetadata_db.getSubtitle(channel,infohash,
+                                                       listOfLanguages[0])
+                
+                #call the user callback
+                
+                callback(sub)
+            
+            
+        self._queryPeersForSubtitles(channel, infohash, languages,
+                                             call_me_when_subtitle_arrives)
+
+        
+    
+    def retrieveMultipleSubtitleContents(self, channel, infohash, listOfSubInfos, callback=None):
+        '''
+        Query remote peers of severela subtitles given the infohash
+        of the torrent they refer to, and the channel_id of the channel
+        they where published in.
+        
+        @param channel: channel_id (permid) of the channel where the subtitles where published
+                        (binary)
+        @param infohash: infohash of the torrent the subtitles are associated to (binary)
+        @param listOfSubInfos: a list of SubtitleInfo instances, specifing the subtitles to
+                               retrieve
+                               
+        @param callback: a callback function that will be called whenever any of the requested
+                        subtitles are retrieved. The function may be called multiple times
+                        if different requested subtitles arrive at different times, but it is
+                        guaranteed that it will be called at most once for each different
+                        subtitle arrived.
+                        The function MUST have one parameter, that will be bound to a list
+                        of updated SubtitleInfo s, reflecting the subtitles that have been
+                        received
+        
+        @rtype: None
+        @return:  always None
+        '''
+        assert self._registered, "Instance is not registered"
+        
+        languages = []
+        locallyAvailableSubs = []
+        for subtitleInfo in listOfSubInfos:
+            if subtitleInfo.checksum is None:
+                if DEBUG:
+                    print >> sys.stderr, "No checksum for subtitle %s. Skipping it in the request"\
+                        % subtitleInfo
+                continue
+            
+            if subtitleInfo.subtitleExists():
+                if subtitleInfo.verifyChecksum():
+                    #subtitle is available call the callback
+                    locallyAvailableSubs.append(subtitleInfo)
+                    continue
+                else:
+                    #delete the existing subtitle and ask for a new
+                    #one
+                    if DEBUG:
+                        print >> sys.stderr, "Subtitle is locally available but has invalid" \
+                              "checksum. Issuing another download"
+                    subtitleInfo.path = None
+                    
+            languages.append(subtitleInfo.lang)
+        
+            
+        if len(locallyAvailableSubs) > 0 and callback is not None:
+            callback(locallyAvailableSubs)
+        
+        def call_me_when_subtitles_arrive(listOfLanguages):
+            if callback is not None:
+                assert len(listOfLanguages) > 0 
+                
+                subInfos = list()
+                
+                #better to perform a single read from the db
+                allSubtitles = self.richMetadata_db.getAllSubtitles(channel,infohash)
+                for lang in listOfLanguages:
+                    subInfos.append(allSubtitles[lang])
+    
+                callback(subInfos)
+        
+        if len(languages) > 0:
+            self._queryPeersForSubtitles(channel, infohash, languages,
+                                                  call_me_when_subtitles_arrive)
+            
+        
+       
+    def _queryPeersForSubtitles(self, channel, infohash, languages, callback):
+        '''
+        Queries remote peers for subtitle contents  specified by 'infohash' 
+        published in a channel identified by 'channel' in the languages specified
+        by the languages list. 
+        Once any of theses subtitles arrive callback is called.
+        NOTE: calls send() on the OverlayThreadingBridge 
+        
+        @param channel: the channel_id of the channel were the subtitles to retrieve
+                        were published (binary string)
+        @param infohash: the infohash of a torrent to whom the subtitles to retrieve
+                        refer (binary string)
+        @param languages: a list of language codes (see Languages.py) for the subtitles
+                          contents to retrieve
+        @param callback: a callback function that will be called when some (or all) of the
+                         requested subtitles are received. The provided function must
+                         accept one parameter, that will be bound to a list of language codes
+                         corresponding to the languages of the subtitles that were received.
+                         Notice that if subtitles for different languages are received at multiple
+                         times, the callback my be called multiple times. Notice also
+                         that the callback will be called at most once for each of the requested
+                         languages.
+        '''
+        
+        def task():
+            bitmask  = self.langUtility.langCodesToMask(languages)
+            
+            if not bitmask > 0:
+                if DEBUG:
+                    print >> sys.stderr, "Will not send a request for 0 subtitles"
+                return
+                
+            peers_to_query = self._peersHaveManager.getPeersHaving(channel, infohash, bitmask)
+            
+            assert len(peers_to_query) > 0, "Consistency error: there should always be some result"
+        
+            
+            #ask up to 5 peers for the same subtitle. The callback will be called only at the
+            # first received response (the others should be dropped)
+            for peer in peers_to_query:
+                self.subtitlesHandler.sendSubtitleRequest(peer, channel, infohash,
+                                                                   languages, callback)
+        
+        self._ol_bridge.add_task(task)
+        
+            
+            
+    
+        
+        
+        
+            
+    def runDBConsinstencyRoutine(self):
+        '''
+        Clean the database from incorrect data.
+        
+        Checks the databases for the paths of subtitles presumably locally available.
+        If those subtitles are not really available at the given path, updates
+        the database in a consistent way.
+        '''
+        result = self.richMetadata_db.getAllLocalSubtitles()
+        
+        for channel in result:
+            for infohash in result[channel]:
+                for subInfo in result[channel][infohash]:
+                    if not subInfo.subtitleExists():
+                        #If a subtitle published by me was removed delete the whole entry
+                        if channel == self.my_permid:
+                            metadataDTO = self.richMetadata_db.getMetadata(channel,infohash)
+                            metadataDTO.removeSubtitle(subInfo.lang)
+                            metadataDTO.sign(self.my_keypair)
+                            self.richMetadata_db.insertMetadata(metadataDTO)
+                        #otherwise just set the path to none
+                        else:
+                            self.richMetadata_db.updateSubtitlePath(channel, infohash, subInfo.lang,None)
+            
+        
+        
+                
+        
+        
diff --git a/instrumentation/next-share/BaseLib/Core/Subtitles/__init__.py b/instrumentation/next-share/BaseLib/Core/Subtitles/__init__.py
new file mode 100644 (file)
index 0000000..f99ac72
--- /dev/null
@@ -0,0 +1,3 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
diff --git a/instrumentation/next-share/BaseLib/Core/TorrentDef.py b/instrumentation/next-share/BaseLib/Core/TorrentDef.py
new file mode 100644 (file)
index 0000000..dea40a1
--- /dev/null
@@ -0,0 +1,1110 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+""" Definition of a torrent, that is, a collection of files or a live stream. """
+import sys
+import os
+import copy
+import math
+from traceback import print_exc,print_stack
+from types import StringType,ListType,IntType,LongType
+
+import BaseLib
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.defaults import *
+from BaseLib.Core.exceptions import *
+from BaseLib.Core.Base import *
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+import BaseLib.Core.APIImplementation.maketorrent as maketorrent
+import BaseLib.Core.APIImplementation.makeurl as makeurl
+from BaseLib.Core.APIImplementation.miscutils import *
+
+from BaseLib.Core.Utilities.utilities import validTorrentFile,isValidURL
+from BaseLib.Core.Utilities.unicode import dunno2unicode
+from BaseLib.Core.Utilities.timeouturlopen import urlOpenTimeout
+from BaseLib.Core.osutils import *
+from BaseLib.Core.Utilities.Crypto import sha
+
+from BaseLib.Core.ClosedSwarm import ClosedSwarm
+from BaseLib.Core.DecentralizedTracking.MagnetLink.MagnetLink import MagnetLink
+
+class TorrentDef(Serializable,Copyable):
+    """
+    Definition of a torrent, that is, all params required for a torrent file,
+    plus optional params such as thumbnail, playtime, etc.
+    
+    Note: to add fields to the torrent definition which are not supported
+    by its API, first create the torrent def, finalize it, then add the
+    fields to the metainfo, and create a new torrent def from that
+    upgraded metainfo using TorrentDef.load_from_dict()
+    
+    This class can also be used to create P2P URLs, by calling set_url_compat()
+    before finalizing. In that case only name, piece length, tracker, bitrate 
+    and source-authentication parameters (for live) are configurable. 
+
+    cf. libtorrent torrent_info
+    """
+    def __init__(self,input=None,metainfo=None,infohash=None):
+        """ Normal constructor for TorrentDef (The input, metainfo and infohash
+        parameters are used internally to make this a copy constructor) """
+        assert infohash is None or isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash)
+        assert infohash is None or len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash)
+        # When readonly is True, all set_* mutators raise
+        # OperationNotPossibleAtRuntimeException.
+        self.readonly = False
+        if input is not None: # copy constructor
+            self.input = input
+            # self.metainfo_valid set in copy() 
+            self.metainfo = metainfo
+            self.infohash = infohash
+            return
+        
+        self.input = {} # fields added by user, waiting to be turned into torrent file
+        # Define the built-in default here
+        self.input.update(tdefdefaults)
+        try:
+            self.input['encoding'] = sys.getfilesystemencoding()
+        except:
+            # Fall back when the platform cannot report a filesystem encoding.
+            self.input['encoding'] = sys.getdefaultencoding()
+
+        self.input['files'] = []
+
+        # metainfo/infohash are only meaningful once metainfo_valid is True
+        # (after finalize() or when loaded from an existing torrent).
+        self.metainfo_valid = False
+        self.metainfo = None # copy of loaded or last saved torrent dict
+        self.infohash = None # only valid if metainfo_valid
+        
+        
+        # We cannot set a built-in default for a tracker here, as it depends on
+        # a Session. Alternatively, the tracker will be set to the internal
+        # tracker by default when Session::start_download() is called, if the
+        # 'announce' field is the empty string.
+
+    #
+    # Class methods for creating a TorrentDef from a .torrent file
+    #
+    def load(filename):
+        """
+        Load a BT .torrent or Tribler .tribe file from disk and convert
+        it into a finalized TorrentDef.
+        
+        @param filename  An absolute Unicode filename
+        @return TorrentDef
+        """
+        # Class method, no locking required
+        # The opened file object is closed inside _read().
+        f = open(filename,"rb")
+        return TorrentDef._read(f)
+    load = staticmethod(load)
+        
+    def _read(stream):
+        """ Internal class method that reads a torrent file from stream,
+        checks it for correctness and sets self.input and self.metainfo
+        accordingly.
+        
+        Note: the stream is always closed after being read, even on success.
+        bdecode() raises a ValueError on malformed input. """
+        bdata = stream.read()
+        stream.close()
+        data = bdecode(bdata)
+        #print >>sys.stderr,data
+        return TorrentDef._create(data)
+    _read = staticmethod(_read)
+        
+    def _create(metainfo): # TODO: replace with constructor
+        # Build a finalized TorrentDef from an already-decoded metainfo dict.
+        # raises ValueErrors if not good
+        validTorrentFile(metainfo) 
+        
+        t = TorrentDef()
+        t.metainfo = metainfo
+        t.metainfo_valid = True
+        # copy stuff into self.input
+        maketorrent.copy_metainfo_to_input(t.metainfo,t.input)
+
+        # For testing EXISTING LIVE, or EXISTING MERKLE: DISABLE, i.e. keep true infohash
+        if t.get_url_compat():
+            # URL-compatible torrents use a swarm id instead of the SHA1 infohash.
+            t.infohash = makeurl.metainfo2swarmid(t.metainfo)
+        else:
+            # Two places where infohash calculated, here and in maketorrent.py
+            # Elsewhere: must use TorrentDef.get_infohash() to allow P2PURLs.
+            t.infohash = sha(bencode(metainfo['info'])).digest()
+
+        assert isinstance(t.infohash, str), "INFOHASH has invalid type: %s" % type(t.infohash)
+        assert len(t.infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(t.infohash)
+        
+        #print >>sys.stderr,"INFOHASH",`t.infohash`
+
+        return t
+    
+    _create = staticmethod(_create)
+
+    @staticmethod
+    def retrieve_from_magnet(url, callback):
+        """
+        If the URL conforms to a magnet link, the .torrent info is
+        downloaded and converted into a TorrentDef.  The resulting
+        TorrentDef is provided through CALLBACK.
+
+        Returns True when attempting to obtain the TorrentDef, in this
+        case CALLBACK will always be called.  Otherwise False is
+        returned, in this case CALLBACK will not be called.
+        
+        The thread making the callback should be used very briefly. 
+        """
+        assert isinstance(url, str), "URL has invalid type: %s" % type(url)
+        assert callable(callback), "CALLBACK must be callable"
+        # Adapter: MagnetLink delivers a raw metainfo dict; wrap it in a
+        # TorrentDef before handing it to the caller's callback.
+        def metainfo_retrieved(metadata):
+            tdef = TorrentDef.load_from_dict(metadata)
+            callback(tdef)
+
+        try:
+            magnet_link = MagnetLink(url, metainfo_retrieved)
+            return magnet_link.retrieve()
+        except:
+            # malformed url
+            # NOTE(review): bare except also hides unrelated errors from
+            # MagnetLink (not only parse failures) — consider narrowing.
+            return False
+
+    def load_from_url(url):
+        """
+        If the URL starts with 'http:' load a BT .torrent or Tribler .tstream
+        file from the URL and convert it into a TorrentDef. If the URL starts
+        with our URL scheme, we convert the URL to a URL-compatible TorrentDef.
+        
+        @param url URL
+        @return TorrentDef.
+        """
+        # Class method, no locking required
+        if url.startswith(P2PURL_SCHEME):
+            # P2P URL: the metainfo is encoded in the URL itself, no download.
+            (metainfo,swarmid) = makeurl.p2purl2metainfo(url)
+
+            # Metainfo created from URL, so create URL compatible TorrentDef.
+            metainfo['info']['url-compat'] = 1
+
+            # For testing EXISTING LIVE: ENABLE, for old EXISTING MERKLE: DISABLE 
+            #metainfo['info']['name.utf-8'] = metainfo['info']['name'] 
+
+            t = TorrentDef._create(metainfo)
+            
+            return t
+        else:
+            # Regular URL: fetch with a timeout; _read() closes the stream.
+            f = urlOpenTimeout(url)
+            return TorrentDef._read(f)
+    load_from_url = staticmethod(load_from_url)
+
+
+    def load_from_dict(metainfo):
+        """
+        Load a BT .torrent or Tribler .tribe file from the metainfo dictionary
+        and convert it into a TorrentDef.
+        
+        @param metainfo A dictionary following the BT torrent file spec.
+        @return TorrentDef.
+        @raise ValueError if the metainfo is not a valid torrent dict.
+        """
+        # Class method, no locking required
+        return TorrentDef._create(metainfo)
+    load_from_dict = staticmethod(load_from_dict)
+
+    
+    #
+    # Convenience instance methods for publishing new content
+    #
+    def add_content(self,inpath,outpath=None,playtime=None):
+        """
+        Add a file or directory to this torrent definition. When adding a
+        directory, all files in that directory will be added to the torrent.
+        
+        One can add multiple files and directories to a torrent definition.
+        In that case the "outpath" parameter must be used to indicate how
+        the files/dirs should be named in the torrent. The outpaths used must
+        start with a common prefix which will become the "name" field of the
+        torrent. 
+
+        To seed the torrent via the core (as opposed to e.g. HTTP) you will
+        need to start the download with the dest_dir set to the top-level
+        directory containing the files and directories to seed. For example,
+        a file "c:\Videos\file.avi" is seeded as follows:
+        <pre>
+            tdef = TorrentDef()
+            tdef.add_content("c:\Videos\file.avi",playtime="1:59:20")
+            tdef.set_tracker(s.get_internal_tracker_url())
+            tdef.finalize()
+            dscfg = DownloadStartupConfig()
+            dscfg.set_dest_dir("c:\Video")
+            s.start_download(tdef,dscfg)
+        </pre>
+        @param inpath Absolute name of file or directory on local filesystem, 
+        as Unicode string.
+        @param outpath (optional) Name of the content to use in the torrent def
+        as Unicode string.
+        @param playtime (optional) String representing the duration of the 
+        multimedia file when played, in [hh:]mm:ss format. 
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        @raise OSError if inpath does not exist (from os.stat).
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+        
+        # os.stat also validates that inpath exists; length is recorded now
+        # and used later by finalize().
+        s = os.stat(inpath)
+        d = {'inpath':inpath,'outpath':outpath,'playtime':playtime,'length':s.st_size}
+        self.input['files'].append(d)
+        
+        # Content changed, so any previously computed metainfo is stale.
+        self.metainfo_valid = False
+
+
+    def remove_content(self,inpath):
+        """ Remove a file or directory from this torrent definition 
+
+        @param inpath Absolute name of file or directory on local filesystem, 
+        as Unicode string.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+        
+        # Removes only the first matching entry; silently a no-op when inpath
+        # was never added. Note: metainfo_valid is NOT reset here, unlike
+        # add_content() — NOTE(review): possibly an oversight, confirm.
+        for d in self.input['files']:
+            if d['inpath'] == inpath:
+                self.input['files'].remove(d)
+                break
+
+    def create_live(self,name,bitrate,playtime="1:00:00",authconfig=None):
+        """ Create a live streaming multimedia torrent with a specific bitrate.
+        
+        The authconfig is a subclass LiveSourceAuthConfig with the key 
+        information required to allow authentication of packets from the source,
+        or None. In the latter case there is no source authentication. The other
+        two legal values are:
+        <pre>
+        * An instance of ECDSALiveSourceAuthConfig. 
+        * An Instance of RSALiveSourceAuthConfig.
+        </pre>
+        When using the ECDSA method, a sequence number, real-time timestamp and 
+        an ECDSA signature of 64 bytes is put in each piece. As a result, the 
+        content in each packet is get_piece_length()-81, so take this into 
+        account when selecting the bitrate.
+        
+        When using the RSA method, a sequence number, real-time timestamp and
+        a RSA signature of keysize/8 bytes is put in each piece.
+        
+        The info from the authconfig is stored in the 'info' part of the 
+        torrent file when finalized, so changing the authentication info changes
+        the identity (infohash) of the torrent.
+         
+        @param name The name of the stream.
+        @param bitrate The desired bitrate in bytes per second.
+        @param playtime The virtual playtime of the stream as a string in 
+        [hh:]mm:ss format.
+        @param authconfig Parameters for the authentication of the source
+        """
+        self.input['bps'] = bitrate
+        self.input['playtime'] = playtime # size of virtual content 
+
+        # For source auth
+        authparams = {}
+        if authconfig is None:
+            authparams['authmethod'] = LIVE_AUTHMETHOD_NONE
+        else: 
+            authparams['authmethod'] = authconfig.get_method()
+            authparams['pubkey'] = authconfig.get_pubkey()
+
+        self.input['live'] = authparams 
+
+        # A live stream is represented as a single virtual "file"; its length
+        # is filled in by finalize() based on bitrate * playtime.
+        d = {'inpath':name,'outpath':None,'playtime':None,'length':None}
+        self.input['files'].append(d)
+
+    #
+    # Torrent attributes
+    #
+    def set_encoding(self,enc):
+        """ Set the character encoding for e.g. the 'name' field 
+        @param enc Encoding name (e.g. 'utf-8'). """
+        self.input['encoding'] = enc
+        self.metainfo_valid = False
+        
+    def get_encoding(self):
+        """ Returns the character encoding used for e.g. the 'name' field.
+        @return Encoding name as string. """
+        return self.input['encoding']
+
+    def set_thumbnail(self,thumbfilename):
+        """
+        Reads image from file and turns it into a torrent thumbnail
+        The file should contain an image in JPEG format, preferably 171x96.
+        
+        @param thumbfilename Absolute name of image file, as Unicode string.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+        
+        # The raw image bytes are stored; the format is not validated here.
+        f = open(thumbfilename,"rb")
+        data = f.read()
+        f.close()
+        self.input['thumb'] = data 
+        self.metainfo_valid = False
+
+
+    def get_thumbnail(self):
+        """ Returns (MIME type,thumbnail data) if present or (None,None)
+        @return A tuple. """
+        if 'thumb' not in self.input or self.input['thumb'] is None:
+            return (None,None)
+        else:
+            thumb = self.input['thumb'] # buffer/string immutable
+            # MIME type is hard-coded: set_thumbnail() documents JPEG input.
+            return ('image/jpeg',thumb)
+
+    def set_tracker(self,url):
+        """ Sets the tracker (i.e. the torrent file's 'announce' field). 
+        @param url The announce URL.
+        @raise ValueError if the URL is not valid.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        if not isValidURL(url):
+            raise ValueError("Invalid URL")
+        
+        if url.endswith('/'):
+            # Some tracker code can't deal with / at end
+            url = url[:-1]
+        self.input['announce'] = url 
+        self.metainfo_valid = False
+
+    def get_tracker(self):
+        """ Returns the announce URL.
+        @return URL """
+        return self.input['announce']
+
+    def set_tracker_hierarchy(self,hier):
+        """ Set hierarchy of trackers (announce-list) following the spec
+        at http://www.bittornado.com/docs/multitracker-spec.txt
+        @param hier A hierarchy of trackers as a list of lists.
+        @raise ValueError if hier or any tier/URL is malformed.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        # TODO: check input, in particular remove / at end
+        # A normalized copy is built so the caller's list is not mutated.
+        newhier = []
+        if type(hier) != ListType:
+            raise ValueError("hierarchy is not a list")
+        for tier in hier:
+            if type(tier) != ListType:
+                raise ValueError("tier is not a list")
+            newtier = []
+            for url in tier:
+                if not isValidURL(url):
+                    raise ValueError("Invalid URL: "+`url`)
+                
+                if url.endswith('/'):
+                    # Some tracker code can't deal with / at end
+                    url = url[:-1]
+                newtier.append(url)
+            newhier.append(newtier)
+
+        self.input['announce-list'] = newhier
+        self.metainfo_valid = False
+
+    def get_tracker_hierarchy(self):
+        """ Returns the hierarchy of trackers.
+        @return A list of lists. """
+        return self.input['announce-list']
+
+    def set_dht_nodes(self,nodes):
+        """ Sets the DHT nodes required by the mainline DHT support,
+        See http://www.bittorrent.org/beps/bep_0005.html
+        @param nodes A list of [hostname,port] lists.
+        @raise ValueError if nodes or any entry has the wrong shape/types.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        # Check input
+        if type(nodes) != ListType:
+            raise ValueError("nodes not a list")
+        else:
+            for node in nodes:
+                # NOTE(review): the 'and' below means a non-list node of
+                # length 2 slips through — likely intended 'or'; confirm.
+                if type(node) != ListType and len(node) != 2:
+                    raise ValueError("node in nodes not a 2-item list: "+`node`)
+                if type(node[0]) != StringType:
+                    raise ValueError("host in node is not string:"+`node`)
+                if type(node[1]) != IntType:
+                    raise ValueError("port in node is not int:"+`node`)
+                
+        self.input['nodes'] = nodes
+        self.metainfo_valid = False 
+
+    def get_dht_nodes(self):
+        """ Returns the DHT nodes set. 
+        @return A list of [hostname,port] lists. """
+        return self.input['nodes']
+        
+    def set_comment(self,value):
+        """ Set comment field.
+        @param value A Unicode string.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+         """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['comment'] = value
+        self.metainfo_valid = False
+
+    def get_comment(self):
+        """ Returns the comment field of the def.
+        @return A Unicode string. """
+        return self.input['comment']
+
+    def get_comment_as_unicode(self):
+        """ Returns the comment field of the def as a unicode string,
+        converting via dunno2unicode() when the stored value is a byte string.
+        @return A Unicode string. """
+        return dunno2unicode(self.input['comment'])
+
+    def set_created_by(self,value):
+        """ Set 'created by' field.
+        @param value A Unicode string.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['created by'] = value
+        self.metainfo_valid = False
+
+    def get_created_by(self):
+        """ Returns the 'created by' field.
+        @return Unicode string. """
+        return self.input['created by']
+
+    def set_urllist(self,value):
+        """ Set list of HTTP seeds following the BEP 19 spec (GetRight style):
+        http://www.bittorrent.org/beps/bep_0019.html
+        @param value A list of URLs.
+        @raise ValueError if any URL is invalid.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        for url in value:
+            if not isValidURL(url):
+                raise ValueError("Invalid URL: "+`url`)
+
+        self.input['url-list'] = value
+        self.metainfo_valid = False
+
+    def get_urllist(self):
+        """ Returns the list of HTTP seeds (BEP 19 style).
+        @return A list of URLs. """
+        return self.input['url-list']
+
+    def set_httpseeds(self,value):
+        """ Set list of HTTP seeds following the BEP 17 spec (John Hoffman style):
+        http://www.bittorrent.org/beps/bep_0017.html
+        @param value A list of URLs.
+        @raise ValueError if any URL is invalid.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        for url in value:
+            if not isValidURL(url):
+                raise ValueError("Invalid URL: "+`url`)
+
+        self.input['httpseeds'] = value
+        self.metainfo_valid = False
+
+    def get_httpseeds(self):
+        """ Returns the list of HTTP seeds (BEP 17 style).
+        @return A list of URLs. """
+        return self.input['httpseeds']
+
+    def set_piece_length(self,value):
+        """ Set the size of the pieces in which the content is traded. 
+        The piece size must be a multiple of the chunk size, the unit in which
+        it is transmitted, which is 16K by default (see 
+        DownloadConfig.set_download_slice_size()). The default is automatic 
+        (value 0).
+        @param value A number of bytes as per the text.
+        @raise ValueError if value is not an int/long.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        if not (type(value) == IntType or type(value) == LongType):
+            raise ValueError("Piece length not an int/long")
+
+        self.input['piece length'] = value
+        self.metainfo_valid = False
+
+    def get_piece_length(self):
+        """ Returns the piece size (0 means automatic).
+        @return A number of bytes. """
+        return self.input['piece length']
+
+    #
+    # ClosedSwarm fields
+    #
+    def set_cs_keys(self, keys):
+        """ Set the Closed Swarm keys for this torrent.
+        Keys is a list of DER encoded keys; they are stored joined as a
+        single comma-separated string.
+        """
+        self.input['cs_keys'] = ",".join(keys)
+
+    def get_cs_keys_as_ders(self):
+        """Returns a list of DER encoded keys
+        @return A list of DER encoded keys or [] if not a CS
+        """
+        if 'cs_keys' in self.input and len(self.input['cs_keys']) > 0:
+            return self.input['cs_keys'].split(",")
+        return []
+            
+    def get_cs_keys(self):
+        """ Get the Closed swarm keys for this torrent.
+        @return A list of key objects ready to be used or [] if not a CS
+        """
+        if 'cs_keys' in self.input:
+            keys = self.input['cs_keys'].split(",")
+            
+            # Parse each DER string into a usable public-key object.
+            cs_keys = []
+            for key in keys:
+                k = ClosedSwarm.pubkey_from_der(key)
+                cs_keys.append(k)
+            return cs_keys
+        return []
+
+    def set_add_md5hash(self,value):
+        """ Whether to add an end-to-end MD5 checksum to the def.
+        @param value Boolean.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['makehash_md5'] = value
+        self.metainfo_valid = False
+
+    def get_add_md5hash(self):
+        """ Returns whether to add an MD5 checksum.
+        @return Boolean. """
+        return self.input['makehash_md5']
+
+    def set_add_crc32(self,value):
+        """ Whether to add an end-to-end CRC32 checksum to the def.
+        @param value Boolean.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['makehash_crc32'] = value
+        self.metainfo_valid = False
+
+    def get_add_crc32(self):
+        """ Returns whether to add an end-to-end CRC32 checksum to the def.
+        @return Boolean. """
+        return self.input['makehash_crc32']
+
+    def set_add_sha1hash(self,value):
+        """ Whether to add end-to-end SHA1 checksum to the def.
+        @param value Boolean.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['makehash_sha1'] = value
+        self.metainfo_valid = False
+
+    def get_add_sha1hash(self):
+        """ Returns whether to add an end-to-end SHA1 checksum to the def. 
+        @return Boolean."""
+        return self.input['makehash_sha1']
+    
+    def set_create_merkle_torrent(self,value):
+        """ Create a Merkle torrent instead of a regular BT torrent. A Merkle
+        torrent uses a hash tree for checking the integrity of the content
+        received. As such it creates much smaller torrent files than the
+        regular method. Tribler-specific feature.
+        @param value Boolean.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['createmerkletorrent'] = value
+        self.metainfo_valid = False
+
+    def get_create_merkle_torrent(self):
+        """ Returns whether to create a Merkle torrent.
+        @return Boolean. """
+        return self.input['createmerkletorrent']
+
+    def set_signature_keypair_filename(self,value):
+        """ Set absolute filename of keypair to be used for signature.
+        When set, a signature will be added.
+        @param value A filename containing an Elliptic Curve keypair.
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['torrentsigkeypairfilename'] = value
+        self.metainfo_valid = False
+
+    def get_signature_keypair_filename(self):
+        """ Returns the filename containing the signing keypair or None.
+        @return Unicode String or None. """
+        return self.input['torrentsigkeypairfilename']
+
+    def get_live(self):
+        """ Returns whether this definition is for a live torrent.
+        @return Boolean. """
+        return bool('live' in self.input and self.input['live'])
+
+
+    def get_live_authmethod(self):
+        """ Returns the method for authenticating the source.
+        <pre>
+        LIVE_AUTHMETHOD_ECDSA
+        </pre>
+        Returns False when this is not a live torrent (short-circuit of the
+        'and' expression below).
+        @return String
+        """
+        return 'live' in self.input and self.input['live']['authmethod']
+
+    def get_live_pubkey(self):
+        """ Returns the public key used for authenticating packets from
+        the source, or None when absent or not a live torrent.
+        @return A public key in DER.
+        """
+        if 'live' in self.input and 'pubkey' in self.input['live']:
+            return self.input['live']['pubkey']
+        else:
+            return None
+
+
+    def set_url_compat(self,value):
+        """ Set the URL compatible value for this definition. Only possible
+        for Merkle torrents and live torrents.
+        Note: unlike most setters, this does not check self.readonly and
+        does not reset metainfo_valid.
+        @param value Integer."""
+        
+        self.input['url-compat'] = value
+
+    def get_url_compat(self):
+        """ Returns whether this definition is URL compatible.
+        @return Boolean. """
+        return 'url-compat' in self.input and self.input['url-compat']
+        
+    #
+    # For P2P-transported Ogg streams
+    #
+    def set_live_ogg_headers(self,value):
+        """ Store the Ogg stream headers for a P2P-transported Ogg stream.
+        @param value binary string with the Ogg headers.
+        @raise ValueError when this def is URL compatible (P2PURLs cannot
+        carry Ogg headers). """
+        if self.get_url_compat():
+            raise ValueError("Cannot use P2PURLs for Ogg streams")
+        self.input['ogg-headers'] = value
+
+
+    def get_live_ogg_headers(self):
+        """ Returns the stored Ogg stream headers or None.
+        @return binary string or None. """
+        if 'ogg-headers' in self.input:
+            return self.input['ogg-headers']
+        else:
+            return None
+        
+    def set_metadata(self,value):
+        """ Set the P2P-Next metadata 
+        @param value binary string """
+        
+        self.input['ns-metadata'] = value
+
+    def get_metadata(self):
+        """ Returns the stored P2P-Next metadata or None.
+        @return binary string. """
+        if 'ns-metadata' in self.input:
+            return self.input['ns-metadata']
+        else:
+            return None
+
+    def set_initial_peers(self,value):
+        """ Set the initial peers to connect to. 
+        @param value List of (IP,port) tuples """
+        self.input['initial peers'] = value
+
+    def get_initial_peers(self):
+        """ Returns the list of initial peers, or [] when never set.
+        @return List of (IP,port) tuples. """
+        if 'initial peers' in self.input:
+            return self.input['initial peers']
+        else:
+            return []
+
+
+    def finalize(self,userabortflag=None,userprogresscallback=None):
+        """ Create BT torrent file by reading the files added with
+        add_content() and calculate the torrent file's infohash. 
+        
+        Creating the torrent file can take a long time and will be carried out
+        by the calling thread. The process can be made interruptable by passing 
+        a threading.Event() object via the userabortflag and setting it when 
+        the process should be aborted. The also optional userprogresscallback 
+        will be called by the calling thread periodically, with a progress 
+        percentage as argument.
+        
+        The userprogresscallback function will be called by the calling thread. 
+        
+        @param userabortflag threading.Event() object
+        @param userprogresscallback Function accepting a fraction as first
+        argument. 
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+        
+        # Already finalized and unchanged since: nothing to do.
+        if self.metainfo_valid:
+            return
+
+        if 'live' in self.input:
+            # Make sure the duration is an integral number of pieces, for
+            # security (live source auth).
+            secs = parse_playtime_to_secs(self.input['playtime'])
+            pl = float(self.get_piece_length())
+            length = float(self.input['bps']*secs)
+
+            if DEBUG:
+                print >>sys.stderr,"TorrentDef: finalize: length",length,"piecelen",pl
+            # Round the virtual length up to the next piece boundary.
+            diff = length % pl
+            add = (pl - diff) % pl
+            newlen = int(length + add)
+
+                
+            #print >>sys.stderr,"CHECK INFO LENGTH",secs,newlen
+
+            # Live streams have exactly one virtual file (see create_live()).
+            d = self.input['files'][0]
+            d['length'] = newlen
+
+
+        # Note: reading of all files and calc of hashes is done by calling 
+        # thread.
+        
+        # infohash is None when the user aborted via userabortflag.
+        (infohash,metainfo) = maketorrent.make_torrent_file(self.input,userabortflag=userabortflag,userprogresscallback=userprogresscallback)
+        if infohash is not None:
+            
+            if self.get_url_compat():
+                url = makeurl.metainfo2p2purl(metainfo)
+                # Make sure metainfo is preserved, in particular, the url-compat field.
+                swarmid = makeurl.metainfo2swarmid(metainfo) 
+                self.infohash = swarmid
+            else:
+                self.infohash = infohash
+            self.metainfo = metainfo
+            
+            # Propagate values the torrent-creation step may have filled in.
+            self.input['name'] = metainfo['info']['name']
+            # May have been 0, meaning auto.
+            self.input['piece length'] = metainfo['info']['piece length']
+            self.metainfo_valid = True
+
+        assert self.infohash is None or isinstance(self.infohash, str), "INFOHASH has invalid type: %s" % type(self.infohash)
+        assert self.infohash is None or len(self.infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(self.infohash)
+
+    def is_finalized(self):
+        """ Returns whether the TorrentDef is finalized or not.
+        @return Boolean. """
+        return self.metainfo_valid
+
+    #
+    # Operations on finalized TorrentDefs
+    #
+    def get_infohash(self):
+        """ Returns the infohash of the torrent, for non-URL compatible
+        torrents. Otherwise it returns the swarm identifier (either the root hash
+        (Merkle torrents) or hash of the live-source authentication key.
+        @return A string of length 20.
+        @raise TorrentDefNotFinalizedException when not yet finalized. """
+        if self.metainfo_valid:
+            return self.infohash
+        else:
+            raise TorrentDefNotFinalizedException()
+
+    def get_metainfo(self):
+        """ Returns the torrent definition as a dictionary that follows the BT
+        spec for torrent files. 
+        @return dict
+        @raise TorrentDefNotFinalizedException when not yet finalized.
+        """
+        if self.metainfo_valid:
+            return self.metainfo
+        else:
+            raise TorrentDefNotFinalizedException()
+
+    def get_name(self):
+        """ Returns the info['name'] field as raw string of bytes.
+        @return String
+        @raise TorrentDefNotFinalizedException when not yet finalized. """
+        if self.metainfo_valid:
+            return self.input['name'] # string immutable
+        else:
+            raise TorrentDefNotFinalizedException()
+
+    def set_name(self,name):
+        """ Set the name of this torrent
+        @param name name of torrent as String
+        @raise OperationNotPossibleAtRuntimeException when def is readonly.
+        """
+        if self.readonly:
+            raise OperationNotPossibleAtRuntimeException()
+
+        self.input['name'] = name
+        self.metainfo_valid = False
+
+        
+    def get_name_as_unicode(self):
+        """ Returns the info['name'] field as Unicode string, trying several
+        decoding strategies in order of preference; returns u"" if all fail.
+        @return Unicode string.
+        @raise TorrentDefNotFinalizedException when not yet finalized. """
+        if not self.metainfo_valid:
+            raise TorrentDefNotFinalizedException()
+
+        if "name.utf-8" in self.metainfo["info"]:
+            # There is an utf-8 encoded name.  We assume that it is
+            # correctly encoded and use it normally
+            try:
+                return unicode(self.metainfo["info"]["name.utf-8"], "UTF-8")
+            except UnicodeError:
+                pass
+
+        if "name" in self.metainfo["info"]:
+            # Try to use the 'encoding' field.  If it exists, it
+            # should contain something like 'utf-8'
+            if "encoding" in self.metainfo:
+                try:
+                    return unicode(self.metainfo["info"]["name"], self.metainfo["encoding"])
+                except UnicodeError:
+                    pass
+                except LookupError:
+                    # Some encodings are not supported by python.  For
+                    # instance, the MBCS codec which is used by
+                    # Windows is not supported (Jan 2010)
+                    pass
+
+            # Try to convert the names in path to unicode, without
+            # specifying the encoding
+            try:
+                return unicode(self.metainfo["info"]["name"])
+            except UnicodeError:
+                pass
+
+            # Try to convert the names in path to unicode, assuming
+            # that it was encoded as utf-8
+            try:
+                return unicode(self.metainfo["info"]["name"], "UTF-8")
+            except UnicodeError:
+                pass
+
+            # Convert the names in path to unicode by replacing out
+            # all characters that may -even remotely- cause problems
+            # with the '?' character
+            try:
+                # Last resort: keep printable ASCII (codes 1-127), replace
+                # everything else with '?'.
+                def filter_characters(name):
+                    def filter_character(char):
+                        if 0 < ord(char) < 128:
+                            return char
+                        else:
+                            if DEBUG: print >> sys.stderr, "Bad character filter", ord(char), "isalnum?", char.isalnum()
+                            return u"?"
+                    return u"".join([filter_character(char) for char in name])
+                return unicode(filter_characters(self.metainfo["info"]["name"]))
+            except UnicodeError:
+                pass
+
+        # We failed.  Returning an empty string
+        return u""
+
+    def verify_torrent_signature(self):
+        """ Verify the signature on the finalized torrent definition. Returns
+        whether the signature was valid.
+        @return Boolean.
+        """
+        if self.metainfo_valid:
+            return BaseLib.Core.Overlay.permid.verify_torrent_signature(self.metainfo)
+        else:
+            raise TorrentDefNotFinalizedException()
+
+
+    def save(self,filename):
+        """
+        Finalizes the torrent def and writes a torrent file i.e., bencoded dict 
+        following BT spec) to the specified filename. Note this may take a
+        long time when the torrent def is not yet finalized.
+        
+        @param filename An absolute Unicode path name.
+        """
+        if not self.readonly:
+            self.finalize()
+
+        bdata = bencode(self.metainfo)
+        f = open(filename,"wb")
+        f.write(bdata)
+        f.close()
+
+
+    def get_bitrate(self,file=None):
+        """ Returns the bitrate of the specified file. If no file is specified, 
+        we assume this is a single-file torrent.
+        
+        @param file (Optional) the file in the torrent to retrieve the bitrate of.
+        @return The bitrate in bytes per second or None.
+        """ 
+        if not self.metainfo_valid:
+            raise NotYetImplementedException() # must save first
+
+        return maketorrent.get_bitrate_from_metainfo(file,self.metainfo)
+
    def get_files_with_length(self,exts=None):
        """ The list of files in the finalized torrent def.
        @param exts (Optional) list of filename extensions (without leading .)
        to search for.
        @return A list of (filename, length) tuples.
        """
        # NOTE(review): unlike sibling accessors this does not check
        # metainfo_valid first; an unfinalized def fails inside
        # maketorrent.get_files instead -- confirm intended.
        return maketorrent.get_files(self.metainfo,exts)
+
+    def get_files(self,exts=None):
+        """ The list of files in the finalized torrent def.
+        @param exts (Optional) list of filename extensions (without leading .)
+        to search for.
+        @return A list of filenames.
+        """
+        return [filename for filename, _ in maketorrent.get_files(self.metainfo, exts)]
+
    def _get_all_files_as_unicode_with_length(self):
        """ Get a generator for files in the torrent def. No filtering
        is possible and all tricks are allowed to obtain a unicode
        list of filenames.  Yields (unicode path, length) tuples.
        NOTE(review): for multi-file torrents every decoding strategy
        that succeeds yields -- a single file may be yielded more than
        once when several decodings work; confirm callers dedup or rely
        on first-wins.
        @return A unicode filename generator.
        """
        assert self.metainfo_valid, "TorrentDef is not finalized"
        if "files" in self.metainfo["info"]:
            # Multi-file torrent
            join = os.path.join
            files = self.metainfo["info"]["files"]

            for file_dict in files:
                if "path.utf-8" in file_dict:
                    # This file has an utf-8 encoded list of elements.
                    # We assume that it is correctly encoded and use
                    # it normally
                    try:
                        yield join(*[unicode(element, "UTF-8") for element in file_dict["path.utf-8"]]), file_dict["length"]
                    except UnicodeError:
                        pass

                if "path" in file_dict:
                    # Try to use the 'encoding' field.  If it exists,
                    # it should contain something like 'utf-8'
                    if "encoding" in self.metainfo:
                        encoding = self.metainfo["encoding"]
                        try:
                            yield join(*[unicode(element, encoding) for element in file_dict["path"]]), file_dict["length"]
                        except UnicodeError:
                            pass
                        except LookupError:
                            # Some encodings are not supported by
                            # python.  For instance, the MBCS codec
                            # which is used by Windows is not
                            # supported (Jan 2010)
                            pass

                    # Try to convert the names in path to unicode,
                    # without specifying the encoding
                    try:
                        yield join(*[unicode(element) for element in file_dict["path"]]), file_dict["length"]
                    except UnicodeError:
                        pass

                    # Try to convert the names in path to unicode,
                    # assuming that it was encoded as utf-8
                    try:
                        yield join(*[unicode(element, "UTF-8") for element in file_dict["path"]]), file_dict["length"]
                    except UnicodeError:
                        pass

                    # Convert the names in path to unicode by
                    # replacing out all characters that may -even
                    # remotely- cause problems with the '?' character
                    try:
                        def filter_characters(name):
                            # Keep printable 7-bit characters, replace the rest.
                            def filter_character(char):
                                if 0 < ord(char) < 128:
                                    return char
                                else:
                                    if DEBUG: print >> sys.stderr, "Bad character filter", ord(char), "isalnum?", char.isalnum()
                                    return u"?"
                            return u"".join([filter_character(char) for char in name])
                        yield join(*[unicode(filter_characters(element)) for element in file_dict["path"]]), file_dict["length"]
                    except UnicodeError:
                        pass

        else:
            # Single-file torrent
            yield self.get_name_as_unicode(), self.metainfo["info"]["length"]
+        
+    def get_files_as_unicode_with_length(self,exts=None):
+        """ The list of files in the finalized torrent def.
+        @param exts (Optional) list of filename extensions (without leading .)
+        to search for.
+        @return A list of filenames.
+        """
+        if not self.metainfo_valid:
+            raise NotYetImplementedException() # must save first
+
+        videofiles = []
+        for filename, length in self._get_all_files_as_unicode_with_length():
+            prefix, ext = os.path.splitext(filename)
+            if ext != "" and ext[0] == ".":
+                ext = ext[1:]
+            if exts is None or ext.lower() in exts:
+                videofiles.append((filename, length))
+        return videofiles
+
+    def get_files_as_unicode(self,exts=None):
+        return [filename for filename, _ in self.get_files_as_unicode_with_length(exts)]
+
+    def get_length(self,selectedfiles=None):
+        """ Returns the total size of the content in the torrent. If the
+        optional selectedfiles argument is specified, the method returns
+        the total size of only those files.
+        @return A length (long)
+        """
+        if not self.metainfo_valid:
+            raise NotYetImplementedException() # must save first
+        
+        (length,filepieceranges) = maketorrent.get_length_filepieceranges_from_metainfo(self.metainfo,selectedfiles)
+        return length
+
+    def get_creation_date(self,default=0):
+        if not self.metainfo_valid:
+            raise NotYetImplementedException() # must save first
+
+        return self.metainfo.get("creation date", default)
+
+    def is_multifile_torrent(self):
+        """ Returns whether this TorrentDef is a multi-file torrent.
+        @return Boolean
+        """
+        if not self.metainfo_valid:
+            raise NotYetImplementedException() # must save first
+        
+        return 'files' in self.metainfo['info']
+
+
+
+    def is_merkle_torrent(self):
+        """ Returns whether this TorrentDef is a Merkle torrent. Use
+        get_create_merkle_torrent() to determine this before finalization.
+        @return Boolean """
+        if self.metainfo_valid:
+            return 'root hash' in self.metainfo['info']
+        else:
+            raise TorrentDefNotFinalizedException()
+
+    
+    def get_url(self):
+        """ Returns the URL representation of this TorrentDef. The TorrentDef
+        must be a Merkle or live torrent and must be set to URL-compatible 
+        before finalizing."""
+        
+        if self.metainfo_valid:
+            return makeurl.metainfo2p2purl(self.metainfo)
+        else:
+            raise TorrentDefNotFinalizedException()
+    
+    
+    #
+    # Internal methods
+    #
+    def get_index_of_file_in_files(self,file):
+        if not self.metainfo_valid:
+            raise NotYetImplementedException() # must save first
+
+        info = self.metainfo['info']
+
+        if file is not None and 'files' in info:
+            for i in range(len(info['files'])):
+                x = info['files'][i]
+                    
+                intorrentpath = maketorrent.pathlist2filename(x['path'])
+                if intorrentpath == file:
+                    return i
+            return ValueError("File not found in torrent")
+        else:
+            raise ValueError("File not found in single-file torrent")
+
+    #
+    # Copyable interface
+    # 
+    def copy(self):
+        input = copy.copy(self.input)
+        metainfo = copy.copy(self.metainfo)
+        infohash = self.infohash
+        t = TorrentDef(input,metainfo,infohash)
+        t.metainfo_valid = self.metainfo_valid
+        t.set_cs_keys(self.get_cs_keys_as_ders())
+        return t
diff --git a/instrumentation/next-share/BaseLib/Core/Utilities/Crypto.py b/instrumentation/next-share/BaseLib/Core/Utilities/Crypto.py
new file mode 100644 (file)
index 0000000..a66d6aa
--- /dev/null
@@ -0,0 +1,117 @@
+# Written by Arno Bakker \r
+# see LICENSE.txt for license information\r
+\r
+import sys\r
+import base64\r
+import textwrap\r
+import binascii\r
+from cStringIO import StringIO\r
+\r
+# Switch between using Python's builtin SHA1 function or M2Crypto/OpenSSL's\r
+# TODO: optimize such that less memory is allocated, e.g. reuse a single\r
+# sha() object instance (hard to do here centrally with multiple threads)\r
+#\r
+\r
+# Arno, 2009-06-23: The OpenSSL calls used by M2Crypto's MessageDigest have \r
+# different behaviour than the Python sha class ones. In particular, OpenSSL\r
+# needs to make special calls to incrementally digest data (i.e., update();\r
+# digest();update();digest(). M2Crypto's MessageDigest doesn't make these \r
+# special calls. Due to bad programming, it will actually Segmentation\r
+# Fault when this usage occurs. And this usage occurs during hashchecking \r
+# (so when using VOD repeatedly, not during live), see StorageWrapper.\r
+#\r
+# We'll need to patch M2Crypto to work around this. In the meanwhile, I\r
+# disable the offloading to OpenSSL for all platforms.\r
+#\r
+USE_M2CRYPTO_SHA = False\r
+\r
+\r
if USE_M2CRYPTO_SHA:
    from M2Crypto import EVP

    # Drop-in replacement for the stdlib sha interface, backed by
    # OpenSSL via M2Crypto.  See the caveat above: OpenSSL digests
    # cannot be update()d once final() has been called.
    class sha:
        def __init__(self,data=None):
            # hash caches the final digest once digest() has been called.
            self.hash = None
            self.md = EVP.MessageDigest('sha1')
            if data is not None:
                self.md.update(data)

        def update(self,data):
            # Guard against the OpenSSL segfault described above: the
            # object is frozen after the first digest() call.
            if self.hash:
                raise ValueError("sha: Cannot update after calling digest (OpenSSL limitation)")
            self.md.update(data)

        def digest(self):
            if not self.hash:
                self.hash = self.md.final()
            return self.hash

        def hexdigest(self):
            d = self.digest()
            return binascii.hexlify(d)
else:
    # Offloading disabled: fall back to the Python implementation.
    from sha import sha
+\r
+\r
+#\r
+# M2Crypto has no functions to read a pubkey in DER\r
+#\r
def RSA_pub_key_from_der(der):
    """ Reconstruct an M2Crypto RSA public key from its DER encoding by
    wrapping it in a PEM envelope (M2Crypto has no function to read a
    public key in DER directly). """
    from M2Crypto import RSA, BIO

    pem = '-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----\n' % (
        textwrap.fill(base64.standard_b64encode(der), 64),)
    return RSA.load_pub_key_bio(BIO.MemoryBuffer(pem))
+\r
def RSA_keypair_to_pub_key_in_der(keypair):
    """ Return the public half of keypair as DER in SubjectPublicKeyInfo
    form, i.e. SEQUENCE { OBJECT rsaEncryption, NULL, BIT STRING params }. """
    # keypair.save_key_der_bio() cannot be used: it calls
    # i2d_RSAPrivateKey_bio() and writes just the raw RSA parameters,
    # without the ASN.1 wrapper that says "rsaEncryption".
    # EVP.PKey.as_der() yields the right format, but assigning the RSA
    # key to an EVP.PKey inside a function crashes Python:
    #   pkey = EVP.PKey(); pkey.assign_rsa(keypair); return pkey.as_der()
    # So instead: save as PEM and strip the BEGIN/END envelope; the
    # remaining lines are the base64-encoded DER body we want.
    from M2Crypto import RSA, BIO

    bio = BIO.MemoryBuffer()
    keypair.save_pub_key_bio(bio)
    pemlines = StringIO(bio.read_all()).readlines()
    # Drop the first and last (marker) lines, join the base64 payload.
    return base64.standard_b64decode(''.join(pemlines[1:-1]))
+    \r
diff --git a/instrumentation/next-share/BaseLib/Core/Utilities/__init__.py b/instrumentation/next-share/BaseLib/Core/Utilities/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/Utilities/timeouturlopen.py b/instrumentation/next-share/BaseLib/Core/Utilities/timeouturlopen.py
new file mode 100644 (file)
index 0000000..01c9bf1
--- /dev/null
@@ -0,0 +1,75 @@
+# Written by Feek Zindel
+# see LICENSE.txt for license information
+
+import sys
+import httplib
+import socket
+import urllib2
+
+import urllib
+import urlparse
+
+DEBUG = False
+
def urlOpenTimeout(url,timeout=30,*data):
    """ Open a URL like urllib2.urlopen(), but with a socket-level timeout
    (in seconds) applied to the HTTP connection.  Extra positional
    arguments are passed through to opener.open() (e.g. POST data).
    NOTE(review): only plain http gets the timeout handler; other schemes
    fall back to default handling -- confirm that is intended. """
    class TimeoutHTTPConnection(httplib.HTTPConnection):
        def connect(self):
            """Connect to the host and port specified in __init__."""
            # Same algorithm as httplib.HTTPConnection.connect(), with
            # settimeout() applied to each candidate socket before use.
            msg = "getaddrinfo returns an empty list"
            for res in socket.getaddrinfo(self.host, self.port, 0,
                                          socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                try:
                    self.sock = socket.socket(af,socktype, proto)
                    self.sock.settimeout(timeout)
                    if self.debuglevel > 0:
                        print "connect: (%s, %s)" % (self.host, self.port)
                    self.sock.connect(sa)
                except socket.error, msg:
                    # Try the next resolved address; remember the error.
                    if self.debuglevel > 0:
                        print 'connect fail:', (self.host, self.port)
                    if self.sock:
                        self.sock.close()
                    self.sock = None
                    continue
                break
            if not self.sock:
                # All addresses failed; msg holds the last socket error.
                raise socket.error, msg

    class TimeoutHTTPHandler(urllib2.HTTPHandler):
        def http_open(self, req):
            return self.do_open(TimeoutHTTPConnection, req)

    # Arno, 2010-03-09: ProxyHandler is implicit, so code already proxy aware.
    opener = urllib2.build_opener(TimeoutHTTPHandler,
                                  urllib2.HTTPDefaultErrorHandler,
                                  urllib2.HTTPRedirectHandler)
    return opener.open(url,*data)
+
+
def find_proxy(url):
    """ Returns the proxy configured for url's scheme as a "host:port"
    string, or None when no proxy applies (none configured, localhost,
    or host on the bypass list). """
    (scheme, netloc, path, pars, query, fragment) = urlparse.urlparse(url)
    proxies = urllib.getproxies()
    proxyhost = None
    if scheme in proxies:
        if '@' in netloc:
            sidx = netloc.find('@')+1
        else:
            sidx = 0
        # IPVSIX TODO: what if host is IPv6 address
        # BUGFIX: search for the port separator only *after* the
        # userinfo part; previously a ':' inside "user:pass@" was
        # matched first, producing an empty host.
        eidx = netloc.find(':', sidx)
        if eidx == -1:
            eidx = len(netloc)
        host = netloc[sidx:eidx]
        if not (host == "127.0.0.1" or urllib.proxy_bypass(host)):
            proxyurl = proxies[scheme]
            proxyelems = urlparse.urlparse(proxyurl)
            proxyhost = proxyelems[1]

    if DEBUG:
        # write() instead of print so this works under Python 2 and 3.
        sys.stderr.write("find_proxy: Got proxies %s selected %s URL was %s\n"
                         % (proxies, proxyhost, url))
    return proxyhost
+
+
+#s = urlOpenTimeout("http://www.google.com",timeout=30)
diff --git a/instrumentation/next-share/BaseLib/Core/Utilities/unicode.py b/instrumentation/next-share/BaseLib/Core/Utilities/unicode.py
new file mode 100644 (file)
index 0000000..bc18a8c
--- /dev/null
@@ -0,0 +1,83 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+
def bin2unicode(bin, possible_encoding='utf_8'):
    """ Decode a byte string to unicode, trying a cascade of encodings:
    the caller's hint (or the filesystem encoding when None), the
    filesystem encoding, utf_8 and iso-8859-1.  As a last resort decode
    with the default encoding, replacing undecodable bytes. """
    sysenc = sys.getfilesystemencoding()
    if possible_encoding is None:
        possible_encoding = sysenc
    # Same attempt order as the original nested try/except chain.
    for encoding in (possible_encoding, sysenc, 'utf_8', 'iso-8859-1', sysenc):
        try:
            return bin.decode(encoding)
        except:
            continue
    return bin.decode(sys.getdefaultencoding(), errors='replace')
+
+
def str2unicode(s):
    """ Best-effort conversion of s to unicode: plain unicode() first,
    then a list of candidate encodings, then the default encoding with
    replacement.  Returns s unchanged when nothing works. """
    try:
        return unicode(s)
    except:
        pass
    for encoding in (sys.getfilesystemencoding(), 'utf_8',
                     'iso-8859-1', 'unicode-escape'):
        try:
            return unicode(s, encoding)
        except:
            pass
    try:
        return unicode(s, sys.getdefaultencoding(), errors='replace')
    except:
        pass
    return s
+
def dunno2unicode(dunno):
    """ Convert a value of unknown type to unicode: pass unicode through,
    decode byte strings via bin2unicode, fall back to str2unicode. """
    if isinstance(dunno, unicode):
        return dunno
    try:
        return bin2unicode(dunno)
    except:
        return str2unicode(dunno)
+
+
def name2unicode(metadata):
    """ Decode the torrent name in metadata['info'] to unicode in place,
    honouring the optional top-level 'encoding' field, and return the
    key ('name.utf-8' or 'name') under which it is stored.
    Mutates metadata. """
    # IDIOM: replaced deprecated dict.has_key() with the 'in' operator
    # (same semantics, also Python-3 compatible).
    if 'name.utf-8' in metadata['info']:
        namekey = 'name.utf-8'
    else:
        namekey = 'name'
    if 'encoding' in metadata:
        encoding = metadata['encoding']
        metadata['info'][namekey] = bin2unicode(metadata['info'][namekey], encoding)
    else:
        metadata['info'][namekey] = bin2unicode(metadata['info'][namekey])

    # change metainfo['info']['name'] to metainfo['info'][namekey], just in case...
    # roer888 TODO: Never tested the following 2 lines
    if namekey != 'name':
        metadata['info']['name'] = metadata['info'][namekey]

    return namekey
+
+
def unicode2str(s):
    """ Encode a unicode string with the filesystem encoding; any other
    value passes through untouched. """
    if isinstance(s, unicode):
        return s.encode(sys.getfilesystemencoding())
    return s
diff --git a/instrumentation/next-share/BaseLib/Core/Utilities/utilities.py b/instrumentation/next-share/BaseLib/Core/Utilities/utilities.py
new file mode 100644 (file)
index 0000000..f965105
--- /dev/null
@@ -0,0 +1,626 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+
+import socket
+from time import time, strftime, gmtime
+from base64 import encodestring, decodestring
+from BaseLib.Core.Utilities.Crypto import sha
+import sys
+import os
+import copy
+from types import UnicodeType, StringType, LongType, IntType, ListType, DictType
+import urlparse
+from traceback import print_exc,print_stack
+import binascii
+
+STRICT_CHECK = True
+DEBUG = False
+
+infohash_len = 20
+
def bin2str(bin):
    """ Full BASE64 encoding of a binary string, newlines stripped. """
    encoded = encodestring(bin)
    return encoded.replace("\n", "")
+    
def str2bin(str):
    """ Inverse of bin2str: decode a BASE64 string back to binary. """
    return decodestring(str)
+
def validName(name):
    """ Check that name is a non-empty str.
    @return True on success.
    @raise RuntimeError when name is not a str or is empty. """
    # BUGFIX: the original used 'and', which can never be true (a
    # non-str has no guarantee of len(); an empty str IS a str), so
    # every invalid name passed validation.
    if not isinstance(name, str) or len(name) == 0:
        # %r so non-str values render without a concat TypeError.
        raise RuntimeError("invalid name: %r" % (name,))
    return True
+
def validPort(port):
    """ Check that port is a legal port number (0..65535).
    @return True on success.
    @raise RuntimeError (or int()'s ValueError) otherwise. """
    number = int(port)
    if number < 0 or number > 65535:
        raise RuntimeError("invalid Port: " + str(number))
    return True
+
def validIP(ip):
    """ Check that ip is an IPv4 dotted quad, or a resolvable
    hostname/IPv6 address.
    @return True on success.
    @raise RuntimeError when it is neither. """
    try:
        try:
            socket.inet_aton(ip)  # IPv4 address?
            return True
        except socket.error:
            socket.getaddrinfo(ip, None)  # hostname / IPv6?
            return True
    except:
        print_exc()
    raise RuntimeError("invalid IP address: " + ip)
+
+    
def validPermid(permid):
    """ Check that permid is a str.  Arno, 2010-02-17: permid is ASN.1
    encoded data that is NOT fixed length.
    @return True on success.
    @raise RuntimeError otherwise. """
    if not isinstance(permid, str):
        # NOTE(review): concatenating a non-str here raises TypeError
        # rather than RuntimeError -- callers use bare except anyway.
        raise RuntimeError("invalid permid: " + permid)
    return True
+
def validInfohash(infohash):
    """ Check that infohash is a str and (under STRICT_CHECK) exactly
    infohash_len (20) bytes long.
    @return True on success.
    @raise RuntimeError otherwise. """
    if not isinstance(infohash, str):
        raise RuntimeError("invalid infohash " + infohash)
    if STRICT_CHECK and len(infohash) != infohash_len:
        raise RuntimeError("invalid length infohash " + infohash)
    return True
+    
def isValidPermid(permid):
    """ Non-raising variant of validPermid.
    @return Boolean """
    try:
        validPermid(permid)
        return True
    except:
        return False
+    
def isValidInfohash(infohash):
    """ Non-raising variant of validInfohash.
    @return Boolean """
    try:
        validInfohash(infohash)
        return True
    except:
        return False
+
def isValidPort(port):
    """ Non-raising variant of validPort.
    @return Boolean """
    try:
        validPort(port)
        return True
    except:
        return False
+    
def isValidIP(ip):
    """ Non-raising variant of validIP.
    @return Boolean """
    try:
        validIP(ip)
        return True
    except:
        return False
+
def isValidName(name):
    """ Non-raising variant of validName.
    @return Boolean """
    # BUGFIX: this used to call validPort(name), i.e. it validated the
    # name as if it were a port number.
    try:
        return validName(name)
    except:
        return False
+    
+    
def validTorrentFile(metainfo):
    """ Validate the structure of a bdecoded .torrent dict.  Raises
    ValueError on the first violated constraint; returns None when the
    metainfo is acceptable.
    Jie: is this function too strict? Many torrents could not be downloaded
    """
    if type(metainfo) != DictType:
        raise ValueError('metainfo not dict')

    if 'info' not in metainfo:
        raise ValueError('metainfo misses key info')

    if 'announce' in metainfo and not isValidURL(metainfo['announce']):
        raise ValueError('announce URL bad')

    # http://www.bittorrent.org/DHT_protocol.html says both announce and nodes
    # are not allowed, but some torrents (Azureus?) apparently violate this.
    #if 'announce' in metainfo and 'nodes' in metainfo:
    #    raise ValueError('both announce and nodes present')

    if 'nodes' in metainfo:
        nodes = metainfo['nodes']
        if type(nodes) != ListType:
            raise ValueError('nodes not list, but ' + repr(type(nodes)))
        for pair in nodes:
            # BUGFIX: was 'and', which could never be true for a list
            # entry and so let wrongly-sized node pairs through (they
            # then failed on unpacking instead).
            if type(pair) != ListType or len(pair) != 2:
                raise ValueError('node not 2-item list, but ' + repr(type(pair)))
            host, port = pair
            if type(host) != StringType:
                raise ValueError('node host not string, but ' + repr(type(host)))
            if type(port) != IntType:
                raise ValueError('node port not int, but ' + repr(type(port)))

    if not ('announce' in metainfo or 'nodes' in metainfo):
        raise ValueError('announce and nodes missing')

    # 04/05/10 boudewijn: with the introduction of magnet links we
    # also allow for peer addresses to be (temporarily) stored in the
    # metadata.  Typically these addresses are recently gathered.
    if "initial peers" in metainfo:
        if not isinstance(metainfo["initial peers"], list):
            raise ValueError("initial peers not list, but %s" % type(metainfo["initial peers"]))
        for address in metainfo["initial peers"]:
            if not (isinstance(address, tuple) and len(address) == 2):
                raise ValueError("address not 2-item tuple, but %s" % type(address))
            if not isinstance(address[0], str):
                raise ValueError("address host not string, but %s" % type(address[0]))
            if not isinstance(address[1], int):
                raise ValueError("address port not int, but %s" % type(address[1]))

    info = metainfo['info']
    if type(info) != DictType:
        raise ValueError('info not dict')

    # The mandatory info keys depend on the torrent flavour:
    # Merkle ('root hash'), live ('live') or classic ('pieces').
    if 'root hash' in info:
        infokeys = ['name', 'piece length', 'root hash']
    elif 'live' in info:
        infokeys = ['name', 'piece length', 'live']
    else:
        infokeys = ['name', 'piece length', 'pieces']
    for key in infokeys:
        if key not in info:
            raise ValueError('info misses key ' + key)
    name = info['name']
    if type(name) != StringType:
        raise ValueError('info name is not string but ' + repr(type(name)))
    pl = info['piece length']
    if type(pl) != IntType and type(pl) != LongType:
        raise ValueError('info piece size is not int, but ' + repr(type(pl)))
    if 'root hash' in info:
        rh = info['root hash']
        if type(rh) != StringType or len(rh) != 20:
            raise ValueError('info roothash is not 20-byte string')
    elif 'live' in info:
        live = info['live']
        if type(live) != DictType:
            raise ValueError('info live is not a dict')
        else:
            if 'authmethod' not in live:
                raise ValueError('info live misses key' + 'authmethod')
    else:
        p = info['pieces']
        if type(p) != StringType or len(p) % 20 != 0:
            raise ValueError('info pieces is not multiple of 20 bytes')

    if 'length' in info:
        # single-file torrent
        if 'files' in info:
            raise ValueError('info may not contain both files and length key')

        l = info['length']
        if type(l) != IntType and type(l) != LongType:
            raise ValueError('info length is not int, but ' + repr(type(l)))
    else:
        # multi-file torrent
        # BUGFIX: this branch re-tested 'length' (always false here);
        # report a missing 'files' key properly instead of a KeyError.
        if 'files' not in info:
            raise ValueError('info misses key files')

        files = info['files']
        if type(files) != ListType:
            raise ValueError('info files not list, but ' + repr(type(files)))

        filekeys = ['path', 'length']
        for file in files:
            for key in filekeys:
                if key not in file:
                    raise ValueError('info files missing path or length key')

            p = file['path']
            if type(p) != ListType:
                raise ValueError('info files path is not list, but ' + repr(type(p)))
            for dir in p:
                if type(dir) != StringType:
                    raise ValueError('info files path is not string, but ' + repr(type(dir)))

            l = file['length']
            if type(l) != IntType and type(l) != LongType:
                raise ValueError('info files length is not int, but ' + repr(type(l)))

    # common additional fields
    if 'announce-list' in metainfo:
        al = metainfo['announce-list']
        if type(al) != ListType:
            raise ValueError('announce-list is not list, but ' + repr(type(al)))
        for tier in al:
            if type(tier) != ListType:
                raise ValueError('announce-list tier is not list ' + repr(tier))
        # Jie: this limitation is not necessary
#            for url in tier:
#                if not isValidURL(url):
#                    raise ValueError('announce-list url is not valid '+repr(url))

    if 'azureus_properties' in metainfo:
        azprop = metainfo['azureus_properties']
        if type(azprop) != DictType:
            raise ValueError('azureus_properties is not dict, but ' + repr(type(azprop)))
        if 'Content' in azprop:
            content = azprop['Content']
            if type(content) != DictType:
                raise ValueError('azureus_properties content is not dict, but ' + repr(type(content)))
            if 'thumbnail' in content:
                thumb = content['thumbnail']
                # BUGFIX: the original tested type(content) here, so a
                # non-string thumbnail was never caught.
                if type(thumb) != StringType:
                    raise ValueError('azureus_properties content thumbnail is not string')

    # Diego: perform check on httpseeds/url-list field
    if 'url-list' in metainfo:
        if 'files' in metainfo['info']:
            # Diego: only single-file mode allowed for http seeding now
            raise ValueError("Only single-file mode supported with HTTP seeding: remove url-list")
        elif type(metainfo['url-list']) != ListType:
            raise ValueError('url-list is not list, but ' + repr(type(metainfo['url-list'])))
        else:
            for url in metainfo['url-list']:
                if not isValidURL(url):
                    raise ValueError("url-list url is not valid: " + repr(url))

    if 'httpseeds' in metainfo:
        if 'files' in metainfo['info']:
            # Diego: only single-file mode allowed for http seeding now
            raise ValueError("Only single-file mode supported with HTTP seeding: remove httpseeds")
        elif type(metainfo['httpseeds']) != ListType:
            raise ValueError('httpseeds is not list, but ' + repr(type(metainfo['httpseeds'])))
        else:
            for url in metainfo['httpseeds']:
                if not isValidURL(url):
                    raise ValueError("httpseeds url is not valid: " + repr(url))
+
+
def isValidTorrentFile(metainfo):
    """ Non-raising wrapper around validTorrentFile.
    @return Boolean """
    try:
        validTorrentFile(metainfo)
    except:
        if DEBUG:
            print_exc()
        return False
    return True
+    
+    
def isValidURL(url):
    """ Loose URL syntax check: both a scheme and a network location must
    be present.  udp:// tracker URLs are accepted by temporarily
    validating them as http://. """
    if url.lower().startswith('udp'):    # exception for udp
        url = url.lower().replace('udp', 'http', 1)
    pieces = urlparse.urlsplit(url)
    # Require a non-empty scheme and netloc.
    return bool(pieces[0]) and bool(pieces[1])
+    
def show_permid(permid):
    """Render a permid as its full BASE64 encoding, newlines stripped.
    Must not be abbreviated in any way. Returns 'None' for empty input."""
    if not permid:
        return 'None'
    encoded = encodestring(permid)
    return encoded.replace("\n","")
+
def show_permid_short(permid):
    """Render the last 10 BASE64 characters of a permid ('None' if empty)."""
    if not permid:
        return 'None'
    encoded = encodestring(permid).replace("\n","")
    return encoded[-10:]
+
def show_permid_shorter(permid):
    """Render the last 5 BASE64 characters of a permid ('None' if empty)."""
    if not permid:
        return 'None'
    encoded = encodestring(permid).replace("\n","")
    return encoded[-5:]
+
def readableBuddyCastMsg(buddycast_data,selversion):
    """ Convert msg to readable format.
    As this copies the original dict, and just transforms it,
    most added info is already present and therefore logged
    correctly. Exception is the OLPROTO_VER_EIGHTH which
    modified the preferences list. """
    msg = copy.deepcopy(buddycast_data)

    # Drop fields that are not interesting for logging.
    for field in ('permid', 'ip', 'port'):
        if field in msg:
            msg.pop(field)

    name = repr(msg['name'])    # avoid coding error

    if msg['preferences']:
        if selversion < 8:
            # Before OLPROTO_VER_EIGHTH preferences is a plain list of
            # infohashes. (Can't use the constant due to recursive import.)
            readable_prefs = [show_permid(pref) for pref in msg['preferences']]
        else:
            # OLPROTO_VER_EIGHTH+: list of tuples whose first element is
            # the infohash; escape just that element, keep the rest as-is.
            readable_prefs = []
            for preftuple in msg['preferences']:
                entry = [show_permid(v) if pos == 0 else v
                         for pos, v in enumerate(preftuple)]
                readable_prefs.append(entry)
        msg['preferences'] = readable_prefs

    if msg.get('taste buddies', []):
        buddies = []
        for buddy in msg['taste buddies']:
            buddy['permid'] = show_permid(buddy['permid'])
            if buddy.get('preferences', []):
                buddy['preferences'] = [show_permid(p) for p in buddy['preferences']]
            buddies.append(buddy)
        msg['taste buddies'] = buddies

    if msg.get('random peers', []):
        peers = []
        for peer in msg['random peers']:
            peer['permid'] = show_permid(peer['permid'])
            peers.append(peer)
        msg['random peers'] = peers

    return msg
+    
def print_prefxchg_msg(prefxchg_msg):
    """Debug-dump a preference-exchange message to stdout.

    Note: the inner show_permid deliberately shadows the module-level
    helper with an identity function, so permids are printed raw here.
    """
    def show_permid(permid):
        return permid
    print "------- preference_exchange message ---------"
    print prefxchg_msg
    print "---------------------------------------------"
    print "permid:", show_permid(prefxchg_msg['permid'])
    print "name", prefxchg_msg['name']
    print "ip:", prefxchg_msg['ip']
    print "port:", prefxchg_msg['port']
    print "preferences:"
    if prefxchg_msg['preferences']:
        for pref in prefxchg_msg['preferences']:
            print "\t", pref#, prefxchg_msg['preferences'][pref]
    print "taste buddies:"
    if prefxchg_msg['taste buddies']:
        for buddy in prefxchg_msg['taste buddies']:
            print "\t permid:", show_permid(buddy['permid'])
            #print "\t permid:", buddy['permid']
            print "\t ip:", buddy['ip']
            print "\t port:", buddy['port']
            print "\t age:", buddy['age']
            print "\t preferences:"
            if buddy['preferences']:
                for pref in buddy['preferences']:
                    print "\t\t", pref#, buddy['preferences'][pref]
            print
    print "random peers:"
    if prefxchg_msg['random peers']:
        for peer in prefxchg_msg['random peers']:
            print "\t permid:", show_permid(peer['permid'])
            #print "\t permid:", peer['permid']
            print "\t ip:", peer['ip']
            print "\t port:", peer['port']
            print "\t age:", peer['age']
            print    
            
+            
def print_dict(data, level=0):
    """Recursively pretty-print a nested dict/list structure to stdout,
    indenting two spaces per nesting level; scalars are printed as-is."""
    if isinstance(data, dict):
        print
        for i in data:
            print "  "*level, str(i) + ':',
            print_dict(data[i], level+1)
    elif isinstance(data, list):
        if not data:
            print "[]"
        else:
            print
        # Loop runs unconditionally; for an empty list it simply does nothing.
        for i in xrange(len(data)):
            print "  "*level, '[' + str(i) + ']:',
            print_dict(data[i], level+1)
    else:
        print data
        
+        
def friendly_time(old_time):
    """Describe how long ago old_time (a POSIX timestamp) was, in words.

    Non-numeric strings are echoed back unchanged; any other unparseable
    input yields '?'. Timestamps older than ~3 days render as dd-mm-YYYY.
    """
    now = time()
    try:
        old_time = int(old_time)
        assert old_time > 0
        diff = int(now - old_time)
    except:
        # Echo plain strings to the caller; everything else is "unknown".
        return old_time if isinstance(old_time, str) else '?'
    if diff < 0:
        return '?'
    if diff < 2:
        return str(diff) + " sec. ago"
    if diff < 60:
        return str(diff) + " secs. ago"
    if diff < 120:
        return "1 min. ago"
    if diff < 3600:
        return str(int(diff/60)) + " mins. ago"
    if diff < 7200:
        return "1 hour ago"
    if diff < 86400:
        return str(int(diff/3600)) + " hours ago"
    if diff < 172800:
        return "Yesterday"
    if diff < 259200:
        return str(int(diff/86400)) + " days ago"
    return strftime("%d-%m-%Y", gmtime(old_time))
+        
def sort_dictlist(dict_list, key, order='increase'):
    """Return dict_list sorted by each dict's value for key.

    Dicts lacking key are silently dropped from the result.
    order may be 'decrease' (or 1) for descending; anything else ascends.
    """
    # Decorate with (value, original position), sort, then undecorate.
    decorated = [(d[key], pos) for pos, d in enumerate(dict_list) if key in d]
    decorated.sort()
    if order == 'decrease' or order == 1:    # 0 - increase, 1 - decrease
        decorated.reverse()
    return [dict_list[pos] for _val, pos in decorated]
+
+
def dict_compare(a, b, keys):
    """Three-way compare (cmp-style) of dicts a and b over multiple keys.

    keys is a sequence of key names, or (key, order) tuples where order
    'decrease'/1 inverts that key's comparison. Missing keys compare as
    None. Returns -1, 0 or 1.
    """
    for key in keys:
        if type(key) == tuple:
            skey, order = key
        else:
            skey, order = key, 'increase'

        av = a.get(skey)
        bv = b.get(skey)
        descending = (order == 'decrease' or order == 1)
        if av > bv:
            return -1 if descending else 1
        elif av < bv:
            return 1 if descending else -1
        # Equal (or unordered) on this key: fall through to the next one.

    return 0
+
+
def multisort_dictlist(dict_list, keys):
    """Return a shallow copy of dict_list stably sorted on multiple keys.

    keys is passed straight to dict_compare: a sequence of key names or
    (key, order) tuples, where order 'decrease'/1 sorts that key
    descending.

    Fix: the original used list.sort(cmp=...), which only exists in
    Python 2 (and shadowed the builtin cmp); use functools.cmp_to_key
    where available, falling back to cmp= on pre-2.7 interpreters.
    """
    listcopy = copy.copy(dict_list)
    compare = lambda a, b: dict_compare(a, b, keys)
    try:
        from functools import cmp_to_key  # Python 2.7+ / 3.2+
        listcopy.sort(key=cmp_to_key(compare))
    except ImportError:
        # Legacy interpreters: comparison-function sort.
        listcopy.sort(cmp=compare)
    return listcopy
+
+
def find_content_in_dictlist(dict_list, content, key='infohash'):
    """Locate content in dict_list by matching the given key.

    Returns the index of the first match, -1 when nothing matches, or
    False when content itself has no truthy value for key.
    (Historical quirk: callers must beware that False == 0.)
    """
    wanted = content.get(key)
    if not wanted:
        print('Error: content had no content_name')
        return False
    for pos, entry in enumerate(dict_list):
        if wanted == entry.get(key):
            return pos
    return -1
+
def remove_torrent_from_list(list, content, key = 'infohash'):
    """Remove the entry matching content[key] from list in place
    (convenience alias for remove_data_from_list)."""
    remove_data_from_list(list, content, key)
+
def remove_data_from_list(list, content, key = 'infohash'):
    """Delete from list (in place) the first entry whose key matches
    content[key]; no-op when there is no match.

    NOTE(review): a False return (== 0) from find_content_in_dictlist,
    which happens when content lacks key, would delete index 0 here —
    pre-existing quirk, confirm callers always pass a keyed content.
    """
    pos = find_content_in_dictlist(list, content, key)
    if pos != -1:
        del list[pos]
+    
def sortList(list_to_sort, list_key, order='decrease'):
    """Sort list_to_sort according to the parallel list_key values.

    order 'decrease' (the default) sorts by descending key; any other
    value ascends. Returns a new list; inputs are left untouched.
    """
    paired = list(zip(list_key, list_to_sort))
    paired.sort()
    if order == 'decrease':
        paired.reverse()
    return [item for _key, item in paired]
+
def getPlural(n):
    """Return the plural suffix for a count: '' when n == 1, else 's'."""
    return '' if n == 1 else 's'
+
+
def find_prog_in_PATH(prog):
    """Search the PATH environment variable for prog.

    Returns the full path of the first readable+executable candidate,
    or None when prog is not found.
    """
    envpath = os.path.expandvars('${PATH}')
    separator = ';' if sys.platform == 'win32' else ':'
    for directory in envpath.split(separator):
        candidate = os.path.join(directory, prog)
        # Readable and executable counts as "found".
        if os.access(candidate, os.R_OK | os.X_OK):
            return candidate
    return None
+    
def hostname_or_ip2ip(hostname_or_ip):
    """Return the dotted-quad IPv4 address for hostname_or_ip, doing a DNS
    lookup only when the argument is not already numeric.
    Returns None when resolution fails entirely."""
    # Arno: don't DNS resolve always, grabs lock on most systems
    ip = None
    try:
        # test that hostname_or_ip contains a xxx.xxx.xxx.xxx string
        socket.inet_aton(hostname_or_ip)
        ip = hostname_or_ip

    except:
        try:
            # dns-lookup for hostname_or_ip into an ip address
            ip = socket.gethostbyname(hostname_or_ip)
            # Superpeer hostnames are expected to arrive unresolved; anything
            # else should already have been an IP, hence the warning.
            if not hostname_or_ip.startswith("superpeer"):
                print >>sys.stderr,"hostname_or_ip2ip: resolved ip from hostname, an ip should have been provided", hostname_or_ip

        except:
            print >>sys.stderr,"hostname_or_ip2ip: invalid hostname", hostname_or_ip
            print_exc()

    return ip
+
+
def get_collected_torrent_filename(infohash):
    """Map an infohash to its on-disk .torrent cache filename.

    Note: the name is the hex SHA1 *of the infohash itself* (which is
    already a SHA1 digest), not the infohash rendered in hex.
    Arno: infohash-in-hex would have been the better choice.
    """
    return sha(infohash).hexdigest() + '.torrent'
+    
+
def uintToBinaryString(uint, length=4):
    '''
    Converts an unsigned integer into its big-endian binary representation.
    
    @type uint: int
    @param uint: an unsigned integer to convert into binary data.
    
    @type length: int
    @param length: the number of bytes the resulting binary
                   string should have
                   
    @rtype: a binary string
    @return: a binary string of exactly `length` bytes. Each element in
            the string is one byte of data.
                   
    @precondition: uint >= 0 and uint < 2**(length*8)
    '''
    assert 0 <= uint < 2**(length*8), "Cannot represent string"
    # Fix: strip Python 2's long-literal 'L' suffix *before* zero-padding.
    # The old code padded first and stripped after, which left an
    # odd-length hex string for small long values and made
    # binascii.unhexlify raise.
    hexString = hex(uint)[2:]
    if hexString.endswith('L'):
        hexString = hexString[:-1]
    hexString = "{0:0>{1}}".format(hexString, length*2)
    return binascii.unhexlify(hexString)
+
def binaryStringToUint(bstring):
    '''
    Converts a binary string into an unsigned integer
    
    @param bstring: a string of binary data
    
    @return a non-negative integer representing the 
            value of the binary data interpreted as a
            big-endian unsigned integer 
    '''
    return int(binascii.hexlify(bstring), 16)
+
+
+    
+
+
if __name__=='__main__':

    # Ad-hoc smoke test for multisort_dictlist: sort by Web2 flag first,
    # then by descending swarm size.
    torrents = [
        {'name': 'a', 'swarmsize': 12},
        {'name': 'b', 'swarmsize': 24},
        {'name': 'c', 'swarmsize': 18, 'Web2': True},
        {'name': 'b', 'swarmsize': 36, 'Web2': True},
    ]
    print(multisort_dictlist(torrents, ["Web2", ("swarmsize", "decrease")]))
diff --git a/instrumentation/next-share/BaseLib/Core/Utilities/win32regchecker.py b/instrumentation/next-share/BaseLib/Core/Utilities/win32regchecker.py
new file mode 100644 (file)
index 0000000..04ec788
--- /dev/null
@@ -0,0 +1,113 @@
+# Written by ABC authors and Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import os
+from traceback import print_exc
+
# On Windows, bind the real registry hive handles from _winreg; elsewhere
# use dummy integer placeholders so the module-level names still resolve
# (the reader methods below bail out early when sys.platform != 'win32').
if (sys.platform == 'win32'):
    import _winreg

    # short for PyHKEY from "_winreg" module
    HKCR = _winreg.HKEY_CLASSES_ROOT
    HKLM = _winreg.HKEY_LOCAL_MACHINE
    HKCU = _winreg.HKEY_CURRENT_USER
else:
    HKCR = 0
    HKLM = 1
    HKCU = 2

# Set True for verbose registry tracing on stderr.
DEBUG = False
+
class Win32RegChecker:
    """Thin convenience wrapper around _winreg for reading and writing
    Windows registry values. The read methods return None on any platform
    other than win32, or when the key/value cannot be read."""
    def __init__(self):
        pass

    def readRootKey(self,key_name,value_name=""):
        """Read value_name under HKEY_CLASSES_ROOT\\key_name."""
        return self.readKey(HKCR,key_name,value_name)
        
    def readKey(self,hkey,key_name,value_name=""):
        """Read a single registry value; returns the value data or None."""
        if (sys.platform != 'win32'):
            return None
            
        try:
            # test that shell/open association with ABC exist
            if DEBUG:
                print >>sys.stderr,"win32regcheck: Opening",key_name,value_name
            full_key = _winreg.OpenKey(hkey, key_name, 0, _winreg.KEY_READ)
            
            if DEBUG:
                print >>sys.stderr,"win32regcheck: Open returned",full_key
            
            value_data, value_type = _winreg.QueryValueEx(full_key, value_name)
            if DEBUG:
                print >>sys.stderr,"win32regcheck: Read",value_data,value_type
            _winreg.CloseKey(full_key)
                    
            return value_data
        except:
            # NOTE(review): traceback is printed even when DEBUG is False —
            # confirm this noise is intended on missing keys.
            print_exc(file=sys.stderr)
            # error, test failed, key don't exist
            # (could also indicate a unicode error)
            return None


    def readKeyRecursively(self,hkey,key_name,value_name=""):
        """Like readKey, but opens each backslash-separated component of
        key_name one level at a time, closing all intermediate handles."""
        if (sys.platform != 'win32'):
            return None
            
        lasthkey = hkey
        try:
            toclose = []
            keyparts = key_name.split('\\')
            # NOTE(review): unconditional debug print (not guarded by DEBUG).
            print >>sys.stderr,"win32regcheck: keyparts",keyparts
            for keypart in keyparts:
                if keypart == '':
                    continue
                if DEBUG:
                    print >>sys.stderr,"win32regcheck: Opening",keypart
                full_key = _winreg.OpenKey(lasthkey, keypart, 0, _winreg.KEY_READ)
                lasthkey = full_key
                toclose.append(full_key)
            
            if DEBUG:
                print >>sys.stderr,"win32regcheck: Open returned",full_key
            
            value_data, value_type = _winreg.QueryValueEx(full_key, value_name)
            if DEBUG:
                print >>sys.stderr,"win32regcheck: Read",value_data,value_type
            for hkey in toclose:
                _winreg.CloseKey(hkey)
                    
            return value_data
        except:
            print_exc()
            # error, test failed, key don't exist
            # (could also indicate a unicode error)
            return None


    def writeKey(self,hkey,key_name,value_name,value_data,value_type):
        """Create key_name under hkey and set value_name to value_data.
        Returns True on success, False when the key cannot be created.
        NOTE(review): unlike the read methods there is no non-win32 guard
        here; off-Windows this raises NameError — confirm callers gate it."""
        try:
            # kreate desired key in Windows register
            full_key = _winreg.CreateKey(hkey, key_name)
        except EnvironmentError:
            return False;
        # set desired value in created Windows register key
        _winreg.SetValueEx(full_key, value_name, 0, value_type, value_data)
        # close Windows register key
        _winreg.CloseKey(full_key)
            
        return True
+
+
+
if __name__ == "__main__":
    # Ad-hoc test: look up the players registered for Windows Media files.
    w = Win32RegChecker()
    winfiletype = w.readRootKey(".wmv")
    # Fix: use raw strings for the registry paths. The old literals relied
    # on unrecognized escapes (\s, \p, \c, \o) being left alone, which is a
    # SyntaxWarning in modern Python and slated to become an error; the raw
    # strings have byte-identical values.
    playkey = winfiletype + r"\shell\play\command"
    urlplay = w.readRootKey(playkey)
    print(urlplay)
    openkey = winfiletype + r"\shell\open\command"
    urlopen = w.readRootKey(openkey)
    print(urlopen)
diff --git a/instrumentation/next-share/BaseLib/Core/Video/LiveSourceAuth.py b/instrumentation/next-share/BaseLib/Core/Video/LiveSourceAuth.py
new file mode 100644 (file)
index 0000000..65fdc5d
--- /dev/null
@@ -0,0 +1,477 @@
+# written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+from traceback import print_exc
+from cStringIO import StringIO
+import struct
+import time
+import array
+
+from BaseLib.Core.Utilities.Crypto import sha,RSA_pub_key_from_der
+from BaseLib.Core.osutils import *
+from M2Crypto import EC
+from BaseLib.Core.osutils import *
+from types import StringType
+
+DEBUG = False
+
class Authenticator:
    """Abstract base for live-stream piece authenticators.

    Tracks the piece geometry and the source's sequence number;
    subclasses implement the actual signing and verification.
    """
    
    def __init__(self,piecelen,npieces):
        self.piecelen = piecelen    # full piece size in bytes, incl. any signature
        self.npieces = npieces      # number of pieces in the live window
        self.seqnum = 0L            # last source sequence number (Python 2 long)
    
    def get_piece_length(self):
        return self.piecelen
    
    def get_npieces(self):
        return self.npieces
    
    def get_content_blocksize(self):
        # Payload bytes per piece after signature overhead; subclass-defined.
        pass
    
    def sign(self,content):
        # Return the list of byte chunks forming the signed piece; subclass-defined.
        pass
    
    def verify(self,piece):
        # Return whether the piece's embedded signature is valid; subclass-defined.
        pass
    
    def get_content(self,piece):
        # Strip the signature and return the payload; subclass-defined.
        pass

    def get_source_seqnum(self):
        return self.seqnum

    def set_source_seqnum(self,seqnum):
        self.seqnum = seqnum
+
+
class NullAuthenticator(Authenticator):
    """Authenticator that adds no signatures: pieces pass through
    untouched and every piece verifies successfully."""

    def __init__(self, piecelen, npieces):
        Authenticator.__init__(self, piecelen, npieces)
        # No signature overhead, so the payload fills the whole piece.
        self.contentblocksize = piecelen

    def get_content_blocksize(self):
        return self.contentblocksize

    def sign(self, content):
        # The "signed" piece is simply the content itself.
        return [content]

    def verify(self, piece):
        return True

    def get_content(self, piece):
        return piece
+
+
class ECDSAAuthenticator(Authenticator):
    """ Authenticator who places a ECDSA signature in the last part of a
    piece. In particular, the sig consists of:
    - an 8 byte sequence number
    - an 8 byte real-time timestamp
    - a 1 byte length field followed by
    - a variable-length ECDSA signature in ASN.1, (max 64 bytes)  
    - optionally 0x00 padding bytes, if the ECDSA sig is less than 64 bytes,
    to give a total of 81 bytes.
    """
    
    SEQNUM_SIZE = 8
    RTSTAMP_SIZE = 8
    LENGTH_SIZE = 1
    MAX_ECDSA_ASN1_SIGSIZE = 64
    EXTRA_SIZE = SEQNUM_SIZE + RTSTAMP_SIZE
    # = seqnum + rtstamp + 1 byte length + MAX_ECDSA, padded
    # put seqnum + rtstamp directly after content, so we calc the sig directly 
    # from the received buffer.
    OUR_SIGSIZE = EXTRA_SIZE+LENGTH_SIZE+MAX_ECDSA_ASN1_SIGSIZE 
    
    def __init__(self,piecelen,npieces,keypair=None,pubkeypem=None):
        # keypair: EC keypair for signing (source side); pubkeypem:
        # DER-encoded EC public key for verification (client side).
        
        print >>sys.stderr,"ECDSAAuth: npieces",npieces
        
        Authenticator.__init__(self,piecelen,npieces)
        self.contentblocksize = piecelen-self.OUR_SIGSIZE
        self.keypair = keypair
        if pubkeypem is not None:
            #print >>sys.stderr,"ECDSAAuth: pubkeypem",`pubkeypem`
            self.pubkey = EC.pub_key_from_der(pubkeypem)
        else:
            self.pubkey = None
        self.startts = None    # oldest acceptable piece timestamp, learned lazily

    def get_content_blocksize(self):
        return self.contentblocksize
    
    def sign(self,content):
        """Return [content, seqnum+timestamp, length byte, ASN.1 sig(, padding)]
        — the chunks that concatenated form one signed piece."""
        rtstamp = time.time()
        #print >>sys.stderr,"ECDSAAuth: sign: ts %.5f s" % rtstamp
        
        extra = struct.pack('>Qd', self.seqnum,rtstamp)
        self.seqnum += 1L

        sig = ecdsa_sign_data(content,extra,self.keypair)
        # The sig returned is either 64 or 63 bytes long (62 also possible I 
        # guess). Therefore we transmit size as 1 bytes and fill to 64 bytes.
        lensig = chr(len(sig))
        if len(sig) != self.MAX_ECDSA_ASN1_SIGSIZE:
            # Note: this is not official ASN.1 padding. Also need to modify
            # the header length for that I assume.
            diff = self.MAX_ECDSA_ASN1_SIGSIZE-len(sig)
            padding = '\x00' * diff 
            return [content,extra,lensig,sig,padding]
        else:
            return [content,extra,lensig,sig]
        
    def verify(self,piece,index):
        """ A piece is valid if:
        - the signature is correct,
        - the seqnum % npieces == piecenr.
        - the seqnum is no older than self.seqnum - npieces
        @param piece The piece data as received from peer
        @param index The piece number as received from peer
        @return Boolean
        """
        try:
            # Can we do this without memcpy?
            #print >>sys.stderr,"ECDSAAuth: verify",len(piece)
            extra = piece[-self.OUR_SIGSIZE:-self.OUR_SIGSIZE+self.EXTRA_SIZE]
            lensig = ord(piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE])
            if lensig > self.MAX_ECDSA_ASN1_SIGSIZE:
                print >>sys.stderr,"ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"lensig wrong",lensig
                return False
            #print >>sys.stderr,"ECDSAAuth: verify lensig",lensig
            # diff <= 0: number of padding bytes to drop from the sig's tail.
            diff = lensig-self.MAX_ECDSA_ASN1_SIGSIZE
            if diff == 0:
                sig = piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE+self.LENGTH_SIZE:]
            else:
                sig = piece[-self.OUR_SIGSIZE+self.EXTRA_SIZE+self.LENGTH_SIZE:diff]
            content = piece[:-self.OUR_SIGSIZE]
            if DEBUG:
                print >>sys.stderr,"ECDSAAuth: verify piece",index,"sig",`sig`
                print >>sys.stderr,"ECDSAAuth: verify dig",sha(content).hexdigest()
        
            ret = ecdsa_verify_data_pubkeyobj(content,extra,self.pubkey,sig)
            if ret:
                (seqnum, rtstamp) = self._decode_extra(piece)
                
                if DEBUG:
                    print >>sys.stderr,"ECDSAAuth: verify piece",index,"seq",seqnum,"ts %.5f s" % rtstamp,"ls",lensig
                
                # Replay/position checks: seqnum must map onto this piece
                # index and be recent enough (within half the live window).
                mod = seqnum % self.get_npieces()
                thres = self.seqnum - self.get_npieces()/2
                if seqnum <= thres:
                    print >>sys.stderr,"ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"old seqnum",seqnum,"<<",self.seqnum
                    return False
                elif mod != index:
                    print >>sys.stderr,"ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"expected",mod
                    return False 
                elif self.startts is not None and rtstamp < self.startts:
                    print >>sys.stderr,"ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"older than oldest known ts",rtstamp,self.startts
                    return False
                else:
                    self.seqnum = max(self.seqnum,seqnum)
                    if self.startts is None:
                        self.startts = rtstamp-300.0 # minus 5 min in case we read piece N+1 before piece N
                        print >>sys.stderr,"ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@: startts",self.startts
            else:
                print >>sys.stderr,"ECDSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ piece",index,"failed sig"
            
            return ret
        except:
            print_exc()
            return False 

    def get_content(self,piece):
        # Strip our trailing signature block, leaving the payload.
        return piece[:-self.OUR_SIGSIZE]

    # Extra fields
    def get_seqnum(self,piece):
        (seqnum, rtstamp) = self._decode_extra(piece)
        return seqnum

    def get_rtstamp(self,piece):
        (seqnum, rtstamp) = self._decode_extra(piece)
        return rtstamp
        
    def _decode_extra(self,piece):
        """Unpack (seqnum, rtstamp) from the piece's trailing extra field."""
        extra = piece[-self.OUR_SIGSIZE:-self.OUR_SIGSIZE+self.EXTRA_SIZE]
        if type(extra) == array.array:
            extra = extra.tostring()
        return struct.unpack('>Qd',extra)
+
+    
def ecdsa_sign_data(plaintext, extra, ec_keypair):
    """Return the ASN.1-encoded ECDSA signature over SHA1(plaintext + extra)."""
    h = sha(plaintext)
    h.update(extra)
    return ec_keypair.sign_dsa_asn1(h.digest())
+    
def ecdsa_verify_data_pubkeyobj(plaintext, extra, pubkey, blob):
    """Verify an ASN.1 ECDSA signature blob over SHA1(plaintext + extra)."""
    h = sha(plaintext)
    h.update(extra)
    return pubkey.verify_dsa_asn1(h.digest(), blob)
+    
+
+
+
class RSAAuthenticator(Authenticator):
    """ Authenticator who places a RSA signature in the last part of a piece. 
    In particular, the sig consists of:
    - an 8 byte sequence number
    - an 8 byte real-time timestamp
    - a variable-length RSA signature, length equivalent to the keysize in bytes  
    to give a total of 16+(keysize/8) bytes.
    """
    
    SEQNUM_SIZE = 8
    RTSTAMP_SIZE = 8
    EXTRA_SIZE = SEQNUM_SIZE + RTSTAMP_SIZE
    # put seqnum + rtstamp directly after content, so we calc the sig directly 
    # from the received buffer.
    def our_sigsize(self):
        # Total per-piece overhead: extra fields + RSA signature.
        return self.EXTRA_SIZE+self.rsa_sigsize() 
    
    def rsa_sigsize(self):
        # Key size in bits / 8 = signature size in bytes.
        return len(self.pubkey)/8
    
    def __init__(self,piecelen,npieces,keypair=None,pubkeypem=None):
        # keypair: RSA keypair for signing (source side); pubkeypem:
        # DER-encoded RSA public key for verification (client side).
        # Without pubkeypem, the keypair doubles as the verify key.
        Authenticator.__init__(self,piecelen,npieces)
        self.keypair = keypair
        if pubkeypem is not None:
            #print >>sys.stderr,"ECDSAAuth: pubkeypem",`pubkeypem`
            self.pubkey = RSA_pub_key_from_der(pubkeypem)
        else:
            self.pubkey = self.keypair
        self.contentblocksize = piecelen-self.our_sigsize()
        self.startts = None    # oldest acceptable piece timestamp, learned lazily

    def get_content_blocksize(self):
        return self.contentblocksize
    
    def sign(self,content):
        """Return [content, seqnum+timestamp, RSA sig] — the chunks that
        concatenated form one signed piece."""
        rtstamp = time.time()
        #print >>sys.stderr,"ECDSAAuth: sign: ts %.5f s" % rtstamp
        
        extra = struct.pack('>Qd', self.seqnum,rtstamp)
        self.seqnum += 1L

        sig = rsa_sign_data(content,extra,self.keypair)
        return [content,extra,sig]
        
    def verify(self,piece,index):
        """ A piece is valid if:
        - the signature is correct,
        - the seqnum % npieces == piecenr.
        - the seqnum is no older than self.seqnum - npieces
        @param piece The piece data as received from peer
        @param index The piece number as received from peer
        @return Boolean
        """
        try:
            # Can we do this without memcpy?
            #print >>sys.stderr,"ECDSAAuth: verify",len(piece)
            extra = piece[-self.our_sigsize():-self.our_sigsize()+self.EXTRA_SIZE]
            sig = piece[-self.our_sigsize()+self.EXTRA_SIZE:]
            content = piece[:-self.our_sigsize()]
            #if DEBUG:
            #    print >>sys.stderr,"RSAAuth: verify piece",index,"sig",`sig`
            #    print >>sys.stderr,"RSAAuth: verify dig",sha(content).hexdigest()
        
            ret = rsa_verify_data_pubkeyobj(content,extra,self.pubkey,sig)
            if ret:
                (seqnum, rtstamp) = self._decode_extra(piece)
                
                if DEBUG:
                    print >>sys.stderr,"RSAAuth: verify piece",index,"seq",seqnum,"ts %.5f s" % rtstamp
                
                # Replay/position checks: seqnum must map onto this piece
                # index and be recent enough (within half the live window).
                mod = seqnum % self.get_npieces()
                thres = self.seqnum - self.get_npieces()/2
                if seqnum <= thres:
                    print >>sys.stderr,"RSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"old seqnum",seqnum,"<<",self.seqnum
                    return False
                elif mod != index:
                    print >>sys.stderr,"RSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"expected",mod
                    return False
                elif self.startts is not None and rtstamp < self.startts:
                    print >>sys.stderr,"RSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ failed piece",index,"older than oldest known ts",rtstamp,self.startts
                    return False
                else:
                    self.seqnum = max(self.seqnum,seqnum)
                    if self.startts is None:
                        self.startts = rtstamp-300.0 # minus 5 min in case we read piece N+1 before piece N
                        print >>sys.stderr,"RSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@: startts",self.startts
            else:
                print >>sys.stderr,"RSAAuth: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ piece",index,"failed sig"
            
            return ret
        except:
            print_exc()
            return False 

    def get_content(self,piece):
        # Strip our trailing signature block, leaving the payload.
        return piece[:-self.our_sigsize()]

    # Extra fields
    def get_seqnum(self,piece):
        (seqnum, rtstamp) = self._decode_extra(piece)
        return seqnum

    def get_rtstamp(self,piece):
        (seqnum, rtstamp) = self._decode_extra(piece)
        return rtstamp
        
    def _decode_extra(self,piece):
        """Unpack (seqnum, rtstamp) from the piece's trailing extra field."""
        extra = piece[-self.our_sigsize():-self.our_sigsize()+self.EXTRA_SIZE]
        if type(extra) == array.array:
            extra = extra.tostring()
        return struct.unpack('>Qd',extra)
+
+
def rsa_sign_data(plaintext, extra, rsa_keypair):
    """Return the RSA signature over SHA1(plaintext + extra)."""
    h = sha(plaintext)
    h.update(extra)
    return rsa_keypair.sign(h.digest())
+    
def rsa_verify_data_pubkeyobj(plaintext, extra, pubkey, sig):
    """Verify an RSA signature over SHA1(plaintext + extra).

    sig arrives as an array.array straight from the network buffer;
    M2Crypto's RSA verify requires a string/Unicode object, hence the
    tostring() conversion (possibly an extra memcpy).
    """
    h = sha(plaintext)
    h.update(extra)
    return pubkey.verify(h.digest(), sig.tostring())
+
+
+
+
+
+
+    
class AuthStreamWrapper:
    """ Wrapper around the stream returned by VideoOnDemand/MovieOnDemandTransporter
    that strips of the signature info
    """
    
    def __init__(self,inputstream,authenticator):
        self.inputstream = inputstream
        self.buffer = StringIO()    # scratch buffer for reassembling pieces
        self.authenticator = authenticator
        self.piecelen = authenticator.get_piece_length()
        self.last_rtstamp = None    # source timestamp of the last piece handed out

    def read(self,numbytes=None):
        """Read exactly one piece's worth of authenticated payload.
        Only reads of at least one piece's content size are supported;
        returns '' at end-of-stream.
        NOTE(review): a full piece is consumed from the input before
        numbytes is validated."""
        rawdata = self._readn(self.piecelen)
        if len(rawdata) == 0:
            # EOF
            return rawdata
        content = self.authenticator.get_content(rawdata)
        self.last_rtstamp = self.authenticator.get_rtstamp(rawdata)
        if numbytes is None or numbytes < 0:
            raise ValueError('Stream has unlimited size, read all not supported.')
        elif numbytes < len(content):
            # TODO: buffer unread data for next read
            raise ValueError('reading less than piecesize not supported yet')
        else:
            return content

    def get_generation_time(self):
        """ Returns the time at which the last read piece was generated at the source. """
        return self.last_rtstamp
    
    def seek(self,pos,whence=os.SEEK_SET):
        # Live streams cannot seek; tolerate the common "rewind to 0" call.
        if pos == 0 and whence == os.SEEK_SET:
            print >>sys.stderr,"authstream: seek: Ignoring seek 0 in live"
        else:
            raise ValueError("authstream does not support seek")

    def close(self):
        self.inputstream.close()

    def available(self):
        return self.inputstream.available()


    # Internal method
    def _readn(self,n):
        """ read exactly n bytes from inputstream, block if unavail """
        # NOTE(review): if the input stream hits EOF partway through a
        # piece, data already written to self.buffer is abandoned and ''
        # is returned, leaving the scratch buffer position inconsistent —
        # confirm EOF is terminal for this stream.
        nwant = n
        while True:
            data = self.inputstream.read(nwant)
            if len(data) == 0:
                return data
            nwant -= len(data)
            self.buffer.write(data)
            if nwant == 0:
                break
        self.buffer.seek(0)
        data = self.buffer.read(n)
        self.buffer.seek(0)
        return data
        
+        
+
+
class VariableReadAuthStreamWrapper:
    """ Wrapper around AuthStreamWrapper that allows reading of variable
    number of bytes. TODO: optimize whole stack of AuthWrapper, 
    MovieTransportWrapper, MovieOnDemandTransporter
    """

    def __init__(self, inputstream, piecelen):
        self.inputstream = inputstream
        self.piecelen = piecelen
        # Leftover bytes from the last fixed-size piece read.
        self.buffer = ''

    def read(self, numbytes=None):
        """Read at most numbytes; numbytes must be a non-negative int."""
        if numbytes is None or numbytes < 0:
            raise ValueError('Stream has unlimited size, read all not supported.')
        return self._readn(numbytes)

    def get_generation_time(self):
        """ Returns the time at which the last read piece was generated at the source. """
        return self.inputstream.get_generation_time()

    def seek(self, pos, whence=os.SEEK_SET):
        return self.inputstream.seek(pos, whence=whence)

    def close(self):
        self.inputstream.close()

    def available(self):
        return self.inputstream.available()

    # Internal method
    def _readn(self, nwant):
        """ read *at most* nwant bytes, refilling the local buffer from the
        fixed-piece-size underlying stream when it runs dry """
        if not self.buffer:
            chunk = self.inputstream.read(self.piecelen)
            if len(chunk) == 0:
                return chunk      # EOF propagates as the empty string
            self.buffer = chunk

        tosend = min(nwant, len(self.buffer))
        result = self.buffer[:tosend]
        self.buffer = self.buffer[tosend:]
        return result
+    
+        
diff --git a/instrumentation/next-share/BaseLib/Core/Video/MovieTransport.py b/instrumentation/next-share/BaseLib/Core/Video/MovieTransport.py
new file mode 100644 (file)
index 0000000..d0bef70
--- /dev/null
@@ -0,0 +1,81 @@
+# Written by Jan David Mol, Arno Bakker
+# see LICENSE.txt for license information
+
+
+import os,sys
+
+from BaseLib.Core.osutils import *
+
+DEBUG = False
+
+class MovieTransport:
+    
+    def __init__(self):
+        pass
+        
+    def start( self, bytepos = 0 ):
+        pass
+    
+    def size(self ):
+        pass
+
+    def read(self):
+        pass
+        
+    def stop(self):
+        pass
+
+    def done(self):
+        pass
+    
+    def get_mimetype(self):
+        pass
+    def set_mimetype(self,mimetype):
+        pass
+
+    def available(self):
+        pass
+    
+class MovieTransportStreamWrapper:
+    """ Provide a file-like interface """
+    def __init__(self,mt):
+        self.mt = mt
+        self.started = False
+
+    def read(self,numbytes=None):
+        if DEBUG:
+            print >>sys.stderr,"MovieTransportStreamWrapper: read",numbytes
+
+        if not self.started:
+            self.mt.start(0)
+            self.started = True
+        if self.mt.done():
+            return ''
+        data = self.mt.read(numbytes)
+        if data is None:
+            print >>sys.stderr,"MovieTransportStreamWrapper: mt read returns None"
+            data = ''
+        return data
+
+    def seek(self,pos,whence=os.SEEK_SET):
+        # TODO: shift play_pos in PiecePicking + interpret whence
+        if DEBUG:
+            print >>sys.stderr,"MovieTransportStreamWrapper: seek:",pos,"whence",whence
+        self.mt.seek(pos,whence=whence)
+        # Arno, 2010-01-08: seek also means we've started.
+        self.started = True
+    
+    def close(self):
+        if DEBUG:
+            print >>sys.stderr,"MovieTransportStreamWrapper: close"
+        self.mt.stop()
+
+    def available(self):
+        return self.mt.available()
+    
+    def get_generation_time(self):
+        # Overridden by AuthStreamWrapper normally. Added to give sane warning
+        # when playing unauthenticated stream as if it had auth.
+        raise ValueError("This is an unauthenticated stream that provides no timestamp")
diff --git a/instrumentation/next-share/BaseLib/Core/Video/PiecePickerSVC.py b/instrumentation/next-share/BaseLib/Core/Video/PiecePickerSVC.py
new file mode 100644 (file)
index 0000000..e0507cd
--- /dev/null
@@ -0,0 +1,652 @@
+# Written by Jan David Mol, Arno Bakker, Riccardo Petrocco, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+import time
+import random
+from traceback import print_exc
+
+from BaseLib.Core.BitTornado.BT1.PiecePicker import PiecePicker 
+if __debug__:
+    from BaseLib.Core.BitTornado.BT1.Downloader import print_chunks
+
+# percent piece loss to emulate -- we just don't request this percentage of the pieces
+# only implemented for live streaming
+PIECELOSS = 0
+
+DEBUG = False
+DEBUG_CHUNKS = False
+DEBUGPP = False
+
+def rarest_first( has_dict, rarity_list, filter = lambda x: True ):
+    """ Select the rarest of pieces in has_dict, according
+        to the rarities in rarity_list. Breaks ties uniformly
+        at random. Additionally, `filter' is applied to select
+        the pieces we can return. """
+
+    """ Strategy:
+        - `choice' is the choice so far
+        - `n' is the number of pieces we could choose from so far
+        - `rarity' is the rarity of the choice so far
+
+        Every time we see a rarer piece, we reset our choice.
+        Every time we see a piece of the same rarity we're looking for,
+        we select it (overriding the previous choice) with probability 1/n.
+        This leads to a uniformly selected piece in one pass, be it that
+        we need more random numbers than when doing two passes. """
+
+    choice = None
+    rarity = None
+    n = 0
+
+    for k in (x for x in has_dict if filter(x)):
+        r = rarity_list[k]
+
+        if rarity is None or r < rarity:
+            rarity = r
+            n = 1
+            choice = k
+        elif r == rarity:
+            n += 1
+            if random.uniform(0,n) == 0: # uniform selects from [0,n)
+                choice = k
+
+    return choice
+
+class PiecePickerSVC(PiecePicker):
+    """ Implements piece picking for streaming video. Keeps track of playback
+        point and avoids requesting obsolete pieces. """
+
+    # order of initialisation and important function calls
+    #   PiecePicker.__init__              (by BitTornado.BT1Download.__init__)
+    #   PiecePicker.complete              (by hash checker, for pieces on disk)
+    #   MovieSelector.__init__
+    #   PiecePicker.set_download_range    (indirectly by MovieSelector.__init__)
+    #   MovieOnDemandTransporter.__init__ (by BitTornado.BT1Download.startEngine)
+    #   PiecePicker.set_bitrate           (by MovieOnDemandTransporter)
+    #   PiecePicker.set_transporter       (by MovieOnDemandTransporter)
+    #
+    #   PiecePicker._next                 (once connections are set up)
+    #
+    #   PiecePicker.complete              (by hash checker, for pieces received)
+
+    # relative size of mid-priority set
+    MU = 4
+
+    def __init__(self, numpieces,
+                 rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3,
+                 priority_step = 20, helper = None, coordinator = None, rate_predictor = None, piecesize = 0):
+        PiecePicker.__init__( self, numpieces, rarest_first_cutoff, rarest_first_priority_cutoff,
+                              priority_step, helper, coordinator, rate_predictor )
+
+        # maximum existing piece number, to avoid scanning beyond it in next()
+        self.maxhave = 0
+
+        # some statistics
+        self.stats = {}
+        self.stats["high"] = 0
+        self.stats["mid"] = 0
+        self.stats["low"] = 0
+
+        # playback module
+        self.transporter = None
+
+        # self.outstanding_requests contains (piece-id, begin,
+        # length):timestamp pairs for each outstanding request.
+        self.outstanding_requests = {}
+
+        # The playing_delay and buffering_delay give three values
+        # (min, max, offset) in seconds.
+        #
+        # The min tells how long before the cancel policy is allowed
+        # to kick in. We can not expect to receive a piece instantly,
+        # so we have to wait this time before having a download speed
+        # estimation.
+        #
+        # The max tells how long before we cancel the request. The
+        # request may also be canceled because the chunk will not be
+        # completed given the current download speed.
+        #
+        # The offset gives a grace period that is taken into account
+        # when choosing to cancel a request. For instance, when the
+        # peer download speed is too low to receive the chunk within 10
+        # seconds, a grace offset of 15 would ensure that the chunk is
+        # NOT canceled (useful while buffering)
+        self.playing_delay = (5, 20, -0.5)
+        self.buffering_delay = (7.5, 30, 10)
+        
+    def set_transporter(self, transporter):
+        self.transporter = transporter
+
+        # update its information -- pieces read from disk
+        download_range = self.videostatus.download_range()
+        for x in range(len(download_range)):
+            (f,l) = download_range[x]
+            for i in xrange(f, l):
+                if self.has[i]:
+                    self.transporter.complete( i, downloaded=False )
+
+    def set_videostatus(self,videostatus):
+        """ Download in a wrap-around fashion between pieces [0,numpieces).
+            Look at most delta pieces ahead from download_range[0].
+        """
+        self.videostatus = videostatus
+        videostatus.add_playback_pos_observer( self.change_playback_pos )
+
+    def is_interesting(self,piece):
+        if PIECELOSS and piece % 100 < PIECELOSS:
+            return False
+
+        if self.has[piece]:
+            return False
+
+        if not self.videostatus or self.videostatus.in_download_range( piece ):
+            return True
+
+        return False
+
+    def change_playback_pos(self, oldpos, newpos):
+        if oldpos is None:
+            # (re)initialise
+            valid = self.is_interesting
+
+            for d in self.peer_connections.values():
+                interesting = {}
+                has = d["connection"].download.have
+                for i in xrange(self.videostatus.first_piece,self.videostatus.last_piece+1):
+                    if has[i] and valid(i):
+                        interesting[i] = 1
+
+                d["interesting"] = interesting
+        else:
+            # playback position incremented -- remove timed out piece
+            for d in self.peer_connections.values():
+                d["interesting"].pop(oldpos,0)
+
+    def got_have(self, piece, connection=None):
+        # if DEBUG:
+        #     print >>sys.stderr,"PiecePickerStreaming: got_have:",piece
+        self.maxhave = max(self.maxhave,piece)
+        PiecePicker.got_have( self, piece, connection )
+        if self.transporter:
+            self.transporter.got_have( piece )
+
+        if self.is_interesting(piece):
+            self.peer_connections[connection]["interesting"][piece] = 1
+
+    def got_seed(self):
+        self.maxhave = self.numpieces
+        PiecePicker.got_seed( self )
+
+    def lost_have(self, piece):
+        PiecePicker.lost_have( self, piece )
+
+    def got_peer(self, connection):
+        PiecePicker.got_peer( self, connection )
+
+        self.peer_connections[connection]["interesting"] = {}
+
+    def lost_peer(self, connection):
+        PiecePicker.lost_peer( self, connection )
+
+    def got_piece(self, *request):
+        if request in self.outstanding_requests:
+            del self.outstanding_requests[request]
+        if self.transporter:
+            self.transporter.got_piece(*request)
+
+    def complete(self, piece):
+        # if DEBUG:
+        #     print >>sys.stderr,"PiecePickerStreaming: complete:",piece
+        PiecePicker.complete( self, piece )
+        if self.transporter:
+            self.transporter.complete( piece )
+
+        for request in self.outstanding_requests.keys():
+            if request[0] == piece:
+                del self.outstanding_requests[request]
+
+        # don't consider this piece anymore
+        for d in self.peer_connections.itervalues():
+            d["interesting"].pop(piece,0)
+
+    def num_nonempty_neighbours(self):
+        # return #neighbours who have something
+        return len( [c for c in self.peer_connections if c.download.have.numfalse < c.download.have.length] )
+
+    def pos_is_sustainable(self,fudge=2):
+        """
+            Returns whether we have enough data around us to support the current playback position.
+            If not, playback should pause, stall or reinitialised when pieces are lost.
+        """
+        vs = self.videostatus
+
+        # only holds for live streaming for now. theoretically, vod can have the same problem
+        # since data can be seeded in a 'live' fashion
+        if not vs.live_streaming:
+            if DEBUG:
+                print >>sys.stderr, "PiecePickerStreaming: pos is sustainable: not streaming live"
+            return True
+
+        # We assume the maximum piece number that is available at at least half of the neighbours
+        # to be sustainable. Although we only need a fixed number of neighbours with enough bandwidth,
+        # such neighbours may depart, hence we choose a relative trade-off.
+
+        # this means that our current playback position is sustainable if any future piece
+        # is owned by at least half of the peers
+
+        # ignore peers which have nothing
+        numconn = self.num_nonempty_neighbours()
+
+        if not numconn:
+            # not sustainable, but nothing we can do. Return True to avoid pausing
+            # and getting out of sync.
+            if DEBUG:
+                print >>sys.stderr, "PiecePickerStreaming: pos is sustainable: no neighbours with pieces"
+            return True
+
+        half = max( 1, numconn/2 )
+        skip = fudge # ignore the first 'fudge' pieces
+
+        for x in vs.generate_range( vs.download_range() ):
+            if skip > 0:
+                skip -= 1
+            elif self.numhaves[x] >= half:
+                if DEBUG:
+                    print >>sys.stderr, "PiecePickerStreaming: pos is sustainable: piece %s @ %s>%s peers (fudge=%s)" % (x,self.numhaves[x],half,fudge)
+                return True
+            else:
+                pass
+
+        if DEBUG:
+            print >>sys.stderr, "PiecePickerStreaming: pos is NOT sustainable playpos=%s fudge=%s numconn=%s half=%s numpeers=%s %s" % (vs.playback_pos,fudge,numconn,half,len(self.peer_connections),[x.get_ip() for x in self.peer_connections])
+
+        # too few neighbours own the future pieces. it's wise to pause and let neighbours catch up
+        # with us
+        return False
+
+
+    # next: selects next piece to download. adjusts wantfunc with filter for streaming; calls
+    #   _next: selects next piece to download. completes partial downloads first, if needed, otherwise calls
+    #     next_new: selects next piece to download. override this with the piece picking policy
+
+    def next(self, haves, wantfunc, sdownload, complete_first = False, helper_con = False, slowpieces=[], willrequest=True,connection=None,proxyhave=None):
+        def newwantfunc( piece ):
+            #print >>sys.stderr,"S",self.streaming_piece_filter( piece ),"!sP",not (piece in slowpieces),"w",wantfunc( piece )
+            return not (piece in slowpieces) and wantfunc( piece )
+
+        # fallback: original piece picker
+        p = PiecePicker.next(self, haves, newwantfunc, sdownload, complete_first, helper_con, slowpieces=slowpieces, willrequest=willrequest,connection=connection)
+        if DEBUGPP and self.videostatus.prebuffering:
+            print >>sys.stderr,"PiecePickerStreaming: original PP.next returns",p
+        if p is None and not self.videostatus.live_streaming:
+            # When the file we selected from a multi-file torrent is complete,
+            # we won't request any more pieces, so the normal way of detecting 
+            # we're done is not working and we won't tell the video player 
+            # we're playable. Do it here instead.
+            self.transporter.notify_playable()
+        return p
+
+    def _next(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None):
+        """ First, complete any partials if needed. Otherwise, select a new piece. """
+
+        #print >>sys.stderr,"PiecePickerStreaming: complete_first is",complete_first,"started",self.started
+
+        # cutoff = True:  random mode
+        #          False: rarest-first mode
+        cutoff = self.numgot < self.rarest_first_cutoff
+
+        # whether to complete existing partials first -- do so before the
+        # cutoff, or if forced by complete_first, but not for seeds.
+        #complete_first = (complete_first or cutoff) and not haves.complete()
+        complete_first = (complete_first or cutoff)
+
+        # most interesting piece
+        best = None
+
+        # interest level of best piece
+        bestnum = 2 ** 30
+
+        # select piece we started to download with best interest index.
+        for i in self.started:
+# 2fastbt_
+            if haves[i] and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)):
+# _2fastbt
+                if self.level_in_interests[i] < bestnum:
+                    best = i
+                    bestnum = self.level_in_interests[i]
+
+        if best is not None:
+            # found a piece -- return it if we are completing partials first
+            # or if there is a cutoff
+            if complete_first or (cutoff and len(self.interests) > self.cutoff):
+                return best
+
+        p = self.next_new(haves, wantfunc, complete_first, helper_con,willrequest=willrequest,connection=connection)
+        # if DEBUG:
+        #     print >>sys.stderr,"PiecePickerStreaming: next_new returns",p
+        return p
+
+    def check_outstanding_requests(self, downloads):
+        if not self.transporter:
+            return
+        
+        now = time.time()
+        cancel_requests = []
+        in_high_range = self.videostatus.in_high_range
+        playing_mode = self.videostatus.playing and not self.videostatus.paused
+        piece_due = self.transporter.piece_due
+        
+        if playing_mode:
+            # playing mode
+            min_delay, max_delay, offset_delay = self.playing_delay
+        else:
+            # buffering mode
+            min_delay, max_delay, offset_delay = self.buffering_delay
+
+        for download in downloads:
+            total_length = 0
+            download_rate = download.get_short_term_rate()
+            for piece_id, begin, length in download.active_requests:
+                # select policy for this piece
+        
+                try:
+                    time_request = self.outstanding_requests[(piece_id, begin, length)]
+                except KeyError:
+                    continue
+                
+                # add the length of this chunk to the total of bytes
+                # that needs to be downloaded
+                total_length += length
+        
+                # each request must be allowed at least some
+                # minimal time to be handled
+                if now < time_request + min_delay:
+                    continue
+
+                # high-priority pieces are eligible for
+                # cancellation. Others are not. They will eventually be
+                # eligible as they become important for playback.
+                if in_high_range(piece_id):
+                    if download_rate == 0:
+                        # we have not received anything in the last min_delay seconds
+                        if DEBUG: print >>sys.stderr, "PiecePickerStreaming: download not started yet for piece", piece_id, "chunk", begin, "on", download.ip
+                        cancel_requests.append((piece_id, begin, length))
+                        download.bad_performance_counter += 1
+
+                    else:
+                        if playing_mode:
+                            time_until_deadline = min(piece_due(piece_id), time_request + max_delay - now)
+                        else:
+                            time_until_deadline = time_request + max_delay - now
+                        time_until_download = total_length / download_rate
+
+                        # we have to cancel when the deadline can not be met
+                        if time_until_deadline < time_until_download - offset_delay:
+                            if DEBUG: print >>sys.stderr, "PiecePickerStreaming: download speed too slow for piece", piece_id, "chunk", begin, "on", download.ip, "Deadline in", time_until_deadline, "while estimated download in", time_until_download
+                            cancel_requests.append((piece_id, begin, length))
+                
+        # Cancel all requests that are too late
+        if cancel_requests:
+            try:
+                self.downloader.cancel_requests(cancel_requests)
+            except:
+                print_exc()
+
+        if __debug__:
+            if DEBUG_CHUNKS:
+                print_chunks(self.downloader, list(self.videostatus.generate_high_range()), compact=False)
+
+    def requested(self, *request):
+        self.outstanding_requests[request] = time.time()
+        return PiecePicker.requested(self, *request)
+        
+    def next_new(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None):
+        """ Determine which piece to download next from a peer.
+
+        haves:          set of pieces owned by that peer
+        wantfunc:       custom piece filter
+        complete_first: whether to complete partial pieces first
+        helper_con:
+        willrequest:    whether the returned piece will actually be requested
+
+        """
+        vs = self.videostatus
+
+        def pick_first( f, t ): # no shuffle
+            for i in vs.generate_range([(f,t)]):
+                # Is there a piece in the range the peer has?
+                # Is there a piece in the range we don't have?
+                if not haves[i] or self.has[i]: 
+                    continue
+
+                if not wantfunc(i): # Is there a piece in the range we want? 
+                    continue
+
+                if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                    return i
+
+            return None
+
+        def pick_rarest_loop_over_small_range(f,t,shuffle=True):
+            # Arno: pick_rarest is way expensive for the midrange thing,
+            # therefore loop over the list of pieces we want and see
+            # if it's avail, rather than looping over the list of all
+            # pieces to see if one falls in the (f,t) range.
+            #
+            xr = vs.generate_range([(f,t)])
+            #xr = xrl[0]
+            r = None
+            if shuffle:
+                # xr is an xrange generator, need real values to shuffle
+                r = []
+                r.extend(xr)
+                random.shuffle(r)
+            else:
+                r = xr
+            for i in r:
+                #print >>sys.stderr,"H",
+                if not haves[i] or self.has[i]:
+                    continue
+
+                #print >>sys.stderr,"W",
+                if not wantfunc(i):
+                    continue
+
+                if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                    return i
+
+            return None
+
+        def pick_rarest_small_range(f,t):
+            #print >>sys.stderr,"choice small",f,t
+            d = vs.dist_range(f,t)
+            
+            for level in xrange(len(self.interests)):
+                piecelist  = self.interests[level]
+                
+                if len(piecelist) > d:
+                #if level+1 == len(self.interests):
+                    # Arno: Lowest level priorities / long piecelist.
+                    # This avoids doing a scan that goes over the entire list 
+                    # of pieces when we already have the hi and/or mid ranges.
+                    
+                    # Arno, 2008-05-21: Apparently, the big list is not always
+                    # at the lowest level, hacked distance metric to determine
+                    # whether to use slow or fast method.
+                    
+                    #print >>sys.stderr,"choice QUICK"
+                    return pick_rarest_loop_over_small_range(f,t)
+                    #print >>sys.stderr,"choice Q",diffstr,"l",level,"s",len(piecelist) 
+                else:
+                    # Higher priorities / short lists
+                    for i in piecelist:
+                        if not vs.in_range( [(f, t)], i ):
+                            continue
+    
+                        #print >>sys.stderr,"H",
+                        if not haves[i] or self.has[i]:
+                            continue
+    
+                        #print >>sys.stderr,"W",
+                        if not wantfunc(i):
+                            continue
+    
+                        if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                            return i
+
+            return None
+
+        def pick_rarest(f,t): #BitTorrent already shuffles the self.interests for us
+            for piecelist in self.interests:
+                for i in piecelist:
+                    if not vs.in_range( f, t, i ):
+                        continue
+
+                    #print >>sys.stderr,"H",
+                    if not haves[i] or self.has[i]:
+                        continue
+
+                    #print >>sys.stderr,"W",
+                    if not wantfunc(i):
+                        continue
+
+                    if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                        return i
+
+            return None
+
+        def pieces_in_buffer_completed(high_range):
+            for i in vs.generate_range(high_range):
+                if not self.has[i]:            
+                    return False
+            return True
+            
+        # Ric: mod for new set of high priority
+        download_range = vs.download_range()
+        first, _ = download_range[0]
+        last = vs.get_highest_piece(download_range)
+        high_range = vs.get_high_range()
+        
+        #print >>sys.stderr , "wwwwwwwwwwwwww", download_range, high_range, first, last
+        priority_first, _ = high_range[0]
+        priority_last = vs.get_highest_piece(high_range)
+        
+        if priority_first != priority_last:
+            first = priority_first
+            highprob_cutoff = vs.normalize(priority_last + 1)
+            # TODO
+            midprob_cutoff = vs.normalize(first + self.MU * vs.get_small_range_length(first, last))
+        else:
+            highprob_cutoff = last
+            midprob_cutoff = vs.normalize(first + self.MU * vs.high_prob_curr_pieces)
+
+        # for VOD playback consider peers to be bad when they miss the deadline 1 time
+        allow_based_on_performance = connection.download.bad_performance_counter < 1
+
+        # Ric: only prebuffering of the base layer
+        if vs.prebuffering:
+            f = first
+            t = vs.normalize( first + self.transporter.max_prebuf_packets )
+            choice = pick_rarest_small_range(f,t)
+            type = "high"
+        else:
+            choice = None
+
+        if choice is None:
+            for i in high_range:
+                f, l = i
+                choice = pick_first( f, l )
+                if choice != None:
+                    # TODO bad hack
+                    break
+
+            # once there are no more pieces to pick in the current high
+            # priority set, increase the quality and run the code again
+            if choice is None and vs.quality < vs.available_qualities-1:
+                # We increase the quality only if we already received all
+                # the pieces from the current high priority set
+                if pieces_in_buffer_completed(high_range):
+                    if DEBUG:
+                        print >>sys.stderr, "vod: Enough pieces of the current quality have been downloaded. Increase the quality!"
+                    vs.quality += 1
+                    return self.next_new(haves, wantfunc, complete_first, helper_con, willrequest, connection) # BUGFIX: propagate the re-run's pick instead of discarding it
+                
+
+            type = "high"
+
+        # it is possible that the performance of this peer prohibits
+        # us from selecting this piece...
+        if not allow_based_on_performance:
+            high_priority_choice = choice
+            choice = None
+
+        if choice is None:
+            temp_range = vs.get_respective_range( (highprob_cutoff, midprob_cutoff) )
+            for i in range(vs.quality + 1):
+                f, l = temp_range[i]
+                choice = pick_rarest_small_range( f, l )
+                if choice != None:
+                    # TODO bad hack
+                    break
+            type = "mid"
+
+        if choice is None:
+            temp_range = vs.get_respective_range( (midprob_cutoff, last) )
+            for i in temp_range:
+                f, l = i
+                choice = pick_rarest( f, l )
+                if choice != None:
+                    # TODO bad hack
+                    break
+            type = "low"
+            
+        if choice and willrequest:
+            self.stats[type] += 1
+
+        if DEBUG:
+            # TODO
+            print >>sys.stderr,"vod: picked piece %s [type=%s] [%d,%d,%d,%d]" % (`choice`,type,first,highprob_cutoff,midprob_cutoff,last)
+            #print >>sys.stderr,"vod: picked piece %s [type=%s] [%d,%d]" % (`choice`,type,first,highprob_cutoff)
+
+        # 12/05/09, boudewijn: (1) The bad_performance_counter is
+        # incremented whenever a piece download failed and decremented
+        # whenever is succeeds. (2) A peer with a positive
+        # bad_performance_counter is only allowed to pick low-priority
+        # pieces. (Conclusion) When all low-priority pieces are
+        # downloaded the client hangs when one or more high-priority
+        # pieces are required and if all peers have a positive
+        # bad_performance_counter.
+        if choice is None and not allow_based_on_performance:
+            # ensure that there is another known peer with a
+            # non-positive bad_performance_counter that has the piece
+            # that we would pick from the high-priority set for this
+            # connection.
+
+            if high_priority_choice:
+                availability = 0
+                for download in self.downloader.downloads:
+                    if download.have[high_priority_choice] and not download.bad_performance_counter:
+                        availability += 1
+
+                if not availability:
+                    # no other connection has it... then ignore the
+                    # bad_performance_counter advice and attempt to
+                    # download it from this connection anyway
+                    if DEBUG: print >>sys.stderr, "vod: the bad_performance_counter says this is a bad peer... but we have nothing better... requesting piece", high_priority_choice, "regardless."
+                    choice = high_priority_choice
+
+        return choice
+
+    def is_valid_piece(self,piece):
+        return self.videostatus.in_valid_range(piece)
+   
+    def get_valid_range_iterator(self):
+
+        #print >>sys.stderr,"PiecePickerStreaming: Live hooked in, or VOD, valid range set to subset"
+        download_range = self.videostatus.download_range()
+#        first,last = self.videostatus.download_range()
+#        return self.videostatus.generate_range((first,last))
+        return self.videostatus.generate_range(download_range)
+            
+
diff --git a/instrumentation/next-share/BaseLib/Core/Video/PiecePickerStreaming.py b/instrumentation/next-share/BaseLib/Core/Video/PiecePickerStreaming.py
new file mode 100644 (file)
index 0000000..a9925d8
--- /dev/null
@@ -0,0 +1,700 @@
+# Written by Jan David Mol, Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+
+import sys
+import time
+import random
+from traceback import print_exc,print_stack
+
+from BaseLib.Core.BitTornado.BT1.PiecePicker import PiecePicker 
+
+if __debug__:
+    from BaseLib.Core.BitTornado.BT1.Downloader import print_chunks
+
+# percent piece loss to emulate -- we just don't request this percentage of the pieces
+# only implemented for live streaming
+#PIECELOSS = 0
+TEST_VOD_OVERRIDE = False
+
+DEBUG = False
+DEBUG_CHUNKS = False # set DEBUG_CHUNKS in BT1.Downloader to True
+DEBUGPP = False
+
+def rarest_first( has_dict, rarity_list, filter = lambda x: True ):
+    """ Select the rarest of pieces in has_dict, according
+        to the rarities in rarity_list. Breaks ties uniformly
+        at random. Additionally, `filter' is applied to select
+        the pieces we can return. """
+
+    """ Strategy:
+        - `choice' is the choice so far
+        - `n' is the number of pieces we could choose from so far
+        - `rarity' is the rarity of the choice so far
+
+        Every time we see a rarer piece, we reset our choice.
+        Every time we see a piece of the same rarity we're looking for,
+        we select it (overriding the previous choice) with probability 1/n.
+        This leads to a uniformly selected piece in one pass, be it that
+        we need more random numbers than when doing two passes. """
+
+    choice = None       # best piece seen so far
+    rarity = None       # its rarity (lower is rarer)
+    n = 0               # number of candidates seen at the current rarity
+
+    for k in (x for x in has_dict if filter(x)):
+        r = rarity_list[k]
+
+        if rarity is None or r < rarity:
+            # strictly rarer piece found: restart the reservoir
+            rarity = r
+            n = 1
+            choice = k
+        elif r == rarity:
+            # Tie: keep the new piece with probability 1/n so the final
+            # choice is uniform over all tied pieces (reservoir sampling).
+            # BUGFIX: the original used `random.uniform(0,n) == 0', which
+            # compares a float to 0 and is true with probability ~0, so
+            # ties effectively always kept the first piece seen.
+            n += 1
+            if random.randrange(n) == 0: # uniform over [0,n)
+                choice = k
+
+    return choice
+
+class PiecePickerStreaming(PiecePicker):
+    """ Implements piece picking for streaming video. Keeps track of playback
+        point and avoids requesting obsolete pieces. """
+
+    # order of initialisation and important function calls
+    #   PiecePicker.__init__              (by BitTornado.BT1Download.__init__)
+    #   PiecePicker.complete              (by hash checker, for pieces on disk)
+    #   MovieSelector.__init__
+    #   PiecePicker.set_download_range    (indirectly by MovieSelector.__init__)
+    #   MovieOnDemandTransporter.__init__ (by BitTornado.BT1Download.startEngine)
+    #   PiecePicker.set_bitrate           (by MovieOnDemandTransporter)
+    #   PiecePicker.set_transporter       (by MovieOnDemandTransporter)
+    #
+    #   PiecePicker._next                 (once connections are set up)
+    #
+    #   PiecePicker.complete              (by hash checker, for pieces received)
+
+    # relative size of mid-priority set
+    MU = 4
+
+    def __init__(self, numpieces,
+                 rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3,
+                 priority_step = 20, helper = None, coordinator = None, rate_predictor = None, piecesize = 0):
+        # NOTE(review): `piecesize' is accepted but neither stored nor
+        # forwarded to the base class here -- confirm whether callers rely on it.
+        PiecePicker.__init__( self, numpieces, rarest_first_cutoff, rarest_first_priority_cutoff,
+                              priority_step, helper, coordinator, rate_predictor)
+
+        # maximum existing piece number, to avoid scanning beyond it in next()
+        self.maxhave = 0
+
+        # some statistics: how many picks fell in each priority band
+        self.stats = {}
+        self.stats["high"] = 0
+        self.stats["mid"] = 0
+        self.stats["low"] = 0
+
+        # playback module; attached later via set_transporter()
+        self.transporter = None
+
+        # self.outstanding_requests contains (piece-id, begin,
+        # length):timestamp pairs for each outstanding request.
+        self.outstanding_requests = {}
+
+        # The playing_delay and buffering_delay give three values
+        # (min, max, offset) in seconds.
+        #
+        # The min tells how long before the cancel policy is allowed
+        # to kick in. We can not expect to receive a piece instantly,
+        # so we have to wait this time before having a download speed
+        # estimation.
+        #
+        # The max tells how long before we cancel the request. The
+        # request may also be canceled because the chunk will not be
+        # completed given the current download speed.
+        #
+        # The offset gives a grace period that is taken into account
+        # when choosing to cancel a request. For instance, when the
+        # peer download speed is too low to receive the chunk within 10
+        # seconds, a grace offset of 15 would ensure that the chunk is
+        # NOT canceled (useful while buffering)
+        self.playing_delay = (5, 20, -0.5)
+        self.buffering_delay = (7.5, 30, 10)
+
+        # Arno, 2010-04-20: STBSPEED: is_interesting is now a variable.
+        self.is_interesting  = self.is_interesting_normal
+
+    def set_transporter(self, transporter):
+        # Attach the playback/transport module; completion and piece
+        # notifications are forwarded to it from now on.
+        self.transporter = transporter
+
+        # NOTE(review): the triple-quoted string below is a no-op
+        # expression statement kept as documentation of the pre-STBSPEED
+        # behaviour -- it is dead code, not a docstring.
+        """
+        Arno, 2010-04-20: STBSPEED: Replaced by transporter.complete_from_persistent_state()
+        # update its information -- pieces read from disk
+        if not self.videostatus.live_streaming:
+            for i in xrange(self.videostatus.first_piece,self.videostatus.last_piece+1):
+                if self.has[i]:
+                    self.transporter.complete( i, downloaded=False )
+        """
+
+    def set_videostatus(self,videostatus):
+        """ Download in a wrap-around fashion between pieces [0,numpieces).
+            Look at most delta pieces ahead from download_range[0].
+        """
+        self.videostatus = videostatus
+        
+        # Live and VOD use different notions of "interesting": live is
+        # bounded by the moving download window, VOD by the selected file.
+        if self.videostatus.live_streaming:
+            self.is_interesting  = self.is_interesting_live
+        else:
+            self.is_interesting  = self.is_interesting_vod
+        # Keep per-peer "interesting" sets in sync with playback position.
+        videostatus.add_playback_pos_observer( self.change_playback_pos )
+
+    def is_interesting_live(self,piece):
+        # Live: piece must be in the moving download window and missing.
+        return self.videostatus.in_download_range( piece ) and not self.has[piece]
+
+    def is_interesting_vod(self,piece):
+        # VOD: piece must belong to the selected file's range and be missing.
+        return (self.videostatus.first_piece <= piece <= self.videostatus.last_piece) and not self.has[piece]
+
+    def is_interesting_normal(self,piece):
+        # Default (before set_videostatus): any missing piece is interesting.
+        return not self.has[piece]
+
+
+    def change_playback_pos(self, oldpos, newpos):
+        # Observer for videostatus playback-position changes. oldpos is
+        # None on (re)initialisation; otherwise the position advanced and
+        # the piece at oldpos is no longer worth requesting.
+        if oldpos is None:
+            # (re)initialise
+            valid = self.is_interesting
+
+            for d in self.peer_connections.values():
+                interesting = {}
+                has = d["connection"].download.have
+                
+                # Arno, 2009-11-07: STBSPEED: iterator over just valid range, that's
+                # what we'll be interested in.
+                #for i in xrange(self.videostatus.first_piece,self.videostatus.last_piece+1):
+                for i in self.get_valid_range_iterator():
+                    if has[i] and valid(i):
+                        interesting[i] = 1
+
+                d["interesting"] = interesting
+        else:
+            # playback position incremented -- remove timed out piece
+            for d in self.peer_connections.values():
+                d["interesting"].pop(oldpos,0)
+
+    def got_have(self, piece, connection=None):
+        # A peer announced it has `piece'. Track the highest announced
+        # piece number so next() never scans beyond it.
+        # if DEBUG:
+        #     print >>sys.stderr,"PiecePickerStreaming: got_have:",piece
+        self.maxhave = max(self.maxhave,piece)
+
+        # Arno, 2010-04-15: STBSPEED Disabled, does nothing but stats.
+        #if self.transporter:
+        #    self.transporter.got_have( piece )
+        PiecePicker.got_have(self,piece,connection)
+
+        # NOTE(review): `connection' defaults to None but is used as a
+        # dict key below when the piece is interesting -- callers
+        # apparently always pass it; confirm before relying on the default.
+        if self.is_interesting(piece):
+            self.peer_connections[connection]["interesting"][piece] = 1
+
+
+    def got_seed(self):
+        # Peer is a seed: it has every piece, so maxhave is the maximum.
+        self.maxhave = self.numpieces
+        PiecePicker.got_seed( self )
+
+    def lost_have(self, piece):
+        # Plain delegation; maxhave is deliberately not lowered.
+        PiecePicker.lost_have( self, piece )
+
+    def got_peer(self, connection):
+        PiecePicker.got_peer( self, connection )
+
+        # Per-connection map of pieces this peer has and we still want.
+        self.peer_connections[connection]["interesting"] = {}
+
+    def lost_peer(self, connection):
+        PiecePicker.lost_peer( self, connection )
+
+    def got_piece(self, *request):
+        # A chunk (piece-id, begin, length) arrived: it is no longer an
+        # outstanding request, and the transporter is told for playback.
+        if request in self.outstanding_requests:
+            del self.outstanding_requests[request]
+        if self.transporter:
+            self.transporter.got_piece(*request)
+
+    def complete(self, piece):
+        # Piece passed the hash check: notify the transporter, drop any
+        # outstanding chunk requests for it, and stop considering it
+        # interesting on every connection.
+        if DEBUG:
+            print >>sys.stderr,"PiecePickerStreaming: complete:",piece
+        
+        PiecePicker.complete( self, piece )
+        if self.transporter:
+            self.transporter.complete( piece )
+
+        # .keys() returns a snapshot list in Python 2, so deleting while
+        # iterating is safe here.
+        for request in self.outstanding_requests.keys():
+            if request[0] == piece:
+                del self.outstanding_requests[request]
+
+        # don't consider this piece anymore
+        for d in self.peer_connections.itervalues():
+            d["interesting"].pop(piece,0)
+
+    def num_nonempty_neighbours(self):
+        # return #neighbours who have something
+        # (iterating peer_connections yields its keys, i.e. the
+        # connection objects themselves)
+        return len( [c for c in self.peer_connections if c.download.have.numfalse < c.download.have.length] )
+
+    def pos_is_sustainable(self,fudge=2):
+        """
+            Returns whether we have enough data around us to support the current playback position.
+            If not, playback should pause, stall or reinitialised when pieces are lost.
+        """
+        vs = self.videostatus
+
+        # only holds for live streaming for now. theoretically, vod can have the same problem
+        # since data can be seeded in a 'live' fashion
+        if not vs.live_streaming:
+            if DEBUG:
+                print >>sys.stderr, "PiecePickerStreaming: pos is sustainable: not streaming live"
+            return True
+
+        # We assume the maximum piece number that is available at at least half of the neighbours
+        # to be sustainable. Although we only need a fixed number of neighbours with enough bandwidth,
+        # such neighbours may depart, hence we choose a relative trade-off.
+
+        # this means that our current playback position is sustainable if any future piece
+        # is owned by at least half of the peers
+
+        # ignore peers which have nothing
+        numconn = self.num_nonempty_neighbours()
+
+        if not numconn:
+            # not sustainable, but nothing we can do. Return True to avoid pausing
+            # and getting out of sync.
+            if DEBUG:
+                print >>sys.stderr, "PiecePickerStreaming: pos is sustainable: no neighbours with pieces"
+            return True
+
+        # integer division: at least half (rounded down), but never zero
+        half = max( 1, numconn/2 )
+        skip = fudge # ignore the first 'fudge' pieces
+
+        # one future piece owned by >= half the non-empty peers suffices
+        for x in vs.generate_range( vs.download_range() ):
+            if skip > 0:
+                skip -= 1
+            elif self.numhaves[x] >= half:
+                if DEBUG:
+                    print >>sys.stderr, "PiecePickerStreaming: pos is sustainable: piece %s @ %s>%s peers (fudge=%s)" % (x,self.numhaves[x],half,fudge)
+                return True
+            else:
+                pass
+
+        if DEBUG:
+            print >>sys.stderr, "PiecePickerStreaming: pos is NOT sustainable playpos=%s fudge=%s numconn=%s half=%s numpeers=%s %s" % (vs.playback_pos,fudge,numconn,half,len(self.peer_connections),[x.get_ip() for x in self.peer_connections])
+
+        # too few neighbours own the future pieces. it's wise to pause and let neighbours catch up
+        # with us
+        return False
+
+
+    # next: selects next piece to download. adjusts wantfunc with filter for streaming; calls
+    #   _next: selects next piece to download. completes partial downloads first, if needed, otherwise calls
+    #     next_new: selects next piece to download. override this with the piece picking policy
+
+    def next(self, haves, wantfunc, sdownload, complete_first = False, helper_con = False, slowpieces=[], willrequest=True,connection=None,proxyhave=None):
+        # Wrap wantfunc so pieces on the slow list are never picked, then
+        # delegate to the base piece picker (which calls back into _next).
+        def newwantfunc( piece ):
+            #print >>sys.stderr,"S",self.streaming_piece_filter( piece ),"!sP",not (piece in slowpieces),"w",wantfunc( piece )
+            return not (piece in slowpieces) and wantfunc( piece )
+
+        # fallback: original piece picker
+        p = PiecePicker.next(self, haves, newwantfunc, sdownload, complete_first, helper_con, slowpieces=slowpieces, willrequest=willrequest,connection=connection)
+        if DEBUGPP and self.videostatus.prebuffering:
+            print >>sys.stderr,"PiecePickerStreaming: original PP.next returns",p
+        # Arno, 2010-03-11: Njaal's CS something causes this to return None
+        # when we're not complete: added check
+        # NOTE(review): `and' binds tighter than `or', so when
+        # TEST_VOD_OVERRIDE is True this branch fires regardless of p;
+        # also self.transporter may still be None before set_transporter()
+        # is called -- confirm the call ordering.
+        if p is None and not self.videostatus.live_streaming and self.am_I_complete() or TEST_VOD_OVERRIDE:
+            # When the file we selected from a multi-file torrent is complete,
+            # we won't request anymore pieces, so the normal way of detecting 
+            # we're done is not working and we won't tell the video player 
+            # we're playable. Do it here instead.
+            self.transporter.notify_playable()
+        return p
+
+    def _next(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None):
+        """ First, complete any partials if needed. Otherwise, select a new piece. """
+
+        #print >>sys.stderr,"PiecePickerStreaming: complete_first is",complete_first,"started",self.started
+
+        # cutoff = True:  random mode
+        #          False: rarest-first mode
+        cutoff = self.numgot < self.rarest_first_cutoff
+
+        # whether to complete existing partials first -- do so before the
+        # cutoff, or if forced by complete_first, but not for seeds.
+        #complete_first = (complete_first or cutoff) and not haves.complete()
+        complete_first = (complete_first or cutoff)
+
+        # most interesting piece
+        best = None
+
+        # interest level of best piece
+        bestnum = 2 ** 30
+
+        # select piece we started to download with best interest index.
+        for i in self.started:
+# 2fastbt_
+            if haves[i] and wantfunc(i) and (self.helper is None or helper_con or not self.helper.is_ignored(i)):
+# _2fastbt
+                if self.level_in_interests[i] < bestnum:
+                    best = i
+                    bestnum = self.level_in_interests[i]
+
+        if best is not None:
+            # found a piece -- return it if we are completing partials first
+            # or if there is a cutoff
+            if complete_first or (cutoff and len(self.interests) > self.cutoff):
+                return best
+
+        # no partial to complete (or not forced to): pick a fresh piece
+        # according to the streaming policy.
+        p = self.next_new(haves, wantfunc, complete_first, helper_con,willrequest=willrequest,connection=connection)
+        if DEBUG:
+             print >>sys.stderr,"PiecePickerStreaming: next_new returns",p
+        return p
+
+    def check_outstanding_requests(self, downloads):
+        # Cancel policy: walk all active chunk requests and cancel those
+        # that can no longer meet their playback deadline.
+        if not self.transporter:
+            return
+        
+        now = time.time()
+        cancel_requests = []
+        in_high_range = self.videostatus.in_high_range
+        playing_mode = self.videostatus.playing and not self.videostatus.paused
+        piece_due = self.transporter.piece_due
+        
+        if playing_mode:
+            # playing mode
+            min_delay, max_delay, offset_delay = self.playing_delay
+        else:
+            # buffering mode
+            min_delay, max_delay, offset_delay = self.buffering_delay
+
+        for download in downloads:
+
+            total_length = 0
+            download_rate = download.get_short_term_rate()
+            for piece_id, begin, length in download.active_requests:
+                # select policy for this piece
+                try:
+                    time_request = self.outstanding_requests[(piece_id, begin, length)]
+                except KeyError:
+                    continue
+                
+                # add the length of this chunk to the total of bytes
+                # that needs to be downloaded
+                total_length += length
+
+                # each request must be allowed at least some
+                # minimal time to be handled
+                if now < time_request + min_delay:
+                    continue
+
+                # high-priority pieces are eligible for
+                # cancellation. Others are not. They will eventually be
+                # eligible as they become important for playback.
+                if in_high_range(piece_id):
+                    if download_rate == 0:
+                        # we have not received anything in the last min_delay seconds
+                        if DEBUG: print >>sys.stderr, "PiecePickerStreaming: download not started yet for piece", piece_id, "chunk", begin, "on", download.ip
+                        cancel_requests.append((piece_id, begin, length))
+                        download.bad_performance_counter += 1
+
+                    else:
+                        if playing_mode:
+                            time_until_deadline = min(piece_due(piece_id), time_request + max_delay - now)
+                        else:
+                            time_until_deadline = time_request + max_delay - now
+                        time_until_download = total_length / download_rate
+
+                        # we have to cancel when the deadline can not be met
+                        if time_until_deadline < time_until_download - offset_delay:
+                            if DEBUG: print >>sys.stderr, "PiecePickerStreaming: download speed too slow for piece", piece_id, "chunk", begin, "on", download.ip, "Deadline in", time_until_deadline, "while estimated download in", time_until_download
+                            cancel_requests.append((piece_id, begin, length))
+                
+        # Cancel all requests that are too late
+        if cancel_requests:
+            try:
+                self.downloader.cancel_requests(cancel_requests)
+            except:
+                print_exc()
+
+        if __debug__:
+            if DEBUG_CHUNKS:
+                print_chunks(self.downloader, list(self.videostatus.generate_high_range()), compact=False)
+
+    def requested(self, *request):
+        # Record the request timestamp for the cancel policy in
+        # check_outstanding_requests(), then delegate to the base class.
+        self.outstanding_requests[request] = time.time()
+        return PiecePicker.requested(self, *request)
+        
+    def next_new(self, haves, wantfunc, complete_first, helper_con, willrequest=True, connection=None):
+        """ Determine which piece to download next from a peer.
+
+        haves:          set of pieces owned by that peer
+        wantfunc:       custom piece filter
+        complete_first: whether to complete partial pieces first
+        helper_con:
+        willrequest:    whether the returned piece will actually be requested
+
+        """
+
+        vs = self.videostatus
+
+        if vs.live_streaming:
+            # first, make sure we know where to start downloading
+            if vs.live_startpos is None:
+                self.transporter.calc_live_startpos( self.transporter.max_prebuf_packets, False )
+                print >>sys.stderr,"vod: pp: determined startpos of",vs.live_startpos
+
+            # select any interesting piece, rarest first
+            if connection:
+                # Without 'connection', we don't know who we will request from.
+                
+                #print >>sys.stderr,"PiecePickerStreaming: pp",connection.get_ip(),"int",self.peer_connections[connection]["interesting"]
+                
+                return rarest_first( self.peer_connections[connection]["interesting"], self.numhaves, wantfunc )
+
+        # --- local picking strategies, closing over haves/wantfunc --------
+        def pick_first( f, t ): # no shuffle
+            for i in vs.generate_range((f,t)):
+                # Is there a piece in the range the peer has?
+                # Is there a piece in the range we don't have?
+                if not haves[i] or self.has[i]: 
+                    continue
+
+                if not wantfunc(i): # Is there a piece in the range we want? 
+                    continue
+
+                if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                    return i
+
+            return None
+
+        def pick_rarest_loop_over_small_range(f,t,shuffle=True):
+            # Arno: pick_rarest is way expensive for the midrange thing,
+            # therefore loop over the list of pieces we want and see
+            # if it's avail, rather than looping over the list of all
+            # pieces to see if one falls in the (f,t) range.
+            #
+            xr = vs.generate_range((f,t))
+            r = None
+            if shuffle:
+                # xr is an xrange generator, need real values to shuffle
+                r = []
+                r.extend(xr)
+                random.shuffle(r)
+            else:
+                r = xr
+            for i in r:
+                #print >>sys.stderr,"H",
+                if not haves[i] or self.has[i]:
+                    continue
+
+                #print >>sys.stderr,"W",
+                if not wantfunc(i):
+                    continue
+
+                if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                    return i
+
+            return None
+
+        def pick_rarest_small_range(f,t):
+            #print >>sys.stderr,"choice small",f,t
+            d = vs.dist_range(f,t)
+            
+            for level in xrange(len(self.interests)):
+                piecelist  = self.interests[level]
+                
+                if len(piecelist) > d:
+                #if level+1 == len(self.interests):
+                    # Arno: Lowest level priorities / long piecelist.
+                    # This avoids doing a scan that goes over the entire list 
+                    # of pieces when we already have the hi and/or mid ranges.
+                    
+                    # Arno, 2008-05-21: Apparently, the big list is not always
+                    # at the lowest level, hacked distance metric to determine
+                    # whether to use slow or fast method.
+                    
+                    #print >>sys.stderr,"choice QUICK"
+                    return pick_rarest_loop_over_small_range(f,t)
+                    #print >>sys.stderr,"choice Q",diffstr,"l",level,"s",len(piecelist) 
+                else:
+                    # Higher priorities / short lists
+                    for i in piecelist:
+                        if not vs.in_range( f, t, i ):
+                            continue
+    
+                        #print >>sys.stderr,"H",
+                        if not haves[i] or self.has[i]:
+                            continue
+    
+                        #print >>sys.stderr,"W",
+                        if not wantfunc(i):
+                            continue
+    
+                        if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                            return i
+
+            return None
+
+        def pick_rarest(f,t): #BitTorrent already shuffles the self.interests for us
+            for piecelist in self.interests:
+                for i in piecelist:
+                    if not vs.in_range( f, t, i ):
+                        continue
+
+                    #print >>sys.stderr,"H",
+                    if not haves[i] or self.has[i]:
+                        continue
+
+                    #print >>sys.stderr,"W",
+                    if not wantfunc(i):
+                        continue
+
+                    if self.helper is None or helper_con or not self.helper.is_ignored(i):
+                        return i
+
+            return None
+
+        # --- compute the high/mid/low priority windows --------------------
+        first, last = vs.download_range()
+        priority_first, priority_last = vs.get_high_range()
+        if priority_first != priority_last:
+            first = priority_first
+            highprob_cutoff = vs.normalize(priority_last + 1)
+            # Arno, 2010-08-10: Errr, mid = MU * high
+            midprob_cutoff = vs.normalize(first + self.MU * vs.get_range_length(first, highprob_cutoff))
+        else:
+            highprob_cutoff = last
+            midprob_cutoff = vs.normalize(first + self.MU * vs.high_prob_curr_pieces)
+        # h = vs.time_to_pieces( self.HIGH_PROB_SETSIZE )
+        # highprob_cutoff = vs.normalize(first + max(h, self.HIGH_PROB_MIN_PIECES))
+        # midprob_cutoff = vs.normalize(first + max(self.MU * h, self.HIGH_PROB_MIN_PIECES))
+
+        # print >>sys.stderr, "Prio %s:%s:%s" % (first, highprob_cutoff, midprob_cutoff), highprob_cutoff - first, midprob_cutoff - highprob_cutoff
+
+        # first,last = vs.download_range()
+        # if vs.wraparound:
+        #     max_lookahead = vs.wraparound_delta
+        # else:
+        #     max_lookahead = vs.last_piece - vs.playback_pos
+
+        # highprob_cutoff = vs.normalize( first + min( h, max_lookahead ) )
+        # midprob_cutoff  = vs.normalize( first + min( h + self.MU * h, max_lookahead ) )
+
+        if vs.live_streaming:
+            # for live playback consider peers to be bad when they miss the deadline 5 times
+            # NOTE(review): unlike the VOD branch below, this dereferences
+            # connection.download without guarding against connection being
+            # None -- confirm live callers always pass a connection.
+            allow_based_on_performance = connection.download.bad_performance_counter < 5
+        else:
+            # for VOD playback consider peers to be bad when they miss the deadline 1 time
+            # Diego : patch from Jan
+            if connection:
+                allow_based_on_performance = connection.download.bad_performance_counter < 1
+            else:
+                allow_based_on_performance = True
+
+        # --- pick: prebuffer range first, then high/mid/low bands ---------
+        if vs.prebuffering:
+            f = first
+            t = vs.normalize( first + self.transporter.max_prebuf_packets )
+            choice = pick_rarest_small_range(f,t)
+            # NB: `type' shadows the builtin; kept as-is for the stats keys.
+            type = "high"
+        else:
+            choice = None
+
+        if choice is None:
+            if vs.live_streaming:
+                choice = pick_rarest_small_range( first, highprob_cutoff )
+            else:
+                choice = pick_first( first, highprob_cutoff )
+            type = "high"
+
+        # it is possible that the performance of this peer prohibits
+        # us from selecting this piece...
+        if not allow_based_on_performance:
+            high_priority_choice = choice
+            choice = None
+
+        if choice is None:
+            choice = pick_rarest_small_range( highprob_cutoff, midprob_cutoff )
+            type = "mid"
+
+        if choice is None:
+            if vs.live_streaming:
+                # Want: loop over what peer has avail, respecting piece priorities
+                # (could ignore those for live).
+                #
+                # Attempt 1: loop over range (which is 25% of window (see 
+                # VideoStatus), ignoring priorities, no shuffle.
+                #print >>sys.stderr,"vod: choice low RANGE",midprob_cutoff,last
+                #choice = pick_rarest_loop_over_small_range(midprob_cutoff,last,shuffle=False)
+                pass
+            else:
+                choice = pick_rarest( midprob_cutoff, last )
+            type = "low"
+            
+        if choice and willrequest:
+            self.stats[type] += 1
+
+        if DEBUG:
+            print >>sys.stderr,"vod: pp: picked piece %s [type=%s] [%d,%d,%d,%d]" % (`choice`,type,first,highprob_cutoff,midprob_cutoff,last)
+
+        # 12/05/09, boudewijn: (1) The bad_performance_counter is
+        # incremented whenever a piece download failed and decremented
+        # whenever is succeeds. (2) A peer with a positive
+        # bad_performance_counter is only allowed to pick low-priority
+        # pieces. (Conclusion) When all low-priority pieces are
+        # downloaded the client hangs when one or more high-priority
+        # pieces are required and if all peers have a positive
+        # bad_performance_counter.
+        if choice is None and not allow_based_on_performance:
+            # ensure that there is another known peer with a
+            # non-positive bad_performance_counter that has the piece
+            # that we would pick from the high-priority set for this
+            # connection.
+
+            if high_priority_choice:
+                availability = 0
+                for download in self.downloader.downloads:
+                    if download.have[high_priority_choice] and not download.bad_performance_counter:
+                        availability += 1
+
+                if not availability:
+                    # no other connection has it... then ignore the
+                    # bad_performance_counter advice and attempt to
+                    # download it from this connection anyway
+                    if DEBUG: print >>sys.stderr, "vod: pp: the bad_performance_counter says this is a bad peer... but we have nothing better... requesting piece", high_priority_choice, "regardless."
+                    choice = high_priority_choice
+
+        if not vs.live_streaming:
+            if choice is None and not self.am_I_complete():
+                # Arno, 2010-02-24:
+                # VOD + seeking: we seeked into the future and played till end, 
+                # there is a gap between the old playback and the seek point
+                # which we didn't download, and otherwise never will.
+                #
+                secondchoice = pick_rarest(vs.first_piece,vs.last_piece)
+                if secondchoice is not None:
+                    if DEBUG:
+                        print >>sys.stderr,"vod: pp: Picking skipped-over piece",secondchoice 
+                    return secondchoice
+
+        return choice
+
+    def is_valid_piece(self,piece):
+        # True iff `piece' lies inside the range this download considers valid.
+        return self.videostatus.in_valid_range(piece)
+   
+    def get_valid_range_iterator(self):
+        # Before a live stream is hooked in there is no sensible window,
+        # so fall back to the base class's full-range iterator.
+        if self.videostatus.live_streaming and self.videostatus.get_live_startpos() is None:
+            # Not hooked in, so cannot provide a sensible download range
+            #print >>sys.stderr,"PiecePickerStreaming: Not hooked in, valid range set to total"
+            return PiecePicker.get_valid_range_iterator(self)
+            
+        #print >>sys.stderr,"PiecePickerStreaming: Live hooked in, or VOD, valid range set to subset"
+        first,last = self.videostatus.download_range()
+        return self.videostatus.generate_range((first,last))
+
+    def get_live_source_have(self):
+        # Return the have-bitfield of the live source among our peers,
+        # or None if we are not connected to the source.
+        for d in self.peer_connections.values():
+            if d["connection"].is_live_source():
+                return d["connection"].download.have
+        return None
+    
+            
+            
+    def am_I_complete(self):
+        # Download finished (unless the VOD test override forces "not done").
+        return self.done and not TEST_VOD_OVERRIDE
+            
+            
+PiecePickerVOD = PiecePickerStreaming
diff --git a/instrumentation/next-share/BaseLib/Core/Video/SVCTransporter.py b/instrumentation/next-share/BaseLib/Core/Video/SVCTransporter.py
new file mode 100644 (file)
index 0000000..3c7f18c
--- /dev/null
@@ -0,0 +1,1275 @@
+# Written by Jan David Mol, Arno Bakker, Riccardo Petrocco
+# see LICENSE.txt for license information
+
+import sys
+from math import ceil
+from threading import Condition,currentThread
+from traceback import print_exc
+from tempfile import mkstemp
+import collections
+import os
+import base64
+import os,sys,time
+import re
+
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from BaseLib.Core.Video.MovieTransport import MovieTransport,MovieTransportStreamWrapper
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.osutils import *
+from BaseLib.Core.Video.VideoOnDemand import *
+
+# pull all video data as if a video player was attached
+FAKEPLAYBACK = False
+
+DEBUG = True
+DEBUGPP = False
+
+class SVCTransporter(MovieOnDemandTransporter):
+    """ Takes care of providing a bytestream interface based on the available pieces. """
+
+    # seconds to prebuffer if bitrate is known (always for SVC)
+    PREBUF_SEC_VOD  = 10
+
+    # max number of seconds in queue to player
+    # Arno: < 2008-07-15: St*pid vlc apparently can't handle lots of data pushed to it
+    # Arno: 2008-07-15: 0.8.6h apparently can
+    BUFFER_TIME = 5.0
+    
+    # polling interval to refill buffer
+    #REFILL_INTERVAL = BUFFER_TIME * 0.75
+    # Arno: there's is no guarantee we got enough (=BUFFER_TIME secs worth) to write to output bug!
+    REFILL_INTERVAL = 0.1
+
+    # amount of time (seconds) to push a packet into
+    # the player queue ahead of schedule
+    VLC_BUFFER_SIZE = 0
+    PIECE_DUE_SKEW = 0.1 + VLC_BUFFER_SIZE
+
+    # Arno: If we don't know playtime and FFMPEG gave no decent bitrate, this is the minimum
+    # bitrate (in KByte/s) that the playback birate-estimator must have to make us
+    # set the bitrate in movieselector.
+    # NOTE(review): comment says KByte/s but 32*1024 reads as 32 KByte/s in
+    # bytes/s — confirm the intended unit before relying on it.
+    MINPLAYBACKRATE = 32*1024
+
+    # maximum delay between pops before we force a restart (seconds)
+    MAX_POP_TIME = 60
+
+    def __init__(self,bt1download,videostatus,videoinfo,videoanalyserpath,vodeventfunc):
+        """ Set up a VOD bytestream transport on top of an SVC download.
+
+            bt1download: the BT1Download engine this transport reads from
+            videostatus: VideoStatus object describing the (layered) movie
+            videoinfo: dict providing at least 'mimetype' and 'usercallback'
+            videoanalyserpath: path to the FFMPEG binary used by parse_video()
+            vodeventfunc: callback invoked on VOD playback events
+        """
+
+        # dirty hack to get the Tribler Session
+        from BaseLib.Core.Session import Session
+        session = Session.get_instance()
+
+        if session.get_overlay():
+            # see comment in else section on importing...
+            from BaseLib.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackDBHandler
+            self._playback_stats = VideoPlaybackDBHandler.get_instance()
+        else:
+            # hack: we should not import this since it is not part of
+            # the core nor should we import here, but otherwise we
+            # will get import errors
+            from BaseLib.Player.Reporter import VideoPlaybackReporter
+            self._playback_stats = VideoPlaybackReporter.get_instance()
+            
+        # add an event to indicate that the user wants playback to
+        # start
+        def set_nat(nat):
+            self._playback_stats.add_event(self._playback_key, "nat:%s" % nat)
+        self._playback_key = base64.b64encode(os.urandom(20))
+        self._playback_stats.add_event(self._playback_key, "play-init")
+        self._playback_stats.add_event(self._playback_key, "piece-size:%d" % videostatus.piecelen)
+        self._playback_stats.add_event(self._playback_key, "num-pieces:%d" % videostatus.movie_numpieces)
+        self._playback_stats.add_event(self._playback_key, "bitrate:%d" % videostatus.bitrate)
+        self._playback_stats.add_event(self._playback_key, "nat:%s" % session.get_nat_type(callback=set_nat))
+
+
+        self._complete = False
+        self.videoinfo = videoinfo
+        self.bt1download = bt1download
+        self.piecepicker = bt1download.picker
+        self.rawserver = bt1download.rawserver
+        self.storagewrapper = bt1download.storagewrapper
+        self.fileselector = bt1download.fileselector
+
+        self.vodeventfunc = vodeventfunc
+        self.videostatus = vs = videostatus
+        
+        # Add quotes around path, as that's what os.popen() wants on win32
+        if sys.platform == "win32" and videoanalyserpath is not None and videoanalyserpath.find(' ') != -1:
+            self.video_analyser_path='"'+videoanalyserpath+'"'
+        else:
+            self.video_analyser_path=videoanalyserpath
+
+        # counter for the sustainable() call. Every X calls the
+        # buffer-percentage is updated.
+        self.sustainable_counter = sys.maxint
+
+        # boudewijn: because we now update the downloadrate for each
+        # received chunk instead of each piece we do not need to
+        # average the measurement over a 'long' period of time. Also,
+        # we only update the downloadrate for pieces that are in the
+        # high priority range giving us a better estimation on how
+        # likely the pieces will be available on time.
+        self.overall_rate = Measure(10)
+        self.high_range_rate = Measure(2)
+
+        # buffer: a link to the piecepicker buffer
+        self.has = self.piecepicker.has
+
+        # number of pieces in buffer
+        self.pieces_in_buffer = 0
+
+        # Condition guarding outbuf and the playing/prebuffering state;
+        # readers block on it until data is available.
+        self.data_ready = Condition()
+        
+        # Arno: Call FFMPEG only if the torrent did not provide the 
+        # bitrate and video dimensions. This is becasue FFMPEG 
+        # sometimes hangs e.g. Ivaylo's Xvid Finland AVI, for unknown 
+        # reasons
+        
+        # Arno: 2007-01-06: Since we use VideoLan player, videodimensions not important
+        assert vs.bitrate_set
+        self.doing_ffmpeg_analysis = False
+        self.doing_bitrate_est = False
+        self.videodim = None #self.movieselector.videodim
+
+        self.player_opened_with_width_height = False
+        self.ffmpeg_est_bitrate = None
+        
+        prebufsecs = self.PREBUF_SEC_VOD
+
+        # assumes first piece is whole (first_piecelen == piecelen)
+        piecesneeded = vs.time_to_pieces( prebufsecs )
+        # NOTE(review): bytesneeded is computed but never used below.
+        bytesneeded = piecesneeded * vs.piecelen
+
+        self.max_prebuf_packets = min(vs.movie_numpieces, piecesneeded)
+
+        # NOTE(review): doing_ffmpeg_analysis is hard-wired to False above,
+        # so this branch is currently dead code.
+        if self.doing_ffmpeg_analysis and DEBUG:
+            print >>sys.stderr,"vod: trans: Want",self.max_prebuf_packets,"pieces for FFMPEG analysis, piecesize",vs.piecelen
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Want",self.max_prebuf_packets,"pieces for prebuffering"
+
+        self.nreceived = 0
+        
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Setting MIME type to",self.videoinfo['mimetype']
+        
+        self.set_mimetype(self.videoinfo['mimetype'])
+
+        # some statistics
+        self.stat_playedpieces = 0 # number of pieces played successfully
+        self.stat_latepieces = 0 # number of pieces that arrived too late
+        self.stat_droppedpieces = 0 # number of pieces dropped
+        self.stat_stalltime = 0.0 # total amount of time the video was stalled
+        self.stat_prebuffertime = 0.0 # amount of prebuffer time used
+        self.stat_pieces = PieceStats() # information about each piece
+
+        # start periodic tasks
+        self.curpiece = ""
+        self.curpiece_pos = 0
+        # The outbuf keeps only the pieces from the base layer.. We play if we 
+        # have at least a piece from the base layer!
+        self.outbuf = []
+        #self.last_pop = None # time of last pop
+        self.reset_bitrate_prediction()
+
+        self.lasttime=0
+        # For DownloadState
+        self.prebufprogress = 0.0
+        self.prebufstart = time.time()
+        self.playable = False
+        self.usernotified = False
+        
+        self.outbuflen = None
+
+        # LIVESOURCEAUTH
+        self.authenticator = None
+
+        self.refill_rawserv_tasker()
+        self.tick_second()
+
+        # link to others (last thing to do)
+        self.piecepicker.set_transporter( self )
+        #self.start()
+
+        if FAKEPLAYBACK:
+            import threading
+            
+            # Drives the stream as if a real player were attached (testing aid).
+            class FakeReader(threading.Thread):
+                def __init__(self,movie):
+                    threading.Thread.__init__(self)
+                    self.movie = movie
+                    
+                def run(self):
+                    self.movie.start()
+                    while not self.movie.done():
+                        self.movie.read()
+            
+            t = FakeReader(self)
+            t.start()
+          
+        #self.rawserver.add_task( fakereader, 0.0 )
+
+
+    def parse_video(self):
+        """ Feeds the first max_prebuf_packets to ffmpeg to determine video bitrate.
+            Returns [bitrate, width, height]; any element may be None if
+            FFMPEG's output did not reveal it. """
+        vs = self.videostatus
+        width = None
+        height = None
+
+        # Start ffmpeg, let it write to a temporary file to prevent 
+        # blocking problems on Win32 when FFMPEG outputs lots of
+        # (error) messages.
+        #
+        [loghandle,logfilename] = mkstemp()
+        os.close(loghandle)
+        if sys.platform == "win32":
+            # Not "Nul:" but "nul" is /dev/null on Win32
+            sink = 'nul'
+        else:
+            sink = '/dev/null'
+        # DON'T FORGET 'b' OTHERWISE WE'RE WRITING BINARY DATA IN TEXT MODE!
+        # NOTE(review): os.popen2 is deprecated (subprocess is the modern
+        # replacement), and the command line is built via interpolation —
+        # safe only while video_analyser_path/logfilename have no shell
+        # metacharacters.
+        (child_out,child_in) = os.popen2( "%s -y -i - -vcodec copy -acodec copy -f avi %s > %s 2>&1" % (self.video_analyser_path, sink, logfilename), 'b' )
+        """
+        # If the path is "C:\Program Files\bla\bla" (escaping left out) and that file does not exist
+        # the output will say something cryptic like "vod: trans: FFMPEG said C:\Program" suggesting an
+        # error with the double quotes around the command, but that's not it. Be warned!
+        cmd = self.video_analyser_path+' -y -i - -vcodec copy -acodec copy -f avi '+sink+' > '+logfilename+' 2>&1'
+        print >>sys.stderr,"vod: trans: Video analyser command is",cmd
+        (child_out,child_in) = os.popen2(cmd,'b')  # DON'T FORGET 'b' OTHERWISE THINGS GO WRONG!
+        """
+
+        # feed all the pieces
+        download_range = vs.download_range()
+        # We get the bitrate from the base layer and determine the rest based on this
+        first, last = download_range[0]
+
+        for i in xrange(first,last):
+            piece = self.get_piece( i )
+            if piece is None:
+                break
+
+            try:
+                child_out.write( piece )
+            except IOError:
+                # FFMPEG closed its stdin early; stop feeding.
+                print_exc(file=sys.stderr)
+                break
+
+        child_out.close()
+        child_in.close()
+
+        logfile = open(logfilename, 'r')
+
+        # find the bitrate in the output
+        bitrate = None
+
+        r = re.compile( "bitrate= *([0-9.]+)kbits/s" )
+        r2 = re.compile( "Video:.* ([0-9]+x[0-9]+)," )    # video dimensions WIDTHxHEIGHT
+
+        founddim = False
+        for x in logfile.readlines():
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: FFMPEG said:",x
+            occ = r.findall( x )
+            if occ:
+                # use the latest mentioning of bitrate
+                bitrate = float( occ[-1] ) * 1024 / 8
+                if DEBUG:
+                    if bitrate is not None:
+                        print >>sys.stderr,"vod: trans: Bitrate according to FFMPEG: %.2f KByte/s" % (bitrate/1024)
+                    else:
+                        print >>sys.stderr,"vod: trans: Bitrate could not be determined by FFMPEG"
+            occ = r2.findall( x )
+            if occ and not founddim:
+                # use first occurence
+                dim = occ[0]
+                idx = dim.find('x')
+                width = int(dim[:idx])
+                height = int(dim[idx+1:])
+                founddim = True
+                
+                if DEBUG:
+                    print >>sys.stderr,"vod: width",width,"heigth",height
+        logfile.close()
+        try:
+            os.remove(logfilename)
+        except:
+            # best-effort cleanup of the temp log file
+            pass
+
+        return [bitrate,width,height]
+
<doc_update>
+    def update_prebuffering(self,received_piece=None):
+        """ Update prebuffering process. 'received_piece' is a hint that we just received this piece;
+            keep at 'None' for an update in general. """
+
+        if DEBUG: print >>sys.stderr, "vod: Updating prebuffer. Received piece: ", received_piece
+        vs = self.videostatus
+
+        if not vs.prebuffering:
+            return
+
+        if received_piece:
+            self.nreceived += 1
+        
+        # for the prebuffer we keep track only of the base layer
+        high_range = vs.generate_base_high_range()
+        high_range_length = vs.get_base_high_range_length()
+
+        # Arno, 2010-01-13: This code is only used when *pre*buffering, not
+        # for in-playback buffering. See refill_buffer() for that.
+        # Restored original code here that looks at max_prebuf_packets
+        # and not highrange. The highrange solution didn't allow the prebuf
+        # time to be varied independently of highrange width. 
+        #
+        wantprebuflen = min(self.max_prebuf_packets,high_range_length)
+        high_range_list = list(high_range)
+        wantprebuflist = high_range_list[:wantprebuflen]
+        
+        # Pieces of the prebuffer window that are still missing.
+        missing_pieces = filter(lambda i: not self.have_piece(i), wantprebuflist)
+        gotall = not missing_pieces
+        if high_range_length:
+            self.prebufprogress = min(1, float(wantprebuflen - len(missing_pieces)) / max(1, wantprebuflen))
+        else:
+            self.prebufprogress = 1.0
+        
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Already got",(self.prebufprogress*100.0),"% of prebuffer"
+        
+        if not gotall and DEBUG:
+            print >>sys.stderr,"vod: trans: Still need pieces",missing_pieces,"for prebuffering/FFMPEG analysis"
+
+        if vs.dropping:
+            if not self.doing_ffmpeg_analysis and not gotall and not (0 in missing_pieces) and self.nreceived > self.max_prebuf_packets:
+                perc = float(self.max_prebuf_packets)/10.0
+                if float(len(missing_pieces)) < perc or self.nreceived > (2*len(missing_pieces)):
+                    # If less then 10% of packets missing, or we got 2 times the packets we need already,
+                    # force start of playback
+                    gotall = True
+                    if DEBUG:
+                        print >>sys.stderr,"vod: trans: Forcing stop of prebuffering, less than",perc,"missing, or got 2N packets already"
+
+        if gotall and self.doing_ffmpeg_analysis:
+
+            [bitrate,width,height] = self.parse_video()
+            self.doing_ffmpeg_analysis = False
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: after parse",bitrate,self.doing_bitrate_est
+            if bitrate is None or round(bitrate)== 0:
+                if self.doing_bitrate_est:
+                    # Errr... there was no playtime info in the torrent
+                    # and FFMPEG can't tell us...
+                    #bitrate = (1*1024*1024/8) # 1mbps
+                    # Ric: in svc every piece should be 2,56 sec.
+                    bitrate = vs.piecelen / 2.56
+                    if DEBUG:
+                        print >>sys.stderr,"vod: trans: No bitrate info avail, wild guess: %.2f KByte/s" % (bitrate/1024)
+
+                    vs.set_bitrate(bitrate)
+                    self._playback_stats.add_event(self._playback_key, "bitrate-guess:%d" % bitrate)
+            else:
+                if self.doing_bitrate_est:
+                    # There was no playtime info in torrent, use what FFMPEG tells us
+                    self.ffmpeg_est_bitrate = bitrate
+                    bitrate *= 1.1  # Make FFMPEG estimation 10% higher
+                    if DEBUG:
+                        print >>sys.stderr,"vod: trans: Estimated bitrate: %.2f KByte/s" % (bitrate/1024)
+
+                    vs.set_bitrate(bitrate)
+                    self._playback_stats.add_event(self._playback_key, "bitrate-ffmpeg:%d" % bitrate)
+
+            if width is not None and height is not None:
+                diff = False
+                if self.videodim is None:
+                    self.videodim = (width,height)
+                    self.height = height
+                elif self.videodim[0] != width or self.videodim[1] != height:
+                    diff =  True
+                if not self.player_opened_with_width_height or diff:
+                    #self.user_setsize(self.videodim)
+                    pass
+
+        # # 10/03/09 boudewijn: For VOD we will wait for the entire
+        # # buffer to fill (gotall) before we start playback. For live
+        # # this is unlikely to happen and we will therefore only wait
+        # # until we estimate that we have enough_buffer.
+        # if (gotall or vs.live_streaming) and self.enough_buffer():
+        if gotall and self.enough_buffer():
+            # enough buffer and could estimated bitrate - start streaming
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: Prebuffering done",currentThread().getName()
+            self.data_ready.acquire()
+            vs.prebuffering = False
+            self.stat_prebuffertime = time.time() - self.prebufstart
+            self.notify_playable()
+            self.data_ready.notify()
+            self.data_ready.release()
+
+        elif DEBUG:
+            if self.doing_ffmpeg_analysis:
+                print >>sys.stderr,"vod: trans: Prebuffering: waiting to obtain the first %d packets" % (self.max_prebuf_packets)
+            else:
+                print >>sys.stderr,"vod: trans: Prebuffering: %.2f seconds left" % (self.expected_buffering_time())
+        
+        
+    def got_have(self,piece):
+        # A peer announced it has 'piece'; only statistics are updated here.
+        vs = self.videostatus
+
+        # update stats
+        self.stat_pieces.set( piece, "known", time.time() )
+        """
+        if vs.playing and vs.wraparound:
+            # check whether we've slipped back too far
+            d = vs.wraparound_delta
+            n = max(1,self.piecepicker.num_nonempty_neighbours()/2)
+            if self.piecepicker.numhaves[piece] > n and d/2 < (piece - vs.playback_pos) % vs.movie_numpieces < d:
+                # have is confirmed by more than half of the neighours and is in second half of future window
+                print >>sys.stderr,"vod: trans: Forcing restart. Am at playback position %d but saw %d at %d>%d peers." % (vs.playback_pos,piece,self.piecepicker.numhaves[piece],n)
+
+                self.start(force=True)
+        """
+
+    def got_piece(self, piece_id, begin, length):
+        """
+        Called when a chunk has been downloaded. This information can
+        be used to estimate download speed.
+        """
+        # Only chunks inside the high-priority window feed the high-range
+        # rate estimator used for buffering-time prediction.
+        if self.videostatus.in_high_range(piece_id):
+            self.high_range_rate.update_rate(length)
+            if DEBUG: print >>sys.stderr, "vod: high priority rate:", self.high_range_rate.get_rate()
+    
+    def complete(self,piece,downloaded=True):
+        """ Called when a movie piece has been downloaded or was available from the start (disk). """
+
+        vs = self.videostatus
+        # Record piece-arrival events, distinguishing high-priority pieces.
+        if vs.in_high_range(piece):
+            self._playback_stats.add_event(self._playback_key, "hipiece:%d" % piece)
+        else:
+            self._playback_stats.add_event(self._playback_key, "piece:%d" % piece)
+
+        # Flush stats exactly once, when the whole download completes.
+        if not self._complete and self.piecepicker.am_I_complete():
+            self._complete = True
+            self._playback_stats.add_event(self._playback_key, "complete")
+            self._playback_stats.flush()
+
+        self.stat_pieces.set( piece, "complete", time.time() )
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Completed",piece
+
+        if downloaded: 
+            self.overall_rate.update_rate( vs.piecelen )
+        if vs.in_download_range( piece ):
+            self.pieces_in_buffer += 1
+        else:
+            if DEBUG:
+                print >>sys.stderr,"vod: piece %d too late [pos=%d]" % (piece,vs.playback_pos)
+            self.stat_latepieces += 1
+
+        if vs.playing and vs.playback_pos == piece:
+            # we were delaying for this piece
+            self.refill_buffer()
+
+        self.update_prebuffering( piece )
+
+    def set_pos(self,pos):
+        """ Update the playback position. Called when playback is started (depending
+        on requested offset). """
+
+        vs = self.videostatus
+
+        oldpos = vs.playback_pos
+        vs.playback_pos = pos
+
+        # Adjust pieces_in_buffer: pieces left behind by a forward jump stop
+        # counting as buffered; pieces regained by a rewind count again.
+        # NOTE(review): both xrange bounds include the endpoint piece
+        # (oldpos..pos inclusive), which looks like an off-by-one at the
+        # boundaries — confirm against callers before changing.
+        # fast forward
+        for i in xrange(oldpos,pos+1):
+            if self.has[i]:
+                self.pieces_in_buffer -= 1
+
+        # fast rewind
+        for i in xrange(pos,oldpos+1):
+            if self.has[i]:
+                self.pieces_in_buffer += 1
+
+    def inc_pos(self):
+        # Advance playback by one piece; the piece leaving the playback
+        # window no longer counts as buffered.
+        vs = self.videostatus
+
+        if self.has[vs.playback_pos]:
+            self.pieces_in_buffer -= 1
+
+        vs.inc_playback_pos()
+        
+    
+    def expected_download_time(self):
+        """ Expected download time left. """
+        vs = self.videostatus
+        if vs.wraparound:
+            # endless (live wraparound) stream: effectively infinite
+            return float(2 ** 31)
+
+        # Ric: TODO for the moment keep track only of the base layer. Afterwards we will send
+        # different signals depending on the buffer layer
+        pieces_left = vs.last_piece - vs.playback_pos - self.pieces_in_buffer
+        if pieces_left <= 0:
+            return 0.0
+
+        # list all pieces from the high priority set that have not
+        # been completed
+        # NOTE(review): filter(self.storagewrapper.do_I_have, ...) keeps the
+        # pieces we DO have, contradicting the variable name and the comments
+        # below — the predicate likely needs negating. Verify upstream before
+        # changing, as the blend formula below depends on it.
+        uncompleted_pieces = filter(self.storagewrapper.do_I_have, vs.generate_high_range())
+
+        # when all pieces in the high-range have been downloaded,
+        # we have an expected download time of zero
+        if not uncompleted_pieces:
+            return 0.0
+
+        # the download time estimator is very inacurate when we only
+        # have a few chunks left. therefore, we will put more emphesis
+        # on the overall_rate as the number of uncompleted_pieces does
+        # down.
+        total_length = vs.get_high_range_length()
+        uncompleted_length = len(uncompleted_pieces)
+        expected_download_speed = self.high_range_rate.get_rate() * (1 - float(uncompleted_length) / total_length) + \
+                                  self.overall_rate.get_rate() * uncompleted_length / total_length
+        if expected_download_speed < 0.1:
+            # essentially no bandwidth: report "infinite" time left
+            return float(2 ** 31)
+
+        return pieces_left * vs.piecelen / expected_download_speed
+
+    def expected_playback_time(self):
+        """ Expected playback time left. """
+
+        vs = self.videostatus
+
+        pieces_to_play = vs.last_piece - vs.playback_pos + 1
+
+        if pieces_to_play <= 0:
+            return 0.0
+
+        if not vs.bitrate:
+            # bitrate unknown: cannot estimate, report "infinite"
+            return float(2 ** 31)
+
+        return pieces_to_play * vs.piecelen / vs.bitrate
+
+    def expected_buffering_time(self):
+        """ Expected time required for buffering. """
+        download_time = self.expected_download_time()
+        playback_time = self.expected_playback_time()
+        #print >>sys.stderr,"EXPECT",self.expected_download_time(),self.expected_playback_time()
+        # Infinite minus infinite is still infinite
+        if download_time > float(2 ** 30) and playback_time > float(2 ** 30):
+            return float(2 ** 31)
+        # NOTE(review): abs() also returns a large value when playback time
+        # greatly exceeds download time (i.e. no buffering needed) — confirm
+        # this is intentional.
+        return abs(download_time - playback_time)
+
+    def enough_buffer(self):
+        """ Returns True if we can safely start playback without expecting to run out of
+            buffer. """
+        # True iff expected download time does not exceed remaining playback time.
+        return max(0.0, self.expected_download_time() - self.expected_playback_time()) == 0.0
+
+    def tick_second(self):
+        # Periodic (1 s) task: refresh bitrate estimates and optionally log.
+        self.rawserver.add_task( self.tick_second, 1.0 )
+
+        vs = self.videostatus
+
+        # Adjust estimate every second, but don't display every second
+        # NOTE(review): 'display' is hard-wired to False, so the
+        # playback-estimate prints and flush below never run.
+        display = False # (int(time.time()) % 5) == 0
+        if DEBUG: # display
+            print >>sys.stderr,"vod: Estimated download time: %5.1fs [priority: %7.2f Kbyte/s] [overall: %7.2f Kbyte/s]" % (self.expected_download_time(), self.high_range_rate.get_rate()/1024, self.overall_rate.get_rate()/1024)
+
+        # self.playbackrate is created in start(); guarded by vs.playing.
+        if vs.playing and round(self.playbackrate.rate) > self.MINPLAYBACKRATE and not vs.prebuffering:
+            if self.doing_bitrate_est:
+                if display:
+                    print >>sys.stderr,"vod: Estimated playback time: %5.0fs [%7.2f Kbyte/s], doing estimate=%d" % (self.expected_playback_time(),self.playbackrate.rate/1024, self.ffmpeg_est_bitrate is None)
+                if self.ffmpeg_est_bitrate is None:
+                    vs.set_bitrate( self.playbackrate.rate )
+
+        if display:
+            sys.stderr.flush()
+
+    #
+    # MovieTransport interface
+    #
+    # WARNING: these methods will be called by other threads than NetworkThread!
+    #
+    def size( self ):
+        # Ric: returning the size of the base layer
+        # (enhancement layers are not counted in the reported stream size)
+        return self.videostatus.selected_movie[0]["size"]
+
+    def read(self,numbytes=None):
+        """ Read a set of pieces. The return data will be 
+            a byte for the pieces presence and a set of pieces
+            depending on the available quality. Return None in
+            case of an error or end-of-stream. """
+        vs = self.videostatus
+        # keep track in the base layer
+        if not self.curpiece:
+            # curpiece_pos could be set to something other than 0! 
+            # for instance, a seek request sets curpiece_pos but does not
+            # set curpiece.
+
+            base_layer_piece = self.pop()
+            if base_layer_piece is None:
+                return None
+
+            piecenr,self.curpiece = base_layer_piece
+            relatives = vs.get_respective_piece(piecenr)
+                        
+            
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: popped piece %d to transport to player," % piecenr, "relative pieces are", relatives
+
+
+        curpos = self.curpiece_pos
+        left = len(self.curpiece) - curpos
+
+
+        if numbytes is None:
+            # default on one piece per read
+            numbytes = left
+
+        # TODO ask, we could leave it like this
+        if left > numbytes:
+            # piece contains enough -- return what was requested
+            data = self.curpiece[curpos:curpos+numbytes]
+            self.curpiece_pos += numbytes
+        else:
+            # TODO add get_bitrate method in SVC status to see how many 
+            # pieces we need from the different layers!
+
+            # Prepend the piece length as a text header so the receiver can
+            # split base-layer data from appended enhancement-layer pieces.
+            header = str(vs.piecelen)
+            data = header            
+            # return remainder of the piece, could be less than numbytes
+            data += self.curpiece[curpos:]
+
+            # NOTE(review): 'relatives' is only bound when the pop() branch
+            # above ran in this call; if curpiece was already non-empty this
+            # loop would raise NameError. Confirm whether reads are always
+            # whole-piece so the branch is unreachable.
+            for i in relatives:
+                if self.has[i]:
+                    if DEBUG: print>>sys.stderr, "vod: trans: filling stream with piece %d from an enhancement layer" % i
+                    data += self.get_piece(i)
+            #print>>sys.stderr, "vod: trans: filling stream with piece %d from an enhancement layer" % i, len(data)
+            self.curpiece = ""
+            self.curpiece_pos = 0
+
+        return data
+
+    def start( self, bytepos = 0, force = False ):
+        """ Initialise to start playing at position `bytepos'. """
+        self._playback_stats.add_event(self._playback_key, "play")
+
+        # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we 
+        # should. Also see if we need the read numbytes here, or that it
+        # is better handled at a higher layer. For live it is currently
+        # done at a higher level, see VariableReadAuthStreamWrapper because
+        # we have to strip the signature. Hence the self.curpiece buffer here
+        # is superfluous. Get rid off it or check if 
+        #
+        #    curpiece[0:piecelen]
+        #
+        # returns curpiece if piecelen has length piecelen == optimize for
+        # piecesized case.
+        #
+        # For VOD seeking we may use the numbytes facility to seek to byte offsets
+        # not just piece offsets.
+        #
+        vs = self.videostatus
+
+        if vs.playing and not force:
+            return
+
+        # lock before changing startpos or any other playing variable
+        self.data_ready.acquire()
+        try:
+            # Determine piece number and offset
+            if bytepos < vs.piecelen:
+                piece = vs.first_piece
+                offset = bytepos
+            else:
+                newbytepos = bytepos - vs.first_piecelen
+
+                # Python 2 integer division intended: piece index truncates.
+                piece  = vs.first_piece + newbytepos / vs.piecelen + 1
+                offset = newbytepos % vs.piecelen
+
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: === START, START, START, START, START, START, START, START, START, START, START, START, START,START"
+                print >>sys.stderr,"vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos,piece,force)
+
+            # Initialise all playing variables
+            self.curpiece = "" # piece currently being popped
+            self.curpiece_pos = offset
+            # TODO
+            self.set_pos( piece )
+            self.outbuf = []
+            #self.last_pop = time.time()
+            self.reset_bitrate_prediction()
+            vs.playing = True
+            self.playbackrate = Measure( 60 )
+        finally:
+            self.data_ready.release()
+
+        # ARNOTODO: start is called by non-NetworkThreads, these following methods
+        # are usually called by NetworkThread.
+        #
+        # We now know that this won't be called until notify_playable() so
+        # perhaps this can be removed?
+        #
+        # CAREFUL: if we use start() for seeking... that's OK. User won't be
+        # able to seek before he got his hands on the stream, so after 
+        # notify_playable()
+        
+        # See what we can do right now
+        self.update_prebuffering()
+        self.refill_buffer()
+
+    def stop( self ):
+        """ Playback is stopped. """
+        self._playback_stats.add_event(self._playback_key, "stop")
+
+        vs = self.videostatus
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: === STOP  = player closed conn === "
+        if not vs.playing:
+            return
+        vs.playing = False
+
+        # clear buffer and notify possible readers
+        self.data_ready.acquire()
+        self.outbuf = []
+        #self.last_pop = None
+        vs.prebuffering = False
+        self.data_ready.notify()
+        self.data_ready.release()
+
+    def pause( self, autoresume = False ):
+        """ Pause playback. If `autoresume' is set, playback is expected to be
+        resumed automatically once enough data has arrived. """
+        self._playback_stats.add_event(self._playback_key, "pause")
+
+        vs = self.videostatus
+        if not vs.playing or not vs.pausable:
+            return
+
+        # Already paused: only refresh the autoresume flag.
+        if vs.paused:
+            vs.autoresume = autoresume
+            return
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: paused (autoresume: %s)" % (autoresume,)
+
+        vs.paused = True
+        vs.autoresume = autoresume
+        # Timestamp used by resume() for stall-time accounting.
+        self.paused_at = time.time()
+        #self.reset_bitrate_prediction()
+        self.videoinfo["usercallback"](VODEVENT_PAUSE,{ "autoresume": autoresume })
+
+    def resume( self ):
+        """ Resume paused playback. """
+        self._playback_stats.add_event(self._playback_key, "resume")
+
+        vs = self.videostatus
+
+        if not vs.playing or not vs.paused or not vs.pausable:
+            return
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: resumed"
+
+        vs.paused = False
+        vs.autoresume = False
+        # Account the pause interval as stall time and feed it to the
+        # bitrate predictor so estimates stay consistent.
+        self.stat_stalltime += time.time() - self.paused_at
+        self.addtime_bitrate_prediction( time.time() - self.paused_at )
+        self.videoinfo["usercallback"](VODEVENT_RESUME,{})
+
+        self.update_prebuffering()
+        self.refill_buffer()
+
+    def autoresume( self, testfunc = lambda: True ):
+        """ Resumes if testfunc returns True. If not, will test every second. """
+
+        vs = self.videostatus
+
+        if not vs.playing or not vs.paused or not vs.autoresume:
+            return
+
+        # Condition not met yet: re-check in one second.
+        if not testfunc():
+            self.rawserver.add_task( lambda: self.autoresume( testfunc ), 1.0 )
+            return
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Resuming, since we can maintain this playback position"
+        self.resume()
+
+    def done( self ):
+        """ Return True when playback has finished (or never started).
+        A wraparound (live) stream never finishes. """
+        vs = self.videostatus
+
+        if not vs.playing:
+            return True
+
+        if vs.wraparound:
+            return False
+
+        # Done once we are past the last piece and the current piece buffer
+        # has been fully consumed by read().
+        return vs.playback_pos == vs.last_piece+1 and self.curpiece_pos >= len(self.curpiece)
+
+    def seek(self,pos,whence=os.SEEK_SET):
+        """ Seek to the given position, a number in bytes relative to both
+        the "whence" reference point and the file being played.
+        
+        We currently actually seek at byte level, via the start() method.
+        We support all forms of seeking, including seeking past the current
+        playback pos. Note this may imply needing to prebuffer again or 
+        being paused.
+        
+        vs.playback_pos in NetworkThread domain. Does data_ready lock cover 
+        that? Nope. However, this doesn't appear to be respected in any
+        of the MovieTransport methods, check all.
+        
+        Check
+        * When seeking reset other buffering, e.g. read()'s self.curpiece
+           and higher layers.
+        
+        """
+        vs = self.videostatus
+        length = self.size()
+
+        # lock before changing startpos or any other playing variable
+        self.data_ready.acquire()
+        try:
+            if whence == os.SEEK_SET:
+                abspos = pos
+            elif whence == os.SEEK_END:
+                if pos > 0:
+                    raise ValueError("seeking beyond end of stream")
+                else:
+                    abspos = size+pos
+            else: # SEEK_CUR
+                raise ValueError("seeking does not currently support SEEK_CUR")
+            
+            self.stop()
+            self.start(pos)
+        finally:
+            self.data_ready.release()
+
+
+
+    def get_mimetype(self):
+        # MIME type of the stream as last recorded via set_mimetype().
+        return self.mimetype
+
+    def set_mimetype(self,mimetype):
+        # Record the MIME type reported to the player (see notify_playable()).
+        self.mimetype = mimetype
+    #
+    # End of MovieTransport interface
+    #
+
+    def have_piece(self,piece):
+        # True when the piece has been downloaded (piecepicker's 'has' bitfield).
+        return self.piecepicker.has[piece]
+
+    def get_piece(self,piece):
+        """ Returns the data of a certain piece, or None. """
+
+        vs = self.videostatus
+
+        if not self.have_piece( piece ):
+            return None
+
+        begin = 0
+        length = vs.piecelen
+
+        # Fetch the whole piece from storage; may still return None on failure.
+        data = self.storagewrapper.do_get_piece(piece, 0, length)
+        if data is None:
+            return None
+        # do_get_piece returns an array-like object; convert to a string.
+        return data.tostring()
+
+    def reset_bitrate_prediction(self):
+        # Discard all timing samples used for bitrate estimation.
+        self.start_playback = None
+        self.last_playback = None
+        self.history_playback = collections.deque()
+
+    def addtime_bitrate_prediction(self,seconds):
+        # Shift the prediction timeline forward, e.g. to discount a pause.
+        if self.start_playback is not None:
+            self.start_playback["local_ts"] += seconds
+
+    def valid_piece_data(self,i,piece):
+        """ Sanity-check piece data: reject empty pieces and, when an
+        authenticator is present, pieces older than our starting point. """
+        if not piece:
+            return False
+
+        if not self.start_playback or self.authenticator is None:
+            # no check possible
+            return True
+
+        s = self.start_playback
+
+        seqnum = self.authenticator.get_seqnum( piece )
+        source_ts = self.authenticator.get_rtstamp( piece )
+
+        # Reject pieces that predate the playback starting point.
+        if seqnum < s["absnr"] or source_ts < s["source_ts"]:
+            # old packet???
+            print >>sys.stderr,"vod: trans: **** INVALID PIECE #%s **** seqnum=%d but we started at seqnum=%d" % (i,seqnum,s["absnr"])
+            return False
+
+        return True
+
+
+    def update_bitrate_prediction(self,i,piece):
+        """ Update the rate prediction given that piece i has just been pushed to the buffer. """
+
+        # With an authenticator, pieces carry an absolute sequence number and
+        # a source timestamp; without one, fall back to the piece index.
+        if self.authenticator is not None:
+            seqnum = self.authenticator.get_seqnum( piece )
+            source_ts = self.authenticator.get_rtstamp( piece )
+        else:
+            seqnum = i
+            source_ts = 0
+
+        d = {
+            "nr": i,
+            "absnr": seqnum,
+            "local_ts": time.time(),
+            "source_ts": source_ts,
+        }
+
+        # record 
+        if self.start_playback is None:
+            self.start_playback = d
+
+        if self.last_playback and self.last_playback["absnr"] > d["absnr"]:
+            # called out of order
+            return
+
+        self.last_playback = d
+
+        # keep a recent history
+        MAX_HIST_LEN = 10*60 # seconds
+
+        self.history_playback.append( d )
+
+        # of at most 10 entries (or minutes if we keep receiving pieces)
+        while source_ts - self.history_playback[0]["source_ts"] > MAX_HIST_LEN:
+            self.history_playback.popleft()
+
+        if DEBUG:
+            vs = self.videostatus
+            first, last = self.history_playback[0], self.history_playback[-1]
+            
+            if first["source_ts"] and first != last:
+                # Average observed bitrate over the recent history window;
+                # guard against a zero time delta.
+                divd = (last["source_ts"] - first["source_ts"])
+                if divd == 0:
+                    divd = 0.000001
+                bitrate = "%.2f kbps" % (8.0 / 1024 * (vs.piecelen - vs.sigsize) * (last["absnr"] - first["absnr"]) / divd,)
+            else:
+                bitrate = "%.2f kbps (external info)" % (8.0 / 1024 * vs.bitrate)
+
+            print >>sys.stderr,"vod: trans: %i: pushed at t=%.2f, age is t=%.2f, bitrate = %s" % (i,d["local_ts"]-self.start_playback["local_ts"],d["source_ts"]-self.start_playback["source_ts"],bitrate)
+
+    def piece_due(self,i):
+        """ Return the time when we expect to have to send a certain piece to the player. For
+        wraparound, future pieces are assumed. """
+
+        if self.start_playback is None:
+            return float(2 ** 31) # end of time
+
+        s = self.start_playback
+        l = self.last_playback
+        vs = self.videostatus
+
+        if not vs.wraparound and i < l["nr"]:
+            # should already have arrived!
+            return time.time()
+
+        # assume at most one wrap-around between l and i
+        piecedist = (i - l["nr"]) % vs.movie_numpieces
+
+        if s["source_ts"]:
+            # ----- we have timing information from the source
+            first, last = self.history_playback[0], self.history_playback[-1]
+
+            if first != last:
+                # we have at least two recent pieces, so can calculate average bitrate. use the recent history
+                # do *not* adjust for sigsize since we don't want the actual video speed but the piece rate
+                bitrate = 1.0 * vs.piecelen * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"])
+            else:
+                # fall-back to bitrate predicted from torrent / ffmpeg
+                bitrate = vs.bitrate
+           
+            # extrapolate with the average bitrate so far
+            return s["local_ts"] + l["source_ts"] - s["source_ts"] + piecedist * vs.piecelen / bitrate - self.PIECE_DUE_SKEW
+        else:
+            # ----- no timing information from pieces, so do old-fashioned methods
+            if vs.live_streaming:
+                # Arno, 2008-11-20: old-fashioned method is well bad,
+                # ignore.
+                return time.time() + 60.0
+            else:
+                # NOTE(review): the parameter 'i' is rebound here to the piece
+                # distance from the playback start; intentional but confusing.
+                i =  piecedist + (l["absnr"] - s["absnr"])
+                
+                # The first piece may be shorter than the rest; account for it.
+                if s["nr"] == vs.first_piece:
+                    bytepos = vs.first_piecelen + (i-1) * vs.piecelen
+                else:
+                    bytepos = i * vs.piecelen
+                
+                return s["local_ts"] + bytepos / vs.bitrate - self.PIECE_DUE_SKEW
+            
+            
+
+    def max_buffer_size( self ):
+        # Buffer at least 256 KB and 4 pieces, or BUFFER_TIME seconds of video.
+        vs = self.videostatus
+        return max(256*1024, vs.piecelen * 4, self.BUFFER_TIME * vs.bitrate)
+
+
+    def refill_buffer( self ):
+        """ Push pieces (from the base layer) into the player FIFO when needed and able. 
+            This counts as playing the pieces as far as playback_pos is concerned."""
+
+        self.data_ready.acquire()
+
+        vs = self.videostatus
+
+        if vs.prebuffering or not vs.playing:
+            self.data_ready.release()
+            return
+
+        if vs.paused:
+            self.data_ready.release()
+            return
+
+        mx = self.max_buffer_size()
+        self.outbuflen = sum( [len(d) for (p,d) in self.outbuf] )
+        now = time.time()
+
+        # Underrun: the output buffer is empty and playback started >1s ago.
+        def buffer_underrun():
+            return self.outbuflen == 0 and self.start_playback and now - self.start_playback["local_ts"] > 1.0
+
+        if buffer_underrun():
+            # TODO
+            # Returns True when the download rate looks good enough to catch
+            # up; every 10th call does the full (progress-updating) check.
+            def sustainable():
+
+                self.sustainable_counter += 1
+                if self.sustainable_counter > 10:
+                    self.sustainable_counter = 0
+                    
+                    base_high_range_length = vs.get_base_high_range_length()
+                    have_length = len(filter(lambda n:self.has[n], vs.generate_base_high_range()))
+
+                    # progress                                                                                  
+                    self.prebufprogress = min(1.0, float(have_length) / max(1, base_high_range_length))
+
+                    return have_length >= base_high_range_length
+
+                else:
+                    # Cheap check: count consecutive pieces we already have
+                    # from the start of the base high-priority range.
+                    num_immediate_packets = 0
+                    base_high_range_length = vs.get_base_high_range_length()
+
+                    for piece in vs.generate_base_high_range(): 
+                        if self.has[piece]:
+                            num_immediate_packets += 1
+                            if num_immediate_packets >= base_high_range_length:
+                                break
+                        else:
+                            break
+                    else:
+                        # progress                                                                              
+                        self.prebufprogress = 1.0
+                        # completed loop without breaking, so we have everything we need                        
+                        return True
+
+                    return num_immediate_packets >= base_high_range_length
+
+            sus = sustainable()
+            if vs.pausable and not sus:
+                if DEBUG:
+                    print >>sys.stderr,"vod: trans:                        BUFFER UNDERRUN -- PAUSING"
+                self.pause( autoresume = True )
+                self.autoresume( sustainable )
+
+                # boudewijn: increase the minimum buffer size
+                vs.increase_high_range()
+
+                self.data_ready.release()
+                return
+            elif sus:
+                if DEBUG:
+                    print >>sys.stderr,"vod: trans:                        BUFFER UNDERRUN -- IGNORING, rate is sustainable"
+            else:
+                if DEBUG:
+                    print >>sys.stderr,"vod: trans:                         BUFFER UNDERRUN -- STALLING, cannot pause player to fall back some, so just wait for more pieces"
+                self.data_ready.release()
+                return
+                    
+        def push( i, data ):
+            # push packet into queue
+            if DEBUG:
+                # NOTE(review): "l=%d" is formatted with the piece number,
+                # not the data length -- looks like leftover debug; confirm.
+                print >>sys.stderr,"vod: trans: %d: pushed l=%d" % (vs.playback_pos,piece)
+
+            # update predictions based on this piece
+            self.update_bitrate_prediction( i, data )
+
+            self.stat_playedpieces += 1
+            self.stat_pieces.set( i, "tobuffer", time.time() )
+                    
+            self.outbuf.append( (vs.playback_pos,data) )
+            self.outbuflen += len(data)
+
+            self.data_ready.notify()
+            self.inc_pos()
+
+        def drop( i ):
+            # drop packet
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: %d: dropped pos=%d; deadline expired %.2f sec ago !!!!!!!!!!!!!!!!!!!!!!" % (piece,vs.playback_pos,time.time()-self.piece_due(i))
+
+            self.stat_droppedpieces += 1
+            self.stat_pieces.complete( i )
+            self.inc_pos()
+
+        # We push in queue only pieces from the base layer 
+        download_range = vs.download_range()
+        base_range = download_range[0]
+        for piece in vs.generate_range( [base_range] ): 
+            ihavepiece = self.has[piece]
+            forcedrop = False
+
+            # check whether we have room to store it
+            if self.outbuflen > mx:
+                # buffer full
+                break
+
+            # final check for piece validity
+            if ihavepiece:
+                data = self.get_piece( piece )
+                if not self.valid_piece_data( piece, data ):
+                    # I should have the piece, but I don't: WAAAAHH!
+                    forcedrop = True
+                    ihavepiece = False
+
+            if ihavepiece:
+                # have piece - push it into buffer
+                if DEBUG:
+                    print >>sys.stderr,"vod: trans:                        BUFFER STATUS (max %.0f): %.0f kbyte" % (mx/1024.0,self.outbuflen/1024.0)
+
+                # piece found -- add it to the queue
+                push( piece, data )
+            else:
+                # don't have piece, or forced to drop
+                if not vs.dropping and forcedrop:
+                    print >>sys.stderr,"vod: trans: DROPPING INVALID PIECE #%s, even though we shouldn't drop anything." % piece
+                if vs.dropping or forcedrop:
+                    if time.time() >= self.piece_due( piece ) or buffer_underrun() or forcedrop:
+                        # piece is too late or we have an empty buffer (and future data to play, otherwise we would have paused) -- drop packet
+                        drop( piece )
+                    else:
+                        # we have time to wait for the piece and still have data in our buffer -- wait for packet
+                        if DEBUG:
+                            print >>sys.stderr,"vod: trans: %d: due in %.2fs  pos=%d" % (piece,self.piece_due(piece)-time.time(),vs.playback_pos)
+                    break
+                else: # not dropping
+                    if self.outbuflen == 0:
+                        print >>sys.stderr,"vod: trans: SHOULD NOT HAPPEN: missing piece but not dropping. should have paused. pausable=",vs.pausable,"player reading too fast looking for I-Frame?"
+                    else:
+                        if DEBUG:
+                            print >>sys.stderr,"vod: trans: prebuffering done, but could not fill buffer."
+                    break
+
+        self.data_ready.release()
+
+    def refill_rawserv_tasker( self ):
+        # Periodic rawserver task: refill the buffer, then reschedule itself.
+        self.refill_buffer()
+
+        self.rawserver.add_task( self.refill_rawserv_tasker, self.REFILL_INTERVAL )
+
+    def pop( self ):
+        """ Blocking pop of the next (piecenr,data) pair from the output
+        buffer; returns None once playback is done and the buffer empty. """
+        self.data_ready.acquire()
+        vs = self.videostatus
+
+        while vs.prebuffering and not self.done():
+            # wait until done prebuffering
+            self.data_ready.wait()
+
+        while not self.outbuf and not self.done():
+            # wait until a piece is available
+            #if DEBUG:
+            #    print >>sys.stderr,"vod: trans: Player waiting for data"
+            self.data_ready.wait()
+
+        if not self.outbuf:
+            piece = None
+        else:
+            piece = self.outbuf.pop( 0 ) # nr,data pair
+            self.playbackrate.update_rate( len(piece[1]) )
+
+        #self.last_pop = time.time()
+
+        self.data_ready.release()
+
+        # Record delivery statistics outside the lock.
+        if piece:
+            self.stat_pieces.set( piece[0], "toplayer", time.time() )
+            self.stat_pieces.complete( piece[0] )
+
+        return piece
+
+    def notify_playable(self):
+        """ Tell user he can play the media, 
+        cf. BaseLib.Core.DownloadConfig.set_vod_event_callback()
+        """
+        #if self.bufferinfo:
+        #    self.bufferinfo.set_playable()
+        #self.progressinf.bufferinfo_updated_callback()
+
+        # triblerAPI
+        if self.usernotified:
+            return
+        self.usernotified = True
+        self.prebufprogress = 1.0
+        self.playable = True
+        
+        #print >>sys.stderr,"vod: trans: notify_playable: Calling usercallback to tell it we're ready to play",self.videoinfo['usercallback']
+        
+        # MIME type determined normally in LaunchManyCore.network_vod_event_callback
+        # However, allow for recognition by videoanalyser
+        mimetype = self.get_mimetype()
+        complete = self.piecepicker.am_I_complete()
+
+        if complete:
+            stream = None
+            filename = self.videoinfo["outpath"]
+        else:
+            endstream = MovieTransportStreamWrapper(self)
+            filename = None 
+            
+        print >>sys.stderr,"3.3", self.size(), endstream, self.vodeventfunc, complete, self.size()
+        # Call user callback
+        #print >>sys.stderr,"vod: trans: notify_playable: calling:",self.vodeventfunc
+        self.vodeventfunc( self.videoinfo, VODEVENT_START, {
+            "complete":  complete,
+            "filename":  filename,
+            "mimetype":  mimetype,
+            "stream":    endstream,
+            "length":      self.size(),
+        } )
+
+
+    #
+    # Methods for DownloadState to extract status info of VOD mode.
+    #
+    def get_stats(self):
+        """ Returns accumulated statistics. The piece data is cleared after this call to save memory. """
+        """ Called by network thread """
+        s = { "played": self.stat_playedpieces,
+              "late": self.stat_latepieces,
+              "dropped": self.stat_droppedpieces,
+              "stall": self.stat_stalltime,
+              "pos": self.videostatus.playback_pos,
+              "prebuf": self.stat_prebuffertime,
+              "pp": self.piecepicker.stats,
+              # pop_completed() empties the per-piece log (the "cleared" part).
+              "pieces": self.stat_pieces.pop_completed(), }
+        return s
+
+    def get_prebuffering_progress(self):
+        """ Called by network thread """
+        # Fraction in [0.0, 1.0] of the prebuffer that has been filled.
+        return self.prebufprogress
+    
+    def is_playable(self):
+        """ Called by network thread """
+        # Re-evaluate playability while still prebuffering; once playable
+        # and no longer prebuffering, the cached value sticks.
+        if not self.playable or self.videostatus.prebuffering:
+            self.playable = (self.prebufprogress == 1.0 and self.enough_buffer())
+        return self.playable
+        
+    def get_playable_after(self):
+        """ Called by network thread """
+        # Estimated seconds until enough data is buffered to play.
+        return self.expected_buffering_time()
+    
+    def get_duration(self):
+        # Duration in seconds of the base layer, derived from size / bitrate.
+        return 1.0 * self.videostatus.selected_movie[0]["size"] / self.videostatus.bitrate
+        
+        
diff --git a/instrumentation/next-share/BaseLib/Core/Video/SVCVideoStatus.py b/instrumentation/next-share/BaseLib/Core/Video/SVCVideoStatus.py
new file mode 100644 (file)
index 0000000..4872271
--- /dev/null
@@ -0,0 +1,427 @@
+# Written by Jan David Mol, Arno Bakker, Riccardo Petrocco
+# see LICENSE.txt for license information
+
+import sys
+from math import ceil
+from sets import Set
+
+from BaseLib.Core.simpledefs import *
+
+# live streaming means wrapping around
+LIVE_WRAPAROUND = False
+
+DEBUG = False
+
+class SVCVideoStatus:
+    """ Info about the selected video and status of the playback. """
+
+    # TODO: thread safety? PiecePicker, MovieSelector and MovieOnDemandTransporter all interface this
+
+    def __init__(self,piecelen,fileinfo,videoinfo,authparams):
+        """
+            piecelen = length of BitTorrent pieces
+            fileinfo = list of (name,length) pairs for all files in the torrent,
+                       in their recorded order
+            videoinfo = videoinfo object from download engine
+            authparams = piece-authentication parameters
+        """
+        self.piecelen = piecelen # including signature, if any
+        self.sigsize = 0
+        self.fileinfo = fileinfo
+        self.videoinfo = videoinfo
+        self.authparams = authparams
+        self.selected_movie = []
+
+        # size of high probability set, in seconds (piecepicker varies
+        # between the minmax values depending on network performance,
+        # performance, increases and decreases with step (min,max,step)
+        self.high_prob_curr_time = 10
+        self.high_prob_curr_time_limit = (10, 180,10)
+
+        # minimal size of high probability set, in pieces (piecepicker
+        # varies between the limit values depending on network
+        # performance, increases and decreases with step (min,max,step)
+        self.high_prob_curr_pieces = 5
+        self.high_prob_curr_pieces_limit = (5, 50, 5)
+
+        # Ric: keeps track of the current layer
+        self.quality = 0
+
+        # ----- locate selected movie in fileinfo
+        indexes = self.videoinfo['index']
+        
+        # the available layers in the torrent
+        self.available_qualities = len(indexes)
+        
+        if DEBUG: print >>sys.stderr, "VideoStatus: indexes of ordered layer [base, enhance1, enhance2,....] in the torrent: ", indexes
+        # Ric: first index is the base layer
+        index = indexes[0]
+
+        # Byte offset of the base layer within the concatenated torrent data.
+        base_offset = sum( (filesize for (_,filesize) in fileinfo[:index] if filesize) )
+        base_name = fileinfo[index][0]
+        base_size = fileinfo[index][1]
+        
+        # Ric: ordered list of info about the layers
+        self.selected_movie = []
+        
+
+        #enhancementIdx = indexes[1::]
+        #print >>sys.stderr, "enhancementIdx", enhancementIdx
+
+        # One {name,size,offset} record per layer, in layer order.
+        for idx in indexes:
+            #field = "enhancement" + str(enhancementIdx.index(idx))
+            name = fileinfo[idx][0]
+            size = fileinfo[idx][1]
+            offset = sum( (filesize for (_,filesize) in fileinfo[:idx] if filesize) )
+            self.selected_movie.append( {"name": name, "size": size, "offset": offset} )
+
+        print >> sys.stderr, self.selected_movie
+        
+        self.playback_pos_observers = []
+        # to be removed; needed for video on demand
+        self.live_streaming = videoinfo['live']
+
+        self.first_piecelen = 0
+        self.last_piecelen = 0
+
+        
+        # Ric: derive generic layers parameters
+        # TODO check if we can assume piece bounderies
+        self.layer_info = []
+        for layer in self.selected_movie:
+            movie_begin = layer["offset"]
+            movie_end = layer["offset"] + layer["size"] - 1
+
+            # movie_range = (bpiece,offset),(epiece,offset), inclusive
+            movie_range = ( (movie_begin/piecelen, movie_begin%piecelen),
+                                 (movie_end/piecelen, movie_end%piecelen) )
+            # first_piecelen = piecelen - movie_range[0][1]
+            # last_piecelen  = movie_range[1][1]
+            first_piece = movie_range[0][0]
+            last_piece = movie_range[1][0]
+            movie_numpieces = last_piece - first_piece + 1
+            self.layer_info.append( {"movie_begin": movie_begin, "movie_end": movie_end, "movie_range": movie_range, "first_piece": first_piece, "last_piece": last_piece, "movie_numpieces": movie_numpieces } )
+
+        if videoinfo['bitrate']:
+            self.set_bitrate( videoinfo['bitrate'] )
+        else:
+            # Ric: TODO
+            self.set_bitrate( 512*1024/8 ) # default to 512 Kbit/s
+            self.bitrate_set = False
+
+
+        # TODO keep first piece for observer
+        # Piece bounds below all refer to the base layer (layer 0).
+        self.first_piece = self.layer_info[0]["first_piece"]
+        self.movie_numpieces = self.layer_info[0]["movie_numpieces"]
+        # last piece of the base layer.. to control
+        self.last_piece = self.layer_info[0]["last_piece"]
+        # we are not in live sit. We don't drop
+        self.dropping = False 
+        # for live
+        self.wraparound = False
+        print >>sys.stderr, self.first_piece
+
+        # ----- set defaults for dynamic positions
+        self.playing = False     # video has started playback
+        self.paused = False      # video is paused
+        self.autoresume = False  # video is paused but will resume automatically
+        self.prebuffering = True # video is prebuffering
+        self.playback_pos = self.first_piece
+
+        # Pausing only makes sense if the player handles both events.
+        self.pausable = (VODEVENT_PAUSE in videoinfo["userevents"]) and (VODEVENT_RESUME in videoinfo["userevents"])
+# TODO
+    def add_playback_pos_observer( self, observer ):
+        """ Add a function to be called when the playback position changes. Is called as follows:
+            observer( oldpos, newpos ). In case of initialisation: observer( None, startpos ). """
+        self.playback_pos_observers.append( observer )
+
+# TODO see if needed
+    def real_piecelen( self, x ):
+        # Actual byte length of piece x: the first and last pieces of the
+        # movie may be shorter than the nominal piece length.
+        if x == self.first_piece:
+            return self.first_piecelen
+        elif x == self.last_piece:
+            return self.last_piecelen
+        else:
+            return self.piecelen
+
+    def set_bitrate( self, bitrate ):
+        # Record the movie bitrate (bytes/second).
+        self.bitrate_set = True
+        self.bitrate = bitrate
+        # NOTE(review): despite the name, this value is pieces-per-second
+        # (bitrate/piecelen); time_to_pieces() relies on that unit -- confirm.
+        self.sec_per_piece = 1.0 * bitrate / self.piecelen
+
+    # the following functions work with absolute piece numbers,
+    # so they all function within the range [first_piece,last_piece]
+
+    # the range of pieces to download is
+    # [playback_pos,numpieces) for normal downloads and
+    # [playback_pos,playback_pos+delta) for wraparound
+
+    def generate_range( self, download_range ):
+        """ Generator over all piece numbers in the given list of
+        (first,last) ranges; 'last' is exclusive. """
+
+        for i in range(len(download_range)):
+            (f,t) = download_range[i]
+            for x in xrange (f,t):
+                #print >> sys.stderr, "ttttttttttttttttttttttttttt", x
+                yield x    
+
+    def dist_range(self, f, t):
+        """ Returns the distance between f and t """
+        # When f > t the range wraps around the end of the movie.
+        if f > t:
+            return self.last_piece-f + t-self.first_piece 
+        else:
+            return t - f
+
+    # TODO same method with diff param, see if need it!
+    def in_small_range( self, f, t, x ):
+        # True when x lies in [f, t); t is exclusive.
+        return f <= x < t
+        
+    def in_range(self, download_range, x):
+        # True when x lies in any of the (first,last) tuples of download_range.
+        for i in download_range:
+            f, l = i
+            if self.in_small_range(f, l, x):
+                return True
+        return False
+
+    def inc_playback_pos( self ):
+        """ Advance the playback position one piece, wrapping or clamping at
+        the last piece, and notify all registered observers. """
+        oldpos = self.playback_pos
+        self.playback_pos += 1
+
+        if self.playback_pos > self.last_piece:
+            if self.wraparound:
+                self.playback_pos = self.first_piece
+            else:
+                # Clamp at the last piece for non-wrapping (VOD) playback.
+                self.playback_pos = self.last_piece
+
+        for o in self.playback_pos_observers:
+            o( oldpos, self.playback_pos )
+
+    def in_download_range( self, x ):
+        # True when piece x belongs to any layer up to the current quality
+        # (bounds inclusive).
+
+        for i in range(self.quality + 1):
+            f = self.layer_info[i]["first_piece"]
+            l = self.layer_info[i]["last_piece"]
+
+            if f <= x <= l:
+                return True
+
+        return False
+        
+    # TODO just keep for the moment
+    def in_valid_range(self,piece):
+        # Alias for in_download_range(), kept for interface compatibility.
+        return self.in_download_range( piece )
+        
+    def get_range_diff(self,oldrange,newrange):
+        """ Returns the diff between oldrange and newrange as a Set.
+        """
+        # NOTE(review): range2set is presumably a module-level helper defined
+        # elsewhere in this file -- not visible here, verify.
+        oldset = range2set(oldrange,self.movie_numpieces)
+        newset = range2set(newrange,self.movie_numpieces)
+        return oldset - newset
+    
+    def normalize( self, x ):
+        """ Caps or wraps a piece number. """
+
+        if self.in_download_range(x):
+            return x
+
+        # NOTE(review): self.range is only assigned by download_range();
+        # calling normalize() before that raises AttributeError -- confirm
+        # the intended call order.
+        return max( self.first_piece, min( x, self.get_highest_piece(self.range) ) )
+
+    def time_to_pieces( self, sec ):
+        """ Returns the piece number that contains data for a few seconds down the road. """
+
+        # TODO: take first and last piece into account, as they can have a different size
+        # sec_per_piece holds pieces-per-second (see set_bitrate), so this
+        # yields a piece count.
+        return int(ceil(sec * self.sec_per_piece))
+
+    def download_range( self ):
+        """ Returns the range [(first,last),(first,last)] of pieces we like to download from the layers. """
+        download_range = []
+        pos = self.playback_pos
+        # Ric: the pieces of difference
+        play_offset = pos - self.first_piece
+
+        #for i in range(self.quality + 1):
+        for i in range(self.available_qualities):
+            # Ric: if they have the same bitrate they have the same size TODO
+            if self.selected_movie[0]["size"] / self.selected_movie[i]["size"] == 1:
+                # Apply the base layer's playback offset to this layer.
+                f = self.layer_info[i]["first_piece"]
+                position = f + play_offset
+                l = self.layer_info[i]["last_piece"]
+                download_range.append((position,l)) # should I add + 1  to the last?
+            else:
+                # TODO case of different bitrates
+                pass
+        # Ric: for global use like first and last piece
+        self.range = download_range
+        return download_range
+            
+            
+    def get_wraparound(self):
+        # True for wrapping (live) streams; False for VOD.
+        return self.wraparound
+
+    def increase_high_range(self, factor=1):
+        """
+        Increase the high priority range (effectively enlarging the buffer size)
+        """
+        assert factor > 0
+        # Grow by 'factor' steps, clamped to the configured maxima
+        # (limit tuples are (min, max, step)).
+        self.high_prob_curr_time += factor * self.high_prob_curr_time_limit[2]
+        if self.high_prob_curr_time > self.high_prob_curr_time_limit[1]:
+            self.high_prob_curr_time = self.high_prob_curr_time_limit[1]
+        
+        self.high_prob_curr_pieces += int(factor * self.high_prob_curr_pieces_limit[2])
+        if self.high_prob_curr_pieces > self.high_prob_curr_pieces_limit[1]:
+            self.high_prob_curr_pieces = self.high_prob_curr_pieces_limit[1]
+
+        if DEBUG: print >>sys.stderr, "VideoStatus:increase_high_range", self.high_prob_curr_time, "seconds or", self.high_prob_curr_pieces, "pieces"
+
+    def decrease_high_range(self, factor=1):
+        """
+        Decrease the high priority range (effectively reducing the buffer size)
+        """
+        assert factor > 0
+        # Shrink by 'factor' steps, clamped to the configured minima
+        # (limit tuples are (min, max, step)).
+        self.high_prob_curr_time -= factor * self.high_prob_curr_time_limit[2]
+        if self.high_prob_curr_time < self.high_prob_curr_time_limit[0]:
+            self.high_prob_curr_time = self.high_prob_curr_time_limit[0]
+        
+        self.high_prob_curr_pieces -= int(factor * self.high_prob_curr_pieces_limit[2])
+        if self.high_prob_curr_pieces < self.high_prob_curr_pieces_limit[0]:
+            self.high_prob_curr_pieces = self.high_prob_curr_pieces_limit[0]
+
+        if DEBUG: print >>sys.stderr, "VideoStatus:decrease_high_range", self.high_prob_curr_time, "seconds or", self.high_prob_curr_pieces, "pieces"
+
+    def set_high_range(self, seconds=None, pieces=None):
+        """
+        Set the minimum size of the high priority range. Can be given
+        in seconds or pieces.
+        """
+        if seconds: self.high_prob_curr_time = seconds
+        if pieces: self.high_prob_curr_pieces = pieces
+
+    def get_high_range(self):
+        """
+        Returns [(first, last), (first, last), ..] list of tuples
+        """
+        download_range = self.download_range()
+        number_of_pieces = self.time_to_pieces(self.high_prob_curr_time)
+
+        high_range = []
+        for i in range(self.quality + 1):
+
+            if i == 0:
+                # the other layers will align to the last piece of 
+                # the first one
+                f, _ = download_range[0]
+                # End of the base high range: earliest of the movie end, the
+                # time/piece-based size, and the hard-coded maximum.
+                l = min(self.last_piece,                                                          # last piece
+                         1 + f + max(number_of_pieces, self.high_prob_curr_pieces), # based on time OR pieces
+                         1 + f + self.high_prob_curr_pieces_limit[1])               # hard-coded buffer maximum
+
+                high_range.append((f, l))
+
+            # Ric: for higher layers the initial piece is ahead 
+            # in time regarding the previous layer            
+            else:
+                base_f, base_l = high_range[0]
+                align = self.get_respective_range( (base_f, base_l) )
+                new_b, new_e = align[i]
+                # We increase of one piece the start of the high range for the following layer
+                new_b += i
+                high_range.append( (new_b, new_e) )
+            
+        return high_range
+
+    def in_high_range(self, piece):
+        """
+        Returns True when PIECE is in the high priority range.
+        """
+        high_range = self.get_high_range()
+        return self.in_range(high_range, piece)
+
+    def get_range_length(self, download_range):
+        res = 0
+        for i in range(self.quality + 1):
+            f, l = download_range[i]
+            res = res + self.get_small_range_length(f, l) 
+        return res
+
+    def get_small_range_length(self, first, last):
+        # Length of a single (first, last) range. Note this is the plain
+        # difference, i.e. one endpoint is not counted.
+        return last - first
+
+    def get_high_range_length(self):
+        high_range = self.get_high_range()
+        return self.get_range_length(high_range)
+
+    # Needed to detect if the buffer undeflow is sustainable
+    def get_base_high_range_length(self):
+        high_range = self.get_high_range()
+        f, l = high_range[0]
+        return self.get_small_range_length(f, l) 
+    
+    def generate_high_range(self):
+        """
+        Returns the high current high priority range in piece_ids
+        """
+        high_range = self.get_high_range()
+        return self.generate_range(high_range)
+        
+    def generate_base_high_range(self):
+        """
+        Returns the high current high priority range in piece_ids
+        """
+        high_range = self.get_high_range()
+        base_high_range = [high_range[0]]
+        return self.generate_range(base_high_range)
+    
+    def get_highest_piece(self, list_of_ranges):
+        highest = 0
+        for i in range(self.quality + 1):
+            (f,l) = list_of_ranges[i]
+            if l > highest:
+                highest = l
+        return highest
+
+    def get_respective_range(self, (f,l)):
+        # Map the base layer's (f, l) piece range onto each layer,
+        # returning one (first, last) tuple per layer. For layer 0 the
+        # input is copied verbatim; higher layers are offset by their
+        # own first_piece.
+        # NOTE(review): layers whose size differs from the base layer's
+        # are silently skipped (TODO branch below), so the result list
+        # may be shorter than quality + 1 in that case -- confirm callers
+        # can cope with that.
+        ret = []
+
+        for i in range(self.quality + 1):
+            if i == 0:
+                # for the first layer just copy the input
+                ret.append((f,l))
+            else:
+                # Ric: if they have the same bitrate they have the same size TODO
+                if self.selected_movie[0]["size"] / self.selected_movie[i]["size"] == 1:              
+                    bdiff = f - self.first_piece
+                    ediff = l - self.first_piece
+                    beg = self.layer_info[i]["first_piece"]
+                    new_beg = beg + bdiff
+                    new_end = beg + ediff
+                    ret.append((new_beg, new_end))
+                else:
+                    # TODO case of different bitrates
+                    pass
+        return ret
+
+    # returns a list of pieces that represent the same moment in the stream from all the layers
+    def get_respective_piece(self, piece):
+        # NOTE(review): iterates range(self.available_qualities) whereas the
+        # sibling get_respective_range() iterates range(self.quality + 1) --
+        # confirm whether the two are meant to agree.
+        # The base layer itself is not included in the result (layer 0 is
+        # skipped), and layers of a different size are skipped as well
+        # (TODO branch), so the list may have fewer than
+        # available_qualities - 1 entries.
+        ret = []
+
+        for i in range(self.available_qualities):
+            if i == 0:
+                pass
+                #ret.append(piece)
+            else:
+                # Ric: if they have the same bitrate they have the same size TODO
+                if self.selected_movie[0]["size"] / self.selected_movie[i]["size"] == 1:
+                    diff = piece - self.first_piece
+                    beg = self.layer_info[i]["first_piece"]
+                    res = beg + diff
+                    ret.append(res)
+                else:
+                    # TODO case of different bitrates
+                    pass
+        return ret
+
+def range2set(range,maxrange):    
+    if range[0] <= range[1]:
+        set = Set(xrange(range[0],range[1]))
+    else:
+        set = Set(xrange(range[0],maxrange)) | Set(xrange(0,range[1]))
+    return set
+    
+    
diff --git a/instrumentation/next-share/BaseLib/Core/Video/VideoOnDemand.py b/instrumentation/next-share/BaseLib/Core/Video/VideoOnDemand.py
new file mode 100644 (file)
index 0000000..7fc6595
--- /dev/null
@@ -0,0 +1,1814 @@
+# Written by Jan David Mol, Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+from math import ceil
+from threading import Condition,currentThread
+from traceback import print_exc,print_stack
+from tempfile import mkstemp
+import collections
+import os
+import base64
+import os,sys,time
+import re
+from base64 import b64encode
+
+from BaseLib.Core.BitTornado.CurrentRateMeasure import Measure
+from BaseLib.Core.Video.MovieTransport import MovieTransport,MovieTransportStreamWrapper
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.osutils import *
+from BaseLib.Core.Statistics.Status.Status import get_status_holder
+
+# pull all video data as if a video player was attached
+FAKEPLAYBACK = False
+
+# module-wide debug switches; DEBUG_HOOKIN additionally logs live hookin decisions
+DEBUG = False
+DEBUG_HOOKIN = True
+
+class PieceStats:
+    """ Keeps track of statistics for each piece as it flows through the system. """
+
+    def __init__(self):
+        self.pieces = {}
+        self.completed = {}
+
+    def set(self,piece,stat,value,firstonly=True):
+        if piece not in self.pieces:
+            self.pieces[piece] = {}
+
+        if firstonly and stat in self.pieces[piece]:
+            return
+
+        self.pieces[piece][stat] = value
+
+    def complete(self,piece):
+        self.completed[piece] = 1
+
+    def reset(self):
+        for x in self.completed:
+            self.pieces.pop(x,0)
+
+        self.completed = {}
+
+    def pop_completed(self):
+        completed = {}
+
+        for x in self.completed.keys():
+            completed[x] = self.pieces.pop(x,{})
+
+        self.completed = {}
+        return completed
+
+class MovieOnDemandTransporter(MovieTransport):
+    """ Takes care of providing a bytestream interface based on the available pieces. """
+
+    # seconds to prebuffer if bitrate is known
+    PREBUF_SEC_LIVE = 10.0
+    PREBUF_SEC_VOD  = 10.0
+    
+    # Arno, 2010-01-14: prebuf is controlled with the above params. Buffering
+    # while playback for VOD is handled by increasing the high range, see
+    # VideoStatus self.high_prob_curr_*
+
+    # max number of seconds in queue to player
+    # Arno: < 2008-07-15: St*pid vlc apparently can't handle lots of data pushed to it
+    # Arno: 2008-07-15: 0.8.6h apparently can
+    BUFFER_TIME = 5.0
+    
+    # polling interval to refill buffer
+    #REFILL_INTERVAL = BUFFER_TIME * 0.75
+    # Arno: there is no guarantee we got enough (=BUFFER_TIME secs worth) to write to output bug!
+    REFILL_INTERVAL = 0.1
+
+    # amount of time (seconds) to push a packet into
+    # the player queue ahead of schedule
+    VLC_BUFFER_SIZE = 0
+    PIECE_DUE_SKEW = 0.1 + VLC_BUFFER_SIZE
+
+    # Arno: If we don't know playtime and FFMPEG gave no decent bitrate, this is the minimum
+    # bitrate (in KByte/s) that the playback bitrate-estimator must have to make us
+    # set the bitrate in movieselector.
+    # NOTE(review): the comment says KByte/s but 32*1024 reads as 32 KiB
+    # expressed in bytes -- confirm the intended unit.
+    MINPLAYBACKRATE = 32*1024
+    
+    # If not yet playing and if the difference between the peer's first chosen
+    # hookin time and the newly calculated hookin time is larger than 
+    # PREBUF_REHOOKIN_SECS (because his peer environment changed), then rehookin. 
+    PREBUF_REHOOKIN_SECS = 5.0
+
+    # maximum delay between pops when live streaming before we force a restart (seconds)
+    MAX_POP_TIME = 10
+
+    def __init__(self,bt1download,videostatus,videoinfo,videoanalyserpath,vodeventfunc,httpsupport=None):
+        """ Set up the transporter: statistics reporting, prebuffer sizing,
+            optional FFMPEG-based bitrate analysis, live-source
+            authentication, and the periodic refill / hookin tasks.
+
+            bt1download:       download engine; picker, rawserver,
+                               storagewrapper and fileselector are taken from it
+            videostatus:       shared VideoStatus instance (piece bookkeeping)
+            videoinfo:         dict, must contain at least 'mimetype'
+            videoanalyserpath: path to the FFMPEG binary used by parse_video()
+            vodeventfunc:      callback invoked on VOD events
+            httpsupport:       optional HTTP-seeding support object (Diego)
+        """
+
+        # dirty hack to get the Tribler Session
+        from BaseLib.Core.Session import Session
+        session = Session.get_instance()
+
+        # hack: we should not import this since it is not part of the
+        # core nor should we import here, but otherwise we will get
+        # import errors
+        #
+        # _event_reporter stores events that are logged somewhere...
+        # from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
+        self._event_reporter = get_status_holder("LivingLab")
+        self.b64_infohash = b64encode(bt1download.infohash)
+            
+        # add an event to indicate that the user wants playback to
+        # start
+        def set_nat(nat):
+            self._event_reporter.create_and_add_event("nat", [self.b64_infohash, nat])
+        self._event_reporter.create_and_add_event("play-init", [self.b64_infohash])
+        self._event_reporter.create_and_add_event("piece-size", [self.b64_infohash, videostatus.piecelen])
+        self._event_reporter.create_and_add_event("num-pieces", [self.b64_infohash, videostatus.movie_numpieces])
+        self._event_reporter.create_and_add_event("bitrate", [self.b64_infohash, videostatus.bitrate])
+        self._event_reporter.create_and_add_event("nat", [self.b64_infohash, session.get_nat_type(callback=set_nat)])
+
+        # self._complete = False
+        self.videoinfo = videoinfo
+        self.bt1download = bt1download
+        self.piecepicker = bt1download.picker
+        self.rawserver = bt1download.rawserver
+        self.storagewrapper = bt1download.storagewrapper
+        self.fileselector = bt1download.fileselector
+
+        self.vodeventfunc = vodeventfunc
+        self.videostatus = vs = videostatus
+        # Diego : Http seeding video support
+        self.http_support = httpsupport
+        self.traker_peers_report = None
+        self.http_first_run = None
+        
+        # Add quotes around path, as that's what os.popen() wants on win32
+        if sys.platform == "win32" and videoanalyserpath is not None and videoanalyserpath.find(' ') != -1:
+            self.video_analyser_path='"'+videoanalyserpath+'"'
+        else:
+            self.video_analyser_path=videoanalyserpath
+
+        # counter for the sustainable() call. Every X calls the
+        # buffer-percentage is updated.
+        self.sustainable_counter = sys.maxint
+
+        # boudewijn: because we now update the downloadrate for each
+        # received chunk instead of each piece we do not need to
+        # average the measurement over a 'long' period of time. Also,
+        # we only update the downloadrate for pieces that are in the
+        # high priority range giving us a better estimation on how
+        # likely the pieces will be available on time.
+        self.overall_rate = Measure(10)
+        self.high_range_rate = Measure(2)
+
+        # buffer: a link to the piecepicker buffer
+        self.has = self.piecepicker.has
+
+        # number of pieces in buffer
+        self.pieces_in_buffer = 0
+
+        # guards/signals availability of data in the output buffer
+        self.data_ready = Condition()
+        
+        # Arno: Call FFMPEG only if the torrent did not provide the 
+        # bitrate and video dimensions. This is becasue FFMPEG 
+        # sometimes hangs e.g. Ivaylo's Xvid Finland AVI, for unknown 
+        # reasons
+        
+        # Arno: 2007-01-06: Since we use VideoLan player, videodimensions not important
+        if vs.bitrate_set:
+            self.doing_ffmpeg_analysis = False
+            self.doing_bitrate_est = False
+            self.videodim = None #self.movieselector.videodim
+        else:
+            self.doing_ffmpeg_analysis = True
+            self.doing_bitrate_est = True
+            self.videodim = None
+
+        self.player_opened_with_width_height = False
+        self.ffmpeg_est_bitrate = None
+        
+        # number of packets required to preparse the video
+        # I say we need 128 KB to sniff size and bitrate
+        
+        # Arno: 2007-01-04: Changed to 1MB. It appears ffplay works better with some
+        # decent prebuffering. We should replace this with a timing based thing, 
+        
+        if not self.doing_bitrate_est:
+            if vs.live_streaming:
+                prebufsecs = self.PREBUF_SEC_LIVE
+            else:
+                prebufsecs = self.PREBUF_SEC_VOD
+
+            if vs.bitrate <=  (256 * 1024 / 8):
+                print >>sys.stderr,"vod: trans: Increasing prebuffer for low-bitrate feeds"
+                prebufsecs += 5.0
+            # assumes first piece is whole (first_piecelen == piecelen)
+            piecesneeded = vs.time_to_pieces( prebufsecs )
+        else:
+            # Arno, 2010-01-13: For torrents with unknown bitrate, prebuf more
+            # following Boudewijn's heuristics
+            piecesneeded = 2 * vs.get_high_range_length()
+
+        if vs.wraparound:
+            self.max_prebuf_packets = min(vs.wraparound_delta, piecesneeded)
+        else:
+            self.max_prebuf_packets = min(vs.movie_numpieces, piecesneeded)
+            
+        if DEBUG:
+            if self.doing_ffmpeg_analysis:
+                print >>sys.stderr,"vod: trans: Want",self.max_prebuf_packets,"pieces for FFMPEG analysis, piecesize",vs.piecelen
+            else:
+                print >>sys.stderr,"vod: trans: Want",self.max_prebuf_packets,"pieces for prebuffering"
+
+        self.nreceived = 0
+        
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Setting MIME type to",self.videoinfo['mimetype']
+        
+        self.set_mimetype(self.videoinfo['mimetype'])
+
+        # some statistics
+        self.stat_playedpieces = 0 # number of pieces played successfully
+        self.stat_latepieces = 0 # number of pieces that arrived too late
+        self.stat_droppedpieces = 0 # number of pieces dropped
+        self.stat_stalltime = 0.0 # total amount of time the video was stalled
+        self.stat_prebuffertime = 0.0 # amount of prebuffer time used
+        self.stat_pieces = PieceStats() # information about each piece
+
+        # start periodic tasks
+        self.curpiece = ""
+        self.curpiece_pos = 0
+        self.outbuf = []
+        self.outbuflen = None
+        self.last_pop = None # time of last pop
+        self.rehookin = False
+        self.reset_bitrate_prediction()
+
+        self.lasttime=0
+        # For DownloadState
+        self.prebufprogress = 0.0
+        self.prebufstart = time.time()
+        self.playable = False
+        self.usernotified = False
+        
+
+        # LIVESOURCEAUTH: verify pieces signed by the live source
+        if vs.live_streaming:
+            from BaseLib.Core.Video.LiveSourceAuth import ECDSAAuthenticator,RSAAuthenticator
+
+            if vs.authparams['authmethod'] == LIVE_AUTHMETHOD_ECDSA:
+                self.authenticator = ECDSAAuthenticator(vs.first_piecelen,vs.movie_numpieces,pubkeypem=vs.authparams['pubkey'])
+                vs.sigsize = vs.piecelen - self.authenticator.get_content_blocksize()
+            elif vs.authparams['authmethod'] == LIVE_AUTHMETHOD_RSA:
+                self.authenticator = RSAAuthenticator(vs.first_piecelen,vs.movie_numpieces,pubkeypem=vs.authparams['pubkey'])
+                vs.sigsize = vs.piecelen - self.authenticator.get_content_blocksize()
+            else:
+                self.authenticator = None
+                vs.sigsize = 0
+        else:
+            self.authenticator = None
+
+        self.video_refillbuf_rawtask()
+        if False:
+            self.video_printstats_tick_second()
+
+        # link to others (last thing to do)
+        self.piecepicker.set_transporter( self )
+        if not vs.live_streaming:
+            self.complete_from_persistent_state(self.storagewrapper.get_pieces_on_disk_at_startup())
+        #self.start()
+
+        if FAKEPLAYBACK:
+            import threading
+            
+            # drains the stream as fast as a player would, for testing
+            class FakeReader(threading.Thread):
+                def __init__(self,movie):
+                    threading.Thread.__init__(self)
+                    self.movie = movie
+                    
+                def run(self):
+                    self.movie.start()
+                    while not self.movie.done():
+                        self.movie.read()
+            
+            t = FakeReader(self)
+            t.start()
+          
+        #self.rawserver.add_task( fakereader, 0.0 )
+
+        if self.videostatus.live_streaming:
+            self.live_streaming_timer()
+
+        self.update_prebuffering()
+
+    def calc_live_startpos(self,prebufsize=2,have=False):
+        """ When watching a live stream, determine where to 'hook in'. Adjusts 
+            self.download_range[0] accordingly, never decreasing it. If 'have' 
+            is true, we need to have the data ourself. If 'have' is false, we 
+            look at availability at our neighbours.
+
+            Return True if successful, False if more data has to be collected. 
+            
+            This is called periodically until playback has started. After that,
+            the hookin point / playback position is either fixed, or determined 
+            by active pausing and resuming of the playback by this class, see
+            refill_buffer().
+        """
+
+        if DEBUG_HOOKIN:
+            print >>sys.stderr,"vod: calc_live_startpos: prebuf",prebufsize,"have",have
+
+        # ----- determine highest known piece number
+        if have:
+            # I am already hooked in and downloaded stuff, now do the final
+            # hookin based on what I have.
+            numseeds = 0
+            numhaves = self.piecepicker.has 
+            totalhaves = self.piecepicker.numgot
+            sourcehave = None
+
+            threshold = 1
+        else:
+            # Check neighbours playback pos to see where I should hookin.
+            numseeds = self.piecepicker.seeds_connected
+            numhaves = self.piecepicker.numhaves # excludes seeds
+            totalhaves = self.piecepicker.totalcount # excludes seeds
+            sourcehave = self.piecepicker.get_live_source_have()
+            
+            if DEBUG and DEBUG_HOOKIN:
+                if sourcehave is not None:
+                    print >>sys.stderr,"vod: calc_live_offset: DEBUG: testing for multiple clients at source IP (forbidden!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!, but used in CS testing). Source have numtrue",sourcehave.get_numtrue()
+                    if sourcehave.get_numtrue() < 100:
+                        print >>sys.stderr,"vod: calc_live_offset: Source sent near empty BITFIELD, CS testing bug? Not tuning in as source"
+                        sourcehave = None
+
+            numconns = self.piecepicker.num_nonempty_neighbours()
+            if sourcehave is None:
+                # We don't have a connection to the source, must get at least 2 votes 
+                threshold = max( 2, numconns/2 )
+            else:
+                if DEBUG_HOOKIN:
+                    print >>sys.stderr,"vod: calc_live_offset: Connected to source, hookin on that" 
+                    #print >>sys.stderr,"vod: calc_live_offset: sourcehave",`sourcehave.tostring()`
+                threshold = 1
+
+        # FUDGE: number of pieces we subtract from maximum known/have,
+        # to start playback with some buffer present. We need enough
+        # pieces to do pass the prebuffering phase. when still
+        # requesting pieces, FUDGE can probably be a bit low lower,
+        # since by the time they arrive, we will have later pieces anyway.
+        # NB: all live torrents have the bitrate set.
+        FUDGE = prebufsize #self.max_prebuf_packets
+
+        if numseeds == 0 and totalhaves == 0:
+            # optimisation: without seeds or pieces, just wait
+            if DEBUG_HOOKIN:
+                print >>sys.stderr,"vod: calc_live_offset: no pieces"
+            return False
+
+        # pieces are known, so we can determine where to start playing
+        vs = self.videostatus
+
+        bpiece = vs.first_piece
+        epiece = vs.last_piece
+
+        if not vs.wraparound:
+            if numseeds > 0 or numhaves[epiece] > 0:
+                # special: if full video is available, do nothing and enter VoD mode
+                if DEBUG_HOOKIN:
+                    print >>sys.stderr,"vod: calc_live_offset: vod mode"
+                vs.set_live_startpos( 0 )
+                return True
+
+        # maxnum = highest existing piece number owned by more than half of the neighbours
+        maxnum = None
+        if sourcehave is None:
+            # Look at peer neighbourhood
+            inspecthave = numhaves
+        else:
+            # Just look at source
+            inspecthave = sourcehave
+        
+        # scan downwards from the end for the newest piece with enough votes
+        for i in xrange(epiece,bpiece-1,-1):
+            #if DEBUG_HOOKIN:
+            #    if 0 < inspecthave[i] < threshold:
+            #        print >>sys.stderr,"vod: calc_live_offset: discarding piece %d as it is owned by only %d<%d neighbours" % (i,inspecthave[i],threshold)
+
+            if inspecthave[i] >= threshold:
+                maxnum = i
+                if not have:
+                    if inspecthave == numhaves:
+                        print >>sys.stderr,"vod: calc_live_offset: chosing piece %d as it is owned by %d>=%d neighbours (prewrap)" % (i,inspecthave[i],threshold)
+                    else:
+                        print >>sys.stderr,"vod: calc_live_offset: chosing piece %d as it is owned by the source (prewrap)" % (i)
+                else:
+                    print >>sys.stderr,"vod: calc_live_offset: chosing piece %d as it is owned by me (prewrap)" % (i)
+                        
+                break
+
+        if maxnum is None:
+            print >>sys.stderr,"vod: calc_live_offset: Failed to find quorum for any piece"
+            return False
+
+        # if there is wraparound, newest piece may actually have wrapped
+        if vs.wraparound and maxnum > epiece - vs.wraparound_delta:
+            delta_left = vs.wraparound_delta - (epiece-maxnum)
+
+            for i in xrange( vs.first_piece+delta_left-1, vs.first_piece-1, -1 ):
+                if inspecthave[i] >= threshold:
+                    maxnum = i
+                    if DEBUG_HOOKIN:
+                        if not have:
+                            if inspecthave == numhaves:
+                                print >>sys.stderr,"vod: calc_live_offset: chosing piece %d as it is owned by %d>=%d neighbours (wrap)" % (i,inspecthave[i],threshold)
+                            else:
+                                print >>sys.stderr,"vod: calc_live_offset: chosing piece %d as it is owned by the source (wrap)" % (i)
+                        else:
+                            print >>sys.stderr,"vod: calc_live_offset: chosing piece %d as it is owned by me (wrap)" % (i)
+                            
+                    break
+
+        print >>sys.stderr,"vod: calc_live_offset: hookin candidate (unfudged)",maxnum
+        
+        # start watching from maximum piece number, adjusted by fudge.
+        if vs.wraparound:
+            maxnum = vs.normalize( maxnum - FUDGE )
+            #f = bpiece + (maxnum - bpiece - FUDGE) % (epiece-bpiece)
+            #t = bpiece + (f - bpiece + vs.wraparound_delta) % (epiece-bpiece)
+
+            # start at a piece known to exist to avoid waiting for something that won't appear
+            # for another round. guaranteed to succeed since we would have bailed if noone had anything
+            while not inspecthave[maxnum]:
+                maxnum = vs.normalize( maxnum + 1 )
+        else:
+            maxnum = max( bpiece, maxnum - FUDGE )
+
+            if maxnum == bpiece:
+                # video has just started -- watch from beginning
+                return True
+
+        # If we're connected to the source, and already hooked in,
+        # don't change the hooking point unless it is really far off
+        oldstartpos = vs.get_live_startpos()
+        if not have and threshold == 1 and oldstartpos is not None:
+            diff = vs.dist_range(oldstartpos,maxnum)
+            diffs = float(diff) * float(vs.piecelen) / vs.bitrate
+            print >>sys.stderr,"vod: calc_live_offset: m o",maxnum,oldstartpos,"diff",diff,"diffs",diffs
+            if diffs < self.PREBUF_REHOOKIN_SECS: 
+                return True
+            
+
+        print >>sys.stderr,"vod: === HOOKING IN AT PIECE %d (based on have: %s) ===" % (maxnum,have)
+
+        (toinvalidateset,toinvalidateranges) = vs.set_live_startpos( maxnum )
+        #print >>sys.stderr,"vod: invalidateset is",`toinvalidateset`
+        mevirgin = oldstartpos is None
+        # NOTE(review): both sides of this 'or' test the same condition
+        # (len(toinvalidateranges) == 0), so the second disjunct is dead;
+        # possibly '!= 0' was intended for the bugcatch -- confirm.
+        if len(toinvalidateranges) == 0 or (len(toinvalidateranges) == 0 and not mevirgin): # LAST condition is bugcatch
+            for piece in toinvalidateset:
+                self.live_invalidate_piece_globally(piece,mevirgin)
+        else:
+            self.live_invalidate_piece_ranges_globally(toinvalidateranges,toinvalidateset)
+
+        try:
+            self._event_reporter.create_and_add_event("live-hookin", [self.b64_infohash, maxnum])
+        except:
+            print_exc()
+
+        return True
+    
+
+    def live_streaming_timer(self):
+        """ Background 'thread' to check where to hook in if live streaming.
+
+            Reschedules itself on the rawserver every 'nextt' seconds
+            (1 by default, 2 right after forcing a rehookin). Once playback
+            has started it only watches for stalls: if no data was popped
+            for more than MAX_POP_TIME seconds it reannounces and flags a
+            rehookin.
+        """
+        
+        if DEBUG:
+            print >>sys.stderr,"vod: live_streaming_timer: Checking hookin"
+        nextt = 1
+        try:
+            try:
+                vs = self.videostatus
+            
+                if vs.playing and not self.rehookin:
+                    # Stop adjusting the download range via this mechanism, see 
+                    # refill_buffer() for the new pause/resume mechanism.
+                    
+                    # Arno, 2010-03-04: Reactivate protection for live.
+                    if vs.live_streaming and self.last_pop is not None and time.time() - self.last_pop > self.MAX_POP_TIME:
+                        # Live: last pop too long ago, rehook-in
+                        print >>sys.stderr,"vod: live_streaming_timer: Live stalled too long, reaanounce and REHOOK-in !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+                        self.last_pop = time.time()
+                        
+                        # H4x0r: if conn to source was broken, reconnect
+                        self.bt1download.reannounce()
+                        
+                        # Give some time to connect to new peers, so rehookin in 2 seconds
+                        nextt = 2 
+                        self.rehookin = True
+                    return
+                    
+                # JD:keep checking correct playback pos since it can change if we switch neighbours
+                # due to faulty peers etc
+                if self.calc_live_startpos( self.max_prebuf_packets, False ):
+                    self.rehookin = False
+            except:
+                print_exc()
+        finally:
+            # Always executed
+            self.rawserver.add_task( self.live_streaming_timer, nextt )
+
+    def parse_video(self):
+        """ Feeds the first max_prebuf_packets to ffmpeg to determine video bitrate.
+
+            Returns [bitrate, width, height]; each entry may be None if
+            FFMPEG's output did not contain it. Bitrate is converted from
+            kbits/s to bytes/s.
+        """
+
+        vs = self.videostatus
+        width = None
+        height = None
+
+        # Start ffmpeg, let it write to a temporary file to prevent 
+        # blocking problems on Win32 when FFMPEG outputs lots of
+        # (error) messages.
+        #
+        [loghandle,logfilename] = mkstemp()
+        os.close(loghandle)
+        if sys.platform == "win32":
+            # Not "Nul:" but "nul" is /dev/null on Win32
+            sink = 'nul'
+        else:
+            sink = '/dev/null'
+        # DON'T FORGET 'b' OTHERWISE WE'RE WRITING BINARY DATA IN TEXT MODE!
+        (child_out,child_in) = os.popen2( "%s -y -i - -vcodec copy -acodec copy -f avi %s > %s 2>&1" % (self.video_analyser_path, sink, logfilename), 'b' )
+        """
+        # If the path is "C:\Program Files\bla\bla" (escaping left out) and that file does not exist
+        # the output will say something cryptic like "vod: trans: FFMPEG said C:\Program" suggesting an
+        # error with the double quotes around the command, but that's not it. Be warned!
+        cmd = self.video_analyser_path+' -y -i - -vcodec copy -acodec copy -f avi '+sink+' > '+logfilename+' 2>&1'
+        print >>sys.stderr,"vod: trans: Video analyser command is",cmd
+        (child_out,child_in) = os.popen2(cmd,'b')  # DON'T FORGET 'b' OTHERWISE THINGS GO WRONG!
+        """
+
+        # feed all the pieces
+        # NOTE(review): xrange(first,last) excludes 'last' -- confirm
+        # download_range()'s end bound is meant to be exclusive here.
+        first,last = vs.download_range()
+        for i in xrange(first,last):
+            piece = self.get_piece( i )
+
+            if piece is None:
+                break
+
+            # remove any signatures etc
+            if self.authenticator is not None:
+                piece = self.authenticator.get_content( piece )
+
+            try:
+                child_out.write( piece )
+            except IOError:
+                print_exc(file=sys.stderr)
+                break
+
+        child_out.close()
+        child_in.close()
+
+        logfile = open(logfilename, 'r')
+
+        # find the bitrate in the output
+        bitrate = None
+
+        r = re.compile( "bitrate= *([0-9.]+)kbits/s" )
+        r2 = re.compile( "Video:.* ([0-9]+x[0-9]+)," )    # video dimensions WIDTHxHEIGHT
+
+        founddim = False
+        for x in logfile.readlines():
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: FFMPEG said:",x
+            occ = r.findall( x )
+            if occ:
+                # use the latest mentioning of bitrate
+                bitrate = float( occ[-1] ) * 1024 / 8
+                if DEBUG:
+                    if bitrate is not None:
+                        print >>sys.stderr,"vod: trans: Bitrate according to FFMPEG: %.2f KByte/s" % (bitrate/1024)
+                    else:
+                        print >>sys.stderr,"vod: trans: Bitrate could not be determined by FFMPEG"
+            occ = r2.findall( x )
+            if occ and not founddim:
+                # use first occurence
+                dim = occ[0]
+                idx = dim.find('x')
+                width = int(dim[:idx])
+                height = int(dim[idx+1:])
+                founddim = True
+                
+                if DEBUG:
+                    print >>sys.stderr,"vod: width",width,"heigth",height
+        logfile.close()
+        # best-effort cleanup of the temporary log file
+        try:
+            os.remove(logfilename)
+        except:
+            pass
+
+        return [bitrate,width,height]
+
+    def peers_from_tracker_report( self, num_peers ):
+        #print >>sys.stderr,"DIEGO DEBUG Got from tracker : ", num_peers
+        if self.traker_peers_report is None:
+            self.traker_peers_report = num_peers
+            self.update_prebuffering()
+        else:
+            self.traker_peers_report += num_peers
+
+    def update_prebuffering(self,received_piece=None):
+        """ Update prebuffering process. 'received_piece' is a hint that we just received this piece;
+            keep at 'None' for an update in general. """
+
+        vs = self.videostatus
+
+        # Only active while prebuffering; in-playback buffering is handled
+        # by refill_buffer().
+        if not vs.prebuffering:
+            return
+        else:
+            if self.http_support is not None:
+                # Diego : Give the possibility to other peers to give bandwidth.
+                # Wait a few seconds depending on the tracker response and
+                # on the number of peers connected
+                if self.http_first_run is None:
+                    if self.traker_peers_report is None:
+                        self.rawserver.add_task( lambda: self.peers_from_tracker_report(0), 1 ) # wait 1 second for tracker response
+                    elif self.traker_peers_report:
+                        self.http_support.start_video_support( 0, 2 ) # wait 2 seconds for connecting to peers
+                        self.http_first_run = True
+                    else:
+                        self.http_support.start_video_support( 0 ) # no peers to connect to. start immediately
+                        self.http_first_run = True
+                elif self.http_first_run:
+                    if not self.http_support.is_slow_start():
+                        # Slow start is possible only if video support is off
+                        self.http_first_run = False
+                        self.http_support.stop_video_support()
+                        num_peers = len( self.piecepicker.peer_connections )
+                        if num_peers == 0: # no peers connected. start immediately
+                            self.http_support.start_video_support( 0 )
+                        elif num_peers <= 3: # few peers connected. wait 2 second to get bandwidth. TODO : Diego : tune peer threshold
+                            self.http_support.start_video_support( 0, 2 )
+                        else: # many peers connected. wait 5 seconds to get bandwidth.
+                            self.http_support.start_video_support( 0, 5 )
+                else:
+                    self.http_support.start_video_support( 0 )
+
+        if vs.live_streaming and vs.live_startpos is None:
+            # first determine where to hook in
+            return
+
+        if received_piece:
+            self.nreceived += 1
+
+        # Arno, 2010-01-13: This code is only used when *pre*buffering, not
+        # for in-playback buffering. See refill_buffer() for that.
+        # Restored original code here that looks at max_prebuf_packets
+        # and not highrange. The highrange solution didn't allow the prebuf
+        # time to be varied independently of highrange width. 
+        #
+        f,t = vs.playback_pos, vs.normalize( vs.playback_pos + self.max_prebuf_packets )
+        prebufrange = vs.generate_range( (f, t) )
+        missing_pieces = filter( lambda i: not self.have_piece( i ), prebufrange)
+
+        gotall = not missing_pieces
+        self.prebufprogress = float(self.max_prebuf_packets-len(missing_pieces))/float(self.max_prebuf_packets)
+        
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Already got",(self.prebufprogress*100.0),"% of prebuffer"
+        
+        if not gotall and DEBUG:
+            print >>sys.stderr,"vod: trans: Still need pieces",missing_pieces,"for prebuffering/FFMPEG analysis"
+
+        # Live (dropping) mode: force playback start when almost everything
+        # arrived or we received far more pieces than the prebuffer needs.
+        if vs.dropping:
+            if not self.doing_ffmpeg_analysis and not gotall and not (0 in missing_pieces) and self.nreceived > self.max_prebuf_packets:
+                perc = float(self.max_prebuf_packets)/10.0
+                if float(len(missing_pieces)) < perc or self.nreceived > (2*self.max_prebuf_packets):
+                    # If less then 10% of packets missing, or we got 2 times the packets we need already,
+                    # force start of playback
+                    gotall = True
+                    if DEBUG:
+                        print >>sys.stderr,"vod: trans: Forcing stop of prebuffering, less than",perc,"missing, or got 2N packets already"
+
+        # Prebuffer complete: derive bitrate and video dimensions from the
+        # one-shot FFMPEG analysis (parse_video).
+        if gotall and self.doing_ffmpeg_analysis:
+            [bitrate,width,height] = self.parse_video()
+            self.doing_ffmpeg_analysis = False
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: after parse",bitrate,self.doing_bitrate_est
+            if bitrate is None or round(bitrate)== 0:
+                if self.doing_bitrate_est:
+                    # Errr... there was no playtime info in the torrent
+                    # and FFMPEG can't tell us...
+                    bitrate = (1*1024*1024/8) # 1mbps
+                    if DEBUG:
+                        print >>sys.stderr,"vod: trans: No bitrate info avail, wild guess: %.2f KByte/s" % (bitrate/1024)
+
+                    vs.set_bitrate(bitrate)
+                    self._event_reporter.create_and_add_event("bitrate-guess", [self.b64_infohash, bitrate])
+            else:
+                if self.doing_bitrate_est:
+                    # There was no playtime info in torrent, use what FFMPEG tells us
+                    self.ffmpeg_est_bitrate = bitrate
+                    bitrate *= 1.1  # Make FFMPEG estimation 10% higher
+                    if DEBUG:
+                        print >>sys.stderr,"vod: trans: Estimated bitrate: %.2f KByte/s" % (bitrate/1024)
+
+                    vs.set_bitrate(bitrate)
+                    self._event_reporter.create_and_add_event("bitrate-ffmpeg", [self.b64_infohash, bitrate])
+
+            if width is not None and height is not None:
+                diff = False
+                if self.videodim is None:
+                    self.videodim = (width,height)
+                    self.height = height
+                elif self.videodim[0] != width or self.videodim[1] != height:
+                    diff =  True
+                if not self.player_opened_with_width_height or diff:
+                    #self.user_setsize(self.videodim)
+                    pass
+
+        # # 10/03/09 boudewijn: For VOD we will wait for the entire
+        # # buffer to fill (gotall) before we start playback. For live
+        # # this is unlikely to happen and we will therefore only wait
+        # # until we estimate that we have enough_buffer.
+        # if (gotall or vs.live_streaming) and self.enough_buffer():
+        if gotall and self.enough_buffer():
+            # enough buffer and could estimated bitrate - start streaming
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: Prebuffering done",currentThread().getName()
+            self.data_ready.acquire()
+            vs.prebuffering = False
+
+            if self.http_support is not None:
+                self.http_support.stop_video_support()
+            self.stat_prebuffertime = time.time() - self.prebufstart
+            self._event_reporter.create_and_add_event("prebuf", [self.b64_infohash, self.stat_prebuffertime])
+            self.notify_playable()
+            self.data_ready.notify()
+            self.data_ready.release()
+
+        # NOTE(review): the triple-quoted string below is dead code — it
+        # disables an old DEBUG branch and is never executed.
+        """
+        elif DEBUG:
+            if self.doing_ffmpeg_analysis:
+                print >>sys.stderr,"vod: trans: Prebuffering: waiting to obtain the first %d packets" % (self.max_prebuf_packets)
+            else:
+                print >>sys.stderr,"vod: trans: Prebuffering: %.2f seconds left" % (self.expected_buffering_time())
+        """
+
+    def got_have(self,piece):
+        # Arno, 2010-04-15: STBSPEED Not called anymore, to speedup VOD.
+        # Records when a piece first became known (for statistics only).
+        vs = self.videostatus
+
+        # update stats
+        self.stat_pieces.set( piece, "known", time.time() )
+        # NOTE(review): the triple-quoted string below is dead code (an old
+        # wraparound restart heuristic); it is never executed.
+        """
+        if vs.playing and vs.wraparound:
+            # check whether we've slipped back too far
+            d = vs.wraparound_delta
+            n = max(1,self.piecepicker.num_nonempty_neighbours()/2)
+            if self.piecepicker.numhaves[piece] > n and d/2 < (piece - vs.playback_pos) % vs.movie_numpieces < d:
+                # have is confirmed by more than half of the neighours and is in second half of future window
+                print >>sys.stderr,"vod: trans: Forcing restart. Am at playback position %d but saw %d at %d>%d peers." % (vs.playback_pos,piece,self.piecepicker.numhaves[piece],n)
+
+                self.start(force=True)
+        """
+
+    def got_piece(self, piece_id, begin, length):
+        """
+        Called when a chunk has been downloaded. This information can
+        be used to estimate download speed.
+        """
+        # Only chunks inside the high-priority range feed the
+        # high_range_rate estimator; 'begin' is unused here.
+        if self.videostatus.in_high_range(piece_id):
+            self.high_range_rate.update_rate(length)
+            # if DEBUG: print >>sys.stderr, "vod: high priority rate:", self.high_range_rate.get_rate()
+
+    def complete_from_persistent_state(self,myhavelist):
+        """ Arno, 2010-04-20: STBSPEED: Net effect of calling complete(piece,downloaded=False) 
+        for pieces available from disk """
+        # Count the on-disk pieces that fall inside the download window,
+        # then give prebuffering a chance to finish early.
+        vs = self.videostatus
+        for piece in myhavelist:
+            if vs.in_download_range(piece):
+                self.pieces_in_buffer += 1
+
+        self.update_prebuffering()
+
+    
+    def complete(self,piece,downloaded=True):
+        """ Called when a movie piece has been downloaded or was available from the start (disk).
+        Arno, 2010-04-20: STBSPEED: Never called anymore for available from start.  
+        """
+
+        vs = self.videostatus
+        # Report whether the piece arrived inside the high-priority window.
+        if vs.in_high_range(piece):
+            self._event_reporter.create_and_add_event("hipiece", [self.b64_infohash, piece])
+        else:
+            self._event_reporter.create_and_add_event("piece", [self.b64_infohash, piece])
+
+        # if not self._complete and self.piecepicker.am_I_complete():
+        #     self._complete = True
+        #     self._event_reporter.create_and_add_event(self.b64_infohash, "complete")
+        #     self._event_reporter.flush()
+
+        if vs.wraparound:
+            assert downloaded
+
+        self.stat_pieces.set( piece, "complete", time.time() )
+
+        #if DEBUG:
+        #    print >>sys.stderr,"vod: trans: Completed",piece
+
+        if downloaded:
+            self.overall_rate.update_rate( vs.real_piecelen( piece ) )
+
+        # Arno, 2010-04-20: STBSPEED: vs.in_download_range( piece ) is equiv to downloaded=False
+        if vs.in_download_range( piece ):
+            self.pieces_in_buffer += 1
+        else:
+            # Piece arrived after playback already moved past it.
+            if DEBUG:
+                print >>sys.stderr,"vod: piece %d too late [pos=%d]" % (piece,vs.playback_pos)
+            self.stat_latepieces += 1
+
+        if vs.playing and vs.playback_pos == piece:
+            # we were delaying for this piece
+            self.refill_buffer()
+
+        self.update_prebuffering( piece )
+
+    def set_pos(self,pos):
+        """ Update the playback position. Called when playback is started (depending
+        on requested offset). """
+
+        vs = self.videostatus
+
+        #print >>sys.stderr,"vod: trans: set_pos",vs.playback_pos,"->",pos
+
+        # Arno,2010-01-08: if all was pushed to buffer (!= read by user!) 
+        # then playbackpos = last+1
+        oldpos = min(vs.playback_pos,vs.last_piece) 
+        vs.playback_pos = pos
+
+        if vs.wraparound:
+            # recalculate
+            self.pieces_in_buffer = 0
+            for i in vs.generate_range( vs.download_range() ):
+                if self.has[i]:
+                    self.pieces_in_buffer += 1
+        else:
+            # fast forward: pieces skipped over no longer count as buffered
+            # ahead of playback (when pos == oldpos the two loops cancel out)
+            for i in xrange(oldpos,pos+1):
+                if self.has[i]:
+                    self.pieces_in_buffer -= 1
+
+            # fast rewind: pieces seeked back over become buffered again
+            for i in xrange(pos,oldpos+1):
+                if self.has[i]:
+                    self.pieces_in_buffer += 1
+
+    def inc_pos(self):
+        # Advance the playback position by one piece, keeping
+        # pieces_in_buffer in sync; for live streams also invalidate the
+        # piece that fell out of the live window.
+        vs = self.videostatus
+
+        if self.has[vs.playback_pos]:
+            self.pieces_in_buffer -= 1
+
+        vs.inc_playback_pos()
+        
+        if vs.live_streaming:
+            self.live_invalidate_piece_globally(vs.live_piece_to_invalidate())
+
+#    def buffered_time_period(self):
+#        """Length of period of Buffered pieces"""
+#        if self.movieselector.bitrate is None or self.movieselector.bitrate == 0.0:
+#            return 0
+#        else:
+#            return self.pieces_in_buffer * self.movieselector.piece_length / self.movieselector.bitrate
+#    
+#    def playback_time_position(self):
+#        """Time of playback_pos and total duration
+#            Return playback_time in seconds
+#        """
+#        if self.movieselector.bitrate is None or self.movieselector.bitrate == 0.0:
+#            return 0
+#        else:
+#            return self.playback_pos * self.movieselector.piece_length / self.movieselector.bitrate
+    
+    def expected_download_time(self):
+        """ Expected download time left. """
+        vs = self.videostatus
+        if vs.wraparound:
+            return float(2 ** 31)
+
+        pieces_left = vs.last_piece - vs.playback_pos - self.pieces_in_buffer
+        if pieces_left <= 0:
+            return 0.0
+
+        # list all pieces from the high priority set that have not
+        # been completed
+        uncompleted_pieces = filter(self.storagewrapper.do_I_have, vs.generate_high_range())
+
+        # when all pieces in the high-range have been downloaded,
+        # we have an expected download time of zero
+        if not uncompleted_pieces:
+            return 0.0
+
+        # the download time estimator is very inacurate when we only
+        # have a few chunks left. therefore, we will put more emphesis
+        # on the overall_rate as the number of uncompleted_pieces goes
+        # down.
+        total_length = vs.get_high_range_length()
+        uncompleted_length = len(uncompleted_pieces)
+        expected_download_speed = self.high_range_rate.get_rate() * (1 - float(uncompleted_length) / total_length) + \
+                                  self.overall_rate.get_rate() * uncompleted_length / total_length
+        if expected_download_speed < 0.1:
+            return float(2 ** 31)
+
+        return pieces_left * vs.piecelen / expected_download_speed
+
+    def expected_playback_time(self):
+        """ Expected playback time left. """
+
+        vs = self.videostatus
+
+        if vs.wraparound:
+            # Live/wraparound has no known duration.
+            return float(2 ** 31)
+
+        pieces_to_play = vs.last_piece - vs.playback_pos + 1
+
+        if pieces_to_play <= 0:
+            return 0.0
+
+        if not vs.bitrate:
+            # Bitrate unknown or zero: cannot estimate, treat as infinite.
+            return float(2 ** 31)
+
+        return pieces_to_play * vs.piecelen / vs.bitrate
+
+    def expected_buffering_time(self):
+        """ Expected time required for buffering: the absolute gap between
+            remaining download time and remaining playback time. """
+        download_time = self.expected_download_time()
+        playback_time = self.expected_playback_time()
+        #print >>sys.stderr,"EXPECT",self.expected_download_time(),self.expected_playback_time()
+        # Infinite minus infinite is still infinite
+        if download_time > float(2 ** 30) and playback_time > float(2 ** 30):
+            return float(2 ** 31)
+        return abs(download_time - playback_time)
+
+    def enough_buffer(self):
+        """ Returns True if we can safely start playback without expecting to run out of
+            buffer. """
+
+        if self.videostatus.wraparound:
+            # Wrapped streaming has no (known) limited duration, so we cannot predict
+            # whether we have enough download speed. The only way is just to hope
+            # for the best, since any buffer will be emptied if the download speed
+            # is too low.
+            return True
+
+        # Safe to play when downloading is not expected to outlast playback.
+        return max(0.0, self.expected_download_time() - self.expected_playback_time()) == 0.0
+
+    def video_printstats_tick_second(self):
+        # Periodic (1 Hz, self-rescheduling) stats tick: prints estimates
+        # and, while no FFMPEG estimate exists, refines the bitrate from the
+        # measured playback rate.
+        self.rawserver.add_task( self.video_printstats_tick_second, 1.0 )
+
+        vs = self.videostatus
+
+        # Adjust estimate every second, but don't display every second
+        # NOTE(review): 'display' is currently hard-wired to True.
+        display = True # (int(time.time()) % 5) == 0
+        if DEBUG: # display
+            print >>sys.stderr,"vod: Estimated download time: %5.1fs [priority: %7.2f Kbyte/s] [overall: %7.2f Kbyte/s]" % (self.expected_download_time(), self.high_range_rate.get_rate()/1024, self.overall_rate.get_rate()/1024)
+
+        if vs.playing and round(self.playbackrate.rate) > self.MINPLAYBACKRATE and not vs.prebuffering:
+            if self.doing_bitrate_est:
+                if display:
+                    print >>sys.stderr,"vod: Estimated playback time: %5.0fs [%7.2f Kbyte/s], doing estimate=%d" % (self.expected_playback_time(),self.playbackrate.rate/1024, self.ffmpeg_est_bitrate is None)
+                if self.ffmpeg_est_bitrate is None:
+                    vs.set_bitrate( self.playbackrate.rate )
+
+        if display:
+            sys.stderr.flush()
+
+    #
+    # MovieTransport interface
+    #
+    # WARNING: these methods will be called by other threads than NetworkThread!
+    #
+    def size( self ):
+        # Total movie size in bytes, or None for wraparound (live) streams
+        # whose length is unbounded.
+        if self.videostatus.get_wraparound():
+            return None
+        else:
+            return self.videostatus.selected_movie["size"]
+
+    def read(self,numbytes=None):
+        """ Read at most numbytes from the stream. If numbytes is not given,
+            pieces are returned. The bytes read will be returned, or None in
+            case of an error or end-of-stream. """
+            
+        # NOTE(review): called from player threads, not NetworkThread.
+        if not self.curpiece:
+            # curpiece_pos could be set to something other than 0! 
+            # for instance, a seek request sets curpiece_pos but does not
+            # set curpiece.
+
+            piecetup = self.pop()
+            if piecetup is None:
+                return None
+            
+            piecenr,self.curpiece = piecetup
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: %d: popped piece to transport to player" % piecenr
+
+        curpos = self.curpiece_pos
+        left = len(self.curpiece) - curpos
+
+        if numbytes is None:
+            # default on one piece per read
+            numbytes = left
+
+        if left > numbytes:
+            # piece contains enough -- return what was requested
+            data = self.curpiece[curpos:curpos+numbytes]
+
+            self.curpiece_pos += numbytes
+        else:
+            # return remainder of the piece, could be less than numbytes
+            
+            data = self.curpiece[curpos:]
+
+            self.curpiece = ""
+            self.curpiece_pos = 0
+
+        return data
+
+    def start( self, bytepos = 0, force = False ):
+        """ Initialise to start playing at position `bytepos'. If `force' is
+            set, playing state is re-initialised even when already playing. """
+        self._event_reporter.create_and_add_event("play", [self.b64_infohash])
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: start:",bytepos
+
+        # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we 
+        # should. Also see if we need the read numbytes here, or that it
+        # is better handled at a higher layer. For live it is currently
+        # done at a higher level, see VariableReadAuthStreamWrapper because
+        # we have to strip the signature. Hence the self.curpiece buffer here
+        # is superfluous. Get rid off it or check if 
+        #
+        #    curpiece[0:piecelen]
+        #
+        # returns curpiece if piecelen has length piecelen == optimize for
+        # piecesized case.
+        #
+        # For VOD seeking we may use the numbytes facility to seek to byte offsets
+        # not just piece offsets.
+        #
+        vs = self.videostatus
+
+        if vs.playing and not force:
+            return
+
+        # lock before changing startpos or any other playing variable
+        self.data_ready.acquire()
+        try:
+            if vs.live_streaming:
+                # Determine where to start playing. There may be several seconds
+                # between starting the download and starting playback, which we'll
+                # want to skip.
+                self.calc_live_startpos( self.max_prebuf_packets, True )
+
+                # override any position request by VLC, we only have live data
+                piece = vs.playback_pos
+                offset = 0
+            else:
+                # Determine piece number and offset
+                # (first piece may be shorter than vs.piecelen, hence the
+                # special case below)
+                if bytepos < vs.first_piecelen:
+                    piece = vs.first_piece
+                    offset = bytepos
+                else:
+                    newbytepos = bytepos - vs.first_piecelen
+
+                    piece  = vs.first_piece + newbytepos / vs.piecelen + 1
+                    offset = newbytepos % vs.piecelen
+
+            if DEBUG:
+                print >>sys.stderr,"vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos,piece,force),currentThread().getName()
+
+            # Initialise all playing variables
+            self.curpiece = "" # piece currently being popped
+            self.curpiece_pos = offset
+            self.set_pos( piece )
+            self.outbuf = []
+            self.last_pop = time.time()
+            self.reset_bitrate_prediction()
+            vs.playing = True
+            self.playbackrate = Measure( 60 )
+
+        finally:
+            self.data_ready.release()
+
+        # ARNOTODO: start is called by non-NetworkThreads, these following methods
+        # are usually called by NetworkThread.
+        #
+        # We now know that this won't be called until notify_playable() so
+        # perhaps this can be removed?
+        #
+        # CAREFUL: if we use start() for seeking... that's OK. User won't be
+        # able to seek before he got his hands on the stream, so after 
+        # notify_playable()
+        
+        # See what we can do right now
+        self.update_prebuffering()
+        self.refill_buffer()
+
+    def stop( self ):
+        """ Playback is stopped. """
+        self._event_reporter.create_and_add_event("stop", [self.b64_infohash])
+        # self._event_reporter.flush()
+
+        vs = self.videostatus
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: === STOP  = player closed conn === ",currentThread().getName()
+        if not vs.playing:
+            return
+        vs.playing = False
+
+        # clear buffer and notify possible readers
+        # (notify() wakes any thread blocked waiting on data_ready)
+        self.data_ready.acquire()
+        self.outbuf = []
+        self.last_pop = None
+        vs.prebuffering = False
+        self.data_ready.notify()
+        self.data_ready.release()
+
+    def pause( self, autoresume = False ):
+        """ Pause playback. If `autoresume' is set, playback is expected to be
+        resumed automatically once enough data has arrived. """
+        self._event_reporter.create_and_add_event("pause", [self.b64_infohash])
+
+        vs = self.videostatus
+        if not vs.playing or not vs.pausable:
+            return
+
+        if vs.paused:
+            # Already paused: just update the autoresume wish.
+            vs.autoresume = autoresume
+            return
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: paused (autoresume: %s)" % (autoresume,)
+
+        vs.paused = True
+        vs.autoresume = autoresume
+        # paused_at feeds stall-time accounting in resume().
+        self.paused_at = time.time()
+        #self.reset_bitrate_prediction()
+        self.videoinfo["usercallback"](VODEVENT_PAUSE,{ "autoresume": autoresume })
+
+    def resume( self ):
+        """ Resume paused playback. """
+        self._event_reporter.create_and_add_event("resume", [self.b64_infohash])
+
+        vs = self.videostatus
+
+        if not vs.playing or not vs.paused or not vs.pausable:
+            return
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: resumed"
+
+        vs.paused = False
+        vs.autoresume = False
+        # Account the pause as stall time and shift the bitrate-prediction
+        # clock so the pause doesn't skew the estimate.
+        self.stat_stalltime += time.time() - self.paused_at
+        self.addtime_bitrate_prediction( time.time() - self.paused_at )
+        self.videoinfo["usercallback"](VODEVENT_RESUME,{})
+
+        self.update_prebuffering()
+        self.refill_buffer()
+
+    def autoresume( self, testfunc = lambda: True ):
+        """ Resumes if testfunc returns True. If not, will test every second. """
+
+        vs = self.videostatus
+
+        if not vs.playing or not vs.paused or not vs.autoresume:
+            return
+
+        if not testfunc():
+            # Re-poll in one second until the condition holds.
+            self.rawserver.add_task( lambda: self.autoresume( testfunc ), 1.0 )
+            return
+
+        if DEBUG:
+            print >>sys.stderr,"vod: trans: Resuming, since we can maintain this playback position"
+        self.resume()
+
+    def done( self ):
+        # True when playback has stopped, or when the entire stream —
+        # including all internal buffers — has been handed to the player.
+        vs = self.videostatus
+
+        if not vs.playing:
+            return True
+
+        if vs.wraparound:
+            # Live/wraparound streams never finish.
+            return False
+
+        # Arno, 2010-01-08: Adjusted EOF condition to work well with seeking/HTTP range queries
+        return vs.playback_pos == vs.last_piece+1 and len(self.outbuf) == 0 and self.curpiece_pos == 0 and len(self.curpiece) == 0
+
+    def seek(self,pos,whence=os.SEEK_SET):
+        """ Seek to the given position, a number in bytes relative to both
+        the "whence" reference point and the file being played.
+        
+        We currently actually seek at byte level, via the start() method.
+        We support all forms of seeking, including seeking past the current
+        playback pos. Note this may imply needing to prebuffer again or 
+        being paused.
+        
+        vs.playback_pos in NetworkThread domain. Does data_ready lock cover 
+        that? Nope. However, this doesn't appear to be respected in any
+        of the MovieTransport methods, check all.
+        
+        Check
+        * When seeking reset other buffering, e.g. read()'s self.curpiece
+           and higher layers.
+        
+        """
+        vs = self.videostatus
+        length = self.size()
+
+        # lock before changing startpos or any other playing variable
+        self.data_ready.acquire()
+        try:
+            if vs.live_streaming:
+                # Arno, 2010-07-16: Raise error on seek, is clearer to stream user.
+                raise ValueError("seeking not possible for live")
+            if whence == os.SEEK_SET:
+                abspos = pos
+            elif whence == os.SEEK_END:
+                if pos > 0:
+                    raise ValueError("seeking beyond end of stream")
+                else:
+                    abspos = length+pos
+            else: # SEEK_CUR
+                raise ValueError("seeking does not currently support SEEK_CUR")
+            
+            self.stop()
+            self.start(pos)
+        finally:
+            self.data_ready.release()
+
+
+
+    def get_mimetype(self):
+        # Return the MIME type previously stored via set_mimetype().
+        return self.mimetype
+
+    def set_mimetype(self,mimetype):
+        # Record the stream's MIME type for later retrieval by the player.
+        self.mimetype = mimetype
+        
+    def available(self):
+        # Return the number of bytes currently buffered for the player
+        # (snapshot of outbuflen taken under the data_ready lock).
+        self.data_ready.acquire()
+        try:
+            return self.outbuflen    
+        finally:
+            self.data_ready.release()
+        
+        
+    #
+    # End of MovieTransport interface
+    #
+
+    def have_piece(self,piece):
+        # True when the piecepicker reports the piece as locally available.
+        return self.piecepicker.has[piece]
+
+    def get_piece(self,piece):
+        """ Returns the data of a certain piece, or None. """
+
+        vs = self.videostatus
+
+        if not self.have_piece( piece ):
+            return None
+
+        begin = 0
+        length = vs.piecelen
+
+        # Trim the first/last piece to the movie's byte range within the
+        # torrent (movie may not start/end on piece boundaries).
+        if piece == vs.first_piece:
+            begin = vs.movie_range[0][1]
+            length -= begin
+
+        if piece == vs.last_piece:
+            cutoff = vs.piecelen - (vs.movie_range[1][1] + 1)
+            length -= cutoff
+
+        #print >>sys.stderr,"get_piece",piece
+        data = self.storagewrapper.do_get_piece(piece, begin, length)
+        if data is None:
+            return None
+        # do_get_piece returns an array-like; convert to a byte string.
+        return data.tostring()
+
+    def reset_bitrate_prediction(self):
+        # Clear the playback timing history used for bitrate estimation.
+        self.start_playback = None
+        self.last_playback = None
+        self.history_playback = collections.deque()
+
+    def addtime_bitrate_prediction(self,seconds):
+        # Shift the playback-start timestamp forward, e.g. to discount time
+        # spent paused (see resume()).
+        if self.start_playback is not None:
+            self.start_playback["local_ts"] += seconds
+
+    def valid_piece_data(self,i,piece):
+        # Sanity-check piece data against the live-stream authenticator:
+        # reject empty pieces and pieces older than the playback start point.
+        if not piece:
+            return False
+
+        if not self.start_playback or self.authenticator is None:
+            # no check possible
+            return True
+
+        s = self.start_playback
+
+        seqnum = self.authenticator.get_seqnum( piece )
+        source_ts = self.authenticator.get_rtstamp( piece )
+
+        if seqnum < s["absnr"] or source_ts < s["source_ts"]:
+            # old packet???
+            print >>sys.stderr,"vod: trans: **** INVALID PIECE #%s **** seqnum=%d but we started at seqnum=%d, ts=%f but we started at %f" % (i,seqnum,s["absnr"],source_ts,s["source_ts"])
+            return False
+
+        return True
+
+
+    def update_bitrate_prediction(self,i,piece):
+        """ Update the rate prediction given that piece i has just been pushed to the buffer. """
+
+        # Without an authenticator we have no source timestamps; fall back
+        # to the piece index with a zero timestamp.
+        if self.authenticator is not None:
+            seqnum = self.authenticator.get_seqnum( piece )
+            source_ts = self.authenticator.get_rtstamp( piece )
+        else:
+            seqnum = i
+            source_ts = 0
+
+        d = {
+            "nr": i,
+            "absnr": seqnum,
+            "local_ts": time.time(),
+            "source_ts": source_ts,
+        }
+
+        # record 
+        if self.start_playback is None:
+            self.start_playback = d
+
+        if self.last_playback and self.last_playback["absnr"] > d["absnr"]:
+            # called out of order
+            return
+
+        self.last_playback = d
+
+        # keep a recent history
+        MAX_HIST_LEN = 10*60 # seconds
+
+        self.history_playback.append( d )
+
+        # of at most 10 entries (or minutes if we keep receiving pieces)
+        while source_ts - self.history_playback[0]["source_ts"] > MAX_HIST_LEN:
+            self.history_playback.popleft()
+
+        if DEBUG:
+            vs = self.videostatus
+            first, last = self.history_playback[0], self.history_playback[-1]
+            
+            if first["source_ts"] and first != last:
+                # Guard against a zero time delta before dividing.
+                divd = (last["source_ts"] - first["source_ts"])
+                if divd == 0:
+                    divd = 0.000001
+                bitrate = "%.2f kbps" % (8.0 / 1024 * (vs.piecelen - vs.sigsize) * (last["absnr"] - first["absnr"]) / divd,)
+            else:
+                bitrate = "%.2f kbps (external info)" % (8.0 / 1024 * vs.bitrate)
+
+            print >>sys.stderr,"vod: trans: %i: pushed at t=%.2f, age is t=%.2f, bitrate = %s" % (i,d["local_ts"]-self.start_playback["local_ts"],d["source_ts"]-self.start_playback["source_ts"],bitrate)
+
+    def piece_due(self,i):
+        """ Return the time when we expect to have to send a certain piece to the player. For
+        wraparound, future pieces are assumed. """
+
+        if self.start_playback is None:
+            return float(2 ** 31) # end of time
+
+        s = self.start_playback
+        l = self.last_playback
+        vs = self.videostatus
+
+        if not vs.wraparound and i < l["nr"]:
+            # should already have arrived!
+            return time.time()
+
+        # assume at most one wrap-around between l and i
+        piecedist = (i - l["nr"]) % vs.movie_numpieces
+
+        if s["source_ts"]:
+            # ----- we have timing information from the source
+            first, last = self.history_playback[0], self.history_playback[-1]
+
+            if first != last:
+                # we have at least two recent pieces, so can calculate average bitrate. use the recent history
+                # do *not* adjust for sigsize since we don't want the actual video speed but the piece rate
+                bitrate = 1.0 * vs.piecelen * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"])
+            else:
+                # fall-back to bitrate predicted from torrent / ffmpeg
+                bitrate = vs.bitrate
+           
+            # extrapolate with the average bitrate so far
+            # (PIECE_DUE_SKEW biases the deadline earlier)
+            return s["local_ts"] + l["source_ts"] - s["source_ts"] + piecedist * vs.piecelen / bitrate - self.PIECE_DUE_SKEW
+        else:
+            # ----- no timing information from pieces, so do old-fashioned methods
+            if vs.live_streaming:
+                # Arno, 2008-11-20: old-fashioned method is well bad,
+                # ignore.
+                return time.time() + 60.0
+            else:
+                i =  piecedist + (l["absnr"] - s["absnr"])
+                
+                # First piece may be shorter than vs.piecelen.
+                if s["nr"] == vs.first_piece:
+                    bytepos = vs.first_piecelen + (i-1) * vs.piecelen
+                else:
+                    bytepos = i * vs.piecelen
+                
+                return s["local_ts"] + bytepos / vs.bitrate - self.PIECE_DUE_SKEW
+            
+
+    def max_buffer_size( self ):
+        # Maximum size in bytes of the player-facing output buffer.
+        vs = self.videostatus
+        if vs.dropping:
+            # live
+            # Arno: 1/2 MB or based on bitrate if that is above 5 Mbps
+            # NOTE(review): '0*512*1024' evaluates to 0, so the live buffer
+            # is purely BUFFER_TIME * bitrate, contradicting the comment
+            # above — confirm whether the 0* factor is intentional.
+            return max( 0*512*1024, self.BUFFER_TIME * vs.bitrate )
+        else:
+            # VOD
+            # boudewijn: 1/4 MB, bitrate, or 2 pieces (whichever is higher)
+            return max(256*1024, vs.piecelen * 2, self.BUFFER_TIME * vs.bitrate)
+
+
    def refill_buffer( self ):
        """ Push pieces into the player FIFO when needed and able. This counts as playing
            the pieces as far as playback_pos is concerned.

            Runs on the network thread (scheduled via video_refillbuf_rawtask)
            and holds self.data_ready for the entire refill so pop() always
            sees a consistent buffer. May pause playback on buffer underrun
            when the player supports pausing. """

        # HttpSeed policy: if the buffer is underrun we start asking pieces from http (always available)
        # till the next refill_buffer call. As soon as refill_buffer is called again we stop asking http
        # and start again in case the buffer is still underrun. We never stop asking till prebuffering.

        self.data_ready.acquire()

        vs = self.videostatus

        # nothing to do before playback has actually started
        if vs.prebuffering or not vs.playing:
            self.data_ready.release()
            return

        if vs.paused:
            self.data_ready.release()
            return

        mx = self.max_buffer_size()
        # recompute the buffered byte count from scratch rather than trusting
        # the incrementally maintained counter
        self.outbuflen = sum( [len(d) for (p,d) in self.outbuf] )
        now = time.time()

        # ask the HTTP seed for help while below a quarter of the target size
        if self.http_support is not None:
            if self.outbuflen < ( mx / 4 ):
                if not self.done(): # TODO : Diego : correct end test?
                    self.http_support.start_video_support( 0 )
                else:
                    self.http_support.stop_video_support() # Download finished
            else:
                self.http_support.stop_video_support()

        def buffer_underrun():
            # empty buffer while playback started more than a second ago
            return self.outbuflen == 0 and self.start_playback and now - self.start_playback["local_ts"] > 1.0

        # Arno, 2010-04-16: STBSPEED: simplified. If we cannot pause, we
        # just push everything we got to the player this buys us time to 
        # retrieve more data.
        if buffer_underrun() and vs.pausable:

            if vs.dropping: # live
                def sustainable():
                    # buffer underrun -- check for available pieces
                    num_future_pieces = 0
                    for piece in vs.generate_range( vs.download_range() ):
                        if self.has[piece]:
                            num_future_pieces += 1

                    goal = mx / 2
                    # progress
                    self.prebufprogress = min(1.0,float(num_future_pieces * vs.piecelen) / float(goal))
                    
                    # enough future data to fill the buffer
                    return num_future_pieces * vs.piecelen >= goal
            else: # vod
                def sustainable():
                    # num_immediate_packets = 0
                    # for piece in vs.generate_range( vs.download_range() ):
                    #     if self.has[piece]:
                    #         num_immediate_packets += 1
                    #     else:
                    #         break
                    # else:
                    #     # progress                                                                              
                    #     self.prebufprogress = 1.0
                    #     # completed loop without breaking, so we have everything we need                        
                    #     return True
                    #
                    # # progress                                                                                  
                    # self.prebufprogress = min(1.0,float(num_immediate_packets) / float(self.max_prebuf_packets))
                    #
                    # return num_immediate_packets >= self.max_prebuf_packets

                    # every 11th call do the full check over the whole
                    # high-priority range; the cheap path below stops at the
                    # first missing piece instead
                    self.sustainable_counter += 1
                    if self.sustainable_counter > 10:
                        self.sustainable_counter = 0
                        
                        high_range_length = vs.get_high_range_length()
                        have_length = len(filter(lambda n:self.has[n], vs.generate_high_range()))

                        # progress                                                                                  
                        self.prebufprogress = min(1.0, float(have_length) / max(1, high_range_length))

                        return have_length >= high_range_length

                    else:
                        num_immediate_packets = 0
                        high_range_length = vs.get_high_range_length()
                        # for piece in vs.generate_range(vs.download_range()): 
                        for piece in vs.generate_high_range(): 
                            if self.has[piece]:
                                num_immediate_packets += 1
                                if num_immediate_packets >= high_range_length:
                                    break
                            else:
                                break
                        else:
                            # progress                                                                              
                            self.prebufprogress = 1.0
                            # completed loop without breaking, so we have everything we need                        
                            return True

                        return num_immediate_packets >= high_range_length

            sus = sustainable()
            if not sus:
                if DEBUG:
                    print >>sys.stderr,"vod: trans:                        BUFFER UNDERRUN -- PAUSING"
                    
                # TODO : Diego : The Http support level should be tuned according to the sustainability level
                if self.http_support is not None:
                    self.http_support.start_video_support( 0 ) # TODO : Diego : still needed? here the buffer is 0 so already asking for support
                self.pause( autoresume = True )
                self.autoresume( sustainable )

                # boudewijn: increase the minimum buffer size
                vs.increase_high_range()
                        
                self.data_ready.release()
                return
                    
        def push( i, data ):
            # force buffer underrun:
            #if self.start_playback and time.time()-self.start_playback["local_ts"] > 60:
            #    # hack: dont push after 1 minute
            #    return

            # push packet into queue
            if DEBUG:
                # NOTE(review): this logs the enclosing loop's `piece` number
                # as "l=", not len(data) -- looks like a leftover; confirm
                print >>sys.stderr,"vod: trans: %d: pushed l=%d" % (vs.playback_pos,piece)

            # update predictions based on this piece
            self.update_bitrate_prediction( i, data )

            self.stat_playedpieces += 1
            self.stat_pieces.set( i, "tobuffer", time.time() )
                    
            self.outbuf.append( (vs.playback_pos,data) )
            self.outbuflen += len(data)

            self.data_ready.notify()
            self.inc_pos()

        def drop( i ):
            # drop packet
            if DEBUG:
                print >>sys.stderr,"vod: trans: %d: dropped pos=%d; deadline expired %.2f sec ago !!!!!!!!!!!!!!!!!!!!!!" % (piece,vs.playback_pos,time.time()-self.piece_due(i))

            self.stat_droppedpieces += 1
            self.stat_pieces.complete( i )
            self.inc_pos()

        # main refill loop: walk the download range in playback order,
        # pushing pieces we have and dropping/waiting on the ones we miss
        for piece in vs.generate_range( vs.download_range() ): 
            ihavepiece = self.has[piece]
            forcedrop = False

            # check whether we have room to store it
            if self.outbuflen > mx:
                # buffer full
                break

            # final check for piece validity
            if ihavepiece:
                data = self.get_piece( piece )
                if not self.valid_piece_data( piece, data ):
                    # I should have the piece, but I don't: WAAAAHH!
                    forcedrop = True
                    ihavepiece = False

            if ihavepiece:
                # have piece - push it into buffer
                if DEBUG:
                    print >>sys.stderr,"vod: trans:                        BUFFER STATUS (max %.0f): %.0f kbyte" % (mx/1024.0,self.outbuflen/1024.0)

                # piece found -- add it to the queue
                push( piece, data )
            else:
                # don't have piece, or forced to drop
                if not vs.dropping and forcedrop:
                    print >>sys.stderr,"vod: trans: DROPPING INVALID PIECE #%s, even though we shouldn't drop anything." % piece
                if vs.dropping or forcedrop:
                    if time.time() >= self.piece_due( piece ) or (vs.pausable and buffer_underrun()) or forcedrop:
                        # piece is too late or we have an empty buffer (and future data to play, otherwise we would have paused) -- drop packet
                        drop( piece )
                    else:
                        # we have time to wait for the piece and still have data in our buffer -- wait for packet
                        if DEBUG:
                            print >>sys.stderr,"vod: trans: %d: due in %.2fs  pos=%d" % (piece,self.piece_due(piece)-time.time(),vs.playback_pos)
                    break
                else: # not dropping
                    if DEBUG:
                        print >>sys.stderr,"vod: trans: %d: not enough pieces to fill buffer." % (piece)
                    break

        self.data_ready.release()
+
+    def video_refillbuf_rawtask( self ):
+        self.refill_buffer()
+
+        self.rawserver.add_task( self.video_refillbuf_rawtask, self.REFILL_INTERVAL )
+
    def pop( self ):
        """ Remove and return the next (piecenr, data) pair for the player,
        or None when the download is done and nothing remains buffered.

        Blocks while prebuffering and while the output buffer is empty
        (unless the download is done). Called by the player-facing thread;
        synchronizes with the network thread through self.data_ready. """
        self.data_ready.acquire()
        vs = self.videostatus

        while vs.prebuffering and not self.done():
            # wait until done prebuffering
            self.data_ready.wait()

        while not self.outbuf and not self.done():
            # wait until a piece is available
            #if DEBUG:
            #    print >>sys.stderr,"vod: trans: Player waiting for data"
            self.data_ready.wait()

        if not self.outbuf:
            # download finished with nothing buffered: signal end-of-stream
            piecetup = None
        else:
            piecetup = self.outbuf.pop( 0 ) # nr,data pair
            # Arno, 2010-02-01: Grrrr...
            self.outbuflen -= len(piecetup[1])
            self.playbackrate.update_rate( len(piecetup[1]) )

        self.last_pop = time.time()

        # snapshot the length under the lock; consumed after release below
        lenoutbuf = len(self.outbuf)

        self.data_ready.release()

        # record delivery-to-player timing outside the lock
        if piecetup:
            self.stat_pieces.set( piecetup[0], "toplayer", time.time() )
            self.stat_pieces.complete( piecetup[0] )

        # Arno, 2010-02-11: STBSPEEDMERGE: Do we want this for STB?
        if vs.pausable:
            # 23/06/09 Boudewijn: because of vlc buffering the self.outbuf
            # almost always gets emptied. This results in periodic (every
            # few seconds) pause signals to VLC. 
            #
            # To 'solve' this we delay the delivery to VLC based on the
            # current buffer size. More delay when there is less data
            # available.
            #
            # 24/06/09 Boudewijn: smaller is (much) better for live as
            # they never get over a certain amount of outstanding
            # pieces. So this will need to be made dependent. VOD can be
            # higher than live.
            if lenoutbuf < 5:
                if lenoutbuf > 0:
                    delay = min(0.1, 3 * 0.1 / lenoutbuf)
                else:
                    delay = 0.1
                if DEBUG: print >>sys.stderr, "Vod: Delaying pop to VLC by", delay, "seconds"
                time.sleep(delay)

        return piecetup
+
+
    def notify_playable(self):
        """ Tell user he can play the media, 
        cf. BaseLib.Core.DownloadConfig.set_vod_event_callback()

        Idempotent: returns immediately after the first notification
        (guarded by self.usernotified). Builds either a plain filename
        (download complete) or a wrapped stream, then fires the
        VODEVENT_START user callback.
        """
        #if self.bufferinfo:
        #    self.bufferinfo.set_playable()
        #self.progressinf.bufferinfo_updated_callback()
        
        # triblerAPI
        if self.usernotified:
            return
        self.usernotified = True
        self.prebufprogress = 1.0
        self.playable = True
        
        #print >>sys.stderr,"vod: trans: notify_playable: Calling usercallback to tell it we're ready to play",self.videoinfo['usercallback']
        
        # MIME type determined normally in LaunchManyCore.network_vod_event_callback
        # However, allow for recognition by videoanalyser
        mimetype = self.get_mimetype()
        complete = self.piecepicker.am_I_complete()
        if complete:
            # fully downloaded: player can read the file directly, no stream
            endstream = None
            filename = self.videoinfo["outpath"]
        else:
            # still downloading: hand the player a stream wrapper
            stream = MovieTransportStreamWrapper(self)
            if self.videostatus.live_streaming and self.videostatus.authparams['authmethod'] != LIVE_AUTHMETHOD_NONE:
                # live + source auth: strip/verify per-piece signatures on read
                from BaseLib.Core.Video.LiveSourceAuth import AuthStreamWrapper,VariableReadAuthStreamWrapper

                intermedstream = AuthStreamWrapper(stream,self.authenticator)
                endstream = VariableReadAuthStreamWrapper(intermedstream,self.authenticator.get_piece_length()) 
            else:
                endstream = stream
            filename = None 
            
        # Call user callback
        print >>sys.stderr,"vod:::::::::: trans: notify_playable: calling:",self.vodeventfunc
        try:
            self.vodeventfunc( self.videoinfo, VODEVENT_START, {
                "complete":  complete,
                "filename":  filename,
                "mimetype":  mimetype,
                "stream":    endstream,
                "length":      self.size(),
                "bitrate":   self.videostatus.bitrate,
            } )
        except:
            # user callback must never take down the network thread
            print_exc()
+
+
+    #
+    # Methods for DownloadState to extract status info of VOD mode.
+    #
+    def get_stats(self):
+        """ Returns accumulated statistics. The piece data is cleared after this call to save memory. """
+        """ Called by network thread """
+        
+        s = { "played": self.stat_playedpieces,
+              "late": self.stat_latepieces,
+              "dropped": self.stat_droppedpieces,
+              "stall": self.stat_stalltime,
+              "pos": self.videostatus.playback_pos,
+              "prebuf": self.stat_prebuffertime,
+              "pp": self.piecepicker.stats,
+              "pieces": self.stat_pieces.pop_completed(),
+              "firstpiece":self.videostatus.first_piece,
+              "npieces":self.videostatus.movie_numpieces}
+        return s
+
+    def get_prebuffering_progress(self):
+        """ Called by network thread """
+        return self.prebufprogress
+    
+    def is_playable(self):
+        """ Called by network thread """
+        if not self.playable or self.videostatus.prebuffering:
+            self.playable = (self.prebufprogress == 1.0 and self.enough_buffer())
+        return self.playable
+        
+    def get_playable_after(self):
+        """ Called by network thread """
+        return self.expected_buffering_time()
+    
+    def get_duration(self):
+        return 1.0 * self.videostatus.selected_movie["size"] / self.videostatus.bitrate
+
+    #
+    # Live streaming
+    #
+    def live_invalidate_piece_globally(self, piece, mevirgin=False):
+        """ Make piece disappear from this peer's view of BT world.
+        mevirgen indicates whether we already downloaded stuff,
+        skipping some cleanup if not.
+        """
+        #print >>sys.stderr,"vod: trans: live_invalidate",piece
+                 
+        self.piecepicker.invalidate_piece(piece)
+        self.piecepicker.downloader.live_invalidate(piece, mevirgin)
+
    def live_invalidate_piece_ranges_globally(self,toinvalidateranges,toinvalidateset):
        """ Invalidate whole ranges of live pieces at once.

        @param toinvalidateranges: list of (start, end) piece-number pairs,
            both ends inclusive.
        @param toinvalidateset: set of the same piece numbers; passed through
            to the downloader for its own bookkeeping.
        """
        # STBSPEED optimization
        # v = Set()
        for s,e in toinvalidateranges:
            for piece in xrange(s,e+1):
                # v.add(piece)
                self.piecepicker.invalidate_piece(piece)
                
        # dead debug code kept as documentation of the ranges/set equivalence check
        """
        diffleft = v.difference(toinvalidateset)
        diffright = toinvalidateset.difference(v)
        print >>sys.stderr,"vod: live_invalidate_piece_ranges_globally: diff: in v",diffleft,"in invset",diffright
        assert v == toinvalidateset
        """
        self.piecepicker.downloader.live_invalidate_ranges(toinvalidateranges,toinvalidateset)
+
+
+    # LIVESOURCEAUTH
+    def piece_from_live_source(self,index,data):
+        if self.authenticator is not None:
+            return self.authenticator.verify(data,index=index)
+        else:
+            return True
+    
diff --git a/instrumentation/next-share/BaseLib/Core/Video/VideoSource.py b/instrumentation/next-share/BaseLib/Core/Video/VideoSource.py
new file mode 100644 (file)
index 0000000..7836aae
--- /dev/null
@@ -0,0 +1,284 @@
+# written by Jan David Mol
+# see LICENSE.txt for license information
+#
+# Represent a source of video (other than a BitTorrent swarm), which can inject
+# pieces into the downloading engine.
+
+# We assume we are the sole originator of these pieces, i.e. none of the pieces
+# injected are already obtained from another source or requested from some peer.
+
+import sys
+from threading import RLock,Thread
+from traceback import print_exc
+from time import sleep
+from BaseLib.Core.BitTornado.BT1.PiecePicker import PiecePicker
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.Video.LiveSourceAuth import NullAuthenticator,ECDSAAuthenticator,RSAAuthenticator
+from BaseLib.Core.Utilities.Crypto import sha
+
+
DEBUG = True  # module-wide switch for verbose stderr logging (on by default)
+
class SimpleThread(Thread):
    """Daemon thread that runs a single callable and then exits."""

    def __init__(self, runfunc):
        """@param runfunc: zero-argument callable executed on the new thread."""
        Thread.__init__(self)
        # daemonize so a lingering source thread never blocks process exit
        self.daemon = True
        # prefix the auto-generated name to ease thread identification
        self.name = "VideoSourceSimple" + self.name
        self.runfunc = runfunc

    def run(self):
        # thread body: just delegate to the wrapped callable
        self.runfunc()
+
+
class VideoSourceTransporter:
    """ Reads data from an external source and turns it into BitTorrent chunks.

    An input thread reads fixed-size content blocks from `stream` and buffers
    them; the network thread (via rawserver tasks) signs each block with the
    configured live-source authenticator, injects it into the swarm as a new
    piece and invalidates the oldest live piece.
    """

    def __init__(self, stream, bt1download, authconfig,restartstatefilename):
        """
        @param stream: file-like object delivering the raw video byte stream.
        @param bt1download: BT1Download instance whose swarm receives pieces.
        @param authconfig: live-source authentication config (method+keypair).
        @param restartstatefilename: file in which the last absolute sequence
            number is persisted so the stream can resume after a source restart.
        """
        self.stream = stream
        self.bt1download = bt1download
        self.restartstatefilename = restartstatefilename
        self.exiting = False  # set once by shutdown(); stops input_thread

        # shortcuts to the parts we use
        self.storagewrapper = bt1download.storagewrapper
        self.picker = bt1download.picker
        self.rawserver = bt1download.rawserver
        self.connecter = bt1download.connecter
        self.fileselector = bt1download.fileselector

        # generic video information
        self.videostatus = bt1download.videostatus

        # buffer to accumulate video data (list of byte strings + total length)
        self.buffer = []
        self.buflen = 0
        self.bufferlock = RLock()
        self.handling_pieces = False   # True while a create_pieces task is queued/running
        self.readlastseqnum = False    # restart state restored yet?

        # LIVESOURCEAUTH: pick the authenticator matching the configured method
        if authconfig.get_method() == LIVE_AUTHMETHOD_ECDSA:
            self.authenticator = ECDSAAuthenticator(self.videostatus.piecelen,self.bt1download.len_pieces,keypair=authconfig.get_keypair())
        elif authconfig.get_method() == LIVE_AUTHMETHOD_RSA:
            self.authenticator = RSAAuthenticator(self.videostatus.piecelen,self.bt1download.len_pieces,keypair=authconfig.get_keypair())
        else:
            self.authenticator = NullAuthenticator(self.videostatus.piecelen,self.bt1download.len_pieces)

    def start(self):
        """ Start transporting data. """

        # reading is delegated to a daemon thread so it can block on the stream
        self.input_thread_handle = SimpleThread(self.input_thread)
        self.input_thread_handle.start()

    def _read(self,length):
        """ Read up to `length` bytes from the source stream.
        Called by input_thread. """
        return self.stream.read(length)

    def input_thread(self):
        """ A thread reading the stream and buffering it. """

        print >>sys.stderr,"VideoSource: started input thread"

        # we can't set the playback position from this thread, so
        # we assume all pieces are vs.piecelen in size.

        # read exactly one content block (piece minus signature room) per loop
        contentbs = self.authenticator.get_content_blocksize()
        try:
            while not self.exiting:
                data = self._read(contentbs)
                if not data:
                    # EOF on the source stream
                    break

                if DEBUG:
                    print >>sys.stderr,"VideoSource: read %d bytes" % len(data)

                self.process_data(data)
        except IOError:
            if DEBUG:
                print_exc()

        self.shutdown()

    def shutdown(self):
        """ Stop transporting data. Idempotent. """

        print >>sys.stderr,"VideoSource: shutting down"

        if self.exiting:
            return

        self.exiting = True

        try:
            self.stream.close()
        except IOError:
            # error on closing, nothing we can do
            pass

    def process_data(self,data):
        """ Turn data into pieces and queue them for insertion.
        Called by input thread. """

        vs = self.videostatus

        self.bufferlock.acquire()
        try:
            # add data to buffer
            self.buffer.append( data )
            self.buflen += len( data )

            if not self.handling_pieces:
                # signal to network thread that data has arrived
                self.rawserver.add_task( self.create_pieces )
                self.handling_pieces = True
        finally:
            self.bufferlock.release()

    def create_pieces(self):
        """ Process the buffer and create pieces when possible.
        Called by network thread """

        def handle_one_piece():
            # Consume one content block from the buffer, sign it, inject it
            # as a piece and persist the sequence number.
            # Returns False when not enough data is buffered yet.
            vs = self.videostatus

            # LIVESOURCEAUTH
            # Arno: make room for source auth info
            contentbs = self.authenticator.get_content_blocksize()
            
            if self.buflen < contentbs:
                return False

            if len(self.buffer[0]) == contentbs:
                # fast path: first chunk is exactly one content block
                content = self.buffer[0]
                del self.buffer[0]
            else:
                # slow path: flatten the buffer and split off one block
                if DEBUG:
                    print >>sys.stderr,"VideoSource: JOIN ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
                buffer = "".join(self.buffer)
                self.buffer = [buffer[contentbs:]]
                content = buffer[:contentbs]
            self.buflen -= contentbs
            
            datas = self.authenticator.sign(content)

            piece = "".join(datas)
            
            # add new piece
            self.add_piece(vs.playback_pos,piece)

            # invalidate old piece
            self.del_piece( vs.live_piece_to_invalidate() )

            # persist the absolute sequence number for restart (best effort;
            # never let persistence failures kill the network thread)
            try:
                lastseqnum = self.authenticator.get_source_seqnum()
                f = open(self.restartstatefilename,"wb")
                f.write(str(lastseqnum))
                f.close()
            except:
                print_exc()

            # advance pointer
            vs.inc_playback_pos()
            
            return True

        # on first run, restore the sequence number persisted by a previous
        # source instance (best effort; missing/corrupt file is ignored)
        if not self.readlastseqnum:
            self.readlastseqnum = True
            try:
                f = open(self.restartstatefilename,"rb")
                data = f.read()
                f.close()
                lastseqnum = int(data)
                
                print >>sys.stderr,"VideoSource: Restarting stream at abs.piece",lastseqnum
                
                # Set playback pos of source and absolute piece nr.
                lastpiecenum = lastseqnum % self.authenticator.get_npieces()
                self.authenticator.set_source_seqnum(lastseqnum+1L)
                
                self.videostatus.set_live_startpos(lastpiecenum)
                self.videostatus.inc_playback_pos()
            except:
                print_exc()
            
        self.bufferlock.acquire()
        try:
            # drain the buffer into as many complete pieces as possible
            while handle_one_piece():
                pass

            self.handling_pieces = False
        finally:
            self.bufferlock.release()

    def add_piece(self,index,piece):
        """ Push one piece into the BitTorrent system. """

        # Modelled after BitTornado.BT1.Downloader.got_piece
        # We don't need most of that function, since this piece
        # was never requested from another peer.

        if DEBUG:
            print >>sys.stderr,"VideoSource: created piece #%d" % index
            # ECDSA
            #print >>sys.stderr,"VideoSource: sig",`piece[-64:]`
            #print >>sys.stderr,"VideoSource: dig",sha(piece[:-64]).hexdigest()
            # RSA, 768 bits
            #print >>sys.stderr,"VideoSource: sig",`piece[-96:]`
            #print >>sys.stderr,"VideoSource: dig",sha(piece[:-112]).hexdigest()


        # act as if the piece was requested and just came in
        # do this in chunks, as StorageWrapper expects to handle
        # a request for each chunk
        chunk_size = self.storagewrapper.request_size
        length = min( len(piece), self.storagewrapper._piecelen(index) )
        x = 0
        while x < length:
            self.storagewrapper.new_request( index )
            self.storagewrapper.piece_came_in( index, x, [], piece[x:x+chunk_size], min(chunk_size,length-x) )
            x += chunk_size

        # also notify the piecepicker
        self.picker.complete( index )

        # notify our neighbours
        self.connecter.got_piece( index )

    def del_piece(self,piece):
        """ Invalidate an old live piece in the picker and the downloader. """
        if DEBUG:
            print >>sys.stderr,"VideoSource: del_piece",piece
        # See Tribler/Core/Video/VideoOnDemand.py, live_invalidate_piece_globally
        self.picker.invalidate_piece(piece)
        self.picker.downloader.live_invalidate(piece)
+
+
class RateLimitedVideoSourceTransporter(VideoSourceTransporter):
    """Video source that throttles reads from the underlying stream to a
    fixed byte rate.

    Useful for creating live streams from a file.
    """

    def __init__(self, ratelimit, *args, **kwargs):
        """@param ratelimit: maximum read rate in bytes per second."""
        VideoSourceTransporter.__init__(self, *args, **kwargs)
        self.ratelimit = int(ratelimit)

    def _read(self, length):
        # Sleep first for the time this chunk "costs" at the configured rate;
        # assumes the read itself and downstream processing are instantaneous.
        delay = float(length) / self.ratelimit
        sleep(delay)
        return VideoSourceTransporter._read(self, length)
+
+
class PiecePickerSource(PiecePicker):
    """Piece picker used at the live source.

    The source originates every piece itself, so it must never download any;
    a picker that always declines prevents corrupted pieces from being
    injected at the source.
    """

    def next(self, *args, **kwargs):
        """Always decline to pick a piece."""
        return None
+
+    
diff --git a/instrumentation/next-share/BaseLib/Core/Video/VideoStatus.py b/instrumentation/next-share/BaseLib/Core/Video/VideoStatus.py
new file mode 100644 (file)
index 0000000..851e044
--- /dev/null
@@ -0,0 +1,371 @@
+# Written by Jan David Mol, Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import time
+from math import ceil
+from sets import Set
+
+from BaseLib.Core.simpledefs import *
+
+# live streaming means wrapping around
+LIVE_WRAPAROUND = True
+
+DEBUG = False
+
+class VideoStatus:
+    """ Info about the selected video and status of the playback. """
+
+    # TODO: thread safety? PiecePicker, MovieSelector and MovieOnDemandTransporter all interface this
+
+    def __init__(self,piecelen,fileinfo,videoinfo,authparams):
+        """
+            piecelen = length of BitTorrent pieces
+            fileinfo = list of (name,length) pairs for all files in the torrent,
+                       in their recorded order
+            videoinfo = videoinfo object from download engine
+            authparams = authentication parameters; stored verbatim here,
+                         not interpreted by this class
+        """
+        self.piecelen = piecelen # including signature, if any
+        self.sigsize = 0
+        self.fileinfo = fileinfo
+        self.videoinfo = videoinfo
+        self.authparams = authparams
+
+        # size of high probability set, in seconds (piecepicker varies
+        # between the limit values depending on network performance,
+        # increases and decreases are in the specified step (min,max,step)
+        self.high_prob_curr_time = 10
+        self.high_prob_curr_time_limit = (10, 180, 10)
+
+        # size of high probability set, in pieces (piecepicker
+        # varies between the limit values depending on network
+        # performance, increases and decreases are in the specified step 
+        # (min,max,step).
+        # Arno, 2010-03-10: max 50 pieces too little for 32K piece-sized
+        # VOD streams.
+        #
+        self.high_prob_curr_pieces = 5
+        self.high_prob_curr_pieces_limit = (5, 1800 ,5) # Arno, 2010-03-11: with 32K pieces and 3 mbps we're talking 10 pieces / sec
+
+        # ----- locate selected movie in fileinfo
+        index = self.videoinfo['index']
+        if index == -1:
+            index = 0
+
+        # byte offset of the selected movie = sum of the sizes of all
+        # files recorded before it in the torrent
+        movie_offset = sum( (filesize for (_,filesize) in fileinfo[:index] if filesize) )
+        movie_name = fileinfo[index][0]
+        movie_size = fileinfo[index][1]
+
+        self.selected_movie = {
+          "offset": movie_offset,
+          "name": movie_name,
+          "size": movie_size,
+        }
+
+        # ----- derive generic movie parameters
+        movie_begin = movie_offset
+        movie_end = movie_offset + movie_size - 1
+
+        # movie_range = (bpiece,offset),(epiece,offset), inclusive
+        self.movie_range = ( (movie_begin/piecelen, movie_begin%piecelen),
+                             (movie_end/piecelen, movie_end%piecelen) )
+        # first/last movie pieces may be partially occupied by other files,
+        # hence their own lengths:
+        self.first_piecelen = piecelen - self.movie_range[0][1]
+        self.last_piecelen  = self.movie_range[1][1]+1 # Arno, 2010-01-08: corrected off by one error
+        self.first_piece = self.movie_range[0][0]
+        self.last_piece = self.movie_range[1][0]
+        self.movie_numpieces = self.last_piece - self.first_piece + 1
+
+        # ----- live streaming settings
+        self.live_streaming = videoinfo['live']
+        self.live_startpos = None
+        self.playback_pos_observers = []
+        self.wraparound = self.live_streaming and LIVE_WRAPAROUND
+        # /8 means -12.5 % ... + 12.5 % = 25 % window
+        self.wraparound_delta = max(4,self.movie_numpieces/8) 
+
+        # ----- generic streaming settings
+        # whether to drop packets that come in too late
+        if self.live_streaming:
+            self.dropping = True  # drop, but we will autopause as well
+        else:
+            self.dropping = False # just wait and produce flawless playback
+
+        if videoinfo['bitrate']:
+            self.set_bitrate( videoinfo['bitrate'] )
+        else:
+            self.set_bitrate( 512*1024/8 ) # default to 512 Kbit/s
+            self.bitrate_set = False
+
+        # ----- set defaults for dynamic positions
+        self.playing = False     # video has started playback
+        self.paused = False      # video is paused
+        self.autoresume = False  # video is paused but will resume automatically
+        self.prebuffering = True # video is prebuffering
+        self.playback_pos = self.first_piece
+
+        # pausing only possible if the user registered for both events
+        self.pausable = (VODEVENT_PAUSE in videoinfo["userevents"]) and (VODEVENT_RESUME in videoinfo["userevents"])
+
+    def add_playback_pos_observer( self, observer ):
+        """ Add a function to be called when the playback position changes. Is called as follows:
+            observer( oldpos, newpos ). In case of initialisation: observer( None, startpos ). """
+        self.playback_pos_observers.append( observer )
+
+    def real_piecelen( self, x ):
+        """ Return the byte length of piece x; the first and last movie
+            pieces can be shorter than the nominal piece length. """
+        if x == self.first_piece:
+            return self.first_piecelen
+        elif x == self.last_piece:
+            return self.last_piecelen
+        else:
+            return self.piecelen
+
+    def set_bitrate( self, bitrate ):
+        """ Record the stream bitrate (bytes/sec, cf. the 512*1024/8
+            default in __init__) and derive the conversion factor used
+            by time_to_pieces().
+            NOTE(review): despite its name, sec_per_piece is set to
+            bitrate/piecelen, i.e. pieces PER second; time_to_pieces()
+            relies on exactly this value -- confirm before renaming. """
+        #print >>sys.stderr,"vodstatus: set_bitrate",bitrate
+        self.bitrate_set = True
+        self.bitrate = bitrate
+        self.sec_per_piece = 1.0 * bitrate / self.piecelen
+
+    def set_live_startpos( self, pos ):
+        """ Set the live hook-in position, move the playback position
+            there, and notify observers with (None, pos).
+            With wraparound enabled, returns the diff between the old
+            and new valid ranges (see get_range_diff); otherwise
+            returns (Set(),[]). """
+        if self.wraparound:
+            if self.live_startpos is None:
+                oldrange = self.first_piece,self.last_piece
+            else:
+                oldrange = self.live_get_valid_range()
+            if DEBUG:
+                print >>sys.stderr,"vodstatus: set_live_pos: old",oldrange
+        self.live_startpos = pos
+        self.playback_pos = pos
+        for o in self.playback_pos_observers:
+            o( None, pos )
+
+        if self.wraparound:
+            newrange = self.live_get_valid_range()
+            if DEBUG:
+                print >>sys.stderr,"vodstatus: set_live_pos: new",newrange
+            return self.get_range_diff(oldrange,newrange)
+        else:
+            return (Set(),[])
+
+
+    def get_live_startpos(self):
+        """ Return the live hook-in position, or None if not hooked in. """
+        return self.live_startpos
+
+    # the following functions work with absolute piece numbers,
+    # so they all function within the range [first_piece,last_piece]
+
+    # the range of pieces to download is
+    # [playback_pos,numpieces) for normal downloads and
+    # [playback_pos,playback_pos+delta) for wraparound
+
+    def generate_range( self, (f, t) ):
+        """ Generate every piece number in [f,t), wrapping past
+            last_piece when wraparound is enabled and f > t.
+            (Tuple-parameter syntax is Python 2 only.) """
+        if self.wraparound and f > t:
+            for x in xrange( f, self.last_piece+1 ):
+                yield x
+            for x in xrange( self.first_piece, t ):
+                yield x
+        else:
+            for x in xrange( f, t ):
+                yield x
+
+    def dist_range(self, f, t):
+        """ Returns the distance between f and t """
+        # NOTE(review): in the wrapped case (f > t) this yields
+        # (last_piece - f) + (t - first_piece), which looks one less
+        # than the piece count from f to t -- confirm the off-by-one
+        # is intended before changing.
+        if f > t:
+            return self.last_piece-f + t-self.first_piece 
+        else:
+            return t - f
+
+    def in_range( self, f, t, x ):
+        """ Return True iff x lies in [f,t), taking wraparound into
+            account when enabled and f > t. """
+        if self.wraparound and f > t:
+            return self.first_piece <= x < t or f <= x <= self.last_piece
+        else:
+            return f <= x < t
+
+    def inc_playback_pos( self ):
+        """ Advance the playback position one piece, wrapping (live) or
+            clamping to last_piece+1 (VOD EOF), and notify observers. """
+        oldpos = self.playback_pos
+        self.playback_pos += 1
+
+        if self.playback_pos > self.last_piece:
+            if self.wraparound:
+                self.playback_pos = self.first_piece
+            else:
+                # Arno, 2010-01-08: Adjusted EOF condition to work well with seeking/HTTP range queries
+                self.playback_pos = self.last_piece+1
+
+        for o in self.playback_pos_observers:
+            o( oldpos, self.playback_pos )
+
+    def in_download_range( self, x ):
+        """ Return True iff piece x is inside the window of pieces we
+            want to download (cf. download_range()). """
+        if self.wraparound:
+            wraplen = self.playback_pos + self.wraparound_delta - self.last_piece
+            if wraplen > 0:
+                return self.first_piece <= x < self.first_piece + wraplen or self.playback_pos <= x <= self.last_piece
+
+            return self.playback_pos <= x < self.playback_pos + self.wraparound_delta
+        else:
+            return self.first_piece <= x <= self.last_piece
+
+    def in_valid_range(self,piece):
+        """ Return True iff 'piece' is currently valid: inside the movie
+            for VOD, or inside the live window once hooked in (always
+            True for live before hook-in). """
+        if self.live_streaming:
+            if self.live_startpos is None:
+                # Haven't hooked in yet
+                return True
+            else:
+                (begin,end) = self.live_get_valid_range()
+                ret = self.in_range(begin,end,piece)
+                if ret == False:
+                    print >>sys.stderr,"vod: status: NOT in_valid_range:",begin,"<",piece,"<",end
+                return ret
+        else:
+            return self.first_piece <= piece <= self.last_piece
+        
+    def live_get_valid_range(self):
+        """ Return (begin,end): the window of +/- wraparound_delta
+            pieces around the current playback position. """
+        begin = self.normalize(self.playback_pos - self.wraparound_delta)
+        end = self.normalize(self.playback_pos + self.wraparound_delta)
+        return (begin,end)
+        
+    def live_piece_to_invalidate(self):
+        """ Return the piece that just fell behind the live window. """
+        #print >>sys.stderr,"vod: live_piece_to_inval:",self.playback_pos,self.wraparound_delta,self.movie_numpieces
+        return self.normalize(self.playback_pos - self.wraparound_delta)
+
+    def get_range_diff(self,oldrange,newrange):
+        """ Returns the diff between oldrange and newrange as a Set.
+            (More precisely, a (set_or_None, rangelist) pair: either the
+            diff as a Set of piece numbers with an empty rangelist, or
+            (None,[ranges]) in the optimized no-playback-pos case.)
+        """
+        # NOTE(review): this compares oldrange against (0, movie_numpieces-1)
+        # while ranges elsewhere use absolute piece numbers, i.e. it
+        # assumes first_piece == 0 here -- confirm.
+        rlist = []
+        if oldrange[0] == 0 and oldrange[1] == self.movie_numpieces-1:
+            # Optimize for case where there is no playback pos yet, for STB.
+            if newrange[0] < newrange[1]:
+                # 100-500, diff is 0-99 + 501-7200
+                a = (oldrange[0],newrange[0]-1)
+                b = (newrange[1]+1,oldrange[1])
+                #print >>sys.stderr,"get_range_diff: ranges",a,b
+                rlist = [a,b]
+                return (None,rlist)
+                #return Set(range(a[0],a[1]) + range(b[0],b[1]))
+            else:
+                # 500-100, diff is 101-499
+                a = (newrange[1]+1,newrange[0]-1)
+                #print >>sys.stderr,"get_range_diff: range",a
+                rlist = [a]
+                return (None,rlist)
+                #return Set(xrange(a[0],a[1]))
+             
+        oldset = range2set(oldrange,self.movie_numpieces)
+        newset = range2set(newrange,self.movie_numpieces)
+        return (oldset - newset,rlist)
+    
+    def normalize( self, x ):
+        """ Caps or wraps a piece number. """
+
+        if self.first_piece <= x <= self.last_piece:
+            return x
+
+        if self.wraparound:
+            # in Python, -1 % 3 == 2, so modulo will do our work for us if x < first_piece
+            return (x - self.first_piece) % self.movie_numpieces + self.first_piece
+        else:
+            return max( self.first_piece, min( x, self.last_piece ) )
+
+    def time_to_pieces( self, sec ):
+        """ Returns the number of pieces that are needed to hold "sec" seconds of content. """
+
+        # TODO: take first and last piece into account, as they can have a different size
+        # (sec_per_piece actually holds pieces-per-second; see set_bitrate)
+        return int(ceil(sec * self.sec_per_piece))
+
+    def download_range( self ):
+        """ Returns the range [first,last) of pieces we like to download. """
+
+        first = self.playback_pos
+
+        if self.wraparound:
+            wraplen = first + self.wraparound_delta + 1 - self.last_piece
+            if wraplen > 0:
+                last = self.first_piece + wraplen
+            else:
+                last = first + self.wraparound_delta + 1
+        else:
+            last = self.last_piece + 1
+
+        return (first,last)
+
+    def get_wraparound(self):
+        """ Return True iff live wraparound mode is active. """
+        return self.wraparound
+
+    def increase_high_range(self, factor=1):
+        """
+        Increase the high priority range (effectively enlarging the buffer size)
+        """
+        assert factor > 0
+        self.high_prob_curr_time += factor * self.high_prob_curr_time_limit[2]
+        if self.high_prob_curr_time > self.high_prob_curr_time_limit[1]:
+            self.high_prob_curr_time = self.high_prob_curr_time_limit[1]
+        
+        self.high_prob_curr_pieces += int(factor * self.high_prob_curr_pieces_limit[2])
+        if self.high_prob_curr_pieces > self.high_prob_curr_pieces_limit[1]:
+            self.high_prob_curr_pieces = self.high_prob_curr_pieces_limit[1]
+
+        if DEBUG: print >>sys.stderr, "VideoStatus:increase_high_range", self.high_prob_curr_time, "seconds or", self.high_prob_curr_pieces, "pieces"
+
+    def decrease_high_range(self, factor=1):
+        """
+        Decrease the high priority range (effectively reducing the buffer size)
+        """
+        assert factor > 0
+        self.high_prob_curr_time -= factor * self.high_prob_curr_time_limit[2]
+        if self.high_prob_curr_time < self.high_prob_curr_time_limit[0]:
+            self.high_prob_curr_time = self.high_prob_curr_time_limit[0]
+        
+        self.high_prob_curr_pieces -= int(factor * self.high_prob_curr_pieces_limit[2])
+        if self.high_prob_curr_pieces < self.high_prob_curr_pieces_limit[0]:
+            self.high_prob_curr_pieces = self.high_prob_curr_pieces_limit[0]
+
+        if DEBUG: print >>sys.stderr, "VideoStatus:decrease_high_range", self.high_prob_curr_time, "seconds or", self.high_prob_curr_pieces, "pieces"
+
+    def set_high_range(self, seconds=None, pieces=None):
+        """
+        Set the minimum size of the high priority range. Can be given
+        in seconds or pieces.
+        """
+        if seconds: self.high_prob_curr_time = seconds
+        if pieces: self.high_prob_curr_pieces = pieces
+
+    def get_high_range(self):
+        """
+        Returns (first, last) tuple
+        """
+        first, _ = self.download_range()
+        number_of_pieces = self.time_to_pieces(self.high_prob_curr_time)
+        last = min(self.last_piece,                                              # last piece
+                   1 + first + max(number_of_pieces, self.high_prob_curr_pieces), # based on time OR pieces
+                   1 + first + self.high_prob_curr_pieces_limit[1])               # hard-coded buffer maximum
+        return first, last
+
+    def in_high_range(self, piece):
+        """
+        Returns True when PIECE is in the high priority range.
+        """
+        first, last = self.get_high_range()
+        return self.in_range(first, last, piece)
+
+    def get_range_length(self, first, last):
+        """ Return the number of pieces in [first,last), wrap-aware.
+            NOTE(review): the wrapped branch mirrors dist_range() and
+            looks one short as well -- confirm. """
+        if self.wraparound and first > last:
+            return self.last_piece - first + \
+                   last - self.first_piece
+        else:
+            return last - first
+
+    def get_high_range_length(self):
+        """ Return the number of pieces in the high priority range. """
+        first, last = self.get_high_range()
+        return self.get_range_length(first, last)
+
+    def generate_high_range(self):
+        """
+        Returns the high current high priority range in piece_ids
+        """
+        first, last = self.get_high_range()
+        return self.generate_range((first, last))
+
def range2set(range, maxrange):
    """Convert an inclusive (first, last) range into a Set of integers,
    wrapping around at maxrange when first > last."""
    if range[0] <= range[1]:
        return Set(xrange(range[0], range[1] + 1))
    return Set(xrange(range[0], maxrange)) | Set(xrange(0, range[1] + 1))
diff --git a/instrumentation/next-share/BaseLib/Core/Video/__init__.py b/instrumentation/next-share/BaseLib/Core/Video/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/__init__.py b/instrumentation/next-share/BaseLib/Core/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Core/defaults.py b/instrumentation/next-share/BaseLib/Core/defaults.py
new file mode 100644 (file)
index 0000000..45fd9c9
--- /dev/null
@@ -0,0 +1,246 @@
+# Written by Arno Bakker and Bram Cohen, George Milescu 
+# see LICENSE.txt for license information
+""" Default values for all configurarable parameters of the Core"""
+#
+# For an explanation of each parameter, see SessionConfig/DownloadConfig.py
+#
+# defaults with comments behind them are not user-setable via the 
+# *ConfigInterface classes, because they are not currently implemented (IPv6)
+# or we only use them internally.
+#
+# WARNING:  
+#    As we have release Tribler 4.5.0 you must now take into account that
+#    people have stored versions of these params on their disk. Make sure 
+#    you change the version number of the structure and provide upgrade code
+#    such that your code won't barf because we loaded an older version from
+#    disk that does not have your new fields.
+#
+import sys
+
+from simpledefs import *
+
+DEFAULTPORT=7760
+
#
# Session opts
#
# History:
#  Version 2: as released in Tribler 4.5.0
#
SESSDEFAULTS_VERSION = 2
sessdefaults = {}
sessdefaults['version'] = SESSDEFAULTS_VERSION
sessdefaults['state_dir'] = None
sessdefaults['install_dir'] = u'.'
sessdefaults['ip'] = ''
sessdefaults['minport'] = DEFAULTPORT
sessdefaults['maxport'] = DEFAULTPORT
sessdefaults['random_port'] = 1
sessdefaults['bind'] = []
sessdefaults['ipv6_enabled'] = 0      # allow the client to connect to peers via IPv6 (currently not supported)
sessdefaults['ipv6_binds_v4'] = None  # set if an IPv6 server socket won't also field IPv4 connections (default = set automatically)
sessdefaults['upnp_nat_access'] = UPNPMODE_UNIVERSAL_DIRECT
sessdefaults['timeout'] = 300.0
sessdefaults['timeout_check_interval'] = 60.0
sessdefaults['eckeypairfilename'] = None
sessdefaults['megacache'] = True
sessdefaults['overlay'] = True
sessdefaults['crawler'] = True
sessdefaults['buddycast'] = True
sessdefaults['magnetlink'] = True
sessdefaults['start_recommender'] = True
sessdefaults['download_help'] = True
sessdefaults['torrent_collecting'] = True
sessdefaults['superpeer'] = False
sessdefaults['overlay_log'] = None
sessdefaults['buddycast_interval'] = 15
sessdefaults['buddycast_max_peers'] = 2500  # max number of peers to use for recommender.
sessdefaults['torrent_collecting_max_torrents'] = 50000
sessdefaults['torrent_collecting_dir'] = None
sessdefaults['torrent_collecting_rate'] = 5 * 10
sessdefaults['torrent_checking'] = 1
sessdefaults['torrent_checking_period'] = 31 # will be changed to min(max(86400/ntorrents, 15), 300) at runtime
sessdefaults['dialback'] = True
sessdefaults['dialback_active'] = True  # do active discovery (needed to disable for testing only) (0 = disabled)
sessdefaults['dialback_trust_superpeers'] = True # trust superpeer replies (needed to disable for testing only) (0 = disabled)
sessdefaults['socnet'] = True
sessdefaults['rquery'] = True
# NOTE(review): 'stop_collecting_threshold' used to be assigned twice with
# the same value (200); the redundant second assignment has been removed.
sessdefaults['stop_collecting_threshold'] = 200
sessdefaults['internaltracker'] = True
sessdefaults['nickname'] = '__default_name__' # is replaced with hostname in LaunchManyCore.py
sessdefaults['mugshot'] = None
sessdefaults['videoanalyserpath'] = None
sessdefaults['overlay_max_message_length'] = 2 ** 23
sessdefaults['download_help_dir'] = None
sessdefaults['bartercast'] = True
sessdefaults['superpeer_file'] = None
sessdefaults['crawler_file'] = None
sessdefaults['buddycast_collecting_solution'] = BCCOLPOLICY_SIMPLE
sessdefaults['peer_icon_path'] = None
sessdefaults['coopdlconfig'] = None
sessdefaults['family_filter'] = True
sessdefaults['nat_detect'] = True
sessdefaults['puncturing_internal_port'] = 6700
sessdefaults['stun_servers'] = [('stun1.tribler.org',6701),('stun2.tribler.org',6702)]
sessdefaults['pingback_servers'] = [('pingback.tribler.org',6703),('pingback2.tribler.org',6703)]
sessdefaults['live_aux_seeders'] = []
sessdefaults['mainline_dht'] = True
sessdefaults['multicast_local_peer_discovery'] = True
sessdefaults['votecast_recent_votes'] = 25
sessdefaults['votecast_random_votes'] = 25
sessdefaults['channelcast_recent_own_subscriptions'] = 13
sessdefaults['channelcast_random_own_subscriptions'] = 12

# 14-04-2010, Andrea: settings to limit the results for a remote query in channels
# if there are too many results the gui got frozen for a considerable amount of
# time
sessdefaults['max_channel_query_results'] = 25

# 13-04-2010 Andrea, config for subtitle dissemination subsystem
sessdefaults['subtitles_collecting'] = False
sessdefaults['subtitles_collecting_dir'] = None
sessdefaults['subtitles_upload_rate'] = 1024 # KB/s

# ProxyService global config
sessdefaults['proxyservice_status'] = PROXYSERVICE_OFF
+
# Internal-tracker defaults; merged into the session defaults below.
trackerdefaults = {
    'tracker_url': None,
    'tracker_dfile': None,
    'tracker_dfile_format': ITRACKDBFORMAT_PICKLE,
    'tracker_socket_timeout': 15,
    'tracker_save_dfile_interval': 300,
    'tracker_timeout_downloaders_interval': 2700,
    'tracker_reannounce_interval': 1800,
    'tracker_response_size': 50,
    'tracker_timeout_check_interval': 5,
    'tracker_nat_check': 3,
    'tracker_log_nat_checks': 0,
    'tracker_min_time_between_log_flushes': 3.0,
    'tracker_min_time_between_cache_refreshes': 600.0,
    'tracker_allowed_dir': None,
    'tracker_allowed_list': '',
    'tracker_allowed_controls': 0,
    'tracker_multitracker_enabled': 0,
    'tracker_multitracker_allowed': ITRACKMULTI_ALLOW_AUTODETECT,
    'tracker_multitracker_reannounce_interval': 120,
    'tracker_multitracker_maxpeers': 20,
    'tracker_aggregate_forward': [None, None],
    'tracker_aggregator': 0,
    'tracker_hupmonitor': 0,
    'tracker_multitracker_http_timeout': 60,
    'tracker_parse_dir_interval': 60,
    'tracker_show_infopage': 1,
    'tracker_infopage_redirect': None,
    'tracker_show_names': 1,
    'tracker_favicon': None,
    'tracker_allowed_ips': [],
    'tracker_banned_ips': [],
    'tracker_only_local_override_ip': ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK,
    'tracker_logfile': None,
    'tracker_allow_get': 1,
    'tracker_keep_dead': 0,
    'tracker_scrape_allowed': ITRACKSCRAPE_ALLOW_FULL,
}

sessdefaults.update(trackerdefaults)
+
#
# BT per download opts
#
# History:
#  Version 2: as released in Tribler 4.5.0
#  Version 3:
DLDEFAULTS_VERSION = 3

dldefaults = {
    'version': DLDEFAULTS_VERSION,
    'max_uploads': 7,
    'keepalive_interval': 120.0,
    'download_slice_size': 2 ** 14,
    'upload_unit_size': 1460,
    'request_backlog': 10,
    'max_message_length': 2 ** 23,
    'selector_enabled': 1,  # whether to enable the file selector and fast resume function. Arno, 2009-02-9: Must be on for checkpoints to work.
    'expire_cache_data': 10,  # the number of days after which you wish to expire old cache data (0 = disabled)
    'priority': [],  # a list of file priorities separated by commas, must be one per file, 0 = highest, 1 = normal, 2 = lowest, -1 = download disabled
    'saveas': None,  # Set to get_default_destdir()
    'max_slice_length': 2 ** 17,
    'max_rate_period': 20.0,
    'upload_rate_fudge': 5.0,
    'tcp_ack_fudge': 0.03,
    'rerequest_interval': 300,
    'min_peers': 20,
    'http_timeout': 60,
    'max_initiate': 40,
    'check_hashes': 1,
    'max_upload_rate': 0,
    'max_download_rate': 0,
    'alloc_rate': 2.0,
    'buffer_reads': 1,
    'write_buffer_size': 4,
    'breakup_seed_bitfield': 1,
    'snub_time': 30.0,
    'rarest_first_cutoff': 2,
    'rarest_first_priority_cutoff': 5,
    'min_uploads': 4,
    'max_files_open': 50,
    'round_robin_period': 30,
    'super_seeder': 0,
    'security': 1,
    'max_connections': 0,
    'auto_kick': 1,
    'double_check': 0,
    'triple_check': 0,
    'lock_files': 0,
    'lock_while_reading': 0,
    'auto_flush': 0,
    #
    # Tribler per-download opts
    #
    'coopdl_role': COOPDL_ROLE_COORDINATOR,
    'coopdl_coordinator_permid': '',
    'proxy_mode': PROXY_MODE_OFF,
    'max_helpers': 10,
    'exclude_ips': '',
    'mode': 0,
    'vod_usercallback': None,
    'vod_userevents': [],
    'video_source': None,
    'video_ratelimit': 0,
    'video_source_authconfig': None,
    'selected_files': [],
    'ut_pex_max_addrs_from_peer': 16,
    # Version 3:
    'same_nat_try_internal': 0,
    'unchoke_bias_for_internal': 0,
}

# Arno, 2009-12-11: Sparse as default reduces CPU usage. Previously this was
# also set, but in DownloadConfig.__init__
if sys.platform == 'win32':
    dldefaults['alloc_type'] = DISKALLOC_NORMAL
else:
    dldefaults['alloc_type'] = DISKALLOC_SPARSE
+
# Defaults for fields stored in the torrent metainfo dict itself.
tdefdictdefaults = {
    'comment': None,
    'created by': None,
    'announce': None,
    'announce-list': None,
    'nodes': None,  # mainline DHT
    'httpseeds': None,
    'url-list': None,
    'encoding': None,
}

# Defaults controlling how a torrent is created (not part of the metainfo).
tdefmetadefaults = {
    'version': 1,
    'piece length': 0,
    'makehash_md5': 0,
    'makehash_crc32': 0,
    'makehash_sha1': 0,
    'createmerkletorrent': 0,
    'torrentsigkeypairfilename': None,
    'thumb': None,  # JPEG data
}

tdefdefaults = {}
tdefdefaults.update(tdefdictdefaults)
tdefdefaults.update(tdefmetadefaults)
diff --git a/instrumentation/next-share/BaseLib/Core/exceptions.py b/instrumentation/next-share/BaseLib/Core/exceptions.py
new file mode 100644 (file)
index 0000000..a704380
--- /dev/null
@@ -0,0 +1,82 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+""" The Tribler-specifc Exceptions the Core may throw. """
+
+#
+# Exceptions
+#
class TriblerException(Exception):
    """Root of the Tribler Core exception hierarchy.

    All Tribler-specific exceptions raised by the Core derive from
    this class.
    """

    def __init__(self, msg=None):
        Exception.__init__(self, msg)

    def __str__(self):
        # Prefix the message with the concrete class to ease debugging.
        return '%s: %s' % (self.__class__, Exception.__str__(self))
+
class OperationNotPossibleAtRuntimeException(TriblerException):
    """Raised when an operation is requested after the Session or
    Download has already been started."""

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
class OperationNotPossibleWhenStoppedException(TriblerException):
    """Raised when an operation is requested on a Download that has
    been stopped."""

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
class OperationNotEnabledByConfigurationException(TriblerException):
    """Raised when the current Session/Download configuration does not
    allow the requested operation."""

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
+    
class NotYetImplementedException(TriblerException):
    """Raised when the requested operation is not yet fully implemented."""

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
+
class DuplicateDownloadException(TriblerException):
    """Raised when adding a Download whose torrent (same infohash)
    already exists in the Session."""

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
class VODNoFileSelectedInMultifileTorrentException(TriblerException):
    """Raised when a torrent containing multiple video files is started
    in Video-On-Demand mode without specifying which file to play."""

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
class LiveTorrentRequiresUsercallbackException(TriblerException):
    """Raised when a live-stream torrent is started without a callback
    to invoke when the stream is ready to play.

    Use set_video_event_callback(usercallback) to correct this problem.
    """

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
class TorrentDefNotFinalizedException(TriblerException):
    """Raised when starting a download from a torrent definition that
    was not finalized."""

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+
+
class TriblerLegacyException(TriblerException):
    """Wraps fatal download-engine errors that, for legacy reasons, are
    reported as plain text (often a stringified exception) instead of
    Exception objects.

    Will be phased out.
    """

    def __init__(self, msg=None):
        TriblerException.__init__(self, msg)
+    
diff --git a/instrumentation/next-share/BaseLib/Core/osutils.py b/instrumentation/next-share/BaseLib/Core/osutils.py
new file mode 100644 (file)
index 0000000..80dd50c
--- /dev/null
@@ -0,0 +1,352 @@
+# Written by Arno Bakker, ABC authors
+# see LICENSE.txt for license information
+"""
+OS-independent utility functions
+
+get_home_dir()      : Returns CSIDL_APPDATA i.e. App data directory on win32
+get_picture_dir()
+getfreespace(path)
+"""
+    
+#
+# Multiple methods for getting free diskspace
+#
+import sys
+import os
+import time
+import binascii
+
+if sys.platform == "win32":
+    try:
+        from win32com.shell import shell
+        def get_home_dir():
+            # http://www.mvps.org/access/api/api0054.htm
+            # CSIDL_PROFILE = &H28
+            # C:\Documents and Settings\username
+            return shell.SHGetSpecialFolderPath(0, 0x28)
+
+        def get_appstate_dir():
+            # http://www.mvps.org/access/api/api0054.htm
+            # CSIDL_APPDATA = &H1A
+            # C:\Documents and Settings\username\Application Data
+            return shell.SHGetSpecialFolderPath(0, 0x1a)
+
+        def get_picture_dir():
+            # http://www.mvps.org/access/api/api0054.htm
+            # CSIDL_MYPICTURES = &H27
+            # C:\Documents and Settings\username\My Documents\My Pictures
+            return shell.SHGetSpecialFolderPath(0, 0x27)
+
+        def get_desktop_dir():
+            # http://www.mvps.org/access/api/api0054.htm
+            # CSIDL_DESKTOPDIRECTORY = &H10
+            # C:\Documents and Settings\username\Desktop
+            return shell.SHGetSpecialFolderPath(0, 0x10)
+
+    except ImportError:
+        def get_home_dir():
+            try:
+                # when there are special unicode characters in the username,
+                # the following will fail on python 2.4, 2.5, 2.x this will
+                # always succeed on python 3.x
+                return os.path.expanduser(u"~")
+            except Exception, unicode_error:
+                pass
+
+            # non-unicode home
+            home = os.path.expanduser("~")
+            head, tail = os.path.split(home)
+
+            dirs = os.listdir(head)
+            udirs = os.listdir(unicode(head))
+
+            # the character set may be different, but the string length is
+            # still the same
+            islen = lambda dir: len(dir) == len(tail)
+            dirs = filter(islen, dirs)
+            udirs = filter(islen, udirs)
+            if len(dirs) == 1 and len(udirs) == 1:
+                return os.path.join(head, udirs[0])
+
+            # remove all dirs that are equal in unicode and non-unicode. we
+            # know that we don't need these dirs because the initial
+            # expandusers would not have failed on them
+            for dir in dirs[:]:
+                if dir in udirs:
+                    dirs.remove(dir)
+                    udirs.remove(dir)
+            if len(dirs) == 1 and len(udirs) == 1:
+                return os.path.join(head, udirs[0])
+
+            # assume that the user has write access in her own
+            # directory. therefore we can filter out any non-writable
+            # directories
+            writable_udir = [udir for udir in udirs if os.access(udir, os.W_OK)]
+            if len(writable_udir) == 1:
+                return os.path.join(head, writable_udir[0])
+
+            # fallback: assume that the order of entries in dirs is the same
+            # as in udirs
+            for dir, udir in zip(dirs, udirs):
+                if dir == tail:
+                    return os.path.join(head, udir)
+
+            # failure
+            raise unicode_error
+
+        def get_appstate_dir():
+            homedir = get_home_dir()
+            # 5 = XP, 6 = Vista
+            # [E1101] Module 'sys' has no 'getwindowsversion' member
+            # pylint: disable-msg=E1101
+            winversion = sys.getwindowsversion()
+            # pylint: enable-msg=E1101
+            if winversion[0] == 6:
+                appdir = os.path.join(homedir,u"AppData",u"Roaming")
+            else:
+                appdir = os.path.join(homedir,u"Application Data")
+            return appdir
+
+        def get_picture_dir():
+            return get_home_dir()
+
+        def get_desktop_dir():
+            home = get_home_dir()
+            return os.path.join(home,u"Desktop")
+            
+else:
+    # linux or darwin (mac)
+    def get_home_dir():
+        return os.path.expanduser(u"~")
+
+    def get_appstate_dir():
+        return get_home_dir()
+
+    def get_picture_dir():
+        return get_desktop_dir()
+
+    def get_desktop_dir():
+        home = get_home_dir()
+        desktop = os.path.join(home, "Desktop")
+        if os.path.exists(desktop):
+            return desktop
+        else:
+            return home
+
+if sys.version.startswith("2.4"):
+    os.SEEK_SET = 0
+    os.SEEK_CUR = 1
+    os.SEEK_END = 2
+
+try:
+    # Unix
+    from os import statvfs
+    # NOTE(review): this second import rebinds the name 'statvfs' to the
+    # statvfs *constants* module (F_BAVAIL etc.), shadowing os.statvfs
+    # imported on the previous line. The function below deliberately calls
+    # os.statvfs, so this is harmless but confusing.
+    import statvfs
+    def getfreespace(path):
+        # Free space available to the (non-root) user, in bytes:
+        # free blocks available * fundamental block size.
+        s = os.statvfs(path.encode("utf-8"))
+        size = s[statvfs.F_BAVAIL] * long(s[statvfs.F_BSIZE])
+        return size
+except:
+    if (sys.platform == 'win32'):
+        try:
+            # Windows if win32all extensions are installed
+            import win32file
+            try:
+                # Win95 OSR2 and up
+                # Arno: this code was totally broken as the method returns
+                # a list of values indicating 1. free space for the user,
+                # 2. total space for the user and 3. total free space, so
+                # not a single value.
+                win32file.GetDiskFreeSpaceEx(".")
+                def getfreespace(path):
+                    # Boudewijn: the win32file module is NOT unicode
+                    # safe! We will try directories further up the
+                    # directory tree in the hopes of getting a path on
+                    # the same disk without the unicode...
+                    while True:
+                        try:
+                            return win32file.GetDiskFreeSpaceEx(path)[0]
+                        except:
+                            path = os.path.split(path)[0]
+                            if not path:
+                                raise
+            except:                
+                # Original Win95
+                # (2GB limit on partition size, so this should be
+                #  accurate except for mapped network drives)
+                # Arno: see http://aspn.activestate.com/ASPN/docs/ActivePython/2.4/pywin32/win32file__GetDiskFreeSpace_meth.html
+                def getfreespace(path):
+                    [spc, bps, nfc, tnc] = win32file.GetDiskFreeSpace(path)
+                    return long(nfc) * long(spc) * long(bps)
+                    
+        except ImportError:
+            # Windows if win32all extensions aren't installed
+            # (parse the output from the dir command)
+            def getfreespace(path):
+                """ Last-resort free-space query: run the 'dir' command on
+                the given path and scan its output for the 'bytes free'
+                summary line. Returns bytes free, or a huge sentinel value
+                (1 yottabyte) when the output cannot be parsed. """
+                try:
+                    # os.popen2 returns (child_stdin, child_stdout)
+                    mystdin, mystdout = os.popen2(u"dir " + u"\"" + path + u"\"")
+                    
+                    sizestring = "0"
+                
+                    for line in mystdout:
+                        line = line.strip()
+                        # Arno: FIXME: this won't work on non-English Windows, as reported by the IRT
+                        index = line.rfind("bytes free")
+                        if index > -1 and line[index:] == "bytes free":
+                            # expected shape: "... N Dir(s)  12,345,678 bytes free"
+                            parts = line.split(" ")
+                            if len(parts) > 3:
+                                part = parts[-3]
+                                part = part.replace(",", "")
+                                sizestring = part
+                                break
+
+                    size = long(sizestring)                    
+                    
+                    if size == 0L:
+                        # NOTE(review): if the summary line was never found the
+                        # iterator is already exhausted here, so this debug loop
+                        # prints nothing in that case.
+                        print >>sys.stderr,"getfreespace: can't determine freespace of ",path
+                        for line in mystdout:
+                            print >>sys.stderr,line
+                            
+                        size = 2**80L
+                except:
+                    # If in doubt, just return something really large
+                    # (1 yottabyte)
+                    size = 2**80L
+                
+                return size
+    else:
+        # Any other cases
+        # TODO: support for Mac? (will statvfs work with OS X?)
+        def getfreespace(path):
+            # If in doubt, just return something really large
+            # (1 yottabyte)
+            return 2**80L
+
+
+invalidwinfilenamechars = ''
+for i in range(32):
+    invalidwinfilenamechars += chr(i)
+invalidwinfilenamechars += '"*/:<>?\\|'
+invalidlinuxfilenamechars = '/'
+
+def fix_filebasename(name, unit=False, maxlen=255):
+    """ Check if str is a valid Windows file name (or unit name if unit is true)
+     * If the filename isn't valid: returns a corrected name
+     * If the filename is valid: returns the filename
+    """
+    if unit and (len(name) != 2 or name[1] != ':'):
+        return 'c:'
+    if not name or name == '.' or name == '..':
+        return '_'
+    
+    if unit:
+        name = name[0]
+    fixed = False
+    if len(name) > maxlen:
+        name = name[:maxlen]
+        fixed = True
+
+    fixedname = ''
+    spaces = 0
+    for c in name:
+        if sys.platform.startswith('win'):
+            invalidchars = invalidwinfilenamechars
+        else:
+            invalidchars = invalidlinuxfilenamechars
+             
+        if c in invalidchars:
+            fixedname += '_'
+            fixed = True
+        else:
+            fixedname += c
+            if c == ' ':
+                spaces += 1
+    
+    file_dir, basename = os.path.split(fixedname)
+    while file_dir != '':
+        fixedname = basename
+        file_dir, basename = os.path.split(fixedname)
+        fixed = True
+    
+    if fixedname == '':
+        fixedname = '_'
+        fixed = True
+        
+    if fixed:
+        return last_minute_filename_clean(fixedname)
+    elif spaces == len(name):
+        # contains only spaces
+        return '_'
+    else:
+        return last_minute_filename_clean(name)
+    
+def last_minute_filename_clean(name):
+    # Final cleanup applied to an already-sanitised filename.
+    s = name.strip() # Arno: remove initial or ending space
+    if sys.platform == 'win32' and s.endswith('..'):
+        # presumably because Windows rejects names ending in dots — the
+        # original gives no rationale; TODO confirm
+        s = s[:-2]
+    return s
+
+
+def get_readable_torrent_name(infohash, raw_filename):
+    # return name__infohash.torrent
+    hex_infohash = binascii.hexlify(infohash)
+    suffix = '__' + hex_infohash + '.torrent'
+    save_name = ' ' + fix_filebasename(raw_filename, maxlen=254-len(suffix)) + suffix
+    # use a space ahead to distinguish from previous collected torrents
+    return save_name
+
+
+if sys.platform == "win32":
+    import win32pdh
+    
+    def getcpuload():
+        """ Returns total CPU usage as fraction (0..1).
+        Warning: side-effect: sleeps for 0.1 second to do diff """
+        #mempath = win32pdh.MakeCounterPath((None, "Memory", None, None, -1, "Available MBytes"))
+        cpupath = win32pdh.MakeCounterPath((None, "Processor", "_Total", None, -1, "% Processor Time"))
+        query = win32pdh.OpenQuery(None, 0)
+        counter = win32pdh.AddCounter(query, cpupath, 0)
+        
+        win32pdh.CollectQueryData(query)
+        # Collect must be called twice for CPU, see http://support.microsoft.com/kb/262938
+        time.sleep(0.1)
+        win32pdh.CollectQueryData(query)
+            
+        status, value = win32pdh.GetFormattedCounterValue(counter,win32pdh.PDH_FMT_LONG)
+             
+        return float(value)/100.0
+    
+elif sys.platform == "linux2":
+    def read_proc_stat():
+        """ Read idle and total CPU time counters from /proc/stat, see
+        man proc.
+
+        Returns (total, idle) in jiffies, where total is the sum of the
+        user, nice, system and idle columns.
+        NOTE(review): columns beyond idle (iowait/irq/softirq on newer
+        kernels) are ignored by the sum; presumably acceptable for a
+        coarse load estimate — confirm.
+        NOTE(review): returns None implicitly if no 'cpu ' line is found,
+        which would make getcpuload() fail on unpacking.
+        """
+        f = open("/proc/stat","rb")
+        try:
+            while True:
+                line = f.readline()
+                if len(line) == 0:
+                    break
+                if line.startswith("cpu "): # note space
+                    words = line.split()
+                    # fields: 'cpu' user nice system idle ... (in jiffies)
+                    total = 0
+                    for i in range(1,5):
+                        total += int(words[i])
+                    idle = int(words[4])
+                    return (total,idle)
+        finally:
+            f.close()
+    
+    
+    def getcpuload():
+        """ Returns total CPU usage as fraction (0..1).
+        Warning: side-effect: sleeps for 0.1 second to do diff """
+        (total1,idle1) = read_proc_stat()
+        time.sleep(0.1)
+        (total2,idle2) = read_proc_stat()
+        # busy fraction = 1 - (idle delta / total delta) over the 0.1s window
+        total = total2 - total1
+        idle = idle2 - idle1
+        return 1.0-(float(idle))/float(total)
+else:
+    # Mac
+    def getupload():
+        raise ValueError("Not yet implemented")
diff --git a/instrumentation/next-share/BaseLib/Core/simpledefs.py b/instrumentation/next-share/BaseLib/Core/simpledefs.py
new file mode 100644 (file)
index 0000000..049060a
--- /dev/null
@@ -0,0 +1,177 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+""" Simple definitions for the Tribler Core. """
+import os
+
+DLSTATUS_ALLOCATING_DISKSPACE = 0 # TODO: make sure this get set when in this alloc mode
+DLSTATUS_WAITING4HASHCHECK = 1
+DLSTATUS_HASHCHECKING = 2
+DLSTATUS_DOWNLOADING = 3
+DLSTATUS_SEEDING = 4
+DLSTATUS_STOPPED = 5
+DLSTATUS_STOPPED_ON_ERROR = 6
+DLSTATUS_REPEXING = 7
+
+dlstatus_strings = ['DLSTATUS_ALLOCATING_DISKSPACE',
+'DLSTATUS_WAITING4HASHCHECK', 
+'DLSTATUS_HASHCHECKING',
+'DLSTATUS_DOWNLOADING',
+'DLSTATUS_SEEDING',
+'DLSTATUS_STOPPED',
+'DLSTATUS_STOPPED_ON_ERROR',
+'DLSTATUS_REPEXING']
+
+UPLOAD = 'up'
+DOWNLOAD = 'down'
+
+DLMODE_NORMAL = 0
+DLMODE_VOD = 1
+DLMODE_SVC = 2 # Ric: added download mode for Scalable Video Coding (SVC) 
+
+PERSISTENTSTATE_CURRENTVERSION = 3
+"""
+V1 = SwarmPlayer 1.0.0
+V2 = Tribler 4.5.0: SessionConfig: Added NAT fields
+V3 = SessionConfig: Added multicast_local_peer_discovery, 
+     Removed rss_reload_frequency + rss_check_frequency.
+
+For details see API.py
+"""
+
+STATEDIR_ITRACKER_DIR = 'itracker'
+STATEDIR_DLPSTATE_DIR = 'dlcheckpoints'
+STATEDIR_PEERICON_DIR = 'icons'
+STATEDIR_TORRENTCOLL_DIR = 'collected_torrent_files'
+
+# 13-04-2010, Andrea: subtitles collecting dir default
+STATEDIR_SUBSCOLL_DIR = 'collected_subtitles_files'
+STATEDIR_SESSCONFIG = 'sessconfig.pickle'
+STATEDIR_SEEDINGMANAGER_DIR = 'seeding_manager_stats'
+DESTDIR_COOPDOWNLOAD = 'downloadhelp' 
+
+# For observer/callback mechanism, see Session.add_observer()
+   
+# subjects
+NTFY_PEERS = 'peers'
+NTFY_TORRENTS = 'torrents'
+NTFY_PREFERENCES = 'preferences'
+NTFY_SUPERPEERS = 'superpeers' # use NTFY_PEERS !!
+NTFY_FRIENDS = 'friends'       # use NTFY_PEERS !!
+NTFY_MYPREFERENCES = 'mypreferences' # currently not observable
+NTFY_BARTERCAST = 'bartercast' # currently not observable
+NTFY_MYINFO = 'myinfo'
+NTFY_SEEDINGSTATS = 'seedingstats'
+NTFY_SEEDINGSTATSSETTINGS = 'seedingstatssettings'
+NTFY_VOTECAST = 'votecast'
+NTFY_CHANNELCAST = 'channelcast'
+# this corresponds to the event of a peer advertising
+# new rich metadata available (for now just subtitles)
+NTFY_RICH_METADATA = 'rich_metadata'
+# this corresponds to the event of a subtitle file (the actual .srt)
+# received from a remote peer
+NTFY_SUBTITLE_CONTENTS = 'subtitles_in'
+NTFY_SEARCH = 'clicklogsearch' # BuddyCast 4
+NTFY_TERM= 'clicklogterm'
+
+
+# non data handler subjects
+NTFY_ACTIVITIES = 'activities' # an activity was set (peer met/dns resolved)
+NTFY_REACHABLE = 'reachable'   # the Session is reachable from the Internet
+
+# changeTypes
+NTFY_UPDATE = 'update'         # data is updated
+NTFY_INSERT = 'insert'         # new data is inserted
+NTFY_DELETE = 'delete'         # data is deleted
+NTFY_SEARCH_RESULT = 'search_result'     # new search result
+NTFY_CONNECTION = 'connection' # connection made or broken
+
+# object IDs for NTFY_ACTIVITIES subject 
+NTFY_ACT_NONE = 0
+NTFY_ACT_UPNP = 1
+NTFY_ACT_REACHABLE = 2
+NTFY_ACT_GET_EXT_IP_FROM_PEERS = 3
+NTFY_ACT_MEET = 4
+NTFY_ACT_GOT_METADATA = 5
+NTFY_ACT_RECOMMEND = 6
+NTFY_ACT_DISK_FULL = 7
+NTFY_ACT_NEW_VERSION = 8
+NTFY_ACT_ACTIVE = 9
+# Disk-allocation policies for download, see DownloadConfig.set_alloc_type
+DISKALLOC_NORMAL = 'normal'              
+DISKALLOC_BACKGROUND = 'background'      
+DISKALLOC_PREALLOCATE = 'pre-allocate'
+DISKALLOC_SPARSE = 'sparse'
+
+# UPnP modes, see SessionConfig.set_upnp_mode
+UPNPMODE_DISABLED = 0
+UPNPMODE_WIN32_HNetCfg_NATUPnP = 1
+UPNPMODE_WIN32_UPnP_UPnPDeviceFinder = 2
+UPNPMODE_UNIVERSAL_DIRECT = 3
+
+# Buddycast Collecting Policy parameters
+BCCOLPOLICY_SIMPLE = 1
+# BCCOLPOLICY_T4T = 2 # Future work
+
+# Internal tracker scrape
+ITRACKSCRAPE_ALLOW_NONE = 'none'
+ITRACKSCRAPE_ALLOW_SPECIFIC = 'specific'
+ITRACKSCRAPE_ALLOW_FULL = 'full'
+
+ITRACKDBFORMAT_BENCODE = 'bencode'
+ITRACKDBFORMAT_PICKLE= 'pickle'
+
+ITRACKMULTI_ALLOW_NONE = 'none'
+ITRACKMULTI_ALLOW_AUTODETECT = 'autodetect'
+ITRACKMULTI_ALLOW_ALL = 'all'
+
+ITRACK_IGNORE_ANNOUNCEIP_NEVER = 0
+ITRACK_IGNORE_ANNOUNCEIP_ALWAYS = 1
+ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK = 2
+
+# Cooperative download
+COOPDL_ROLE_COORDINATOR = 'coordinator'
+COOPDL_ROLE_HELPER = 'helper'
+
+# Methods for authentication of the source in live streaming
+LIVE_AUTHMETHOD_NONE = "None"   # No auth, also no abs. piece nr. or timestamp.
+LIVE_AUTHMETHOD_ECDSA = "ECDSA" # Elliptic Curve DSA signatures
+LIVE_AUTHMETHOD_RSA = "RSA"     # RSA signatures
+
+# Video-On-Demand / live events
+VODEVENT_START = "start"
+VODEVENT_PAUSE = "pause"
+VODEVENT_RESUME = "resume"
+
+
+# Friendship messages
+F_REQUEST_MSG = "REQ"
+F_RESPONSE_MSG = "RESP"
+F_FORWARD_MSG = "FWD" # Can forward any type of other friendship message
+
+
+# States for a friend
+FS_NOFRIEND = 0
+FS_MUTUAL = 1
+FS_I_INVITED = 2
+FS_HE_INVITED = 3
+FS_I_DENIED = 4
+FS_HE_DENIED = 5
+
+P2PURL_SCHEME = "tribe" # No colon
+
+URL_MIME_TYPE = 'text/x-url'
+TSTREAM_MIME_TYPE = "application/x-ns-stream"
+
+TRIBLER_TORRENT_EXT = ".tribe" # Unused
+# Infohashes are always 20 byte binary strings
+INFOHASH_LENGTH = 20
+
+
+# ProxyService 
+PROXY_MODE_OFF = 0
+PROXY_MODE_PRIVATE = 1
+PROXY_MODE_SPEED= 2
+PROXYSERVICE_OFF = 0
+PROXYSERVICE_ON = 1
diff --git a/instrumentation/next-share/BaseLib/Core/superpeer.txt b/instrumentation/next-share/BaseLib/Core/superpeer.txt
new file mode 100644 (file)
index 0000000..8f9410d
--- /dev/null
@@ -0,0 +1,9 @@
+#ip, port, permid, [name]
+superpeer1.das2.ewi.tudelft.nl, 7001, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2I5yVc1+dWVEx3nbriRKJmOSlQePZ9LU7yYQoGABMvU1uGHvqnT9t+53eaCGziV12MZ1g2p0GLmZP9, SuperPeer1@Tribler
+superpeer2.cs.vu.nl,            7002, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk, SuperPeer2@Tribler
+superpeer3.tribler.org,         7003, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh, SuperPeer3@Tribler
+superpeer4.das2.ewi.tudelft.nl, 7004, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA, SuperPeer4@Tribler
+superpeer5.das2.ewi.tudelft.nl, 7005, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu, SuperPeer5@Tribler
+superpeer6.das2.ewi.tudelft.nl, 7006, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd, SuperPeer6@Tribler
+superpeer7.das2.ewi.tudelft.nl, 7007, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc, SuperPeer7@Tribler
+superpeer8.das2.ewi.tudelft.nl, 7008, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU, SuperPeer8@Tribler
diff --git a/instrumentation/next-share/BaseLib/Debug/__init__.py b/instrumentation/next-share/BaseLib/Debug/__init__.py
new file mode 100644 (file)
index 0000000..1485bab
--- /dev/null
@@ -0,0 +1,3 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
diff --git a/instrumentation/next-share/BaseLib/Debug/console.py b/instrumentation/next-share/BaseLib/Debug/console.py
new file mode 100644 (file)
index 0000000..aa8e423
--- /dev/null
@@ -0,0 +1,43 @@
+"""
+Alternate stdout and stderr with much more protection
+"""
+
+import sys 
+
+class SafePrintStream:
+    """ File-like wrapper whose write() never propagates an exception:
+    arguments are ASCII-encoded with backslash escapes, and on any
+    failure a repr() of the argument — or, failing that, of the error
+    itself — is written instead. """
+    def __init__(self, stream):
+        # stream: the underlying file object (e.g. the real stderr)
+        self._stream = stream
+
+    def write(self, arg):
+        try:
+            self._stream.write(arg.encode("ASCII", "backslashreplace"))
+        except Exception, e:
+            # fall back to a repr of the argument; if even that cannot be
+            # written, report the original encoding/IO error
+            try:
+                s = u"{%s}" % repr(arg)
+                self._stream.write(s)
+            except:
+                self._stream.write("TriblerConsole: ERROR printing\n")
+                self._stream.write(repr(e))
+                self._stream.write("\n")
+        
+    def flush(self):
+        self._stream.flush()
+
+class SafeLinePrintStream:
+    """ Line-buffering variant of SafePrintStream: fragments are collected
+    and only written to the underlying stream when a write() call consists
+    of exactly "\n" (which is how the Python 2 print statement emits its
+    trailing newline) or when flush() is called. """
+    def __init__(self, stream):
+        self._stream = stream
+        # accumulated ASCII-encoded fragments of the current line
+        self._parts = []
+
+    def write(self, arg):
+        self._parts.append(arg.encode("ASCII", "backslashreplace"))
+        if arg == "\n":
+            self._stream.write("".join(self._parts))
+            self._parts = []
+        
+    def flush(self):
+        # write out any partial line before flushing the real stream
+        self._stream.write("".join(self._parts))
+        self._parts = []
+        self._stream.flush()
+        
+sys.stderr = SafePrintStream(sys.stderr)
+sys.stdout = sys.stderr
diff --git a/instrumentation/next-share/BaseLib/Debug/memory.py b/instrumentation/next-share/BaseLib/Debug/memory.py
new file mode 100644 (file)
index 0000000..6d46b96
--- /dev/null
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+"""
+Use the garbage collector to monitor memory usage
+"""
+
+from types import *
+import gc
+import inspect
+import sys
+import thread
+import time
+
+# Rough per-type byte-size estimates (32-bit assumptions); 'depth' limits
+# recursion into containers.
+def _get_default_footprint(obj, depth):
+    return 4
+def _get_int_footprint(obj, depth):
+    return 4
+def _get_float_footprint(obj, depth):
+    return 8
+def _get_string_footprint(obj, depth):
+    # one byte per character
+    return len(obj)
+def _get_unicode_footprint(obj, depth):
+    # two bytes per character
+    return 2 * len(obj)
+def _get_tuple_footprint(obj, depth):
+    # 4-byte header + 4 bytes per slot, plus contents unless depth ran out
+    if depth == 0:
+        return 4 + 4 * len(obj)
+    else:
+        # NOTE: the lambda parameter 'obj' deliberately shadows the outer
+        # 'obj'; each element is recursed into with the same depth
+        return 4 + 4 * len(obj) + sum(map(lambda obj:get_memory_footprint(obj, depth), obj))
+def _get_list_footprint(obj, depth):
+    if depth == 0:
+        return 8 + 4 * len(obj)
+    else:
+        if len(obj) in (2, 3):
+            print >> sys.stderr, "Len:", type(obj[0]), type(obj[1])
+            print >> sys.stderr, `obj`
+            return 42
+        print >> sys.stderr, "Len:", len(obj)
+        return 8 + 4 * len(obj) + sum(map(lambda obj:get_memory_footprint(obj, depth), obj))
+def _get_dict_footprint(obj, depth):
+    if depth == 0:
+        return 32 + 8 * len(obj)
+    else:
+        return 32 + 8 * len(obj) + sum(map(lambda obj:get_memory_footprint(obj, depth), obj.iterkeys())) + sum(map(lambda obj:get_memory_footprint(obj, depth), obj.itervalues()))
+
+memory_footprint_map = {IntType:_get_int_footprint,
+                        FloatType:_get_float_footprint,
+                        StringType:_get_float_footprint,
+                        UnicodeType:_get_unicode_footprint,
+                        TupleType:_get_tuple_footprint,
+                        ListType:_get_list_footprint,
+                        DictType:_get_dict_footprint}
+def get_memory_footprint(obj, depth=100):
+    return memory_footprint_map.get(type(obj), _get_default_footprint)(obj, depth-1)
+
+def _get_default_description(obj):
+    return type(obj)
+def _get_function_description(obj):
+    return "<function '%s' from '%s'>" % (obj.__name__, obj.__module__)
+def _get_module_description(obj):
+    return str(obj)
+def _get_frame_description(obj):
+    return "<frame for '%s' from %s:%d >" % (obj.f_code.co_name, obj.f_code.co_filename, obj.f_code.co_firstlineno)
+
+description_map = {FunctionType:_get_function_description,
+                   ModuleType:_get_module_description,
+                   FrameType:_get_frame_description}
+def get_description(obj):
+    return description_map.get(type(obj), _get_default_description)(obj)
+
+def get_datetime():
+    return time.strftime("%Y/%m/%d %H:%M:%S")
+
+def byte_uint_to_human(i, format="%(value).1f%(unit)s"):                       
+    """Convert a number into a formatted string.                             
+                                                                             
+    format: %(value)d%(unit)s                                                
+    1           --> 1B                                                       
+    1024        --> 1KB                                                      
+    1048576     --> 1MB                                                      
+    1073741824  --> 1GB                                                      
+                                                                             
+    format: %(value).1f %(unit-long)s                                        
+    1           --> 1.0 byte                                                 
+    2           --> 2.0 bytes                                                
+                                                                             
+    todo:                                                                    
+    - uint_to_human(1025, format="%(value)d %(unit-long)s") --> '1 kilobytes'
+      however, this should result in '1 kilobyte'                            
+                                                                             
+    """                                                                      
+    assert type(i) in (int, long)                                            
+    assert i >= 0                                                            
+    assert type(format) is str                                               
+    dic = {}                                                                 
+    if i < 1024:                                                             
+        dic["value"] = i                                                     
+        dic["unit"] = "B"                                                    
+        dic["unit-long"] = (i == 1 and "byte" or "bytes")                    
+    elif i < 1048576:                                                        
+        dic["value"] = i / 1024.0                                            
+        dic["unit"] = "KB"                                                   
+        dic["unit-long"] = (i == 1024 and "kilobyte" or "kilobytes")         
+    elif i < 1073741824:                                                     
+        dic["value"] = i / 1048576.0                                         
+        dic["unit"] = "MB"                                                   
+        dic["unit-long"] = (i == 1048576 and "megabyte" or "megabytes")      
+    else:                                                                    
+        dic["value"] = i / 1073741824.0                                      
+        dic["unit"] = "GB"                                                   
+        dic["unit-long"] = (i == 1073741824 and "gigabyte" or "gigabytes")   
+                                                                             
+    return format % dic                                                      
+
+def monitor(delay=10.0, interval=60.0, min_footprint=100000):
+    """ Start a daemon-style background thread that, after an initial
+    'delay' seconds, periodically (every 'interval' seconds) walks all
+    gc-tracked containers/strings and reports to stderr every object
+    whose estimated footprint reaches the recent minimum threshold
+    (seeded with 'min_footprint'), together with its referrers. """
+    def parallel():
+        time.sleep(delay)
+
+        history = [min_footprint]
+        while True:
+            high_foot = 0
+            # threshold = smallest of the last two recorded maxima, so the
+            # report adapts to the process' recent allocation behaviour
+            history = history[-2:]
+            low_foot = min(history)
+            datetime = get_datetime()
+            print >> sys.stderr, "Memory:", datetime, "using minimal footprint:", byte_uint_to_human(low_foot)
+
+            gc.collect()
+            for obj in gc.get_objects():
+                if type(obj) in (TupleType, ListType, DictType, StringType, UnicodeType):
+                    try:
+                        footprint = get_memory_footprint(obj)
+                    except:
+                        print >> sys.stderr, "Memory:", datetime, "unable to get footprint for", get_description(obj)
+                    else:
+                        if footprint > high_foot:
+                            high_foot = footprint
+                        if footprint >= low_foot:
+
+                            print >> sys.stderr, "Memory:", datetime, get_description(obj), "footprint:", byte_uint_to_human(footprint)
+                            for referrer in gc.get_referrers(obj):
+                                print >> sys.stderr, "Memory:", datetime, "REF", get_description(referrer)
+                            print >> sys.stderr, "Memory"
+
+            history.append(high_foot)
+            time.sleep(interval)
+    
+    thread.start_new_thread(parallel, ())
+
+
+def main():
+    """
+    Test the memory monitor
+    """
+    monitor(1.0)
+    time.sleep(10)
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Images/SwarmPlayerIcon.ico b/instrumentation/next-share/BaseLib/Images/SwarmPlayerIcon.ico
new file mode 100644 (file)
index 0000000..3128616
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/SwarmPlayerIcon.ico differ
diff --git a/instrumentation/next-share/BaseLib/Images/SwarmPlayerLogo.png b/instrumentation/next-share/BaseLib/Images/SwarmPlayerLogo.png
new file mode 100644 (file)
index 0000000..5db0cdd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/SwarmPlayerLogo.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/SwarmPluginIcon.ico b/instrumentation/next-share/BaseLib/Images/SwarmPluginIcon.ico
new file mode 100644 (file)
index 0000000..3128616
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/SwarmPluginIcon.ico differ
diff --git a/instrumentation/next-share/BaseLib/Images/SwarmPluginLogo.png b/instrumentation/next-share/BaseLib/Images/SwarmPluginLogo.png
new file mode 100644 (file)
index 0000000..5db0cdd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/SwarmPluginLogo.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/SwarmServerIcon.ico b/instrumentation/next-share/BaseLib/Images/SwarmServerIcon.ico
new file mode 100644 (file)
index 0000000..3128616
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/SwarmServerIcon.ico differ
diff --git a/instrumentation/next-share/BaseLib/Images/background.png b/instrumentation/next-share/BaseLib/Images/background.png
new file mode 100644 (file)
index 0000000..50075ad
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/background.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/fullScreen.png b/instrumentation/next-share/BaseLib/Images/fullScreen.png
new file mode 100644 (file)
index 0000000..efe2b4b
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/fullScreen.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/fullScreen_hover.png b/instrumentation/next-share/BaseLib/Images/fullScreen_hover.png
new file mode 100644 (file)
index 0000000..0feefe3
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/fullScreen_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/logoTribler.png b/instrumentation/next-share/BaseLib/Images/logoTribler.png
new file mode 100644 (file)
index 0000000..481116e
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/logoTribler.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/logoTribler_small.png b/instrumentation/next-share/BaseLib/Images/logoTribler_small.png
new file mode 100644 (file)
index 0000000..af53270
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/logoTribler_small.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/mute.png b/instrumentation/next-share/BaseLib/Images/mute.png
new file mode 100644 (file)
index 0000000..7841c6f
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/mute.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/mute_hover.png b/instrumentation/next-share/BaseLib/Images/mute_hover.png
new file mode 100644 (file)
index 0000000..556c893
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/mute_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/pause.png b/instrumentation/next-share/BaseLib/Images/pause.png
new file mode 100644 (file)
index 0000000..6b6be23
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/pause.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/pause_hover.png b/instrumentation/next-share/BaseLib/Images/pause_hover.png
new file mode 100644 (file)
index 0000000..754b934
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/pause_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/play.png b/instrumentation/next-share/BaseLib/Images/play.png
new file mode 100644 (file)
index 0000000..1fc173d
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/play.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/play_hover.png b/instrumentation/next-share/BaseLib/Images/play_hover.png
new file mode 100644 (file)
index 0000000..7eecfbc
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/play_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/save.png b/instrumentation/next-share/BaseLib/Images/save.png
new file mode 100644 (file)
index 0000000..0000d9f
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/save.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/saveDisabled.png b/instrumentation/next-share/BaseLib/Images/saveDisabled.png
new file mode 100644 (file)
index 0000000..45df000
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/saveDisabled.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/saveDisabled_hover.png b/instrumentation/next-share/BaseLib/Images/saveDisabled_hover.png
new file mode 100644 (file)
index 0000000..45df000
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/saveDisabled_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/save_hover.png b/instrumentation/next-share/BaseLib/Images/save_hover.png
new file mode 100644 (file)
index 0000000..c4f3dff
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/save_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/sliderDot.png b/instrumentation/next-share/BaseLib/Images/sliderDot.png
new file mode 100644 (file)
index 0000000..cf77466
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/sliderDot.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/sliderVolume.png b/instrumentation/next-share/BaseLib/Images/sliderVolume.png
new file mode 100644 (file)
index 0000000..3da46e0
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/sliderVolume.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/splash.jpg b/instrumentation/next-share/BaseLib/Images/splash.jpg
new file mode 100644 (file)
index 0000000..a119c3f
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/splash.jpg differ
diff --git a/instrumentation/next-share/BaseLib/Images/torrenticon.ico b/instrumentation/next-share/BaseLib/Images/torrenticon.ico
new file mode 100644 (file)
index 0000000..aaea20e
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/torrenticon.ico differ
diff --git a/instrumentation/next-share/BaseLib/Images/tribler.ico b/instrumentation/next-share/BaseLib/Images/tribler.ico
new file mode 100644 (file)
index 0000000..aaea20e
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/tribler.ico differ
diff --git a/instrumentation/next-share/BaseLib/Images/volume.png b/instrumentation/next-share/BaseLib/Images/volume.png
new file mode 100644 (file)
index 0000000..edbc2f7
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/volume.png differ
diff --git a/instrumentation/next-share/BaseLib/Images/volume_hover.png b/instrumentation/next-share/BaseLib/Images/volume_hover.png
new file mode 100644 (file)
index 0000000..debc16a
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Images/volume_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/LICENSE.txt b/instrumentation/next-share/BaseLib/LICENSE.txt
new file mode 100644 (file)
index 0000000..ef95946
--- /dev/null
@@ -0,0 +1,970 @@
+------------------------------------------------------------------------------\r
+\r
+    Next-Share content-delivery library.\r
+\r
+    The research leading to this library has received funding from the European\r
+    Community's Seventh Framework Programme in the P2P-Next project under grant\r
+    agreement no 216217.\r
+\r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    The following library modules are Copyright (c) 2008-2012, VTT Technical Research Centre of Finland; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Norut AS; All rights reserved:\r
+      BaseLib/Core/Multicast/*\r
+      BaseLib/Core/Statistics/Status/*\r
+      BaseLib/Core/ClosedSwarm/*\r
+      BaseLib/Player/swarmplayer-njaal.py\r
+      BaseLib/Plugin/BackgroundProcess-njaal.py\r
+      BaseLib/Test/test_closedswarm.py\r
+      BaseLib/Test/test_status.py\r
+      BaseLib/Tools/createlivestream-njaal.py\r
+      BaseLib/Tools/createpoa.py\r
+      BaseLib/Tools/trial_poa_server.py\r
+      BaseLib/UPnP/*\r
+      BaseLib/Test/test_upnp.py\r
+\r
+    The following library modules are Copyright (c) 2008-2012, DACC Systems AB; All rights reserved:\r
+      DACC/transfer.php\r
\r
+    The following library modules are Copyright (c) 2008-2012, Lancaster University; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Jožef Stefan Institute; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, First Oversi Ltd.; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, TECHNISCHE UNIVERSITEIT DELFT; All rights reserved:\r
+      BaseLib/Core/NATFirewall/NatCheck.py\r
+      BaseLib/Core/NATFirewall/TimeoutCheck.py\r
+      BaseLib/Core/NATFirewall/NatCheckMsgHandler.py\r
+      BaseLib/Policies/SeedingManager.py\r
+      BaseLib/Core/Statistics/SeedingStatsCrawler.py\r
+      BaseLib/Core/CacheDB/SqliteSeedingStatsCacheDB.py\r
+      BaseLib/Core/BuddyCast/moderationcast.py\r
+      BaseLib/Core/BuddyCast/moderationcast_util.py\r
+      BaseLib/Core/BuddyCast/votecast.py\r
+      BaseLib/Core/CacheDB/maxflow.py\r
+      BaseLib/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py\r
+      BaseLib/Core/NATFirewall/ConnectionCheck.py\r
+      BaseLib/Core/NATFirewall/NatTraversal.py\r
+      BaseLib/Core/Search/Reranking.py\r
+      BaseLib/Core/Statistics/tribler_videoplayback_stats.sql\r
+      BaseLib/Core/Statistics/VideoPlaybackCrawler.py\r
+      BaseLib/Core/Utilities/Crypto.py\r
+      BaseLib/Images/\r
+      BaseLib/Player/BaseApp.py\r
+      BaseLib/Player/EmbeddedPlayer4Frame.py\r
+      BaseLib/Player/PlayerVideoFrame.py\r
+      BaseLib/Plugin\r
+      BaseLib/Test/test_multicast.py\r
+      BaseLib/Test/test_na_extend_hs.py\r
+      BaseLib/Test/test_na_extend_hs.sh\r
+      BaseLib/Test/test_sqlitecachedbhandler.sh\r
+      BaseLib/Tools/dirtrackerseeder.py\r
+      BaseLib/Tools/pipe-babscam-h264-nosound-mencoder.sh\r
+      BaseLib/Tools/superpeer.py\r
+      BaseLib/Utilities/LinuxSingleInstanceChecker.py\r
+      BaseLib/Video/Images\r
+      BaseLib/Video/VideoFrame.py\r
+      reset.bat\r
+      reset-keepid.bat\r
+      BaseLib/Core/Video/PiecePickerSVC.py\r
+      BaseLib/Core/Video/SVCTransporter.py\r
+      BaseLib/Core/Video/SVCVideoStatus.py\r
+      BaseLib/schema_sdb_v5.sql\r
+      BaseLib/Core/APIImplementation/makeurl.py\r
+      BaseLib/Core/BuddyCast/channelcast.py\r
+      BaseLib/Core/DecentralizedTracking/repex.py\r
+      BaseLib/Core/NATFirewall/TimeoutFinder.py\r
+      BaseLib/Core/NATFirewall/UDPPuncture.py\r
+      BaseLib/Core/Statistics/RepexCrawler.py\r
+      BaseLib/Debug/*\r
+      BaseLib/Tools/createtorrent.py\r
+      BaseLib/Tools/pingbackserver.py\r
+      BaseLib/Tools/seeking.py\r
+      BaseLib/Tools/stunserver.py\r
+      lucid-xpicreate.sh\r
+      patentfreevlc.bat\r
+      BaseLib/Core/BitTornado/BT1/GetRightHTTPDownloader.py\r
+      BaseLib/Core/BitTornado/BT1/HoffmanHTTPDownloader.py\r
+      BaseLib/Core/CacheDB/MetadataDBHandler.py\r
+      BaseLib/Core/DecentralizedTracking/MagnetLink/*\r
+      BaseLib/Core/Subtitles/*\r
+      BaseLib/Images/SwarmServerIcon.ico\r
+      BaseLib/Main/Build/Ubuntu/tribler.gconf-defaults\r
+      BaseLib/Main/Utility/logging_util.py\r
+      BaseLib/Main/vwxGUI/ChannelsPanel.py\r
+      BaseLib/Main/vwxGUI/images/iconSaved_state4.png\r
+      BaseLib/schema_sdb_v5.sql\r
+      BaseLib/Test/Core/*\r
+      BaseLib/Test/extend_hs_dir/proxyservice.test.torrent\r
+      BaseLib/Test/subtitles_test_res\r
+      BaseLib/Test/test_channelcast_plus_subtitles.py\r
+      BaseLib/Test/test_magnetlink.py\r
+      BaseLib/Test/test_miscutils.py\r
+      BaseLib/Test/test_subtitles.bat\r
+      BaseLib/Test/test_subtitles_isolation.py\r
+      BaseLib/Test/test_subtitles_msgs.py\r
+      BaseLib/Test/test_subtitles.sh\r
+      BaseLib/Test/test_threadpool.py\r
+      BaseLib/Tools/dirtracker.py\r
+      BaseLib/Tools/duration2torrent.py\r
+      BaseLib/Tools/httpseeder.py\r
+      BaseLib/Transport/*\r
+      BaseLib/Video/Ogg.py\r
+      BaseLib/WebUI/*\r
+      xpitransmakedeb.sh\r
+      xpitransmakedist.bat\r
+      xpitransmakedist.sh\r
+      xpitransmakedistmac.sh\r
+      xie8transmakedist.bat\r
+      TUD/swift-spbackend-r1598/*\r
+      vlc-1.0.5-swarmplugin-switch-kcc-src-aug2010-r16968.patch (except bindings/python)\r
+\r
+    The following library modules are Copyright (c) 2008-2012, STMicroelectronics S.r.l.; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Kungliga Tekniska Högskolan (The Royal Institute of Technology); All rights reserved:\r
+      BaseLib/Core/DecentralizedTracking/kadtracker/*\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Markenfilm GmbH & Co. KG; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Radiotelevizija Slovenija Javni Zavvod Ljubljana; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Kendra Foundation; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Universitaet Klagenfurt; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, AG Projects; All rights reserved:\r
+      AGP/sipsimple-20100621.tgz\r
+      \r
+    The following library modules are Copyright (c) 2008-2012, The British Broadcasting Corporation; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Pioneer Digital Design Centre Limited; All rights reserved:\r
+  \r
+    The following library modules are Copyright (c) 2008-2012, INSTITUT FUER RUNDFUNKTECHNIK GMBH; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Fabchannel BV; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, University Politehnica Bucharest; All rights reserved:\r
+      BaseLib/Core/ProxyService/*\r
+      BaseLib/Tools/proxy-cmdline.py\r
+      BaseLib/Test/test_proxyservice_as_coord.bat\r
+      BaseLib/Test/test_proxyservice_as_coord.py\r
+      BaseLib/Test/test_proxyservice_as_coord.sh\r
+      BaseLib/Test/test_proxyservice.bat\r
+      BaseLib/Test/test_proxyservice.py\r
+      BaseLib/Test/test_proxyservice.sh\r
+      BaseLib/Test/extend_hs_dir/proxyservice.test.torrent\r
+      \r
+\r
+    The following library modules are Copyright (c) 2008-2012, EBU-UER; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Università di Roma Sapienza; All rights reserved:\r
+\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    VTT Technical Research Centre of Finland, \r
+    Tekniikankatu 1, \r
+    FIN-33710 Tampere, \r
+    Finland\r
+\r
+    Norut AS,\r
+    Postboks 6434 \r
+    Forskningsparken, \r
+    9294 Tromsø,\r
+    Norway\r
+\r
+    DACC Systems AB\r
+    Glimmervägen 4, \r
+    SE18734, Täby,\r
+    Sweden\r
+\r
+    Lancaster University, \r
+    University House, \r
+    Bailrigg, Lancaster, LA1 4YW\r
+    United Kingdom\r
+\r
+    Jožef Stefan Institute, \r
+    Jamova cesta 39, \r
+    1000 Ljubljana, \r
+    Slovenia\r
+\r
+    First Oversi Ltd.,\r
+    Rishon Lezion 1,\r
+    Petah Tikva 49723, \r
+    Israel\r
+\r
+    TECHNISCHE UNIVERSITEIT DELFT, \r
+    Faculty of Electrical Engineering, Mathematics and Computer Science, \r
+    Mekelweg 4, \r
+    2628 CD Delft, \r
+    The Netherlands\r
+\r
+    STMicroelectronics S.r.l., \r
+    via C.Olivetti 2, \r
+    I-20041 Agrate Brianza,\r
+    Italy\r
+\r
+    Kungliga Tekniska Högskolan (The Royal Institute of Technology), \r
+    KTH/ICT/ECS/TSLab\r
+    Electrum 229\r
+    164 40 Kista\r
+    Sweden\r
+\r
+    Markenfilm GmbH & Co. KG, \r
+    Schulauer Moorweg 25, \r
+    22880 Wedel, \r
+    Germany\r
+\r
+    Radiotelevizija Slovenija Javni Zavvod Ljubljana, \r
+    Kolodvorska 2, \r
+    SI-1000 Ljubljana,\r
+    Slovenia\r
+\r
+\r
+    Kendra Foundation, \r
+    Meadow Barn, Holne, \r
+    Newton Abbot, Devon, TQ13 7SP,\r
+    United Kingdom\r
+\r
+\r
+    Universitaet Klagenfurt, \r
+    Universitaetstrasse 65-67, \r
+    9020 Klagenfurt, \r
+    Austria\r
+\r
+    AG Projects, \r
+    Dr. Leijdsstraat 92,\r
+    2021RK Haarlem, \r
+    The Netherlands\r
+\r
+    The British Broadcasting Corporation,\r
+    Broadcasting House, Portland Place, \r
+    London, W1A 1AA \r
+    United Kingdom\r
+\r
+    Pioneer Digital Design Centre Limited, \r
+    Pioneer House, Hollybush Hill, Stoke Poges, \r
+    Slough, SL2 4QP\r
+    United Kingdom\r
+\r
+    INSTITUT FUER RUNDFUNKTECHNIK GMBH\r
+    Floriansmuehlstrasse 60,\r
+    80939 München, \r
+    Germany\r
+\r
+    Fabchannel BV, \r
+    Kleine-Gartmanplantsoen 21, \r
+    1017 RP Amsterdam, \r
+    The Netherlands\r
+\r
+    University Politehnica Bucharest, \r
+    313 Splaiul Independentei, \r
+    District 6, cod 060042, Bucharest,\r
+    Romania\r
+\r
+    EBU-UER, \r
+    L'Ancienne Route 17A, 1218\r
+    Grand Saconnex - Geneva, \r
+    Switzerland\r
+\r
+    Università di Roma Sapienza\r
+    Dipartimento di Informatica e Sistemistica (DIS),\r
+    Via Ariosto 25, \r
+    00185 Rome, \r
+    Italy\r
+\r
+\r
+------------------------------------------------------------------------------\r
+\r
+    BaseLib content-delivery library.\r
+\r
+    Development of the BaseLib library was supported by various research \r
+    grants:\r
+\r
+     - BSIK Freeband Communication I-Share project (Dutch Ministry of Economic \r
+       Affairs)\r
+     - Netherlands Organisation for Scientific Research (NWO) grant 612.060.215\r
+     - Dutch Technology Foundation STW: Veni project DTC.7299\r
+     - European Community's Sixth Framework Programme in the P2P-FUSION project\r
+       under contract no 035249.\r
\r
+    The following library modules are Copyright (c) 2005-2010,\r
+    Delft University of Technology and Vrije Universiteit Amsterdam; \r
+    All rights reserved.\r
+\r
+      BaseLib/*\r
+\r
+    This library is free software; you can redistribute it and/or\r
+    modify it under the terms of the GNU Lesser General Public\r
+    License as published by the Free Software Foundation; either\r
+    version 2.1 of the License, or (at your option) any later version.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    Delft University of Technology\r
+    Postbus 5\r
+    2600 AA Delft\r
+    The Netherlands\r
+    \r
+    Vrije Universiteit\r
+    De Boelelaan 1105\r
+    1081 HV Amsterdam\r
+    The Netherlands\r
+\r
+\r
\r
+-------------------------------------------------------------------------------\r
+\r
+    BuddyCast4 content-recommendation library.\r
+\r
+    The research leading to this library has received funding from the\r
+    European Community's Seventh Framework Programme [FP7/2007-2011] \r
+    in the Petamedia project under grant agreement no. 216444\r
+\r
+    The following library modules are Copyright (c) 2008-2010,\r
+    Delft University of Technology and Technische Universität Berlin; \r
+    All rights reserved.\r
+\r
+      BaseLib/Core/BuddyCast/buddycast.py\r
+\r
+    The following library modules are Copyright (c) 2008-2010,\r
+    Technische Universität Berlin; \r
+    All rights reserved.\r
+\r
+      BaseLib/Core/Search/Reranking.py\r
+      BaseLib/Test/test_buddycast4.py\r
+      BaseLib/Test/test_buddycast4_stresstest.py\r
+      \r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    Delft University of Technology\r
+    Postbus 5\r
+    2600 AA Delft\r
+    The Netherlands\r
+    \r
+    Technische Universität Berlin\r
+    Strasse des 17. Juni 135\r
+    10623 Berlin\r
+    Germany\r
+\r
+\r
+------------------------------------------------------------------------------\r
+\r
+    SwarmTransport/SwarmPlayer Firefox library.\r
+\r
+    The research leading to this library has received funding from the European\r
+    Community's Seventh Framework Programme in the P2P-Next project under grant\r
+    agreement no 216217.\r
+\r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+\r
+    The following library modules are Copyright (c) 2008-2012, TECHNISCHE UNIVERSITEIT DELFT; \r
+    and Jan Gerber; All rights reserved:\r
+      BaseLib/Transport/tribeIChannel.idl\r
+      BaseLib/Transport/tribeISwarmTransport.idl\r
+      BaseLib/Transport/components/TribeChannel.js\r
+      BaseLib/Transport/components/TribeProtocolHandler.js\r
+      BaseLib/Transport/components/SwarmTransport.js\r
+      BaseLib/Transport/install.rdf\r
+      BaseLib/Transport/chrome.manifest\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    TECHNISCHE UNIVERSITEIT DELFT, \r
+    Faculty of Electrical Engineering, Mathematics and Computer Science, \r
+    Mekelweg 4, \r
+    2628 CD Delft, \r
+    The Netherlands\r
+    \r
+    Jan Gerber\r
+    j@thing.net\r
+\r
+-------------------------------------------------------------------------------\r
+\r
+Unless otherwise noted, all files written by Bram Cohen, John Hoffman, Petru \r
+Paler, Uoti Urpala, Ross Cohen, Tim Tucker, Choopan RATTANAPOKA, Yejun Yang,\r
+Myers Carpenter, Bill Bumgarner, Henry 'Pi' James, Loring Holden, \r
+Dustin Pate ("noirsoldats@codemeu.com"), kratoak5, Roee Shlomo, Greg Fleming, \r
+N. Goldmann ("Pir4nhaX,www.clanyakuza.com"), and Michel Hartmann is released\r
+under the MIT license, exceptions contain licensing information in them.\r
+\r
+Copyright (C) 2001-2002 Bram Cohen\r
+\r
+Permission is hereby granted, free of charge, to any person\r
+obtaining a copy of this software and associated documentation files\r
+(the "Software"), to deal in the Software without restriction,\r
+including without limitation the rights to use, copy, modify, merge,\r
+publish, distribute, sublicense, and/or sell copies of the Software,\r
+and to permit persons to whom the Software is furnished to do so,\r
+subject to the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be\r
+included in all copies or substantial portions of the Software.\r
+\r
+The Software is provided "AS IS", without warranty of any kind,\r
+express or implied, including but not limited to the warranties of\r
+merchantability,  fitness for a particular purpose and\r
+noninfringement. In no event shall the  authors or copyright holders\r
+be liable for any claim, damages or other liability, whether in an\r
+action of contract, tort or otherwise, arising from, out of or in\r
+connection with the Software or the use or other dealings in the\r
+Software.\r
+\r
+-------------------------------------------------------------------------------\r
+\r
+                  GNU LESSER GENERAL PUBLIC LICENSE\r
+                       Version 2.1, February 1999\r
+\r
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.\r
+     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+ Everyone is permitted to copy and distribute verbatim copies\r
+ of this license document, but changing it is not allowed.\r
+\r
+[This is the first released version of the Lesser GPL.  It also counts\r
+ as the successor of the GNU Library Public License, version 2, hence\r
+ the version number 2.1.]\r
+\r
+                            Preamble\r
+\r
+  The licenses for most software are designed to take away your\r
+freedom to share and change it.  By contrast, the GNU General Public\r
+Licenses are intended to guarantee your freedom to share and change\r
+free software--to make sure the software is free for all its users.\r
+\r
+  This license, the Lesser General Public License, applies to some\r
+specially designated software packages--typically libraries--of the\r
+Free Software Foundation and other authors who decide to use it.  You\r
+can use it too, but we suggest you first think carefully about whether\r
+this license or the ordinary General Public License is the better\r
+strategy to use in any particular case, based on the explanations below.\r
+\r
+  When we speak of free software, we are referring to freedom of use,\r
+not price.  Our General Public Licenses are designed to make sure that\r
+you have the freedom to distribute copies of free software (and charge\r
+for this service if you wish); that you receive source code or can get\r
+it if you want it; that you can change the software and use pieces of\r
+it in new free programs; and that you are informed that you can do\r
+these things.\r
+\r
+  To protect your rights, we need to make restrictions that forbid\r
+distributors to deny you these rights or to ask you to surrender these\r
+rights.  These restrictions translate to certain responsibilities for\r
+you if you distribute copies of the library or if you modify it.\r
+\r
+  For example, if you distribute copies of the library, whether gratis\r
+or for a fee, you must give the recipients all the rights that we gave\r
+you.  You must make sure that they, too, receive or can get the source\r
+code.  If you link other code with the library, you must provide\r
+complete object files to the recipients, so that they can relink them\r
+with the library after making changes to the library and recompiling\r
+it.  And you must show them these terms so they know their rights.\r
+\r
+  We protect your rights with a two-step method: (1) we copyright the\r
+library, and (2) we offer you this license, which gives you legal\r
+permission to copy, distribute and/or modify the library.\r
+\r
+  To protect each distributor, we want to make it very clear that\r
+there is no warranty for the free library.  Also, if the library is\r
+modified by someone else and passed on, the recipients should know\r
+that what they have is not the original version, so that the original\r
+author's reputation will not be affected by problems that might be\r
+introduced by others.\r
+\r
+\r
+  Finally, software patents pose a constant threat to the existence of\r
+any free program.  We wish to make sure that a company cannot\r
+effectively restrict the users of a free program by obtaining a\r
+restrictive license from a patent holder.  Therefore, we insist that\r
+any patent license obtained for a version of the library must be\r
+consistent with the full freedom of use specified in this license.\r
+\r
+  Most GNU software, including some libraries, is covered by the\r
+ordinary GNU General Public License.  This license, the GNU Lesser\r
+General Public License, applies to certain designated libraries, and\r
+is quite different from the ordinary General Public License.  We use\r
+this license for certain libraries in order to permit linking those\r
+libraries into non-free programs.\r
+\r
+  When a program is linked with a library, whether statically or using\r
+a shared library, the combination of the two is legally speaking a\r
+combined work, a derivative of the original library.  The ordinary\r
+General Public License therefore permits such linking only if the\r
+entire combination fits its criteria of freedom.  The Lesser General\r
+Public License permits more lax criteria for linking other code with\r
+the library.\r
+\r
+  We call this license the "Lesser" General Public License because it\r
+does Less to protect the user's freedom than the ordinary General\r
+Public License.  It also provides other free software developers Less\r
+of an advantage over competing non-free programs.  These disadvantages\r
+are the reason we use the ordinary General Public License for many\r
+libraries.  However, the Lesser license provides advantages in certain\r
+special circumstances.\r
+\r
+  For example, on rare occasions, there may be a special need to\r
+encourage the widest possible use of a certain library, so that it becomes\r
+a de-facto standard.  To achieve this, non-free programs must be\r
+allowed to use the library.  A more frequent case is that a free\r
+library does the same job as widely used non-free libraries.  In this\r
+case, there is little to gain by limiting the free library to free\r
+software only, so we use the Lesser General Public License.\r
+\r
+  In other cases, permission to use a particular library in non-free\r
+programs enables a greater number of people to use a large body of\r
+free software.  For example, permission to use the GNU C Library in\r
+non-free programs enables many more people to use the whole GNU\r
+operating system, as well as its variant, the GNU/Linux operating\r
+system.\r
+\r
+  Although the Lesser General Public License is Less protective of the\r
+users' freedom, it does ensure that the user of a program that is\r
+linked with the Library has the freedom and the wherewithal to run\r
+that program using a modified version of the Library.\r
+\r
+  The precise terms and conditions for copying, distribution and\r
+modification follow.  Pay close attention to the difference between a\r
+"work based on the library" and a "work that uses the library".  The\r
+former contains code derived from the library, whereas the latter must\r
+be combined with the library in order to run.\r
+\r
+\r
+                  GNU LESSER GENERAL PUBLIC LICENSE\r
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r
+\r
+  0. This License Agreement applies to any software library or other\r
+program which contains a notice placed by the copyright holder or\r
+other authorized party saying it may be distributed under the terms of\r
+this Lesser General Public License (also called "this License").\r
+Each licensee is addressed as "you".\r
+\r
+  A "library" means a collection of software functions and/or data\r
+prepared so as to be conveniently linked with application programs\r
+(which use some of those functions and data) to form executables.\r
+\r
+  The "Library", below, refers to any such software library or work\r
+which has been distributed under these terms.  A "work based on the\r
+Library" means either the Library or any derivative work under\r
+copyright law: that is to say, a work containing the Library or a\r
+portion of it, either verbatim or with modifications and/or translated\r
+straightforwardly into another language.  (Hereinafter, translation is\r
+included without limitation in the term "modification".)\r
+\r
+  "Source code" for a work means the preferred form of the work for\r
+making modifications to it.  For a library, complete source code means\r
+all the source code for all modules it contains, plus any associated\r
+interface definition files, plus the scripts used to control compilation\r
+and installation of the library.\r
+\r
+  Activities other than copying, distribution and modification are not\r
+covered by this License; they are outside its scope.  The act of\r
+running a program using the Library is not restricted, and output from\r
+such a program is covered only if its contents constitute a work based\r
+on the Library (independent of the use of the Library in a tool for\r
+writing it).  Whether that is true depends on what the Library does\r
+and what the program that uses the Library does.\r
+  \r
+  1. You may copy and distribute verbatim copies of the Library's\r
+complete source code as you receive it, in any medium, provided that\r
+you conspicuously and appropriately publish on each copy an\r
+appropriate copyright notice and disclaimer of warranty; keep intact\r
+all the notices that refer to this License and to the absence of any\r
+warranty; and distribute a copy of this License along with the\r
+Library.\r
+\r
+  You may charge a fee for the physical act of transferring a copy,\r
+and you may at your option offer warranty protection in exchange for a\r
+fee.\r
+\r
+\r
+  2. You may modify your copy or copies of the Library or any portion\r
+of it, thus forming a work based on the Library, and copy and\r
+distribute such modifications or work under the terms of Section 1\r
+above, provided that you also meet all of these conditions:\r
+\r
+    a) The modified work must itself be a software library.\r
+\r
+    b) You must cause the files modified to carry prominent notices\r
+    stating that you changed the files and the date of any change.\r
+\r
+    c) You must cause the whole of the work to be licensed at no\r
+    charge to all third parties under the terms of this License.\r
+\r
+    d) If a facility in the modified Library refers to a function or a\r
+    table of data to be supplied by an application program that uses\r
+    the facility, other than as an argument passed when the facility\r
+    is invoked, then you must make a good faith effort to ensure that,\r
+    in the event an application does not supply such function or\r
+    table, the facility still operates, and performs whatever part of\r
+    its purpose remains meaningful.\r
+\r
+    (For example, a function in a library to compute square roots has\r
+    a purpose that is entirely well-defined independent of the\r
+    application.  Therefore, Subsection 2d requires that any\r
+    application-supplied function or table used by this function must\r
+    be optional: if the application does not supply it, the square\r
+    root function must still compute square roots.)\r
+\r
+These requirements apply to the modified work as a whole.  If\r
+identifiable sections of that work are not derived from the Library,\r
+and can be reasonably considered independent and separate works in\r
+themselves, then this License, and its terms, do not apply to those\r
+sections when you distribute them as separate works.  But when you\r
+distribute the same sections as part of a whole which is a work based\r
+on the Library, the distribution of the whole must be on the terms of\r
+this License, whose permissions for other licensees extend to the\r
+entire whole, and thus to each and every part regardless of who wrote\r
+it.\r
+\r
+Thus, it is not the intent of this section to claim rights or contest\r
+your rights to work written entirely by you; rather, the intent is to\r
+exercise the right to control the distribution of derivative or\r
+collective works based on the Library.\r
+\r
+In addition, mere aggregation of another work not based on the Library\r
+with the Library (or with a work based on the Library) on a volume of\r
+a storage or distribution medium does not bring the other work under\r
+the scope of this License.\r
+\r
+  3. You may opt to apply the terms of the ordinary GNU General Public\r
+License instead of this License to a given copy of the Library.  To do\r
+this, you must alter all the notices that refer to this License, so\r
+that they refer to the ordinary GNU General Public License, version 2,\r
+instead of to this License.  (If a newer version than version 2 of the\r
+ordinary GNU General Public License has appeared, then you can specify\r
+that version instead if you wish.)  Do not make any other change in\r
+these notices.\r
+\r
+\r
+  Once this change is made in a given copy, it is irreversible for\r
+that copy, so the ordinary GNU General Public License applies to all\r
+subsequent copies and derivative works made from that copy.\r
+\r
+  This option is useful when you wish to copy part of the code of\r
+the Library into a program that is not a library.\r
+\r
+  4. You may copy and distribute the Library (or a portion or\r
+derivative of it, under Section 2) in object code or executable form\r
+under the terms of Sections 1 and 2 above provided that you accompany\r
+it with the complete corresponding machine-readable source code, which\r
+must be distributed under the terms of Sections 1 and 2 above on a\r
+medium customarily used for software interchange.\r
+\r
+  If distribution of object code is made by offering access to copy\r
+from a designated place, then offering equivalent access to copy the\r
+source code from the same place satisfies the requirement to\r
+distribute the source code, even though third parties are not\r
+compelled to copy the source along with the object code.\r
+\r
+  5. A program that contains no derivative of any portion of the\r
+Library, but is designed to work with the Library by being compiled or\r
+linked with it, is called a "work that uses the Library".  Such a\r
+work, in isolation, is not a derivative work of the Library, and\r
+therefore falls outside the scope of this License.\r
+\r
+  However, linking a "work that uses the Library" with the Library\r
+creates an executable that is a derivative of the Library (because it\r
+contains portions of the Library), rather than a "work that uses the\r
+library".  The executable is therefore covered by this License.\r
+Section 6 states terms for distribution of such executables.\r
+\r
+  When a "work that uses the Library" uses material from a header file\r
+that is part of the Library, the object code for the work may be a\r
+derivative work of the Library even though the source code is not.\r
+Whether this is true is especially significant if the work can be\r
+linked without the Library, or if the work is itself a library.  The\r
+threshold for this to be true is not precisely defined by law.\r
+\r
+  If such an object file uses only numerical parameters, data\r
+structure layouts and accessors, and small macros and small inline\r
+functions (ten lines or less in length), then the use of the object\r
+file is unrestricted, regardless of whether it is legally a derivative\r
+work.  (Executables containing this object code plus portions of the\r
+Library will still fall under Section 6.)\r
+\r
+  Otherwise, if the work is a derivative of the Library, you may\r
+distribute the object code for the work under the terms of Section 6.\r
+Any executables containing that work also fall under Section 6,\r
+whether or not they are linked directly with the Library itself.\r
+\r
+\r
+  6. As an exception to the Sections above, you may also combine or\r
+link a "work that uses the Library" with the Library to produce a\r
+work containing portions of the Library, and distribute that work\r
+under terms of your choice, provided that the terms permit\r
+modification of the work for the customer's own use and reverse\r
+engineering for debugging such modifications.\r
+\r
+  You must give prominent notice with each copy of the work that the\r
+Library is used in it and that the Library and its use are covered by\r
+this License.  You must supply a copy of this License.  If the work\r
+during execution displays copyright notices, you must include the\r
+copyright notice for the Library among them, as well as a reference\r
+directing the user to the copy of this License.  Also, you must do one\r
+of these things:\r
+\r
+    a) Accompany the work with the complete corresponding\r
+    machine-readable source code for the Library including whatever\r
+    changes were used in the work (which must be distributed under\r
+    Sections 1 and 2 above); and, if the work is an executable linked\r
+    with the Library, with the complete machine-readable "work that\r
+    uses the Library", as object code and/or source code, so that the\r
+    user can modify the Library and then relink to produce a modified\r
+    executable containing the modified Library.  (It is understood\r
+    that the user who changes the contents of definitions files in the\r
+    Library will not necessarily be able to recompile the application\r
+    to use the modified definitions.)\r
+\r
+    b) Use a suitable shared library mechanism for linking with the\r
+    Library.  A suitable mechanism is one that (1) uses at run time a\r
+    copy of the library already present on the user's computer system,\r
+    rather than copying library functions into the executable, and (2)\r
+    will operate properly with a modified version of the library, if\r
+    the user installs one, as long as the modified version is\r
+    interface-compatible with the version that the work was made with.\r
+\r
+    c) Accompany the work with a written offer, valid for at\r
+    least three years, to give the same user the materials\r
+    specified in Subsection 6a, above, for a charge no more\r
+    than the cost of performing this distribution.\r
+\r
+    d) If distribution of the work is made by offering access to copy\r
+    from a designated place, offer equivalent access to copy the above\r
+    specified materials from the same place.\r
+\r
+    e) Verify that the user has already received a copy of these\r
+    materials or that you have already sent this user a copy.\r
+\r
+  For an executable, the required form of the "work that uses the\r
+Library" must include any data and utility programs needed for\r
+reproducing the executable from it.  However, as a special exception,\r
+the materials to be distributed need not include anything that is\r
+normally distributed (in either source or binary form) with the major\r
+components (compiler, kernel, and so on) of the operating system on\r
+which the executable runs, unless that component itself accompanies\r
+the executable.\r
+\r
+  It may happen that this requirement contradicts the license\r
+restrictions of other proprietary libraries that do not normally\r
+accompany the operating system.  Such a contradiction means you cannot\r
+use both them and the Library together in an executable that you\r
+distribute.\r
+\r
+\r
+  7. You may place library facilities that are a work based on the\r
+Library side-by-side in a single library together with other library\r
+facilities not covered by this License, and distribute such a combined\r
+library, provided that the separate distribution of the work based on\r
+the Library and of the other library facilities is otherwise\r
+permitted, and provided that you do these two things:\r
+\r
+    a) Accompany the combined library with a copy of the same work\r
+    based on the Library, uncombined with any other library\r
+    facilities.  This must be distributed under the terms of the\r
+    Sections above.\r
+\r
+    b) Give prominent notice with the combined library of the fact\r
+    that part of it is a work based on the Library, and explaining\r
+    where to find the accompanying uncombined form of the same work.\r
+\r
+  8. You may not copy, modify, sublicense, link with, or distribute\r
+the Library except as expressly provided under this License.  Any\r
+attempt otherwise to copy, modify, sublicense, link with, or\r
+distribute the Library is void, and will automatically terminate your\r
+rights under this License.  However, parties who have received copies,\r
+or rights, from you under this License will not have their licenses\r
+terminated so long as such parties remain in full compliance.\r
+\r
+  9. You are not required to accept this License, since you have not\r
+signed it.  However, nothing else grants you permission to modify or\r
+distribute the Library or its derivative works.  These actions are\r
+prohibited by law if you do not accept this License.  Therefore, by\r
+modifying or distributing the Library (or any work based on the\r
+Library), you indicate your acceptance of this License to do so, and\r
+all its terms and conditions for copying, distributing or modifying\r
+the Library or works based on it.\r
+\r
+  10. Each time you redistribute the Library (or any work based on the\r
+Library), the recipient automatically receives a license from the\r
+original licensor to copy, distribute, link with or modify the Library\r
+subject to these terms and conditions.  You may not impose any further\r
+restrictions on the recipients' exercise of the rights granted herein.\r
+You are not responsible for enforcing compliance by third parties with\r
+this License.\r
+\r
+\r
+  11. If, as a consequence of a court judgment or allegation of patent\r
+infringement or for any other reason (not limited to patent issues),\r
+conditions are imposed on you (whether by court order, agreement or\r
+otherwise) that contradict the conditions of this License, they do not\r
+excuse you from the conditions of this License.  If you cannot\r
+distribute so as to satisfy simultaneously your obligations under this\r
+License and any other pertinent obligations, then as a consequence you\r
+may not distribute the Library at all.  For example, if a patent\r
+license would not permit royalty-free redistribution of the Library by\r
+all those who receive copies directly or indirectly through you, then\r
+the only way you could satisfy both it and this License would be to\r
+refrain entirely from distribution of the Library.\r
+\r
+If any portion of this section is held invalid or unenforceable under any\r
+particular circumstance, the balance of the section is intended to apply,\r
+and the section as a whole is intended to apply in other circumstances.\r
+\r
+It is not the purpose of this section to induce you to infringe any\r
+patents or other property right claims or to contest validity of any\r
+such claims; this section has the sole purpose of protecting the\r
+integrity of the free software distribution system which is\r
+implemented by public license practices.  Many people have made\r
+generous contributions to the wide range of software distributed\r
+through that system in reliance on consistent application of that\r
+system; it is up to the author/donor to decide if he or she is willing\r
+to distribute software through any other system and a licensee cannot\r
+impose that choice.\r
+\r
+This section is intended to make thoroughly clear what is believed to\r
+be a consequence of the rest of this License.\r
+\r
+  12. If the distribution and/or use of the Library is restricted in\r
+certain countries either by patents or by copyrighted interfaces, the\r
+original copyright holder who places the Library under this License may add\r
+an explicit geographical distribution limitation excluding those countries,\r
+so that distribution is permitted only in or among countries not thus\r
+excluded.  In such case, this License incorporates the limitation as if\r
+written in the body of this License.\r
+\r
+  13. The Free Software Foundation may publish revised and/or new\r
+versions of the Lesser General Public License from time to time.\r
+Such new versions will be similar in spirit to the present version,\r
+but may differ in detail to address new problems or concerns.\r
+\r
+Each version is given a distinguishing version number.  If the Library\r
+specifies a version number of this License which applies to it and\r
+"any later version", you have the option of following the terms and\r
+conditions either of that version or of any later version published by\r
+the Free Software Foundation.  If the Library does not specify a\r
+license version number, you may choose any version ever published by\r
+the Free Software Foundation.\r
+\r
+\r
+  14. If you wish to incorporate parts of the Library into other free\r
+programs whose distribution conditions are incompatible with these,\r
+write to the author to ask for permission.  For software which is\r
+copyrighted by the Free Software Foundation, write to the Free\r
+Software Foundation; we sometimes make exceptions for this.  Our\r
+decision will be guided by the two goals of preserving the free status\r
+of all derivatives of our free software and of promoting the sharing\r
+and reuse of software generally.\r
+\r
+                            NO WARRANTY\r
+\r
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO\r
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.\r
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR\r
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY\r
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\r
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE\r
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME\r
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\r
+\r
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN\r
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY\r
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU\r
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR\r
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE\r
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING\r
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A\r
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF\r
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH\r
+DAMAGES.\r
+\r
+                     END OF TERMS AND CONDITIONS\r
+\r
+\r
+           How to Apply These Terms to Your New Libraries\r
+\r
+  If you develop a new library, and you want it to be of the greatest\r
+possible use to the public, we recommend making it free software that\r
+everyone can redistribute and change.  You can do so by permitting\r
+redistribution under these terms (or, alternatively, under the terms of the\r
+ordinary General Public License).\r
+\r
+  To apply these terms, attach the following notices to the library.  It is\r
+safest to attach them to the start of each source file to most effectively\r
+convey the exclusion of warranty; and each file should have at least the\r
+"copyright" line and a pointer to where the full notice is found.\r
+\r
+    <one line to give the library's name and a brief idea of what it does.>\r
+    Copyright (C) <year>  <name of author>\r
+\r
+    This library is free software; you can redistribute it and/or\r
+    modify it under the terms of the GNU Lesser General Public\r
+    License as published by the Free Software Foundation; either\r
+    version 2.1 of the License, or (at your option) any later version.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+Also add information on how to contact you by electronic and paper mail.\r
+\r
+You should also get your employer (if you work as a programmer) or your\r
+school, if any, to sign a "copyright disclaimer" for the library, if\r
+necessary.  Here is a sample; alter the names:\r
+\r
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the\r
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.\r
+\r
+  <signature of Ty Coon>, 1 April 1990\r
+  Ty Coon, President of Vice\r
+\r
+That's all there is to it!\r
+\r
+-------------------------------------------------------------------------------\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Lang/__init__.py b/instrumentation/next-share/BaseLib/Lang/__init__.py
new file mode 100644 (file)
index 0000000..84ea404
--- /dev/null
@@ -0,0 +1,3 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
diff --git a/instrumentation/next-share/BaseLib/Lang/english.lang b/instrumentation/next-share/BaseLib/Lang/english.lang
new file mode 100644 (file)
index 0000000..22933a8
--- /dev/null
@@ -0,0 +1,1429 @@
+##################################################################
+# You can change the language here; these are all the text variables used by Tribler.
+# Try to keep each string about the same length as the original text.
+#
+# Note: text strings can be written as either:
+# stringname = "some string"
+# or:
+# stringname_line1 = "first line of a string"
+# stringname_line2 = "second line of a string"
+#
+# (Tribler will automatically add the lines together)
+#
+##################################################################
+
+[ABC/language]
+
+# The name of the language defined in this file:
+languagename = "English"
+
+# this credit will display in aboutme dialog
+# translate = "Translator: <Your name>"
+translate = ""
+
+# All ABC Variables
+#######################
+title = "Tribler"
+
+superseederrornotcompleted = "Super-seed can only be enabled for completed torrents"
+superseedmustruntorrentbefore = "This torrent must be running before using Super-Seed mode"
+
+superwarningmsg_line1 = "This option greatly reduces the torrent's efficiency."
+superwarningmsg_line2 = "Super-seed should only be used for initial seeding or"
+superwarningmsg_line3 = "for re-seeding."
+superwarningmsg_line4 = ""
+superwarningmsg_line5 = "Super-seed mode will stay in effect until the torrent"
+superwarningmsg_line6 = "is stopped."
+
+failedinvalidtorrent = "Failed : Invalid torrent file."
+failedtorrentmissing = "Failed : .torrent file does not exist or cannot be read."
+removetorrent          = "Do you wish to delete the torrent file?"
+ok                                     = "OK"
+cancel                         = "Cancel"
+apply                          = "Apply"
+close                          = "Close"
+save                           = "Save"
+saveandapply           = "Save and Apply"
+done                           = "Done"
+
+choosefiletosaveas  = "Choose file to save as, pick a partial download to resume"
+choosedirtosaveto      = "Choose a directory to save to (pick a partial download to resume)"
+enterurl                       = "Enter the URL for the torrent you wish to add:"
+confirmupgrademsg   = "Do you want to close Tribler and upgrade to the next version?  See release notes below"
+confirmupgrade      = "Upgrade Tribler?"
+confirmmsg                     = "Do you want to close Tribler ?"
+confirm                        = "Confirm"
+aboutabc                       = "About Tribler"
+abcpreference          = "Preferences"
+managefriendspeers         = "Manage Friends/Encountered Peer List"
+managefriends          = "Manage Friends"
+recommendatefiles   = "Recommendation"
+addfriend            = "Add a friend"
+editfriend           = "Edit a friend's info"
+viewpeerlist           = "View Encountered Peers"
+addpeeradfriend         = "Add this peer as your friend"
+deletepeer              = "Delete this peer"
+deletepeerfriend        = "Remove the peer from your friends list"
+fakefile                = "Fake File"
+norating                = "No Rating"
+rankitems               = "Rank Items"
+assignrating            = "Right click on a torrent to assign a 1--5 star rating"
+showabcwindow          = "Show Tribler Window"
+error                          = "Error"
+warning                        = "Warning"
+invalidinput                   = "Invalid input"
+cantconnectwebserver_line1 = "Could not connect to update server."
+cantconnectwebserver_line2 = "It may be down or you are not connected to the Internet."
+abclatestversion       = "Latest Version"
+nonewversion = "There is no new version available. <br> Please visit <a href=http://www.tribler.org>www.tribler.org</a> for more information"
+hasnewversion = "There is a new version available. Please upgrade."
+globaluploadsetting = "Global Upload"
+downloadsetting        = "Download Setting"
+ratelimits                     = "Rate Limiting"
+seedoptions            = "Seeding Options"
+webinterfaceservice = "Web Interface Service"
+
+duplicatetorrent       = "Duplicate Torrent"
+duplicatetorrentinlist  = "This torrent (or one with the same hash value) already exists in the list"
+duplicatetorrentmsg = "This torrent is a duplicate!\nAre you sure you want to replace it?"
+choosetorrentfile      = "Choose a torrent file"
+cantgettorrentfromurl = "Can't get torrent from this URL"
+localsetting           = "Local Settings"
+errordeletefile        = "Error, while trying to delete file.\nFile cannot be found or is in use"
+filenotfound           = "File not found or cannot be accessed."
+confirmdeletefile      = "Are you sure you want to remove this file or folder?"
+choosenewlocation      = "Choose a new location for this torrent"
+
+extracterrorduplicatemsg_line1 = "A file with the same name already exists in the destination folder."
+extracterrorduplicatemsg_line2 = "Do you want to overwrite it?"
+extracterrorduplicate = "Duplicate file name"
+
+
+extracterrorinactive = "At least one selected torrent is active. Please deactivate before extracting."
+
+
+extracterrormoving     = "Can't move the torrent file."
+torrentdetail          = "Torrent Details..."
+
+moveup                                 = "Move torrent up"
+movedown                       = "Move torrent down"
+movetop                        = "Move torrent to top"
+movebottom                     = "Move torrent to bottom"
+clearallcompleted      = "Clear all completed torrents"
+pauseall                       = "Pause All"
+stopall                        = "Stop All"
+restartall                     = "Restart All"
+unstopall                      = "Queue all stopped torrents"
+mode                           = "Mode Manual/Auto"
+webservice                     = "Web Service: "
+torrentfilenotfound = "Torrent file not found"
+clear                          = "Clear"
+errormovefile          = "Error while moving files"
+
+totaldlspeed           = "Total DL Speed:"
+totalulspeed           = "Total UL Speed:"
+
+failbehavior1          = "Set status to:"
+failbehavior2          = "when a torrent fails"
+
+defaultpriority        = "Default priority for new torrents:"
+
+################################
+# Menu
+################################
+menu_file                      = "&File"
+menuaction                     = "&Action"
+menutools                      = "&Tools"
+menuversion            = "&Help"
+menuaboutabc           = "&About Tribler"
+menuaboutabcmsg        = "See Credits"
+menuchecklatestversion = "&Check for updates"
+menuchecklatestversionmsg = "Check Latest Version"
+menuwebinterfaceservice = "&Web Interface Service"
+menuwebinterfaceservicemsg = "Start/Stop and Config Web Interface Service"
+menucreatetorrent      = "&Create Torrent"
+menucreatetorrentmsg = "Create .torrent file"
+menumanagefriends     = "&Manage Friends List"
+menumyinfo             = "My &Info"
+menuexit                       = "&Exit"
+menuexitmsg            = "Close Program"
+menuglobaluploadsetting = "&Global Upload Setting"
+menuglobaluploadsettingmsg = "Setting global upload value"
+menuabcpreference      = "&Preferences"
+menuabcpreferencemsg = "Set preferences"
+
+menu_addtorrent = "&Add Torrent"
+menu_addtorrentfile = "Add torrent from &file"
+menu_addtorrentnondefault = "Add torrent from file (to &non-default location)"
+menu_addtorrenturl = "Add torrent from &URL"
+
+menu_pauseall = "&Pause All"
+menu_stopall = "&Stop All"
+menu_unstopall = "&Queue all stopped torrents"
+menu_clearcompleted = "&Clear Completed"
+
+#########################
+# Library Overview
+#########################
+
+playFastDisabled       = "Give high priority and play ASAP"
+playFastEnabled                = "Back to normal mode"
+playerDisabled         = "Please wait until first part is available \n(Tribler is currently giving first part high priority)"
+playerEnabled          = "Click to play"
+boostDisabled          = "Ask friends to boost your download"
+boostEnabled           = "Boosting"
+
+
+#########################
+# ToolBar
+#########################
+addtorrentfile_short = "Add Torrent File"
+addtorrentfiletonondefault_short = "Add Torrent File to non-default location"
+addtorrenturl_short = "Add Torrent from URL"
+
+tb_play_short   = "Play video"
+tb_resume_short = "Resume torrent"
+tb_resume_long         = "Resume/Launch torrent"
+tb_reseedresume_short = "Reseed Resume"
+tb_reseedresume_long = "Resume without hashcheck, use only for seeding/reseeding."
+tb_pause_short         = "Pause torrent"
+tb_pause_long  = "Pause active torrent(s) (without releasing resources)"
+tb_stop_short  = "Stop torrent"
+tb_stop_long   = "Stop torrent (release resources)"
+tb_queue_short         = "Queue torrent"
+tb_queue_long  = "Force torrent into queue"
+tb_delete_short = "Remove torrent"
+tb_delete_long         = "Remove torrent only from Tribler list"
+tb_spy_short   = "Current Seed/Peer"
+tb_spy_long    = "See current number of seed/peer of torrent on the tracker"
+tb_torrentdetail_short = "Torrent Details"
+tb_buddy_short  = "Manage Friends/Encountered Peers"
+tb_file_short   = "Show Download History"
+tb_video_short  = "Video Player"
+tb_dlhelp_short  = "Download Booster"
+
+tb_urm = "URM:"
+tb_maxsim = "Active:"
+
+##########################
+# Priority
+##########################
+# These are used for display in the list
+highest                = "Highest"
+high                   = "High"
+normal                         = "Normal"
+low                    = "Low"
+lowest                         = "Lowest"
+
+# These are used for menus
+rhighest               = "H&ighest"
+rhigh                  = "&High"
+rnormal                = "&Normal"
+rlow                   = "&Low"
+rlowest                = "L&owest"
+
+###################################################
+# Seeding Setting
+###################################################
+uploadoptforcompletedfile = "Upload option for completed files"
+unlimitedupload = "Unlimited seeding"
+continueuploadfor = "Continue seeding for"
+untilratio             = "Seeding until UL/DL ratio = "
+uploadsetting  = "Upload Setting"
+maxuploads             = "Maximum uploads:"
+maxuploadrate  = "Maximum upload rate:"
+maxoveralluploadrate = "Maximum overall upload rate:"
+whendownload   = "when downloading"
+whennodownload         = "when not downloading"
+
+maxdownloadrate = "Maximum download rate:"
+maxoveralldownloadrate = "Maximum overall download rate:"
+
+zeroisunlimited = "(0 = Unlimited)"
+zeroisauto             = "(0 = Auto)"
+
+uploadrateintwarning = "Only integer allowed in Maximum upload rate setting"
+uploadrateminwarning = "Minimum upload rate is 3kB/s or 0 for unlimited upload rate"
+uploadrateminwarningauto = "Minimum upload rate is 3kB/s or 0 for auto upload rate"
+
+#Common option for t4t and g2g
+default_setting = "default"
+seed_sometime = "Seeding for"
+seed_hours = "hours"
+seed_mins = "minutes"
+no_seeding = "No seeding"
+
+#Seeding option texts for tit-4-tat
+tit-4-tat = "tit-4-tat: (Forgets about uploads)"
+no_leeching = "Seed until UL/DL ratio > 1.0 (no Bittorrent leeching)"
+unlimited_seeding = "Unlimited seeding"
+
+#Seeding option texts for give-2-get
+give-2-get = "give-2-get: (Remembers every upload)"
+seed_for_large_ratio = "Seed only to peers with UL/DL ratio >"
+boost__reputation = "Unlimited seeding (Boost your reputation)"
+
+
+############################################
+# Units
+############################################
+Byte                   = "B"
+KB                             = "KB"
+MB                             = "MB"
+GB                             = "GB"
+TB                             = "TB"
+
+week                   = "W"
+day                    = "D"
+hour                   = "H"
+minute                         = "M"
+second                         = "S"
+l_week                         = "w"
+l_day                  = "d"
+l_hour                         = "h"
+l_minute               = "m"
+l_second               = "s"
+
+############################################
+# Tribler Tweak
+############################################
+up                             = "upload speed"
+down                   = "download speed"
+columns                = "Columns"
+column                         = "Column :"
+displayname    = "Column Name :"
+columnwidth    = "Column Width :"
+eta                            = "Estimated time needed to complete: "
+
+customizetoolbar = "Customize Toolbar"
+
+##############################################
+# Tribler Detail Frame
+##############################################
+
+networkinfo    = "Network Info"
+fileinfo               = "File Info"
+torrentinfo     = "Torrent Info"
+geoinfo         = "Geographic Info"
+helperinfo     = "Download Booster"
+
+dnumconnectedseed = "# Connected seed :"
+dseenseed              = "# Seen seed"
+dnumconnectedpeer = "# Connected peer :"
+dseeingcopies  = "# Seeing copies :"
+davgpeerprogress = "Avg peer progress :"
+ddownloadedsize = "Downloaded size :"
+duploadedsize  = "Uploaded size : "
+dtotalspeed    = "Total speed : "
+dportused              = "Port used : "
+updateseedpeer         = "Update #Seed/#Peer"
+manualannounce         = "Manual Announce"
+externalannounce = "External Announce"
+finishallocation = "Finish Allocation"
+spewoptunchoke         = "Optimistic Unchoke"
+spewIP                         = "IP"
+spewlr                         = "Local/Remote"
+spewinterested         = "Interested"
+spewchoking    = "Choking"
+spewinteresting = "Interesting"
+spewchoecked   = "Choked"
+spewsnubbed    = "Snubbed"
+spewdownloaded         = "Downloaded"
+spewuploaded   = "Uploaded"
+spewcompleted  = "Completed"
+spewpeerdownloadspeed = "Peer Download Speed"
+entertrackerannounceurl = "Enter tracker announce URL:"
+TOTALS                         = "TOTALS:"
+KICKED                         = "KICKED"
+BANNED                         = "BANNED"
+detailline1    = "currently downloading %d pieces (%d just started), %d pieces partially retrieved"
+detailline2    = "%d of %d pieces complete (%d just downloaded), %d failed hash check"
+
+country_name           = "Country"
+country_code           = "Country Code"
+city                           = "City"
+latitude                       = "Latitude"
+longitude                      = "Longitude"
+coordinate                     = "Coordinate"
+peer_active                    = "Active"
+peer_inactive          = "Inactive"
+name                   = "Name"
+permid                 = "PermID"
+mypermid               = "My PermID"
+pasteinvitationemail = "Your friend should provide you the following information by sending you an invitation:"
+ipaddress              = "IP"
+icon                   = "Icon"
+#nickname_help           = "Input the friend's nickname or whatever you'd like to identify him/her"
+#friendsipaddr_help      = "Input the friend's IP address, e.g. 202.115.39.65"
+#friendsport_help        = "Input the friend's listening port number"
+#friendspermid_help      = "Input the friend's PermID"
+#friendsicon_help        = "Input full path of the friend's icon"
+nicknameempty_error     = "Name is empty"
+friendsport_error       = "Port is not a number"
+friendspermid_error     = "PermID must be given (in BASE64, single line)"
+fiendsiconnotfound_error= "Icon file does not exist"
+friendsiconnot32bmp_error= "Icon file is not a 32x32 BMP"
+friendsiconnotbmp_error = "Icon file is not BMP"
+myinfo                 = "My information"
+myinfo_explanation     = "Copy and paste this information in an email to your friends, so they can add you to their Friends List in Tribler."
+invitation_body = "Hi,\r\n\r\nI am using Tribler (http://tribler.org) and want to ask you to do the same and add me as a friend. To do so, start Tribler, click on Friends, then click on the Add Friends button, and paste the following information:\r\n\r\n"
+invitation_subject = "Friendship invitation on Tribler"
+invitationbtn = "Invite friends"
+dlhelpdisabledstop      = "Download Booster is disabled because the torrent is stopped"
+dlhelpdisabledhelper    = "Download Booster is disabled because you are a helper"
+dlhelphowto1               = "You can only request mutual (two way) friends to boost your downloads."
+dlhelphowto2               = "\nMore info: \nTo use the download booster you must make friends with other Tribler users, and they must make friends with you. To make friends, use the 'Add as friend' button in the Persons overview or the 'Invite Friends' and 'Add Friends' button in the Friends overview."
+friends                        = "Friends"
+helpers                = "Helpers"
+availcandidates         = "Available Candidates"
+requestdlhelp          = "Request Help ->"
+requestdlhelp_help     = "Ask friends to help in downloading this torrent"
+stopdlhelp             = "<- Stop Help"
+stopdlhelp_help                = "Stop friends' help"
+helping_friend         = "Helping "
+helping_stopped                = "Helping was stopped remotely, please remove torrent"
+
+#####################################################
+# Meta info frame
+#####################################################
+fileinfo0_text  = "Filename"
+fileinfo1_text  = "Size"
+fileinfo2_text  = "Progress"
+fileinfo3_text  = "MD5"
+fileinfo4_text  = "CRC-32"
+fileinfo5_text  = "SHA-1"
+fileinfo6_text  = "ED2K"
+
+encoding        = "Encoding :"
+
+filename               = "File name :"
+destination    = "Destination :"
+
+directoryname  = "Directory name :"
+file                   = "File"
+progress               = "Progress"
+infohash               = "Info Hash :"
+pieces                         = "Pieces : "
+str1                   = "%s (%s bytes)"
+str2                   = "%i (%s bytes each)"
+announceurl    = "Announce URL :"
+announceurls   = "Announce URLs"
+tier                   = "Tier "
+single                         = "Single:"
+likelytracker  = "Likely Tracker :"
+comment                = "Comments :"
+creationdate   = "Creation Date :"
+filesize               = "Filesize"
+archivesize    = "Archive Size"
+
+########################################################
+# ABCOptionDlg
+#######################################################
+networksetting         = "Network"
+portnumber     = "Port:"
+portsetting    = "Ports"
+minportnumber  = "Minimum port : "
+maxportnumber  = "Maximum port :"
+portrangewarning = "Minimum port cannot be greater than maximum port"
+randomport             = "Randomize Ports"
+kickban                = "Kick/Ban clients that send you bad data"
+security               = "Don't allow multiple connections from the same IP"
+scrape                         = "Retrieve scrape data"
+internaltrackerurl = "URL of internal tracker"
+
+scrape_hint_line1 = "Automatically retrieve the total number of seeds/peers"
+scrape_hint_line2 = "connected to the tracker"
+scrape_hint_line3 = "(rather than just the number of connected seeds/peers)"
+scrape_hint_line4 = ""
+scrape_hint_line5 = "Note: This can put an additional burden on trackers"
+scrape_hint_line6 = "      and is therefore disabled by default"
+
+global_uprate_hint_line1 = "Amount of bandwidth to distribute between"
+global_uprate_hint_line2 = "uploading torrents"
+global_uprate_hint_line3 = ""
+global_uprate_hint_line4 = "Note: Each torrent will always get a minimum"
+global_uprate_hint_line5 = "      of 3KB/s"
+
+choose_language = "Language: "
+recategorize = "Recategorize all torrents: "
+recategorize_button = "Recategorize now"
+choosevideoplayer = "Choose video player"
+choosevideoanalyser = "Locate FFMPEG"
+
+queuesetting   = "Queue"
+maxnumsimul     = "Maximum number of active torrents"
+trignexttorrent = "Consider torrents active if they are:"
+after_downloading = "Downloading"
+after_seeding  = "Downloading or Seeding"
+prioritizelocal = "Don't count torrents with local settings towards global limit"
+fastresume             = "Fast Resume (also enables File Selector)"
+
+skipcheck       = "Skip hashcheck for completed torrents"
+skipcheck_hint_line1 = "Don't conduct a hashcheck for torrents"
+skipcheck_hint_line2 = "that have already completed."
+
+fastresume_hint_line1 = "Automatically resume torrents that have already"
+fastresume_hint_line2 = "conducted a hashcheck."
+fastresume_hint_line3 = ""
+fastresume_hint_line4 = "Note: This option is required in order to set"
+fastresume_hint_line5 = "      priorities for individual files within"
+fastresume_hint_line6 = "      a multi-file torrent."
+
+
+displaysetting  = "Display"
+miscsetting    = "Misc."
+removebackuptorrent = "Remove .torrent backup file when using remove"
+confirmonexit  = "Confirm on exit program"
+triblersetting  = "Tribler"
+corefuncsetting = "Core functionality"
+myinfosetting = "My information"
+torrentcollectsetting = "Torrent collecting"
+enablerecommender = "Enable Recommender"
+enabledlhelp    = "Enable Download Booster"
+enabledlcollecting = "Enable Torrent Collecting"
+myname             = "My name (as broadcast to others):"
+maxntorrents    = "Max number of torrents to collect:"
+maxnpeers       = "Max number of peers to discover:"
+tc_threshold    = "Stop collecting more torrents if the disk has less than:"
+current_free_space = "current available space:"
+torrentcollectingrate = "Maximum rate of torrent collecting (Kbps):"
+myicon            = "My Tribler icon (as broadcast to others):"
+setdefaultfolder = "Set default download folder"
+stripedlist    = "Striped list"
+videosetting = "Video"
+
+choosedefaultdownloadfolder = "Choose a default folder for download files"
+maxsimdownloadwarning_line1 = "The maximum number of simultaneous downloading torrents"
+maxsimdownloadwarning_line2 = "must not be greater than the number of reserved ports"
+
+choosemovedir  = "Choose a folder to move completed files to"
+movecompleted  = "\"Clear Completed\" moves files to:"
+
+showtray               = "Show in tray:"
+showtray_never         = "Never"
+showtray_min   = "When Minimized"
+showtray_always = "Always"
+showtray_only  = "Only show in Tray"
+
+########################################################
+# ABCOptionDlg - Advanced Options
+#######################################################
+
+disksettings   = "Disk"
+advanced               = "Advanced"
+advsetting             = "Advanced settings"
+changeownrisk  = "(Under most circumstances, these settings do not need to be changed)"
+localip                = "Local IP: "
+iptobindto             = "IP to bind to: "
+minnumberofpeer = "Minimum number of peers: "
+diskalloctype  = "Disk allocation type:"
+allocrate              = "Allocation rate:"
+filelocking    = "File locking:"
+extradatachecking = "Extra data checking:"
+maxfileopen    = "Max files open:"
+maxpeerconnection = "Max peer connections:"
+reverttodefault = "Restore Defaults"
+bufferdisk             = "Disk Buffering"
+buffer_read    = "Read Cache"
+buffer_write   = "Write Cache"
+ut_pex_maxaddrs1 = "Maximum number of addresses to accept"
+ut_pex_maxaddrs2 = "via peer exchange per client"
+flush_data             = "Flush data to disk every"
+
+iphint_line1   = "The IP reported to the tracker."
+iphint_line2   = "(unless the tracker is on the same intranet as this client,"
+iphint_line3   = " the tracker will autodetect the client's IP and ignore this"
+iphint_line4   = " value)"
+
+bindhint_line1         = "The IP the client will bind to."
+bindhint_line2         = "Only useful if your machine is directly handling multiple IPs."
+bindhint_line3         = "If you don't know what this is, leave it blank."
+
+minpeershint_line1 = "The minimum number of peers the client tries to stay connected with."
+minpeershint_line2 = ""
+minpeershint_line3 = "Do not set this higher unless you have a very fast connection and a lot of system resources."
+
+ut_pex_maxaddrs_hint_line1 = "When you meet other peers they can give you addresses of the peers they know."
+ut_pex_maxaddrs_hint_line2 = "This value sets the maximum number of gossiped addresses you accept from each peer."
+ut_pex_maxaddrs_hint_line3 = "Don't set this too high as these gossiped addresses are from an untrusted source"
+ut_pex_maxaddrs_hint_line4 = "(i.e. a random peer) and not the trustworthy tracker."
+
+alloctypehint_line1 = "How to allocate disk space:"
+alloctypehint_line2 = ""
+alloctypehint_line3 = "'Normal' allocates space as data is received"
+alloctypehint_line4 = "'background' also adds space in the background"
+alloctypehint_line5 = "'pre-allocate' reserves space up front"
+alloctypehint_line6 = "'sparse' is only for filesystems that support it by default"
+
+allocratehint_line1 = "At what rate to allocate disk space when allocating in the background."
+allocratehint_line2 = ""
+allocratehint_line3 = "Set this too high on a slow filesystem and your download will slow to a crawl."
+
+lockinghint_line1 = "File locking prevents other programs (including other instances"
+lockinghint_line2 = "of BitTorrent) from accessing files you are downloading."
+
+doublecheckhint_line1 = "How much extra checking to do to make sure no data is corrupted."
+doublecheckhint_line2 = "Double-check requires higher CPU usage"
+doublecheckhint_line3 = "Triple-check also increases disk accesses"
+
+maxfileopenhint_line1 = "The maximum number of files to keep open at the same time."
+maxfileopenhint_line2 = "Please note that if this option is in effect,"
+maxfileopenhint_line3 = "files are not guaranteed to be locked."
+
+maxconnectionhint_line1 = "Some operating systems, (most notably Win9x/ME) combined"
+maxconnectionhint_line2 = "with certain network drivers, can only handle a limited"
+maxconnectionhint_line3 = "number of open ports."
+maxconnectionhint_line4 = ""
+maxconnectionhint_line5 = "If the client freezes, try setting this to 60 or below."
+
+
+
+recommendinstructions = "Double click on a torrent to start downloading; right click to delete or manually check health of the torrent"
+recommendfilter = "Don't show torrents with recommendation value less than"
+recommendfilterall = "(set to 0.0 to see all known torrents)"
+
+############################################################
+# BTMakeTorrentGUI
+############################################################
+btfilemakertitle       = "Create Torrent"
+btmaketorrenttitle     = "Make Torrent"
+maketorrentof          = "Source :"
+dir                            = "Dir"
+add                            = "Add"
+remove                                 = "Remove"
+announce                       = "Tracker"
+announcelist           = "Announce list :"
+copyannouncefromtorrent = "Copy tracker from torrent"
+createdby                      = "Created By :"
+
+trackerinfo                    = "Tracker Info"
+miscinfo                       = "Misc. Info"
+
+selectdir                      = "Select a directory"
+
+multiannouncehelp_line1="(A list of announces separated by commas or whitespace."
+multiannouncehelp_line2=" Trackers on the same line will be tried randomly."
+multiannouncehelp_line3=" All the trackers on one line will be tried before the trackers on the next.)"
+
+httpseeds              = "HTTP Seeds :"
+httpseedshelp = "(A list of HTTP seeds separated by commas or whitespace.)"
+
+saveasdefaultconfig = "Save as default config"
+maketorrent            = "Make Torrent"
+
+choosefiletouse        = "Choose file or directory to use"
+choosedottorrentfiletouse = "Choose .torrent file to use"
+youmustselectfileordir = "You must select a\n file or directory"
+
+dirnotice_line1        = "Do you want to make a separate .torrent"
+dirnotice_line2        = "for every item in this directory?"
+yes                            = "Yes"
+yestoall                       = "Yes to All"
+no                                     = "No"
+notoall                                = "No to All"
+playtime                       = "Duration of video ([hh:]mm:ss)"
+addthumbnail           = "Thumbnail"
+useinternaltracker = "Use internal tracker"
+manualtrackerconfig = "Use additional trackers (you must add internal tracker URL)"
+
+###########################################################
+# BTcompletedirgui
+###########################################################
+directorytomake     = "Directory to build :"
+select              = "Select"
+piecesize           = "Piece size :"
+make                = "Make"
+errormustselectdir     = "You must select a directory"
+btmakedirtitle                 = "Make Directory"
+checkfilesize          = "Checking file sizes"
+building                       = "Building "
+
+#################################################
+# Timeouts
+#################################################
+timeout                        = "Timeouts"
+schedulerrulemsg       = "Set timeout rules for torrents"
+setrule_line1          = "Reduce a torrent's priority and force it into queue so"
+setrule_line2          = "other torrents in queue won't be blocked when:"
+timeout_tracker        = "Torrent can't connect for:"
+timeout_download       = "Torrent can't download for:"
+timeout_upload         = "Seeding torrent doesn't upload for:"
+minute_long            = "Minutes"
+hour_long                      = "Hours"
+time                           = "Time"
+
+################################################################################################
+#(-Right-) Click Menu
+################################################################################################
+rHashCheck                     = "&Hash Check"
+rResume                        = "&Resume Torrent"
+rPlay                          = "   &Play Video"
+rStop                          = "&Stop"
+rPause                                 = "P&ause"
+rQueue                                 = "&Queue"
+rRemoveTorrent                 = "Remove Torrent"
+rRemoveTorrentandFile = "Remove Torrent and File(s)"
+
+rChangeViewModusThumb= "Thumbnail view"
+rChangeViewModusList= "List view"
+
+
+############# FILE and LIBRARY
+rOptions            = "Options:"
+rDownloadSecretly   = "   Download and hide this from other Tribler users"
+rDownloadOpenly     = "   Download"
+rModerate           = "   Change info..."
+rModerateCat        = "   Change category"
+rRecommend          = "   Recommend to a friend..."
+rAdvancedInfo          = "   Advanced info..."
+
+# Arno: categories must be completely defined by category.conf,
+# not in the code
+
+############# LIBRARY
+rLibraryOptions     = "Library options:"
+rOpenfilename      = "   Open file"
+rOpenfiledestination= "   Open destination"
+rRemoveFromList            = "   Remove from library"
+rRemoveFromListAndHD= "   Remove from library and harddisk"
+
+############# PERSONS and FRIENDS
+rAddAsFriend           = "   Add as friend"
+rRemoveAsFriend                = "   Remove this friend"
+rChangeInfo                    = "   Change friend info"
+
+############# FRIENDS
+rFriendsOptions     = "Friends options:"
+rSendAMessage       = "   Send a message..."
+
+############# SUBSCRIPTIONS
+rChangeSubscrTitle  = "   Change title"
+rRemoveSubscr       = "   Remove subscription"
+
+
+################################################################################################
+# Mouse roll over
+################################################################################################
+
+############# FILE
+rNumberOfSeeders    = "Number of current uploaders (seeders)"
+rNumberOfLeechers   = "Number of current downloaders (leechers)"
+
+
+
+rcopyfilename       = "&Copy Filename"
+rcopypath           = "Copy &Path"
+
+rcopyfromlist       = "&Copy from list..."
+rexportfromlist     = "&Export torrent"
+rextractfromlist       = "&Extract from List..."
+rclearmessage          = "&Clear Message"
+rtorrentdetail                 = "&Torrent Details..."
+rcurrentseedpeer       = "Current Seed/Peer"
+rchangedownloaddest = "Change Download Destination..."
+ropenfiledest          = "Open &File..."
+ropendest                      = "&Open Destination..."
+rsuperseedmode                 = "Use Super-seed &Mode"
+rpriosetting           = "&Priority Setting"
+rlocaluploadsetting = "&Local Settings..."
+
+openfiledest       = "Open File"
+opendest                       = "Open Destination"
+
+################################
+# BT status
+################################
+completed                      = "completed"
+completedseeding       = "completed/sharing"
+working                        = "downloading"
+superseeding           = "super-seeding"
+waiting                        = "waiting.."
+pause                          = "pause"
+queue                          = "queue"
+stopping                       = "stopping.."
+stop                           = "stopped"
+checkingdata           = "checking existing data"
+allocatingspace                = "allocating disk space"
+movingdata                     = "moving data"
+connectingtopeers      = "connecting"
+
+##############################################
+# Web Interface Service
+#############################################
+cantopensocket                 = "Can't open socket"
+socketerror            = "Socket Error!"
+inactive                       = "Webservice: Inactive"
+active                                 = "Webservice: Active"
+toolbar_webservice  = "Webservice"
+webinterfacetitle      = "Web Interface Service (version 3.0)"
+webip                          = "IP :"
+webport                        = "Port :"
+uniquekey                      = "Unique Key :"
+commandpermission      = "Command Permissions"
+webquery                       = "Query"
+webdelete                      = "Delete"
+webadd                                 = "Add"
+webqueue                       = "Queue"
+webstop                        = "Stop"
+webpause            = "Pause/Unpause"
+webresume                      = "Resume"
+websetparam                    = "Set Parameters"
+webgetparam                    = "Get Parameters"
+priority                       = "Priority"
+webclearallcompleted = "Clear all completed"
+webautostart           = "Auto start web service when launching Tribler"
+startservice           = "Start Service"
+stopservice            = "Stop Service"
+warningportunder1024_line1 = "Ports below 1024 are normally used for system services"
+warningportunder1024_line2 = "Do you really want to use this port?"
+cantconnectabcwebinterface = "Unable to connect to Tribler web service"
+
+##############################################
+# Scrape Dialog
+##############################################
+cantreadmetainfo       = "Can't read metainfo"
+cantgetdatafromtracker = "Can't get data from tracker"
+noannouncetrackerinmeta = "No announce tracker in your metainfo"
+warningscrapelessthanmin = "Please don't scrape more than once per minute."
+trackernoscrape     = "Tracker does not support scraping"
+seed                           = "Seed :"
+peer                           = "Peer :"
+status                                 = "Status :"
+scraping                       = "Scraping..."
+scrapingdone           = "Scraping done"
+
+##############################################
+# Upload Rate Maximizer
+##############################################
+autostart_threshold    = "Start a new torrent if upload is more than"
+autostart_delay                = "below the global limit for at least"
+
+activetorrents      = "Active Torrents"
+autostart           = "Auto Start"
+
+dynmaxuprate           = "Adjust upload rate for network overhead"
+dynrate                        = "(Dynamic Rate = Global Upload Rate - DownCalc - ConnectCalc)"
+downcalc_left          = "DownCalc = "
+downcalc_top           = "Download Rate"
+downcalc_bottom        = " * Download Rate + "
+connectcalc_left       = "ConnectCalc = "
+connectcalc_top        = "(Seeds + Peers)"
+connectcalc_bottom     = " * (Seeds + Peers) + "
+
+errorlanguagefile_line1 = "Your language file is missing at least one string."
+errorlanguagefile_line2 = "Please check to see if an updated version is available."
+restartabc                     = "(takes effect next time Tribler is opened)"
+
+messagelog                     = "Message Log"
+clearlog                       = "Clear Log"
+date                           = "Date"
+
+close_title            = "Closing"
+
+noportavailable        = "Couldn't find an available port to listen on"
+tryotherport        = "Would you like Tribler to try using another port?"
+
+column4_text           = "Title"
+column5_text           = "Progress"
+column6_text           = "BT Status"
+column7_text           = "Priority"
+column8_text           = "ETA"
+column9_text           = "Size"
+column10_text          = "DL Speed"
+column11_text          = "UL Speed"
+column12_text          = "%U/D Size"
+column13_text          = "Message"
+column14_text          = "Seeds"
+column15_text          = "Peers"
+column16_text          = "Copies"
+column17_text          = "Peer Avg Progress"
+column18_text          = "DL Size"
+column19_text          = "UL Size"
+column20_text          = "Total Speed"
+column21_text          = "Torrent Name"
+column22_text          = "Destination"
+column23_text          = "Seeding Time"
+column24_text          = "Connections"
+column25_text          = "Seeding Option"
+
+savecolumnwidth        = "Save column widths when resizing"
+showearthpanel         = "Show worldmap in detail window (higher CPU load)"
+
+errorinactivesingle_line1   = "Torrent must be inactive before proceeding"
+errorinactivesingle_line2      = "Stop this torrent?"
+
+errorinactivemultiple_line1   = "Torrents must be inactive before proceeding"
+errorinactivemultiple_line2     = "Stop torrents?"
+
+disabletimeout                 = "Disable timeouts for this torrent"
+
+forcenewdir            = "Always create new directory for multi-file torrents"
+
+forcenewdir_hint_line1 = "If this is enabled, a multi-file torrent will always"
+forcenewdir_hint_line2 = "be placed within its own directory."
+forcenewdir_hint_line3 = ""
+forcenewdir_hint_line4 = "If this is disabled, a multi-file torrent will be"
+forcenewdir_hint_line5 = "placed in its own directory only if no pieces"
+forcenewdir_hint_line6 = "of the file are already present to resume from."
+
+upnp                           = "UPnP"
+upnp_0                                 = "Disabled"
+upnp_1                                 = "Mode 1 (indirect via Windows)"
+upnp_2                                 = "Mode 2 (indirect via Windows)"
+upnp_3                         = "Mode 3 (direct via network)"
+tribler_warning                        = "Tribler Warning"
+tribler_information            = "Tribler Information"
+tribler_startup_nonfatalerror  = "A non-fatal error occurred during Tribler startup, you may need to change the network Preferences:  \n\n"
+tribler_upnp_error_intro       = "An error occurred while trying to open the listen port "
+tribler_upnp_error_intro_postfix= " on the firewall."
+tribler_upnp_error1            = "request to the firewall failed."
+tribler_upnp_error2            = "request to firewall returned:  '"
+tribler_upnp_error2_postfix     = "'. "
+tribler_upnp_error3            = "was enabled, but initialization failed."
+tribler_upnp_error_extro       = " This will hurt the performance of Tribler.\n\nTo fix this, configure your firewall/router/modem or try setting a different listen port or UPnP mode in (advanced) network Preferences."
+tribler_unreachable_explanation = "Others cannot contact you over the Internet. This will hurt the performance of Tribler.\n\nTo fix this, configure your firewall/router/modem or try different UPnP settings in the advanced network preferences."
+currentdiscoveredipaddress = "Your discovered IP address"
+
+associate                      = "Associate with .torrent files"
+notassociated_line1 = "Tribler is not currently associated with .torrent files"
+notassociated_line2 = "Do you wish to use Tribler to open .torrent files?"
+errorassociating    = "Error associating Tribler with .torrent files"
+
+savelog                                = "Save Log"
+savelogas                      = "Save log file as..."
+error_savelog          = "Error writing log file"
+
+download_normal        = "Download &Normally"
+download_never                 = "Download Ne&ver"
+download_later                 = "Download &Later"
+download_first                 = "Download &First"
+download_start         = "Start downloading"
+click_and_download     = "Click and Download"
+delete_torrent         = "The associated torrent file %s is not found on disk. Do you want to delete this entry from the Tribler database?"
+delete_dead_torrent = "Remove Torrent"
+
+###
+# Abbreviations in the status bar:
+###
+
+reachable_tooltip      = "Others can reach you, i.e. you are not firewalled. This is good"
+restart_tooltip                = "Please restart Tribler for your changes to take place"
+connecting_tooltip      = "Your current firewall status is being checked ..."
+unknownreach_tooltip   = "Others cannot reach you. This is not good. Click to learn more."
+abbrev_loaded          = "L:"
+abbrev_running         = "R:"
+abbrev_pause           = "P:"
+abbrev_downloading     = "D:"
+abbrev_seeding         = "S:"
+abbrev_connections     = "CX:"
+abbrev_down                    = "D:"
+abbrev_up                      = "U:"
+discover_peer   = "# Peers:"
+discover_file   = "# Files:"
+
+
+alloc_normal           = "normal"
+alloc_background       = "background"
+alloc_prealloc         = "pre-allocate"
+alloc_sparse           = "sparse"
+
+lock_never                     = "no locking"
+lock_writing           = "lock while writing"
+lock_always                    = "lock always"
+
+check_none                     = "no extra checking"
+check_double           = "double-check"
+check_triple           = "triple-check"
+
+nolimit                                = "no limit"
+
+automatic                      = "Automatic"
+loopback                       = "Loop Back"
+
+move_up                                = "Move Up"
+move_down                      = "Move Down"
+
+interfacemode          = "Interface mode:"
+mode_simple                    = "Simple"
+mode_intermediate      = "Intermediate"
+mode_expert                    = "Expert"
+
+spew0_text          = "Optimistic Unchoke"
+spew1_text          = "IP"
+spew2_text          = "Local/Remote"
+spew3_text          = "Up"
+spew4_text          = "Interested"
+spew5_text          = "Choking"
+spew6_text          = "Down"
+spew7_text          = "Interesting"
+spew8_text          = "Choked"
+spew9_text          = "Snubbed"
+spew10_text         = "Downloaded"
+spew11_text         = "Uploaded"
+spew12_text         = "Completed"
+spew13_text         = "Peer Download Speed"
+spew14_text        = "PermID"
+
+spew_direction_local = "L"
+spew_direction_remote = "R"
+
+color_startup          = "Not active"
+color_disconnected  = "Can't contact server"
+color_noconnections = "No connections"
+color_noincoming    = "No incoming connections"
+color_nocomplete    = "No complete copies"
+color_good          = "All good"
+
+color_stripe           = "Stripe color"
+
+torrentcolors          = "Torrent Colors"
+
+more                           = "More..."
+
+trackererror_problemconnecting = "Problem connecting to tracker"
+trackererror_rejected          = "Rejected by tracker"
+trackererror_baddata           = "Bad data from tracker"
+
+###################
+#Rename Torrent Dlg
+###################
+rrenametorrent="Rename torrent"
+renametorrent="Rename torrent : "
+edittorname="Edit torrent name :"
+usenamefrom="Use name from"
+currenttorname="Current torrent name :"
+
+originalname="Original name :"
+torrentfilename=".torrent file name :"
+othername = "Other :"
+
+destname="Destination name :"
+
+copybtn="Copy"
+rendestwithtor="Also rename destination"
+rtwd = "Rename torrent with destination by default"
+
+###
+#######################################################
+# Change destination dialog
+#######################################################
+choosedowndest="Change download destination..."
+downdestloc="Set download directory location"
+downdirname="Set download directory name"
+downfilename="Set download file name"
+choosenewdestloc="Choose new download directory location"
+choosenewdirname="Choose new download directory name :"
+choosenewfilename="Choose new download file name :"
+totalsize="total size :"
+updatetorname="Rename torrent"
+choosenewdest="New download destination :"
+browsebtn="Browse"
+
+rentorwithdest="Also change title in list"
+
+#errors:
+errorinvalidpath="Invalid syntax in the path. \nTry to add a \\"
+errorinvalidwinunitname="This name cannot be used as a Windows unit name."
+
+suggestedname="Suggested corrected name :"
+invalidwinname="This name cannot be used as a Windows file or folder name."
+iconbadformat="The icon you selected is not in a supported format"
+
+#########
+#Other
+#########
+warningopenfile = "Torrent is not completed yet, are you sure you want to open it?"
+upgradeabc = "Your software is outdated. Would you like to visit http://tribler.org to upgrade?"
+upgradeabctitle = "Update to Tribler "
+mainpage = "Tribler Main Page"
+sharing_reputation_information_title = "Sharing reputation information"
+sharing_reputation_information_message = "This progress bar shows your sharing reputation. You will have faster video playback by sharing more. Leaving Tribler running will improve your sharing reputation."
+sharing_reputation_poor = "Your current sharing reputation is low! This could affect your download speed. Please leave Tribler running to improve this."
+
+#############
+#Make Torrent
+#############
+savedtofolderwithsource = "Torrent will be saved to folder containing source"
+notadir="The default download directory is a file"
+savetor="Torrent location"
+savetordefault="Save to default folder :"
+savetorsource="Save to folder containing source"
+savetorask="Ask where to save to"
+choosetordeffolder="Choose a default folder to save torrents"
+
+torrentfileswildcard = ".torrent files"
+allfileswildcard = "All Files"
+logfileswildcard = "Log Files"
+
+listfont = "List font:"
+choosefont = "Choose Font..."
+sampletext = "Sample Text, 0123456789"
+
+startnow = "Start seeding immediately"
+makehash_md5 = "MD5"
+makehash_crc32 = "CRC-32"
+makehash_sha1 = "SHA-1"
+makehash_optional = "Optional hashes:"
+createmerkletorrent = "Create Merkle torrent (Tribler-only feature)"
+createtorrentsig = "Create signature (only if PermIDs enabled)"
+
+diskfull = "Error: Not enough space left on the destination disk"
+diskfullthreshold = "Stop torrents if destination has less than:"
+
+changetitle = "Change title to"
+
+separator          = "Separator"
+buttons_available  = "Available toolbar buttons:"
+buttons_current    = "Current toolbar buttons:"
+buttons_add        = "Add"
+buttons_remove     = "Remove"
+buttons_update    = "Update"
+buttons_edit      = "Edit"
+
+customizecontextmenu      = "Customize Context Menu"
+menu_available  = "Available menu items:"
+menu_current    = "Current menu items:"
+
+lowuploadstart1 = "Start next torrent if upload speed stays"
+lowuploadstart2 = "below global limit for at least"
+
+
+#############
+#Torrent List
+#############
+torrent0_text  = "Torrent Name"
+torrent1_text  = "Content Name"
+torrent2_text  = "Recommendation"
+torrent3_text  = "Sources"
+torrent4_text  = "Leechers"
+torrent5_text  = "Seeders"
+torrent6_text  = "Injected"
+torrent7_text  = "Size"
+torrent8_text  = "Files"
+torrent9_text  = "Tracker"
+torrent10_text  = "Category"
+
+#############
+#My Preference List
+#############
+mypref0_text   = "Torrent Name"
+mypref1_text   = "Content Name"
+mypref2_text   = "Rank"
+mypref3_text   = "Size"
+mypref4_text   = "Last Seen"
+
+#############
+#Taste Buddy List
+#############
+buddy0_text    = "Friend"
+buddy1_text = "Name"
+buddy2_text = "IP"
+buddy3_text = "Similarity"
+buddy4_text = "Last Seen"
+buddy5_text = "Downloads"
+buddy6_text = "Connected"
+buddy7_text = "Exchanged"
+
+#############
+#Tribler UI
+#############
+configcolumns = "Configure Columns"
+file_list_title = "Recommended Torrents"
+mypref_list_title = "My Download History"
+click_download = "Click and Download"
+start_downloading = "Start downloading "
+add_friend_notes = "Right click on a peer to add as a friend or delete it"
+delete = "Delete"
+download = "Download"
+checkstatus = "Check health"
+loading = "Loading ..."
+#############
+# Tribler activities
+#############
+act_upnp = "Opening firewall (if any) via UPnP"
+act_reachable = "Seeing if not firewalled"
+act_get_ext_ip_from_peers = "Asking peers for my IP address"
+act_meet = "Person connected: "
+act_got_metadata = "File discovered:"
+act_recommend = "Discovered more persons and files from"
+act_disk_full = "Disk is too full to collect more torrents. Please change your preferences or free space on "
+act_new_version = "New version of Tribler available"
+
+#############
+#Tribler UI - ContentFrontPanel, Tribler 3.6
+#############
+item = "item"
+person_item = "person"
+page = "page"
+order_by = "Order by"
+swarmsize = "Popular"
+swarmsize_tool = "Order content by the number of people in the swarm"
+recommended = "Recommended"
+recommendation = "Recommendation"
+recommendation_tool = "Order the content by how it's related to your taste"
+myhistory_tool = "Show the files you have recently downloaded"
+categories = "Categories"
+leecher = "leecher"
+leecher_tool = "%d downloaders"
+seeder = "seeder"
+seeder_tool = "%d uploaders"
+swarm_outdated_tool = "The tracker status is unknown"
+swarm_unavailable_tool = "The swarm status could not be queried"
+no_info = "No info"
+refresh = "Refresh info"
+refresh_tool = "Refresh the number of seeders and leechers in the swarm"
+size = "Size"
+size_tool = "Total size of content"
+tracker = "Tracker"
+created = "Created"
+last_checked = "Last checked"
+refreshing = "Refreshing"
+swarm = "Swarm"
+no_information = "No information"
+searching_content = "Searching for Tribler content..."
+delete_sure = "Are you sure you want to delete %s"
+delete_mypref_sure = "Are you sure you want to remove %s from your download history"
+recomm_relevance = "How much is it related to your taste"
+torrent_files = "Included files (%d)"
+
+#################
+# Tribler Video #
+#################
+videoplayererrortitle = "Tribler Video Error"
+videoplayerstartfailure = "Problem while starting video player:"
+videoplayernotfound = "Could not find video player:"
+videoplayernotfoundfor = "Could not find video player for file:"
+videoanalysernotfound = "Could not find video analyser:"
+
+# PREFERENCES/VIDEO MENU DOES NOT EXIST ANYMORE
+# videoanalyserwhereset = "Set it to FFMPEG in the Preferences / Video menu"
+videoanalyserwhereset = ""
+
+videonotcomplete = "The video cannot yet be played as it has not been completely downloaded:"
+notvideotorrent = "Nothing to play, no video files found in torrent"
+videoplaycontentnotfound = "Cannot find video file on disk"
+selectvideofiletitle = "Select video file"
+selectvideofile = "Select which video file to play:\n"
+playback_section = "Playback options"
+analysis_section = "Video-analysis options"
+videoplayer_default_path = "Path to external video player:"
+videoanalyserpath = "Path to the FFMPEG video analyser:"
+playback_mode = "Which video player to use: "
+playback_external_default = "Use external player specified below"
+playback_internal = "Use internal player (recommended)"
+playback_external_mime = "Use default Windows player"
+selectbandwidthtitle = "Enter your Internet speed"
+selectdlulbwprompt = "Your download/upload bandwidth is"
+selectdlulbwexplan = "For optimal performance, Tribler needs to know your Internet connection speed. Please specify it below. 'xxxx' means any, so if you have 512/256 kbps subscription, select 'xxxx/256 kbps'"
+savemedia = "Save content as"
+vodwarntitle = "Play As Soon As Possible"
+vodwarngeneral = "Be warned that Tribler Video-On-Demand unfortunately only works if you have high upload bandwidth and/or a lot of people are offering the video for download. It also won't work for some file types (e.g. .mov) as they are meant to be played from disk and not incrementally from the network as Tribler VOD does, sorry. But please give it a spin!"
+livewarntitle = "Play Live Stream"
+livewarngeneral = "You are about to play a live video stream that probably needs all your upload bandwidth"
+vodwarnbitrateunknown = ""
+vodwarnbitrateinsufficient = ""
+vodwarnbitrateinsufficientmeasured = ""
+vodwarnmov = ""
+vodwarnconclusionno = ""
+vodwarnbitratesufficient = ""
+vodwarnconclusionyes = ""
+
+vodwarntitle_old = "Experimental Feature Warning"
+vodwarngeneral_old = "Tribler Video-On-Demand is a highly experimental feature that allows you to watch videos while they are downloading, given you have sufficient upload bandwidth and/or a lot of people are offering the video for download. "
+vodwarnbitrateunknown_old = "The video you have selected has an unknown bitrate. "
+vodwarnbitrateinsufficient_old = "The video you have selected has a bitrate of %s KB/s, and your upload bandwidth is just %s. "
+vodwarnbitrateinsufficientmeasured_old = "The video you have selected has a bitrate of %s KB/s, and your best measured upload bandwidth is just %s. "
+vodwarnmov_old = "The selected video is a .MOV which usually cannot be played on demand. "
+vodwarnconclusionno_old = "So it's not clear whether there is enough bandwidth to watch it."
+vodwarnbitratesufficient_old = "The video you have selected has a bitrate of %s KB/s, and your upload bandwidth is %s. "
+vodwarnconclusionyes_old = "So you should be able to play it, but keep in mind this is highly experimental!"
+
+vodwhataboutothertorrents = "What to do with other downloads? \n"
+vodrestartothertorrents = "Stop all others and resume them afterwards (recommended)"
+vodstopothertorrents = "Stop all other downloads"
+vodleaveothertorrents = "Leave other downloads running"
+
+vodwarnprompt = "Continue?"
+vodwarnprompt_old = "Would you like to continue?"
+
+
+unlimited = "unlimited"
+bitrateprompt = "Bitrate:"
+unknown = "unknown"
+doesnotapply = "n/a"
+videoposition = "Position:"
+videoprogress = "Progress:"
+playprompt = "Play"
+pauseprompt = "Pause"
+fullscreen = "Fullscreen"
+volumeprompt = "Volume:"
+backtocontentview = "Back to Content View"
+vodprogress = "Progress:"
+launchvideoplayer = "Launch Video Player"
+videoserverservefailure = "Error serving video to player, probably the player does not understand the video format or cannot play it from the network."
+videoserverservefailureadvice = "Please wait until the download is complete and try again, or select a different player in Preferences/Video."
+downloading = "Active"
+
+#############
+#Tribler UI - Profile View, Tribler 4.0.0
+#############
+nothingToDo = "You are optimal here!!"
+profileDetails_Overall_description = "You are a: -current level- \n- Beginner\n- Experienced\n- Top User\n- Master"
+# --- Recommendation quality
+profileDetails_Quality_description = "Based on the files you have downloaded over time, Tribler recommends other files that are likely to be interesting to you. \n\nSo far you have%s downloaded %s files."
+profileDetails_Quality_descriptionz_onlyword = " only"
+profileDetails_Quality_improve = "* Download more files to increase the quality of Tribler recommendations."
+# --- Discoverd Files
+profileDetails_Files_description = "So far, you have discovered %s files."
+profileDetails_Files_improve = "* Stay online longer to discover more files. \n\n* You have set your maximum to %s files. If you have reached this limit please set it higher."
+# --- Discoverd Persons
+profileDetails_Persons_description = "So far, you have discovered %s people."
+profileDetails_Persons_improve = "* Stay online longer and you will discover more people."
+# --- Optimal Download Speed
+profileDetails_Download_info = "You are not using your download speed optimally. To increase, follow the instructions."
+profileDetails_Download_UpSpeed = "Your upload speed is set to %d KB/s. Limiting your upload speed also limits your download speed."
+profileDetails_Download_UpSpeedMax = "Your upload speed is set to 'unlimited'. That's good."
+profileDetails_Download_UpSpeed_improve = "* Increase the upload speed limit in your preferences (for -Play ASAP- mode you need at least 64 KB/s). "
+profileDetails_Download_UpSpeedMax_improve = "* For an improved performance, you can also increase the number of upload slots in Preferences. "
+#  profileDetails_Download_UpSlots = "You set up a number of %d slots for upload."
+#  profileDetails_Download_UpSlotsMax = "You set up an unlimited number of slots for upload. That's good."
+#  profileDetails_Download_DlSpeed = "Your download speed is set to %d KB/s."
+#  profileDetails_Download_DlSpeedMax = "Your download speed is set to unlimited. That's good."
+profileDetails_Download_Friends = "At the moment you have %d friends. If you make more friends you can help in boosting each others download speeds."
+profileDetails_Download_Friends_improve = "* Invite your friends, family, and colleagues by e-mail, to start tribler too and let them add you as a friend."
+profileDetails_Download_VisibleYes = "You are currently accessible by other people."
+profileDetails_Download_VisibleYes_improve = "* Your friends should also be accessible. For that, please guide them to www.tribler.org for instructions."
+#profileDetails_Download_VisibleNo = "Other users are not able to connect to you, because your modem/router blocks them."
+profileDetails_Download_VisibleNo = "Other users are not able to connect to you, because your modem/router (%s) blocks them."
+profileDetails_Download_VisibleNo_improve = "* You have to open a port on your modem/router to enable other users to connect to you. This will almost double your possible download speed. Read more on www.tribler.org for instructions."
+# --- Network Reach
+profileDetails_Presence_info = "If you want to increase your network reach, follow the instructions."
+#profileDetails_Presence_Friends = profileDetails_Download_Friends
+#profileDetails_Presence_Friends_improve = profileDetails_Download_Friends_improve
+profileDetails_Presence_Sharingratio = "Your overall sharing ratio is %d. This means that you download more from others than you upload to them."
+profileDetails_Presence_Sharingratio_improve = "* To reach a fair sharing ratio, you should share your files longer. "
+profileDetails_Presence_VersionNewer = "You are using a newer version of Tribler (%s) than on website (%s)."
+profileDetails_Presence_VersionNewer_improve = "* Check the website for news and updates at %s"
+profileDetails_Presence_VersionOlder = "You are using an old version of Tribler (%s) and not taking advantage of the new features available. "
+profileDetails_Presence_VersionOlder_improve = "* Update to the newest version %s at %s"
+profileDetails_Presence_VersionCurrent = "You are up to date! The current version client is %s."
+profileDetails_Presence_VersionCurrent_improve = "* Check the website for news and updates at %s"
+profileDetails_Presence_VersionUnknown = "unknown"
+profileDetails_Presence_VersionError = "Your current client version is %s."
+profileDetails_Presence_VersionError_improve = "* Check the website for news and updates at %s"
+
+###############
+# Tribler UI - persons.py, Tribler 3.7
+##############
+peer_status_tooltip = "Status of person based on last time seen"
+peer_friend_tooltip = "This person is a friend of yours. Click to remove friendship."
+peer_nofriend_tooltip = "Click to make this person your friend."
+peer_connected_times_tooltip = "Successful connections made to this person."
+peer_buddycast_times_tooltip = "Specific Tribler messages exchanged with this person."
+peer_similarity_tooltip = "Similarity between you and this person based on the download history."
+commonFiles    = "   Common files (%d)"
+alsoDownloaded = "Also downloaded (%d/%s)"
+peer_common_files_tooltip = "Files that you and this person have in common."
+peer_other_files_tooltip = "Other files that this person has downloaded."
+
+#################
+# Notification  #
+#################
+notification_download_complete = "Download Complete"
+notification_finished_seeding  = "Finished Seeding"
+
+#############
+#Tribler UI - Persons View, Tribler 4.0.0
+#############
+persons_view_no_data = "No people encountered yet"
+
+torrentcollectsleep = "Seconds between downloading torrents from RSS:"
+buddycastsubscription = "Discover content via other Tribler users"
+web2subscription = "Discover content from YouTube and LiveLeak"
+filesdefaultsearchweb2txt = "search files, YouTube and LiveLeak"
+filesdefaultsearchtxt = "search all files"
+rssurldefaulttxt = "Paste your RSS link here"
+
+vlc_linux_start_bug_title = "No flash video streaming on Ubuntu Linux with VLC"
+vlc_linux_start_bug = "The current Ubuntu version of the VLC video player cannot stream Youtube.com movies. So be warned, they will not start playing until they have been completely downloaded. We have submitted a patch to Ubuntu."
+going_search = "        Results: %d"
+#going_search = "Searching for '%s'... (%d results)"
+finished_search = "Finished search '%s'. (%d results)"
+search_web2 = "Web movies (%d results)"
+search_torrent = "Discovered files (%d results)"
+search_peers = "Discovered persons (%d results)"
+search_friends = "Friends (%d results)"
+search_library = "Library files (%d results)"
+search_remote = "Tribler network (%d results)"
+# search buttons
+searchStop       = "stop searching"
+searchStopEnabled= "stopped searching"
+searchClear      = "clear results and browse all discovered files"
+help             = "Current sharing reputation : %2.2f"
+
+################
+#Tribler UI - Column headers Tribler 4.1.0
+#################
+# FILES
+C_filename     = "Name of the file"
+C_filesize             = "Total size"
+C_popularity    = "Popularity of the file"
+C_creationdate = "Creation date"
+C_uploaders            = "Number of uploaders (seeders)"
+C_downloaders  = "Number of downloaders (leechers)"
+C_recommfiles  = "Fit to your taste (top20 of discovered files)"
+C_source               = "Source of file"
+# PERSONS
+C_personname   = "Name of the persons"
+C_status               = "Last time you connected with this person"
+C_discfiles            = "Number of files discovered by this person"
+C_discpersons  = "Number of persons discovered by this person"
+C_recommpersons        = "Fit to your taste (top20 of discovered persons)"
+C_friends              = "Friends of yours"
+# LIBRARY
+C_progress             = "Progress of downloads"
+C_downspeed            = "Download speed"
+C_upspeed              = "Upload speed"
+C_downupspeed          = "Current download and upload speed"
+C_message              = "Status of downloads (no sorting)"
+C_info                 = "Other info (no sorting)"
+# FRIENDS
+C_friendname   = "Name of your friends"
+C_friendstatus = "Last time you connected with your friends"
+C_helping              = "Whether friend is boosting your downloads (no sorting)"
+C_remove               = "Remove file from Library and Disk"
+
+# TopNList discovered peers in profile view - Tribler 4.1.0
+
+totalUp         = "Up: %s"
+totalDown       = "Down: %s"
+
+# Core download status
+DLSTATUS_ALLOCATING_DISKSPACE = "initializing"
+DLSTATUS_WAITING4HASHCHECK = "initializing"
+DLSTATUS_HASHCHECKING = "checking old data"
+DLSTATUS_DOWNLOADING = "downloading"
+DLSTATUS_SEEDING = "completed/sharing"
+DLSTATUS_STOPPED = "stopped"
+DLSTATUS_STOPPED_ON_ERROR = "stopped/error"
+
+duplicate_download_msg = "You are already downloading this torrent, see the My Files section."
+duplicate_download_title = "Duplicate download"
+
+invalid_torrent_no_playable_files_msg = "You are attempting to play files from a torrent that does not contain any playable files."
+invalid_torrent_no_playable_files_title = "Invalid torrent"
+
+#
+# Friendship
+#
+question = "Question"
+addfriendfillin = "Do you want to add\n%s\nas your friend?"
+
+################
+#Tribler UI - Upload tab
+#################
+peer_ip = "Peer IP"
+tribler_name = "Tribler name"
+curr_ul_rate = "Current upload rate"
+ul_amount = "Amount of MBytes uploaded"
+
diff --git a/instrumentation/next-share/BaseLib/Lang/lang.py b/instrumentation/next-share/BaseLib/Lang/lang.py
new file mode 100644 (file)
index 0000000..a632879
--- /dev/null
@@ -0,0 +1,209 @@
+# Written by ABC authors and Arno Bakker
+# see LICENSE.txt for license information
+import wx
+import sys
+import os
+
+from traceback import print_exc, print_stack
+from cStringIO import StringIO
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Utilities.configreader import ConfigReader
+from BaseLib.Core.BitTornado.__init__ import version_id
+
+################################################################
+#
+# Class: Lang
+#
+# Keep track of language strings.
+#
+# Lookups occur in the following order:
+# 1. See if the string is in user.lang
+# 2. See if the string is in the local language file
+# 3. See if the string is in english.lang
+#
+################################################################
+class Lang:
+    def __init__(self, utility):
+        self.utility = utility
+        
+        filename = self.utility.config.Read('language_file')
+        
+        
+        langpath = os.path.join(self.utility.getPath(), LIBRARYNAME,  "Lang")
+        
+        sys.stdout.write("Setting up languages\n")
+        sys.stdout.write("Language file: " + str(filename) + "\n")
+        
+        # Set up user language file (stored in user's config directory)
+        self.user_lang = None
+        user_filepath = os.path.join(self.utility.getConfigPath(), 'user.lang')
+        self.user_lang = ConfigReader(user_filepath, "ABC/language")
+
+        # Set up local language file
+        self.local_lang_filename = None
+        self.local_lang = None
+        local_filepath = os.path.join(langpath, filename)
+        
+        if filename != 'english.lang' and existsAndIsReadable(local_filepath):
+            self.local_lang_filename = filename
+            # Modified
+            self.local_lang = wx.FileConfig(localFilename = local_filepath)
+            self.local_lang.SetPath("ABC/language")
+            #self.local_lang = ConfigReader(local_filepath, "ABC/language")
+        
+        # Set up english language file
+        self.english_lang = None
+        english_filepath = os.path.join(langpath, 'english.lang')
+        if existsAndIsReadable(english_filepath):
+            self.english_lang = ConfigReader(english_filepath, "ABC/language")
+        
+        self.cache = {}
+        
+        self.langwarning = False
+        
+    def flush(self):
+        if self.user_lang is not None:
+            try:
+                self.user_lang.DeleteEntry("dummyparam", False)
+            except:
+                pass
+            self.user_lang.Flush()
+        self.cache = {}
+              
+    # Retrieve a text string
+    def get(self, label, tryuser = True, trylocal = True, tryenglish = True, giveerror = True):        
+        if tryuser and trylocal and tryenglish:
+            tryall = True
+        else:
+            tryall = False
+    
+        if tryall and label in self.cache:
+            return self.expandEnter(self.cache[label])
+    
+        if (label == 'version'):
+            return version_id
+        if (label == 'build'):
+            return "Build 17078"
+        if (label == 'build_date'):
+            return "Aug 27, 2010"
+        # see if it exists in 'user.lang'
+        if tryuser:
+            text, found = self.getFromLanguage(label, self.user_lang)
+            if found:
+                if tryall:
+                    self.cache[label] = text
+                return self.expandEnter(text)
+
+        # see if it exists in local language
+        if trylocal and self.local_lang is not None:
+            text, found = self.getFromLanguage(label, self.local_lang, giveerror = True)
+            if found:
+                if tryall:
+                    self.cache[label] = text
+                return self.expandEnter(text)
+
+        # see if it exists in 'english.lang'
+        if tryenglish:
+            text, found = self.getFromLanguage(label, self.english_lang)
+            if found:
+                if tryall:
+                    self.cache[label] = text
+                return self.expandEnter(text)
+
+        # if we get to this point, we weren't able to read anything
+        if giveerror:
+            sys.stdout.write("Language file: Got an error finding: "+label)
+            self.error(label)
+        return ""
+        
+    def expandEnter(self, text):
+        text = text.replace("\\r","\n")
+        text = text.replace("\\n","\n")
+        return text
+        
+    def getFromLanguage(self, label, langfile, giveerror = False):
+        try:
+            if langfile is not None:
+                if langfile.Exists(label):
+                    return self.getSingleline(label, langfile), True
+                if langfile.Exists(label + "_line1"):
+                    return self.getMultiline(label, langfile), True
+                
+                if giveerror:
+                    self.error(label, silent = True)
+        except:
+            fileused = ""
+            langfilenames = { "user.lang": self.user_lang, 
+                              self.local_lang_filename: self.local_lang, 
+                              "english.lang": self.english_lang }
+            for name in langfilenames:
+                if langfilenames[name] == langfile:
+                    fileused = name
+                    break
+            sys.stderr.write("Error reading language file: (" + fileused + "), label: (" + label + ")\n")
+            data = StringIO()
+            print_exc(file = data)
+            sys.stderr.write(data.getvalue())            
+                
+        return "", False
+        
+    def getSingleline(self, label, langfile):
+        return langfile.Read(label)
+    
+    def getMultiline(self, label, langfile):
+        i = 1
+        text = ""
+        while (langfile.Exists(label + "_line" + str(i))):
+            if (i != 1):
+                text+= "\n"
+            text += langfile.Read(label + "_line" + str(i))
+            i += 1
+        if not text:
+            sys.stdout.write("Language file: Got an error reading multiline string\n")
+            self.error(label)
+        return text
+        
+    def writeUser(self, label, text):
+        change = False
+        
+        text_user = self.get(label, trylocal = False, tryenglish = False, giveerror = False)
+        text_nonuser = self.get(label, tryuser = False, giveerror = False)
+               
+        user_lang = self.user_lang
+        
+        # The text string is the default string
+        if text == text_nonuser:
+            # If there was already a user string, delete it
+            # (otherwise, do nothing)
+            if text_user != "":
+                user_lang.Write("exampleparam", "example value")
+                user_lang.DeleteEntry(label)
+                change = True
+        elif text != text_user:
+            # Only need to update if the text string differs
+            # from what was already stored
+            user_lang.Write(label, text)
+            change = True
+        
+        return change
+        
+    def error(self, label, silent = False):
+        # Display a warning once that the language file doesn't contain all the values
+        if (not self.langwarning):
+            self.langwarning = True
+            error_title = self.get('error')
+            error_text = self.get('errorlanguagefile')
+            if (error_text == ""):
+                error_text = "Your language file is missing at least one string.\nPlease check to see if an updated version is available."
+            # Check to see if the frame has been created yet
+            if not silent and hasattr(self.utility, 'frame'):
+                # For the moment don't do anything if we can't display the error dialog
+                dlg = wx.MessageDialog(None, error_text, error_title, wx.ICON_ERROR)
+                dlg.ShowModal()
+                dlg.Destroy()
+        sys.stderr.write("\nError reading language file!\n")
+        sys.stderr.write("  Cannot find value for variable: " + label + "\n")
+
+def existsAndIsReadable(filename):
+    return os.access(filename, os.F_OK) and os.access(filename, os.R_OK)
diff --git a/instrumentation/next-share/BaseLib/Player/BaseApp.py b/instrumentation/next-share/BaseLib/Player/BaseApp.py
new file mode 100644 (file)
index 0000000..173a83c
--- /dev/null
@@ -0,0 +1,735 @@
+# Written by Arno Bakker, Choopan RATTANAPOKA, Jie Yang
+# see LICENSE.txt for license information
+""" Base class for Player and Plugin Background process. See swarmplayer.py """
+
+#
+# TODO: set 'download_slice_size' to 32K, such that pieces are no longer
+# downloaded in 2 chunks. This particularly avoids a bad case where you
+# kick the source: you download chunk 1 of piece X
+# from lagging peer and download chunk 2 of piece X from source. With the piece
+# now complete you check the sig. As the first part of the piece is old, this
+# fails and we kick the peer that gave us the completing chunk, which is the 
+# source.
+#
+# Note that the BT spec says: 
+# "All current implementations use 2 15 , and close connections which request 
+# an amount greater than 2 17." http://www.bittorrent.org/beps/bep_0003.html
+#
+# So it should be 32KB already. However, the BitTorrent (3.4.1, 5.0.9), 
+# BitTornado and Azureus all use 2 ** 14 = 16KB chunks.
+
+import os
+import sys
+import time
+import shutil
+from sets import Set
+
+from base64 import encodestring
+from threading import enumerate,currentThread,RLock
+from traceback import print_exc
+# Ric: added svc ext  
+from BaseLib.Video.utils import svcextdefaults
+
+if sys.platform == "darwin":
+    # on Mac, we can only load VLC/OpenSSL libraries
+    # relative to the location of tribler.py
+    os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
+try:
+    import wxversion
+    wxversion.select('2.8')
+except:
+    pass
+import wx
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.API import *
+from BaseLib.Policies.RateManager import UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager
+from BaseLib.Utilities.Instance2Instance import *
+
+from BaseLib.Player.systray import *
+# from BaseLib.Player.Reporter import Reporter
+from BaseLib.Player.UtilityStub import UtilityStub
+from BaseLib.Core.Statistics.Status.Status import get_status_holder
+
+DEBUG = False
+RATELIMITADSL = False
+DOWNLOADSPEED = 300
+DISKSPACE_LIMIT = 5L * 1024L * 1024L * 1024L  # 5 GB
+DEFAULT_MAX_UPLOAD_SEED_WHEN_SEEDING = 75 # KB/s
+
+class BaseApp(wx.App,InstanceConnectionHandler):
+    # Base class shared by the SwarmPlayer and the plugin background
+    # process: combines the wx event loop with the instance2instance (i2i)
+    # remote-control connection handling.
+    def __init__(self, redirectstderrout, appname, appversion, params, single_instance_checker, installdir, i2iport, sport):
+        # redirectstderrout: forwarded verbatim to wx.App.__init__
+        # appname/appversion: used for icon paths, dialogs and the systray tooltip
+        # i2iport: TCP port for the Instance2InstanceServer started in OnInitBase
+        # sport: listen port given to a freshly created Tribler Session
+        self.appname = appname
+        self.appversion = appversion
+        self.params = params
+        self.single_instance_checker = single_instance_checker
+        self.installdir = installdir
+        self.i2iport = i2iport
+        self.sport = sport
+        self.error = None
+        self.s = None
+        self.tbicon = None
+        
+        self.downloads_in_vodmode = Set() # Set of playing Downloads, one for SP, many for Plugin
+        self.ratelimiter = None
+        self.ratelimit_update_count = 0
+        self.playermode = DLSTATUS_DOWNLOADING
+        self.getpeerlistcount = 2 # for research Reporter
+        self.shuttingdown = False
+        
+        # Register the i2i line-callback before the wx.App machinery starts.
+        InstanceConnectionHandler.__init__(self,self.i2ithread_readlinecallback)
+        wx.App.__init__(self, redirectstderrout)
+
+        
+    def OnInitBase(self):
+        """ To be wrapped in a OnInit() method that returns True/False """
+        
+        # Normal startup
+        # Read config
+        state_dir = Session.get_default_state_dir('.'+self.appname)
+        
+        self.utility = UtilityStub(self.installdir,state_dir)
+        self.utility.app = self
+        print >>sys.stderr,self.utility.lang.get('build')
+        self.iconpath = os.path.join(self.installdir,LIBRARYNAME,'Images',self.appname+'Icon.ico')
+        self.logopath = os.path.join(self.installdir,LIBRARYNAME,'Images',self.appname+'Logo.png')
+
+        
+        # Start server for instance2instance communication
+        self.i2is = Instance2InstanceServer(self.i2iport,self,timeout=(24.0*3600.0)) 
+
+
+        # The playerconfig contains all config parameters that are not
+        # saved by checkpointing the Session or its Downloads.
+        self.load_playerconfig(state_dir)
+
+        # Install systray icon
+        # Note: setting this makes the program not exit when the videoFrame
+        # is being closed.
+        self.tbicon = PlayerTaskBarIcon(self,self.iconpath)
+        
+        # Start Tribler Session
+        cfgfilename = Session.get_default_config_filename(state_dir)
+        
+        if DEBUG:
+            print >>sys.stderr,"main: Session config",cfgfilename
+        try:
+            self.sconfig = SessionStartupConfig.load(cfgfilename)
+            
+            print >>sys.stderr,"main: Session saved port",self.sconfig.get_listen_port(),cfgfilename
+        except:
+            # No (readable) saved config: build a fresh one.
+            # NOTE(review): configure_session() is applied only on this fresh
+            # path; a previously saved config is used as-is -- confirm that
+            # the overlay/megacache settings persist correctly across runs.
+            print_exc()
+            self.sconfig = SessionStartupConfig()
+            self.sconfig.set_install_dir(self.installdir)
+            self.sconfig.set_state_dir(state_dir)
+            self.sconfig.set_listen_port(self.sport)
+            self.configure_session()    
+
+        self.s = Session(self.sconfig)
+        self.s.set_download_states_callback(self.sesscb_states_callback)
+
+        # self.reporter = Reporter( self.sconfig )
+
+        if RATELIMITADSL:
+            self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager()
+            self.ratelimiter.set_global_max_speed(DOWNLOAD,DOWNLOADSPEED)
+            self.ratelimiter.set_global_max_speed(UPLOAD,90)
+
+
+        # Arno: For extra robustness, ignore any errors related to restarting
+        try:
+            # Load all other downloads in cache, but in STOPPED state
+            self.s.load_checkpoint(initialdlstatus=DLSTATUS_STOPPED)
+        except:
+            print_exc()
+
+        # Start remote control
+        self.i2is.start()
+
+        # report client version
+        # from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
+        reporter = get_status_holder("LivingLab")
+        reporter.create_and_add_event("client-startup-version", [self.utility.lang.get("version")])
+        reporter.create_and_add_event("client-startup-build", [self.utility.lang.get("build")])
+        reporter.create_and_add_event("client-startup-build-date", [self.utility.lang.get("build_date")])
+
+    def configure_session(self):
+        # Player-specific session defaults: run as a pure download/play
+        # client without the Tribler overlay network or its megacache DB.
+        # Only called for a freshly created SessionStartupConfig.
+        # No overlay
+        self.sconfig.set_overlay(False)
+        self.sconfig.set_megacache(False)
+
+
+    def _get_poa(self, tdef):
+        """Try to load a POA - possibly trigger a GUI-thing or should the plugin handle getting it if none is already available?"""
+        
+        # Resolution order: (1) POA previously saved in the session state
+        # dir; (2) interactively via PaymentIntegration.wx_get_poa. Whatever
+        # was obtained is saved back (best effort) and validated against the
+        # torrent's infohash before being returned. Returns None if no POA
+        # could be obtained.
+        from BaseLib.Core.ClosedSwarm import ClosedSwarm,PaymentIntegration
+        print >>sys.stderr, "Swarm_id:",encodestring(tdef.infohash).replace("\n","")
+        try:
+            poa = ClosedSwarm.trivial_get_poa(self.s.get_state_dir(),
+                                              self.s.get_permid(),
+                                              tdef.infohash)
+            
+            poa.verify()
+            if not poa.torrent_id == tdef.infohash:
+                raise Exception("Bad POA - wrong infohash")
+            print >> sys.stderr,"Loaded poa from ",self.s.get_state_dir()
+        except:
+            # Try to get it or just let the plugin handle it?
+            swarm_id = encodestring(tdef.infohash).replace("\n","")
+            my_id = encodestring(self.s.get_permid()).replace("\n", "")
+            try:
+                # TODO: Support URLs from torrents?
+                poa = PaymentIntegration.wx_get_poa(None,
+                                                    swarm_id,
+                                                    my_id,
+                                                    swarm_title=tdef.get_name())
+            except Exception,e:
+                print >> sys.stderr, "Failed to get POA:",e
+                poa = None
+
+        # Persist whatever we ended up with; failures here are non-fatal.
+        try:
+            ClosedSwarm.trivial_save_poa(self.s.get_state_dir(),
+                                         self.s.get_permid(),
+                                         tdef.infohash,
+                                         poa)
+        except Exception,e:
+            print >> sys.stderr,"Failed to save POA",e
+            
+        # Final sanity check: a POA for a different swarm is an error.
+        if poa:
+            if not poa.torrent_id == tdef.infohash:
+                raise Exception("Bad POA - wrong infohash")
+
+        return poa
+
+
+    def start_download(self,tdef,dlfile,poa=None,supportedvodevents=None):
+        """ Start download of torrent tdef and play video file dlfile from it """
+        # Returns the (new or reused) Download object, which is also added
+        # to self.downloads_in_vodmode. Side effects: stops every other
+        # non-playing Download and may delete old downloads to free disk.
+        if poa:
+            from BaseLib.Core.ClosedSwarm import ClosedSwarm
+            if not poa.__class__ == ClosedSwarm.POA:
+                raise InvalidPOAException("Not a POA")
+            
+        # Free diskspace, if needed
+        destdir = self.get_default_destdir()
+        if not os.access(destdir,os.F_OK):
+            os.mkdir(destdir)
+
+        # Arno: For extra robustness, ignore any errors related to restarting
+        # TODO: Extend code such that we can also delete files from the 
+        # disk cache, not just Downloads. This would allow us to keep the
+        # parts of a Download that we already have, but that is being aborted
+        # by the user by closing the video window. See remove_playing_*
+        try:
+            if not self.free_up_diskspace_by_downloads(tdef.get_infohash(),tdef.get_length([dlfile])):
+                print >>sys.stderr,"main: Not enough free diskspace, ignoring"
+        except:
+            print_exc()
+        
+        # Setup how to download
+        dcfg = DownloadStartupConfig()
+
+        # CLOSED SWARMS
+        if poa:
+            dcfg.set_poa(poa)
+            print >> sys.stderr,"POA:",dcfg.get_poa()
+        else:
+            dcfg.set_poa(None)
+            
+        # Delegate processing to VideoPlayer
+        if supportedvodevents is None:
+            supportedvodevents = self.get_supported_vod_events()
+            
+        print >>sys.stderr,"bg: VOD EVENTS",supportedvodevents
+        dcfg.set_video_events(supportedvodevents)
+        
+        # Ric: added svc
+        if tdef.is_multifile_torrent():
+            svcdlfiles = self.is_svc(dlfile, tdef)
+
+            if svcdlfiles is not None:
+                # Scalable Video Coding: download the base layer plus all
+                # enhancement layers in DLMODE_SVC.
+                dcfg.set_video_event_callback(self.sesscb_vod_event_callback, dlmode=DLMODE_SVC)
+                # Ric: svcdlfiles is an ordered list of svc layers
+                dcfg.set_selected_files(svcdlfiles)
+            else:
+                # Normal multi-file torrent
+                dcfg.set_video_event_callback(self.sesscb_vod_event_callback)
+                dcfg.set_selected_files([dlfile])
+        else:
+            dcfg.set_video_event_callback(self.sesscb_vod_event_callback)
+            # Do not set selected file
+                    
+
+        dcfg.set_dest_dir(destdir)
+        
+        # Arno: 2008-7-15: commented out, just stick with old ABC-tuned 
+        # settings for now
+        #dcfg.set_max_conns_to_initiate(300)
+        #dcfg.set_max_conns(300)
+        
+        # Cap at 1 MB/s
+        print >>sys.stderr,"bg: Capping Download speed to 1 MByte/s"
+        dcfg.set_max_speed(DOWNLOAD,1024)
+        
+        
+        # Stop all non-playing, see if we're restarting one
+        infohash = tdef.get_infohash()
+        newd = None
+        for d in self.s.get_downloads():
+            if d.get_def().get_infohash() == infohash:
+                # Download already exists.
+                # One safe option is to remove it (but not its downloaded content)
+                # so we can start with a fresh DownloadStartupConfig. However,
+                # this gives funky concurrency errors and could prevent a
+                # Download from starting without hashchecking (as its checkpoint
+                # was removed) 
+                # Alternative is to set VOD callback, etc. at Runtime:
+                print >>sys.stderr,"main: Reusing old duplicate Download",`infohash`
+                newd = d
+                                    
+                # If we have a POA, we add it to the existing download
+                if poa:
+                    d.set_poa(poa)
+
+            if d not in self.downloads_in_vodmode:
+                d.stop()
+
+        self.s.lm.h4xor_reset_init_conn_counter()
+
+        # ARNOTODO: does this work with Plugin's duplicate download facility?
+
+        self.playermode = DLSTATUS_DOWNLOADING
+        if newd is None:
+            print >>sys.stderr,"main: Starting new Download",`infohash`
+            newd = self.s.start_download(tdef,dcfg)
+        # Ric: added restart of an svc download
+        else:
+            # Reconfigure the reused Download at runtime to mirror the dcfg
+            # settings chosen above, then restart it.
+            newd.set_video_events(self.get_supported_vod_events())
+
+            svcdlfiles = self.is_svc(dlfile, tdef)
+            if svcdlfiles is not None:
+                newd.set_video_event_callback(self.sesscb_vod_event_callback, dlmode = DLMODE_SVC)
+                # Ric: svcdlfiles is an ordered list of svc layers
+                newd.set_selected_files(svcdlfiles)
+            else:
+                newd.set_video_event_callback(self.sesscb_vod_event_callback)
+                if tdef.is_multifile_torrent():
+                    newd.set_selected_files([dlfile])
+
+            print >>sys.stderr,"main: Restarting existing Download",`infohash`
+            newd.restart()
+
+        self.downloads_in_vodmode.add(newd)
+
+        print >>sys.stderr,"main: Saving content to",newd.get_dest_files()
+        return newd
+
+
+    def sesscb_vod_event_callback(self,d,event,params):
+        # Hook for VOD events from the Session thread; no-op in the base
+        # class, overridden by concrete player/plugin subclasses.
+        pass
+        
+    def get_supported_vod_events(self):
+        # Returns the list of VOD events the subclass supports; base class
+        # returns None (no events). Overridden by subclasses.
+        pass
+
+
+    #
+    # DownloadCache
+    #
+    def free_up_diskspace_by_downloads(self,infohash,needed):
+        # Ensure 'needed' bytes fit within DISKSPACE_LIMIT by removing the
+        # oldest cached Downloads (content included), never the one being
+        # played ('infohash'). Returns True once enough space is available
+        # (or the request exceeds the limit entirely), False if removing
+        # everything still was not enough.
+        
+        if DEBUG:
+            print >> sys.stderr,"main: free_up: needed",needed,DISKSPACE_LIMIT
+        if needed > DISKSPACE_LIMIT:
+            # Not cleaning out whole cache for bigguns
+            if DEBUG:
+                print >> sys.stderr,"main: free_up: No cleanup for bigguns"
+            return True 
+        
+        inuse = 0L
+        timelist = []
+        dlist = self.s.get_downloads()
+        for d in dlist:
+            hisinfohash = d.get_def().get_infohash()
+            if infohash == hisinfohash:
+                # Don't delete the torrent we want to play
+                continue
+            destfiles = d.get_dest_files()
+            if DEBUG:
+                print >> sys.stderr,"main: free_up: Downloaded content",`destfiles`
+            
+            dinuse = 0L
+            for (filename,savepath) in destfiles:
+                stat = os.stat(savepath)
+                dinuse += stat.st_size
+            inuse += dinuse
+            # NOTE(review): 'stat' here is the loop variable from the LAST
+            # destfile iteration; if destfiles is empty this raises a
+            # NameError (or silently reuses the previous Download's stat),
+            # so the ctime used for age-sorting can be wrong -- fix upstream.
+            timerec = (stat.st_ctime,dinuse,d)
+            timelist.append(timerec)
+            
+        if inuse+needed < DISKSPACE_LIMIT:
+            # Enough available, done.
+            if DEBUG:
+                print >> sys.stderr,"main: free_up: Enough avail",inuse
+            return True
+        
+        # Policy: remove oldest till sufficient
+        timelist.sort()
+        if DEBUG:
+            print >> sys.stderr,"main: free_up: Found",timelist,"in dest dir"
+        
+        got = 0L
+        for ctime,dinuse,d in timelist:
+            print >> sys.stderr,"main: free_up: Removing",`d.get_def().get_name_as_unicode()`,"to free up diskspace, t",ctime
+            self.s.remove_download(d,removecontent=True)
+            got += dinuse
+            if got > needed:
+                return True
+        # Deleted all, still no space:
+        return False
+        
+        
+    #
+    # Process periodically reported DownloadStates
+    #
+    def sesscb_states_callback(self,dslist):
+        """ Called by Session thread """
+        # Periodic DownloadState pump: prints stats roughly every 5s, then
+        # hands the list to the GUI thread via wx.CallAfter. Returns
+        # (interval, getpeerlist): poll again in 1.0s, and request the
+        # detailed peer list only every 10th call.
+
+        #print >>sys.stderr,"bg: sesscb_states_callback",currentThread().getName()
+
+        # Display some stats
+        if (int(time.time()) % 5) == 0:
+            for ds in dslist:
+                d = ds.get_download()
+                print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \
+                    (d.get_def().get_name(), \
+                     dlstatus_strings[ds.get_status()], \
+                     ds.get_progress() * 100, \
+                     ds.get_error(), \
+                     ds.get_current_speed(UPLOAD), \
+                     ds.get_current_speed(DOWNLOAD))
+        
+        # Arno: we want the prebuf stats every second, and we want the
+        # detailed peerlist, needed for research stats. Getting them every
+        # second may be too expensive, so get them every 10.
+        #
+        # haspeerlist lags getpeerlist by one cycle: the peerlist requested
+        # at count%10==0 is present in the states delivered on the next call.
+        self.getpeerlistcount += 1
+        getpeerlist = (self.getpeerlistcount % 10) == 0
+        haspeerlist =  (self.getpeerlistcount % 10) == 1
+
+        # Arno: delegate to GUI thread. This makes some things (especially
+        #access control to self.videoFrame easier
+        #self.gui_states_callback(dslist)
+        #print >>sys.stderr,"bg: sesscb_states_callback: calling GUI",currentThread().getName()
+        wx.CallAfter(self.gui_states_callback_wrapper,dslist,haspeerlist)
+        
+        #print >>sys.stderr,"main: SessStats:",self.getpeerlistcount,getpeerlist,haspeerlist
+        return (1.0,getpeerlist) 
+
+
+    def gui_states_callback_wrapper(self,dslist,haspeerlist):
+        # Shield the GUI thread from exceptions in gui_states_callback;
+        # errors are logged, never propagated into the wx event loop.
+        try:
+            self.gui_states_callback(dslist,haspeerlist)
+        except:
+            print_exc()
+
+
+    def gui_states_callback(self,dslist,haspeerlist):
+        """ Called by *GUI* thread.
+        CAUTION: As this method is called by the GUI thread don't to any 
+        time-consuming stuff here! """
+        # Aggregates speeds/peer counts, updates the systray tooltip, applies
+        # seeding rate limits, and flips to seeding mode once all playing
+        # downloads have completed.
+        # NOTE(review): return shapes are inconsistent -- the early returns
+        # yield ([],0,0) while the final return's third element is the
+        # totalspeed *dict*; callers must handle both.
+        
+        #print >>sys.stderr,"main: Stats:"
+        if self.shuttingdown:
+            return ([],0,0)
+        
+        # See which Download is currently playing
+        playermode = self.playermode
+
+        totalspeed = {}
+        totalspeed[UPLOAD] = 0.0
+        totalspeed[DOWNLOAD] = 0.0
+        totalhelping = 0
+
+        # When not playing, display stats for all Downloads and apply rate control.
+        if playermode == DLSTATUS_SEEDING:
+            if DEBUG:
+                for ds in dslist:
+                    print >>sys.stderr,"main: Stats: Seeding: %s %.1f%% %s" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error())
+            self.ratelimit_callback(dslist)
+            
+        # Calc total dl/ul speed and find DownloadStates for playing Downloads
+        playing_dslist = []
+        for ds in dslist:
+            if ds.get_download() in self.downloads_in_vodmode:
+                playing_dslist.append(ds)
+            elif DEBUG and playermode == DLSTATUS_DOWNLOADING:
+                print >>sys.stderr,"main: Stats: Waiting: %s %.1f%% %s" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error())
+            
+            for dir in [UPLOAD,DOWNLOAD]:
+                totalspeed[dir] += ds.get_current_speed(dir)
+            totalhelping += ds.get_num_peers()
+
+        # Report statistics on all downloads to research server, every 10 secs
+        # if haspeerlist:
+        #     try:
+        #         for ds in dslist:
+        #             self.reporter.report_stat(ds)
+        #     except:
+        #         print_exc()
+
+        # Set systray icon tooltip. This has limited size on Win32!
+        txt = self.appname+' '+self.appversion+'\n\n'
+        txt += 'DL: %.1f\n' % (totalspeed[DOWNLOAD])
+        txt += 'UL:   %.1f\n' % (totalspeed[UPLOAD])
+        txt += 'Helping: %d\n' % (totalhelping) 
+        #print >>sys.stderr,"main: ToolTip summary",txt
+        self.OnSetSysTrayTooltip(txt)
+
+        # No playing Downloads        
+        if len(playing_dslist) == 0:
+            return ([],0,0)
+        elif DEBUG and playermode == DLSTATUS_DOWNLOADING:
+            for ds in playing_dslist:
+                print >>sys.stderr,"main: Stats: DL: %s %.1f%% %s dl %.1f ul %.1f n %d" % (dlstatus_strings[ds.get_status()],100.0*ds.get_progress(),ds.get_error(),ds.get_current_speed(DOWNLOAD),ds.get_current_speed(UPLOAD),ds.get_num_peers())
+
+        # If we're done playing we can now restart any previous downloads to 
+        # seed them.
+        # NOTE(review): the next four lines carry irregular indentation
+        # (extra space before 'if', over-indented call); behavior is as
+        # intended but this is fragile -- worth normalizing upstream.
+        if playermode != DLSTATUS_SEEDING:
+            playing_seeding_count = 0
+            for ds in playing_dslist:
+                 if ds.get_status() == DLSTATUS_SEEDING:
+                    playing_seeding_count += 1
+            if len(playing_dslist) == playing_seeding_count: 
+                    self.restart_other_downloads()
+
+        # cf. 25 Mbps cap to reduce CPU usage and improve playback on slow machines
+        # Arno: on some torrents this causes VLC to fail to tune into the video
+        # although it plays audio???
+        #ds.get_download().set_max_speed(DOWNLOAD,1500)
+    
+        
+        return (playing_dslist,totalhelping,totalspeed) 
+
+
+    def OnSetSysTrayTooltip(self,txt):         
+        # Forward the stats summary to the systray icon, if one exists.
+        if self.tbicon is not None:
+            self.tbicon.set_icon_tooltip(txt)
+
+    #
+    # Download Management
+    #
+    def restart_other_downloads(self):
+        """ Called by GUI thread """
+        # Switch the player into seeding mode: install a fresh rate limiter,
+        # apply the user's upload cap, and restart every cached Download
+        # that is not currently being played.
+        if self.shuttingdown:
+            return
+        print >>sys.stderr,"main: Restarting other downloads"
+        self.playermode = DLSTATUS_SEEDING
+        self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager()
+        self.set_ratelimits()
+
+        dlist = self.s.get_downloads()
+        for d in dlist:
+            if d not in self.downloads_in_vodmode:
+                d.set_mode(DLMODE_NORMAL) # checkpointed torrents always restarted in DLMODE_NORMAL, just make extra sure
+                d.restart() 
+
+
+    def remove_downloads_in_vodmode_if_not_complete(self):
+        # Schedule a state check on each playing Download; the callback
+        # decides per-Download whether to keep or remove it (see below).
+        print >>sys.stderr,"main: Removing playing download if not complete"
+        for d in self.downloads_in_vodmode:
+            d.set_state_callback(self.sesscb_remove_playing_callback)
+        
+    def sesscb_remove_playing_callback(self,ds):
+        """ Called by SessionThread """
+        # Keep a playing Download if it is >=90% downloaded or seeding;
+        # otherwise remove it (content included). Returns (-1.0,False):
+        # do not call this state callback again.
+        
+        print >>sys.stderr,"main: sesscb_remove_playing_callback: status is",dlstatus_strings[ds.get_status()],"progress",ds.get_progress()
+        
+        d = ds.get_download()
+        name = d.get_def().get_name()
+        if (ds.get_status() == DLSTATUS_DOWNLOADING and ds.get_progress() >= 0.9) or ds.get_status() == DLSTATUS_SEEDING:
+            pass
+            print >>sys.stderr,"main: sesscb_remove_playing_callback: voting for KEEPING",`name`            
+        else:
+            print >>sys.stderr,"main: sesscb_remove_playing_callback: voting for REMOVING",`name`
+            if self.shuttingdown:
+                # Arno, 2010-04-23: Do it now ourselves, wx won't do it anymore. Saves
+                # hashchecking on sparse file on Linux.
+                self.remove_playing_download(d)
+                
+            # NOTE(review): when shuttingdown, removal runs TWICE (direct
+            # call above AND this CallAfter); the second attempt only
+            # survives because remove_playing_download swallows the
+            # resulting exception. An 'else' seems intended here.
+            wx.CallAfter(self.remove_playing_download,d)
+        
+        return (-1.0,False)
+        
+
+    def remove_playing_download(self,d):
+        """ Called by MainThread """
+        # Remove Download d and its content from the Session; errors
+        # (e.g. already removed) are logged and ignored.
+        if self.s is not None:
+            print >>sys.stderr,"main: Removing incomplete download",`d.get_def().get_name_as_unicode()`
+            try:
+                self.s.remove_download(d,removecontent=True)
+                self.downloads_in_vodmode.remove(d)
+            except:
+                print_exc()
+
+    def stop_playing_download(self,d):
+        """ Called by MainThread """
+        # Stop (but keep) Download d and drop it from the playing set.
+        print >>sys.stderr,"main: Stopping download",`d.get_def().get_name_as_unicode()`
+        try:
+            d.stop()
+            self.downloads_in_vodmode.remove(d)
+        except:
+            print_exc()
+
+
+    #
+    # Rate limiter
+    #
+    def set_ratelimits(self):
+        # Push the user-configured 'total_max_upload_rate' (KB/s) into the
+        # rate limiter, both as global upload cap and seed-upload cap.
+        # No-op if no rate limiter is installed.
+        uploadrate = float(self.playerconfig['total_max_upload_rate'])
+        print >>sys.stderr,"main: set_ratelimits: Setting max upload rate to",uploadrate
+        if self.ratelimiter is not None:
+            self.ratelimiter.set_global_max_speed(UPLOAD,uploadrate)
+            self.ratelimiter.set_global_max_seedupload_speed(uploadrate)
+
+    def ratelimit_callback(self,dslist):
+        """ When the player is in seeding mode, limit the used upload to
+        the limit set by the user via the options menu. 
+        Called by *GUI* thread """
+        if self.ratelimiter is None:
+            return
+
+        # Adjust speeds once every 4 seconds
+        # (this callback fires once per stats cycle; only every 4th call
+        # actually feeds the limiter and readjusts speeds)
+        adjustspeeds = False
+        if self.ratelimit_update_count % 4 == 0:
+            adjustspeeds = True
+        self.ratelimit_update_count += 1
+        
+        if adjustspeeds:
+            self.ratelimiter.add_downloadstatelist(dslist)
+            self.ratelimiter.adjust_speeds()
+
+
+    #
+    # Player config file
+    # 
+    def load_playerconfig(self,state_dir):
+        # Load the pickled player config from <state_dir>/playerconf.pickle;
+        # on any failure fall back to defaults (upload cap only).
+        # NOTE(review): 'pickle' is not in this module's visible imports --
+        # presumably it arrives via 'from BaseLib.Core.API import *';
+        # verify, or add an explicit import upstream.
+        self.playercfgfilename = os.path.join(state_dir,'playerconf.pickle')
+        self.playerconfig = None
+        try:
+            f = open(self.playercfgfilename,"rb")
+            self.playerconfig = pickle.load(f)
+            f.close()
+        except:
+            print_exc()
+            self.playerconfig = {}
+            self.playerconfig['total_max_upload_rate'] = DEFAULT_MAX_UPLOAD_SEED_WHEN_SEEDING # KB/s
+
+    def save_playerconfig(self):
+        # Best-effort pickle of the current player config; errors logged.
+        try:
+            f = open(self.playercfgfilename,"wb")
+            pickle.dump(self.playerconfig,f)
+            f.close()
+        except:
+            print_exc()
+            
+    def set_playerconfig(self,key,value):
+        # Update one config entry; changing the upload cap takes effect
+        # immediately via set_ratelimits().
+        self.playerconfig[key] = value
+        
+        if key == 'total_max_upload_rate':
+            try:
+                self.set_ratelimits()
+            except:
+                print_exc()
+    
+    def get_playerconfig(self,key):
+        # Raises KeyError for unknown keys.
+        return self.playerconfig[key]
+
+
+    #
+    # Shutdown
+    #
+    def OnExit(self):
+        # Orderly shutdown: remove incomplete playing downloads, give
+        # Session threads time to finish, shut the Session down, tear down
+        # the systray icon, then leave the wx main loop.
+        print >>sys.stderr,"main: ONEXIT",currentThread().getName()
+        self.shuttingdown = True
+        self.remove_downloads_in_vodmode_if_not_complete()
+
+        # To let Threads in Session finish their business before we shut it down.
+        time.sleep(2) 
+        
+        if self.s is not None:
+            self.s.shutdown(hacksessconfcheckpoint=False)
+        
+        if self.tbicon is not None:
+            self.tbicon.RemoveIcon()
+            self.tbicon.Destroy()
+
+        # Diagnostic: list any threads that survived the shutdown.
+        ts = enumerate()
+        for t in ts:
+            print >>sys.stderr,"main: ONEXIT: Thread still running",t.getName(),"daemon",t.isDaemon()
+        
+        self.ExitMainLoop()
+
+    
+    def clear_session_state(self):
+        """ Try to fix apps by doing hard reset. Called from systray menu """
+        # Removes all Downloads (content included), deletes the download
+        # dir, the checkpoint dir and the session config file, then exits
+        # the process. Every step is best-effort.
+        try:
+            if self.s is not None:
+                dlist = self.s.get_downloads()
+                for d in dlist:
+                    self.s.remove_download(d,removecontent=True)
+        except:
+            print_exc()
+        time.sleep(1) # give network thread time to do stuff
+        try:
+                dldestdir = self.get_default_destdir()
+                shutil.rmtree(dldestdir,True) # ignore errors
+        except:
+            print_exc()
+        try:
+                dlcheckpointsdir = os.path.join(self.s.get_state_dir(),STATEDIR_DLPSTATE_DIR)
+                shutil.rmtree(dlcheckpointsdir,True) # ignore errors
+        except:
+            print_exc()
+        try:
+                cfgfilename = os.path.join(self.s.get_state_dir(),STATEDIR_SESSCONFIG)
+                os.remove(cfgfilename)
+        except:
+            print_exc()
+
+        self.s = None # HARD EXIT
+        #self.OnExit()
+        sys.exit(0) # DIE HARD 4.0
+
+
+    def show_error(self,msg):
+        # Modal error dialog titled "<appname> Error". The ShowModal result
+        # is intentionally unused (only an OK button is offered).
+        dlg = wx.MessageDialog(None, msg, self.appname+" Error", wx.OK|wx.ICON_ERROR)
+        result = dlg.ShowModal()
+        dlg.Destroy()
+        
+    
+    def get_default_destdir(self):
+        # All player downloads land under <state_dir>/downloads.
+        return os.path.join(self.s.get_state_dir(),'downloads')
+
+    
+    def is_svc(self, dlfile, tdef):
+        """ Ric: check if it as an SVC download. If it is add the enhancement 
+        layers to the dlfiles
+        """
+        # Returns [dlfile, layer1, layer2, ...] when the torrent carries
+        # SVC enhancement-layer files (matched by svcextdefaults extensions
+        # and equal length to the base file), else None.
+        svcfiles = None
+        
+        if tdef.is_multifile_torrent():
+            enhancement = tdef.get_files(exts=svcextdefaults)
+            # Ric: order the enhancement layer in the svcfiles list
+            # if the list of enhancements is not empty
+            if enhancement:
+                enhancement.sort()
+                if tdef.get_length(enhancement[0]) == tdef.get_length(dlfile):
+                    svcfiles = [dlfile]
+                    svcfiles.extend(enhancement)
+                
+        return svcfiles
+
+    #
+    # InstanceConnectionHandler
+    #
+    def i2ithread_readlinecallback(self,ic,cmd):    
+        # Handle one command line from an i2i control connection; no-op in
+        # the base class, overridden by subclasses.
+        pass
+    
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/Info.plist b/instrumentation/next-share/BaseLib/Player/Build/Mac/Info.plist
new file mode 100644 (file)
index 0000000..b1fb4ff
--- /dev/null
@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+       <key>CFBundleDevelopmentRegion</key>
+       <string>English</string>
+       <key>CFBundleDocumentTypes</key>
+       <array>
+      <dict>
+         <key>CFBundleTypeExtensions</key>
+         <array>
+            <string>tstream</string>
+         </array>
+         <key>CFBundleTypeIconFile</key>
+         <string>SwarmPlayerDoc</string>
+         <key>CFBundleTypeMIMETypes</key>
+         <array>
+            <string>application/x-tribler-stream</string>
+         </array>
+         <key>CFBundleTypeName</key>
+         <string>Tribler Stream Meta-Info</string>
+         <key>CFBundleTypeOSTypes</key>
+         <array>
+            <string>BTMF</string>
+         </array>
+         <key>CFBundleTypeRole</key>
+         <string>Viewer</string>
+         <key>NSDocumentClass</key>
+         <string>DownloadDocument</string>
+      </dict>
+               <dict>
+                       <key>CFBundleTypeOSTypes</key>
+                       <array>
+                               <string>****</string>
+                               <string>fold</string>
+                               <string>disk</string>
+                       </array>
+                       <key>CFBundleTypeRole</key>
+                       <string>Viewer</string>
+               </dict>
+       </array>
+       <key>CFBundleExecutable</key>
+       <string>SwarmPlayer</string>
+       <key>CFBundleIconFile</key>
+       <string>swarmplayer.icns</string>
+       <key>CFBundleIdentifier</key>
+       <string>SwarmPlayer</string>
+       <key>CFBundleInfoDictionaryVersion</key>
+       <string>6.0</string>
+       <key>CFBundleName</key>
+       <string>SwarmPlayer</string>
+       <key>CFBundlePackageType</key>
+       <string>APPL</string>
+       <key>CFBundleSignature</key>
+       <string>????</string>
+</dict>
+</plist>
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/Makefile b/instrumentation/next-share/BaseLib/Player/Build/Mac/Makefile
new file mode 100644 (file)
index 0000000..cc99c6c
--- /dev/null
@@ -0,0 +1,116 @@
+# Building on Mac OS/X requires:
+# * Python 2.5
+# * wxPython 2.8-unicode
+# * py2app 0.3.6
+# * swig, subversion (available through MacPorts)
+# * XCode 2.4+
+#
+# Use lower versions at your own risk.
+
+APPNAME=SwarmPlayer
+PYTHON_VER=2.5
+PWD:=${shell pwd}
+ARCH:=${shell arch}
+
+PYTHON=python${PYTHON_VER}
+
+all:   clean SwarmPlayer-${ARCH}.dmg
+
+clean:
+       rm -rf build/imagecontents/ ${APPNAME}-${ARCH}.dmg
+
+.PHONY:                all clean dirs
+
+# ----- SwarmPlayer 
+
+APPRES=build/imagecontents/${APPNAME}.app/Contents/Resources
+
+SRCDIR=../../../..
+
+build/imagecontents/:
+       rm -rf $@
+       mkdir -p $@
+
+       cd ${SRCDIR} && DYLD_LIBRARY_PATH=macbinaries PYTHONPATH=.:macbinaries ${PYTHON} -OO - < ${PWD}/setuptriblermac.py py2app
+       mv ${SRCDIR}/dist/* $@
+
+       # Thin everything for this architecture. Some things ship Universal (Python, wxPython, ...) and
+       # others get a stub for the other architecture (things built by Universal Python)
+       for i in `find build/imagecontents`; do ./smart_lipo_thin $$i; done
+
+        # Replace any rogue references to local ones. For instance, some libraries are accidently
+        # linked against /usr/local/lib/* or /opt/local/lib. Py2app puts them in the Frameworks dir,
+        # but fails to correct the references in the binaries.
+       #./process_libs build/imagecontents | bash -
+
+       # Background
+       mkdir -p $@/.background
+       cp background.png $@/.background
+
+       # Volume Icon
+       cp VolumeIcon.icns $@/.VolumeIcon.icns
+
+       # Shortcut to /Applications
+       ln -s /Applications $@/Applications
+
+       touch $@
+
+${APPNAME}-${ARCH}.dmg:                build/imagecontents/ SLAResources.rsrc
+       rm -f $@
+       mkdir -p build/temp
+
+       # create image
+       hdiutil create -srcfolder $< -format UDRW -scrub -volname ${APPNAME} $@
+
+       # open it
+       hdiutil attach -readwrite -noverify -noautoopen $@ -mountpoint build/temp/mnt
+
+       # make sure root folder is opened when image is
+       bless --folder build/temp/mnt --openfolder build/temp/mnt
+       # hack: wait for completion
+       sleep 1
+
+       # position items
+       # oddly enough, 'set f .. as alias' can fail, but a reboot fixes that
+       osascript -e "tell application \"Finder\"" \
+       -e "   set f to POSIX file (\"${PWD}/build/temp/mnt\" as string) as alias" \
+       -e "   tell folder f" \
+       -e "       open" \
+       -e "       tell container window" \
+       -e "          set toolbar visible to false" \
+       -e "          set statusbar visible to false" \
+       -e "          set current view to icon view" \
+       -e "          delay 1 -- Sync" \
+       -e "          set the bounds to {50, 100, 1000, 1000} -- Big size so the finder won't do silly things" \
+       -e "       end tell" \
+       -e "       delay 1 -- Sync" \
+       -e "       set icon size of the icon view options of container window to 128" \
+       -e "       set arrangement of the icon view options of container window to not arranged" \
+       -e "       set background picture of the icon view options of container window to file \".background:background.png\"" \
+       -e "       set position of item \"${APPNAME}.app\" to {150, 140}" \
+       -e "       set position of item \"Applications\" to {410, 140}" \
+       -e "       set the bounds of the container window to {50, 100, 600, 400}" \
+       -e "       update without registering applications" \
+       -e "       delay 5 -- Sync" \
+       -e "       close" \
+       -e "   end tell" \
+       -e "   -- Sync" \
+       -e "   delay 5" \
+       -e "end tell" || true
+
+       # turn on custom volume icon
+       /Developer/Tools/SetFile -a C build/temp/mnt || true
+
+       # close
+       hdiutil detach build/temp/mnt || true
+
+       # make read-only
+       mv $@ build/temp/rw.dmg
+       hdiutil convert build/temp/rw.dmg -format UDZO -imagekey zlib-level=9 -o $@
+       rm -f build/temp/rw.dmg
+
+       # add EULA
+       hdiutil unflatten $@
+       /Developer/Tools/DeRez -useDF SLAResources.rsrc > build/temp/sla.r
+       /Developer/Tools/Rez -a build/temp/sla.r -o $@
+       hdiutil flatten $@
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/SLAResources.rsrc b/instrumentation/next-share/BaseLib/Player/Build/Mac/SLAResources.rsrc
new file mode 100644 (file)
index 0000000..162a889
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/SLAResources.rsrc differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/TriblerDoc.icns b/instrumentation/next-share/BaseLib/Player/Build/Mac/TriblerDoc.icns
new file mode 100644 (file)
index 0000000..8f9a1c2
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/TriblerDoc.icns differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/VolumeIcon.icns b/instrumentation/next-share/BaseLib/Player/Build/Mac/VolumeIcon.icns
new file mode 100644 (file)
index 0000000..8a9d383
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/VolumeIcon.icns differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/background.png b/instrumentation/next-share/BaseLib/Player/Build/Mac/background.png
new file mode 100644 (file)
index 0000000..fcd940a
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/background.png differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.png b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.png
new file mode 100644 (file)
index 0000000..586b2d3
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.png differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.psd b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.psd
new file mode 100644 (file)
index 0000000..d8604dd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/appicon.psd differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_document.png b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_document.png
new file mode 100644 (file)
index 0000000..7a14bb3
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_document.png differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_volumeicon.png b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_volumeicon.png
new file mode 100644 (file)
index 0000000..5ca6d53
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/default_volumeicon.png differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.png b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.png
new file mode 100644 (file)
index 0000000..24b2b22
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.png differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.psd b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.psd
new file mode 100644 (file)
index 0000000..d3fcc79
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/dmgicon.psd differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.png b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.png
new file mode 100644 (file)
index 0000000..e5c1ac6
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.png differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.psd b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.psd
new file mode 100644 (file)
index 0000000..b343238
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/icon_sources/docicon.psd differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/mkinstalldirs b/instrumentation/next-share/BaseLib/Player/Build/Mac/mkinstalldirs
new file mode 100755 (executable)
index 0000000..d2d5f21
--- /dev/null
@@ -0,0 +1,111 @@
+#! /bin/sh
+# mkinstalldirs --- make directory hierarchy
+# Author: Noah Friedman <friedman@prep.ai.mit.edu>
+# Created: 1993-05-16
+# Public domain
+
+errstatus=0
+dirmode=""
+
+usage="\
+Usage: mkinstalldirs [-h] [--help] [-m mode] dir ..."
+
+# process command line arguments
+while test $# -gt 0 ; do
+  case $1 in
+    -h | --help | --h*)         # -h for help
+      echo "$usage" 1>&2
+      exit 0
+      ;;
+    -m)                         # -m PERM arg
+      shift
+      test $# -eq 0 && { echo "$usage" 1>&2; exit 1; }
+      dirmode=$1
+      shift
+      ;;
+    --)                         # stop option processing
+      shift
+      break
+      ;;
+    -*)                         # unknown option
+      echo "$usage" 1>&2
+      exit 1
+      ;;
+    *)                          # first non-opt arg
+      break
+      ;;
+  esac
+done
+
+for file
+do
+  if test -d "$file"; then
+    shift
+  else
+    break
+  fi
+done
+
+case $# in
+  0) exit 0 ;;
+esac
+
+case $dirmode in
+  '')
+    if mkdir -p -- . 2>/dev/null; then
+      echo "mkdir -p -- $*"
+      exec mkdir -p -- "$@"
+    fi
+    ;;
+  *)
+    if mkdir -m "$dirmode" -p -- . 2>/dev/null; then
+      echo "mkdir -m $dirmode -p -- $*"
+      exec mkdir -m "$dirmode" -p -- "$@"
+    fi
+    ;;
+esac
+
+for file
+do
+  set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'`
+  shift
+
+  pathcomp=
+  for d
+  do
+    pathcomp="$pathcomp$d"
+    case $pathcomp in
+      -*) pathcomp=./$pathcomp ;;
+    esac
+
+    if test ! -d "$pathcomp"; then
+      echo "mkdir $pathcomp"
+
+      mkdir "$pathcomp" || lasterr=$?
+
+      if test ! -d "$pathcomp"; then
+       errstatus=$lasterr
+      else
+       if test ! -z "$dirmode"; then
+         echo "chmod $dirmode $pathcomp"
+         lasterr=""
+         chmod "$dirmode" "$pathcomp" || lasterr=$?
+
+         if test ! -z "$lasterr"; then
+           errstatus=$lasterr
+         fi
+       fi
+      fi
+    fi
+
+    pathcomp="$pathcomp/"
+  done
+done
+
+exit $errstatus
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# End:
+# mkinstalldirs ends here
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/process_libs b/instrumentation/next-share/BaseLib/Player/Build/Mac/process_libs
new file mode 100755 (executable)
index 0000000..d7a99a1
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+TARGETDIR=$1
+
+# process dependencies and their exact locations of all libs
+
+cd $TARGETDIR
+
+for i in `find . -name "*.dylib" -or -name "*.so"`
+do
+  otool -L $i | perl -ne '
+    if(m#/'`basename $i`' #) {
+      # skip references to self
+
+      next;
+    }
+
+    if(m#(/usr/local/lib/([^ /]+))#) {
+      # make reference to /usr/local/lib/* local
+
+      print "# Reference to $1 found in '$i'\n";
+      print "chmod a+w '$i'\n";
+      print "install_name_tool -change $1 \@executable_path/../Frameworks/$2 '$i'\n";
+    }
+
+    if(m#(/opt/local/lib/([^ /]+))#) {
+      # make reference to /opt/local/lib/* local
+
+      print "# Reference to $1 found in '$i'\n";
+      print "chmod a+w '$i'\n";
+      print "install_name_tool -change $1 \@executable_path/../Frameworks/$2 '$i'\n";
+    }
+  '
+done
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/setuptriblermac.py b/instrumentation/next-share/BaseLib/Player/Build/Mac/setuptriblermac.py
new file mode 100644 (file)
index 0000000..0cf2143
--- /dev/null
@@ -0,0 +1,120 @@
+# ---------------
+# This script builds build/SwarmPlayer.app
+#
+# Meant to be called from Tribler/Player/Build/Mac/Makefile
+# ---------------
+
+import py2app
+from distutils.util import get_platform
+import sys,os,platform,shutil
+from setuptools import setup
+
+from BaseLib.__init__ import LIBRARYNAME
+
+# modules to include into bundle
+includeModules=["encodings.hex_codec","encodings.utf_8","encodings.latin_1","xml.sax", "email.iterators"]
+
+# ----- some basic checks
+
+if __debug__:
+    print "WARNING: Non optimised python bytecode (.pyc) will be produced. Run with -OO instead to produce and bundle .pyo files."
+
+if sys.platform != "darwin":
+    print "WARNING: You do not seem to be running Mac OS/X." 
+
+# ----- import and verify wxPython
+
+import wxversion
+
+wxversion.select('2.8-unicode')
+
+import wx
+
+v = wx.__version__
+
+if v < "2.6":
+    print "WARNING: You need wxPython 2.6 or higher but are using %s." % v
+
+if v < "2.8.4.2":
+    print "WARNING: wxPython before 2.8.4.2 could crash when loading non-present fonts. You are using %s." % v
+
+# ----- import and verify M2Crypto
+
+import M2Crypto
+import M2Crypto.m2
+if "ec_init" not in M2Crypto.m2.__dict__:
+    print "WARNING: Could not import specialistic M2Crypto (imported %s)" % M2Crypto.__file__
+
+# ----- import VLC
+
+#import vlc
+
+#vlc = vlc.MediaControl(["--plugin-path",os.getcwd()+"/macbinaries/vlc_plugins"])
+
+# =================
+# build SwarmPlayer.app
+# =================
+
+from plistlib import Plist
+
+def includedir( srcpath, dstpath = None ):
+    """ Recursive directory listing, filtering out svn files. """
+
+    total = []
+
+    cwd = os.getcwd()
+    os.chdir( srcpath )
+
+    if dstpath is None:
+        dstpath = srcpath
+
+    for root,dirs,files in os.walk( "." ):
+        if '.svn' in dirs:
+            dirs.remove('.svn')
+
+        for f in files:
+            total.append( (root,f) )
+
+    os.chdir( cwd )
+
+    # format: (targetdir,[file])
+    # so for us, (dstpath/filedir,[srcpath/filedir/filename])
+    return [("%s/%s" % (dstpath,root),["%s/%s/%s" % (srcpath,root,f)]) for root,f in total]
+
+def filterincludes( l, f ):
+    """ Return includes which pass filter f. """
+
+    return [(x,y) for (x,y) in l if f(y[0])]
+
+# ----- build the app bundle
+mainfile = os.path.join(LIBRARYNAME,'Player','swarmplayer.py')
+
+setup(
+    setup_requires=['py2app'],
+    name='SwarmPlayer',
+    app=[mainfile],
+    options={ 'py2app': {
+        'argv_emulation': True,
+        'includes': includeModules,
+        'excludes': ["Tkinter","Tkconstants","tcl"],
+        'iconfile': LIBRARYNAME+'/Player/Build/Mac/tribler.icns',
+        'plist': Plist.fromFile(LIBRARYNAME+'/Player/Build/Mac/Info.plist'),
+        'optimize': 2*int(not __debug__),
+        'resources':
+            [(LIBRARYNAME+"/Lang", [LIBRARYNAME+"/Lang/english.lang"]),
+             LIBRARYNAME+"/binary-LICENSE.txt", 
+             LIBRARYNAME+"/readme.txt",
+             LIBRARYNAME+"/Images/SwarmPlayerIcon.ico",
+             LIBRARYNAME+"/Player/Build/Mac/TriblerDoc.icns",
+           ]
+           # add images
+           + includedir( LIBRARYNAME+"/Images" )
+
+           # add VLC plugins
+           + includedir( "macbinaries/vlc_plugins" )
+
+           # add ffmpeg binary
+           + ["macbinaries/ffmpeg"]
+            ,
+    } }
+)
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/smart_lipo_merge b/instrumentation/next-share/BaseLib/Player/Build/Mac/smart_lipo_merge
new file mode 100755 (executable)
index 0000000..3097e61
--- /dev/null
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# syntax: smart_lipo_merge filenative fileforeign fileout
+#
+# merges two binaries, taking the respective architecture part in case the input is fat
+#
+
+NATIVE=$1
+FOREIGN=$2
+FILEOUT=$3
+
+ARCH1=i386
+ARCH2=ppc
+ARCH=`arch`
+if [ $ARCH = $ARCH1 ]
+then
+    FOREIGNARCH=$ARCH2
+else
+    FOREIGNARCH=$ARCH1
+fi
+
+if [ `lipo -info $NATIVE | cut -d\  -f1` != "Non-fat" ]
+then
+  echo native file is fat -- extracting $ARCH
+  lipo -thin $ARCH $NATIVE -output $NATIVE.$ARCH
+else
+  echo native file is thin -- using as is
+  cp $NATIVE $NATIVE.$ARCH
+fi
+
+if [ `lipo -info $FOREIGN | cut -d\  -f1` != "Non-fat" ]
+then
+  echo foreign file is fat -- extracting $FOREIGNARCH
+  lipo -thin $FOREIGNARCH $FOREIGN -output $FOREIGN.$FOREIGNARCH
+else
+  echo foreign file is thin -- using as is
+  cp $FOREIGN $FOREIGN.$FOREIGNARCH
+fi
+
+echo merging... 
+lipo -create $NATIVE.$ARCH $FOREIGN.$FOREIGNARCH -output $FILEOUT
+echo cleanup..
+rm $NATIVE.$ARCH
+rm $FOREIGN.$FOREIGNARCH
+
+
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/smart_lipo_thin b/instrumentation/next-share/BaseLib/Player/Build/Mac/smart_lipo_thin
new file mode 100755 (executable)
index 0000000..b6fd13d
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# syntax: smart_lipo_thin file
+#
+# extracts the native architecture part of the fat input file, or does nothing if input is thin
+#
+
+INPUT=$1
+ARCH=`arch`
+
+REPORT=`lipo -info $INPUT 2>&1 | cut -d\  -f1-5`
+if [ "$REPORT" == "Architectures in the fat file:" ]
+then
+  echo thinning `basename $INPUT`
+  lipo -thin $ARCH $INPUT -output $INPUT.tmp
+  rm -f $INPUT
+  mv $INPUT.tmp $INPUT
+fi
+
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/tribler.icns b/instrumentation/next-share/BaseLib/Player/Build/Mac/tribler.icns
new file mode 100644 (file)
index 0000000..8fd54eb
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Mac/tribler.icns differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Mac/vlc-macosx-compile.patch b/instrumentation/next-share/BaseLib/Player/Build/Mac/vlc-macosx-compile.patch
new file mode 100644 (file)
index 0000000..a09a3a6
--- /dev/null
@@ -0,0 +1,509 @@
+Index: modules/gui/macosx/voutqt.m
+===================================================================
+--- modules/gui/macosx/voutqt.m        (revision 20403)
++++ modules/gui/macosx/voutqt.m        (working copy)
+@@ -39,6 +39,7 @@
+ #include "intf.h"
+ #include "vout.h"
++#include <vlc/libvlc_structures.h>
+ #define QT_MAX_DIRECTBUFFERS 10
+ #define VL_MAX_DISPLAYS 16
+@@ -138,13 +139,22 @@
+     p_vout->pf_display = DisplayVideo;
+     p_vout->pf_control = ControlVideo;
+-    /* Are we embedded?  If so, the drawable value will be a pointer to a
++    /* Are we embedded?  If so, the drawable value should be a pointer to a
+      * CGrafPtr that we're expected to use */
+     var_Get( p_vout->p_libvlc, "drawable", &value_drawable );
+-    if( value_drawable.i_int != 0 )
++    if( value_drawable.i_int != 0 ) {
++        vlc_value_t value_drawable_type;
++
++        var_Get( p_vout->p_libvlc, "macosx-drawable-type", &value_drawable_type );
++        if( value_drawable_type.i_int != VLCDrawableCGrafPtr ) {
++            msg_Err( p_vout, "QT interface requires a CGrafPtr when embedded" );
++            return( 1 );
++        }
++
+         p_vout->p_sys->b_embedded = VLC_TRUE;
+-    else
++    } else {
+         p_vout->p_sys->b_embedded = VLC_FALSE;
++    }
+     p_vout->p_sys->b_cpu_has_simd =
+         vlc_CPU() & (CPU_CAPABILITY_ALTIVEC|CPU_CAPABILITY_MMXEXT);
+Index: modules/gui/macosx/voutgl.m
+===================================================================
+--- modules/gui/macosx/voutgl.m        (revision 20403)
++++ modules/gui/macosx/voutgl.m        (working copy)
+@@ -35,6 +35,7 @@
+ #include <string.h>                                            /* strerror() */
+ #include <vlc_keys.h>
++#include <vlc/libvlc_structures.h>
+ #include "intf.h"
+ #include "vout.h"
+@@ -43,6 +44,7 @@
+ #include <OpenGL/gl.h>
+ #include <AGL/agl.h>
++#include <Carbon/Carbon.h>
+ /*****************************************************************************
+  * VLCGLView interface
+@@ -67,13 +69,18 @@
+     /* Mozilla plugin-related variables */
+     vlc_bool_t          b_embedded;
+     AGLContext          agl_ctx;
+-    AGLDrawable         agl_drawable;
+     int                 i_offx, i_offy;
+     int                 i_width, i_height;
+     WindowRef           theWindow;
+     WindowGroupRef      winGroup;
+     vlc_bool_t          b_clipped_out;
+-    Rect                clipBounds, viewBounds;             
++    Rect                clipBounds, viewBounds;
++
++    libvlc_macosx_drawable_type_t drawable_type;
++    union {
++        CGrafPtr        CGrafPtr;
++        ControlRef      ControlRef;
++    } drawable;
+ };
+ /*****************************************************************************
+@@ -462,17 +469,90 @@
+ static void aglReshape( vout_thread_t * p_vout );
+ static OSStatus WindowEventHandler(EventHandlerCallRef nextHandler, EventRef event, void *userData);
+-static int aglInit( vout_thread_t * p_vout )
++/* returns the bounds of the drawable control/window */
++static Rect aglGetBounds( vout_thread_t * p_vout )
+ {
++        WindowRef win;
++        Rect rect;
++
++        switch( p_vout->p_sys->drawable_type ) {
++            case VLCDrawableCGrafPtr:
++                win = GetWindowFromPort( p_vout->p_sys->drawable.CGrafPtr );
++                GetWindowPortBounds( win, &rect );
++                break;
++
++            case VLCDrawableControlRef:
++                win = GetControlOwner( p_vout->p_sys->drawable.ControlRef );
++                GetControlBounds( p_vout->p_sys->drawable.ControlRef, &rect );
++                break;
++        }
++
++        return rect;
++}
++
++/* returns the window containing the drawable area */
++static WindowRef aglGetWindow( vout_thread_t * p_vout )
++{
++        WindowRef window;
++
++        switch( p_vout->p_sys->drawable_type ) {
++            case VLCDrawableCGrafPtr:
++                window = GetWindowFromPort( p_vout->p_sys->drawable.CGrafPtr );
++                break;
++
++            case VLCDrawableControlRef:
++                window = GetControlOwner( p_vout->p_sys->drawable.ControlRef );
++                break;
++        }
++
++        return window;
++}
++
++/* gets the graphics port associated with our drawing area */
++static CGrafPtr aglGetPort( vout_thread_t * p_vout )
++{
++        CGrafPtr port;
++
++        switch( p_vout->p_sys->drawable_type ) {
++            case VLCDrawableCGrafPtr:
++                port = p_vout->p_sys->drawable.CGrafPtr;
++                break;
++
++            case VLCDrawableControlRef:
++                port = GetWindowPort( GetControlOwner(
++                           p_vout->p_sys->drawable.ControlRef
++                       ) );
++                break;
++        }
++
++        return port;
++}
++
++/* (re)process "drawable-*" and "macosx-drawable-type" variables. `drawable' is a
++   parameter to allow it to be overridden (REPARENT) */
++static int aglProcessDrawable( vout_thread_t * p_vout, libvlc_drawable_t drawable )
++{
+     vlc_value_t val;
++    vlc_value_t val_type;
++    AGLDrawable agl_drawable;
++    Rect clipBounds,viewBounds;
+-    Rect viewBounds;    
+-    Rect clipBounds;
+-    
+-    var_Get( p_vout->p_libvlc, "drawable", &val );
+-    p_vout->p_sys->agl_drawable = (AGLDrawable)val.i_int;
+-    aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable);
++    var_Get( p_vout->p_libvlc, "macosx-drawable-type", &val_type );
++    p_vout->p_sys->drawable_type = val_type.i_int;
++    switch( val_type.i_int ) {
++        case VLCDrawableCGrafPtr:
++            p_vout->p_sys->drawable.CGrafPtr = (CGrafPtr)drawable;
++            break;
++
++        case VLCDrawableControlRef:
++            p_vout->p_sys->drawable.ControlRef = (ControlRef)drawable;
++            break;
++    }
++
++    agl_drawable = (AGLDrawable)aglGetPort( p_vout );
++    aglSetDrawable(p_vout->p_sys->agl_ctx, agl_drawable);
++
+     var_Get( p_vout->p_libvlc, "drawable-view-top", &val );
+     viewBounds.top = val.i_int;
+     var_Get( p_vout->p_libvlc, "drawable-view-left", &val );
+@@ -481,15 +561,21 @@
+     viewBounds.bottom = val.i_int;
+     var_Get( p_vout->p_libvlc, "drawable-view-right", &val );
+     viewBounds.right = val.i_int;
+-    var_Get( p_vout->p_libvlc, "drawable-clip-top", &val );
+-    clipBounds.top = val.i_int;
+-    var_Get( p_vout->p_libvlc, "drawable-clip-left", &val );
+-    clipBounds.left = val.i_int;
+-    var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val );
+-    clipBounds.bottom = val.i_int;
+-    var_Get( p_vout->p_libvlc, "drawable-clip-right", &val );
+-    clipBounds.right = val.i_int;
++    if( !viewBounds.top && !viewBounds.left && !viewBounds.right && !viewBounds.bottom ) {
++        /* view bounds not set, use control/window bounds */
++        clipBounds = viewBounds = aglGetBounds( p_vout );
++    } else {
++        var_Get( p_vout->p_libvlc, "drawable-clip-top", &val );
++        clipBounds.top = val.i_int;
++        var_Get( p_vout->p_libvlc, "drawable-clip-left", &val );
++        clipBounds.left = val.i_int;
++        var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val );
++        clipBounds.bottom = val.i_int;
++        var_Get( p_vout->p_libvlc, "drawable-clip-right", &val );
++        clipBounds.right = val.i_int;
++    }
++
+     p_vout->p_sys->b_clipped_out = (clipBounds.top == clipBounds.bottom)
+                                  || (clipBounds.left == clipBounds.right);
+     if( ! p_vout->p_sys->b_clipped_out )
+@@ -501,7 +587,15 @@
+     }
+     p_vout->p_sys->clipBounds = clipBounds;
+     p_vout->p_sys->viewBounds = viewBounds;
++}
++static int aglInit( vout_thread_t * p_vout )
++{
++    vlc_value_t val;
++
++    var_Get( p_vout->p_libvlc, "drawable", &val );
++    aglProcessDrawable( p_vout, val.i_int );
++
+     return VLC_SUCCESS;
+ }
+@@ -564,6 +658,26 @@
+ static int aglManage( vout_thread_t * p_vout )
+ {
++    if( p_vout->p_sys->drawable_type == VLCDrawableControlRef ) {
++        /* auto-detect size changes in the control by polling */
++        Rect clipBounds, viewBounds;
++
++        clipBounds = viewBounds = aglGetBounds( p_vout );
++
++        if( memcmp(&clipBounds, &(p_vout->p_sys->clipBounds), sizeof(clipBounds) )
++         && memcmp(&viewBounds, &(p_vout->p_sys->viewBounds), sizeof(viewBounds)) )
++        {
++            /* size has changed since last poll */
++
++            p_vout->p_sys->clipBounds = clipBounds;
++            p_vout->p_sys->viewBounds = viewBounds;
++            aglLock( p_vout );
++            aglSetViewport(p_vout, viewBounds, clipBounds);
++            aglReshape( p_vout );
++            aglUnlock( p_vout );
++        }
++    }
++
+     if( p_vout->i_changes & VOUT_ASPECT_CHANGE )
+     {
+         aglLock( p_vout );
+@@ -586,42 +700,28 @@
+         {
+             /* Close the fullscreen window and resume normal drawing */
+             vlc_value_t val;
+-            Rect viewBounds;    
+-            Rect clipBounds;
+             var_Get( p_vout->p_libvlc, "drawable", &val );
+-            p_vout->p_sys->agl_drawable = (AGLDrawable)val.i_int;
+-            aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable);
++            aglProcessDrawable( p_vout, val.i_int );
+-            var_Get( p_vout->p_libvlc, "drawable-view-top", &val );
+-            viewBounds.top = val.i_int;
+-            var_Get( p_vout->p_libvlc, "drawable-view-left", &val );
+-            viewBounds.left = val.i_int;
+-            var_Get( p_vout->p_libvlc, "drawable-view-bottom", &val );
+-            viewBounds.bottom = val.i_int;
+-            var_Get( p_vout->p_libvlc, "drawable-view-right", &val );
+-            viewBounds.right = val.i_int;
+-            var_Get( p_vout->p_libvlc, "drawable-clip-top", &val );
+-            clipBounds.top = val.i_int;
+-            var_Get( p_vout->p_libvlc, "drawable-clip-left", &val );
+-            clipBounds.left = val.i_int;
+-            var_Get( p_vout->p_libvlc, "drawable-clip-bottom", &val );
+-            clipBounds.bottom = val.i_int;
+-            var_Get( p_vout->p_libvlc, "drawable-clip-right", &val );
+-            clipBounds.right = val.i_int;
++            /*the following was here, superfluous due to the same in aglLock?
++              aglSetCurrentContext(p_vout->p_sys->agl_ctx);*/
+-            aglSetCurrentContext(p_vout->p_sys->agl_ctx);
+-            aglSetViewport(p_vout, viewBounds, clipBounds);
+-
+             /* Most Carbon APIs are not thread-safe, therefore delagate some GUI visibilty update to the main thread */
+             sendEventToMainThread(GetWindowEventTarget(p_vout->p_sys->theWindow), kEventClassVLCPlugin, kEventVLCPluginHideFullscreen);
+         }
+         else
+         {
++            CGDirectDisplayID displayID;
++            CGRect displayBounds;
+             Rect deviceRect;
+             
+-            GDHandle deviceHdl = GetMainDevice();
+-            deviceRect = (*deviceHdl)->gdRect;
++            /* the main display has its origin at (0,0) */
++            displayBounds = CGDisplayBounds( CGMainDisplayID() );
++            deviceRect.left = 0;
++            deviceRect.top = 0;
++            deviceRect.right = displayBounds.size.width;
++            deviceRect.bottom = displayBounds.size.height;
+             
+             if( !p_vout->p_sys->theWindow )
+             {
+@@ -669,8 +769,9 @@
+                 SetWindowBounds(p_vout->p_sys->theWindow, kWindowContentRgn, &deviceRect);
+             }
+             glClear( GL_COLOR_BUFFER_BIT );
+-            p_vout->p_sys->agl_drawable = (AGLDrawable)GetWindowPort(p_vout->p_sys->theWindow);
+-            aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->agl_drawable);
++            p_vout->p_sys->drawable_type = VLCDrawableCGrafPtr;
++            p_vout->p_sys->drawable.CGrafPtr = GetWindowPort(p_vout->p_sys->theWindow);
++            aglSetDrawable(p_vout->p_sys->agl_ctx, p_vout->p_sys->drawable.CGrafPtr);
+             aglSetCurrentContext(p_vout->p_sys->agl_ctx);
+             aglSetViewport(p_vout, deviceRect, deviceRect);
+             //aglSetFullScreen(p_vout->p_sys->agl_ctx, device_width, device_height, 0, 0);
+@@ -753,11 +854,10 @@
+         case VOUT_REPARENT:
+         {
+-            AGLDrawable drawable = (AGLDrawable)va_arg( args, int);
+-            if( !p_vout->b_fullscreen && drawable != p_vout->p_sys->agl_drawable )
++            libvlc_drawable_t drawable = (libvlc_drawable_t)va_arg( args, int);
++            if( !p_vout->b_fullscreen )
+             {
+-                p_vout->p_sys->agl_drawable = drawable;
+-                aglSetDrawable(p_vout->p_sys->agl_ctx, drawable);
++                aglProcessDrawable( p_vout, drawable );
+             }
+             return VLC_SUCCESS;
+         }
+@@ -771,8 +871,16 @@
+ {
+     if( ! p_vout->p_sys->b_clipped_out )
+     {
++        WindowRef win;
++        Rect rect;
++
+         p_vout->p_sys->b_got_frame = VLC_TRUE;
+         aglSwapBuffers(p_vout->p_sys->agl_ctx);
++
++        win = aglGetWindow( p_vout );
++        rect = aglGetBounds( p_vout );
++
++        InvalWindowRect( win, &rect );
+     }
+     else
+     {
+@@ -788,12 +896,14 @@
+     // however AGL coordinates are based on window structure region
+     // and are vertically flipped
+     GLint rect[4];
+-    CGrafPtr port = (CGrafPtr)p_vout->p_sys->agl_drawable;
++    WindowRef window;
+     Rect winBounds, clientBounds;
+-    GetWindowBounds(GetWindowFromPort(port),
++    window = aglGetWindow( p_vout );
++
++    GetWindowBounds(window,
+         kWindowStructureRgn, &winBounds);
+-    GetWindowBounds(GetWindowFromPort(port),
++    GetWindowBounds(window,
+         kWindowContentRgn, &clientBounds);
+     /* update video clipping bounds in drawable */
+Index: bindings/python/vlc_instance.c
+===================================================================
+--- bindings/python/vlc_instance.c     (revision 20403)
++++ bindings/python/vlc_instance.c     (working copy)
+@@ -349,6 +349,30 @@
+ }
+ static PyObject *
++vlcInstance_video_set_macosx_parent_type( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_drawable_type;
++
++    if( !PyArg_ParseTuple( args, "i", &i_drawable_type ) )
++        return NULL;
++
++    if( i_drawable_type != VLCDrawableCGrafPtr
++     && i_drawable_type != VLCDrawableControlRef )
++    {
++        PyErr_SetString( vlcInstance_Exception, "Invalid drawable type." );
++        return NULL;
++    }
++
++    LIBVLC_TRY;
++    libvlc_video_set_macosx_parent_type( LIBVLC_INSTANCE->p_instance, (libvlc_macosx_drawable_type_t) i_drawable_type, &ex );
++    LIBVLC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
+ vlcInstance_video_set_size( PyObject *self, PyObject *args )
+ {
+     libvlc_exception_t ex;
+@@ -733,6 +757,8 @@
+       "playlist_get_input() -> object   Return the current input"},
+     { "video_set_parent", vlcInstance_video_set_parent, METH_VARARGS,
+       "video_set_parent(xid=int)       Set the parent xid or HWND"},
++    { "video_set_macosx_parent_type", vlcInstance_video_set_macosx_parent_type, METH_VARARGS,
++      "video_set_macosx_parent_type(drawabletype=int)       Set the type of parent used on Mac OS/X (see the Drawable* constants)"},
+     { "video_set_size", vlcInstance_video_set_size, METH_VARARGS,
+       "video_set_size(width=int, height=int)    Set the video width and height"},
+     { "audio_toggle_mute", vlcInstance_audio_toggle_mute, METH_VARARGS,
+Index: bindings/python/vlc_module.c
+===================================================================
+--- bindings/python/vlc_module.c       (revision 20403)
++++ bindings/python/vlc_module.c       (working copy)
+@@ -147,6 +147,10 @@
+                              mediacontrol_EndStatus );
+     PyModule_AddIntConstant( p_module, "UndefinedStatus",
+                              mediacontrol_UndefinedStatus );
++    PyModule_AddIntConstant( p_module, "DrawableCGrafPtr",
++                             VLCDrawableCGrafPtr );
++    PyModule_AddIntConstant( p_module, "DrawableControlRef",
++                             VLCDrawableControlRef );
+ }
+Index: src/control/video.c
+===================================================================
+--- src/control/video.c        (revision 20403)
++++ src/control/video.c        (working copy)
+@@ -277,6 +277,21 @@
+ /* global video settings */
++void libvlc_video_set_macosx_parent_type( libvlc_instance_t *p_instance, libvlc_macosx_drawable_type_t t,
++                              libvlc_exception_t *p_e )
++{
++    var_SetInteger(p_instance->p_libvlc_int, "macosx-drawable-type", (int)t);
++}
++
++libvlc_macosx_drawable_type_t libvlc_video_get_macosx_parent_type( libvlc_instance_t *p_instance, libvlc_exception_t *p_e )
++{
++    libvlc_macosx_drawable_type_t result;
++    
++    result = var_GetInteger( p_instance->p_libvlc_int, "macosx-drawable-type" );
++    
++    return result;
++}
++
+ void libvlc_video_set_parent( libvlc_instance_t *p_instance, libvlc_drawable_t d,
+                               libvlc_exception_t *p_e )
+ {
+Index: src/libvlc-common.c
+===================================================================
+--- src/libvlc-common.c        (revision 20403)
++++ src/libvlc-common.c        (working copy)
+@@ -941,6 +941,10 @@
+     var_Create( p_libvlc, "drawable-clip-bottom", VLC_VAR_INTEGER );
+     var_Create( p_libvlc, "drawable-clip-right", VLC_VAR_INTEGER );
++#ifdef __APPLE__
++    var_Create( p_libvlc, "macosx-drawable-type", VLC_VAR_INTEGER );
++#endif
++
+     /* Create volume callback system. */
+     var_Create( p_libvlc, "volume-change", VLC_VAR_BOOL );
+Index: include/vlc/libvlc.h
+===================================================================
+--- include/vlc/libvlc.h       (revision 20403)
++++ include/vlc/libvlc.h       (working copy)
+@@ -424,6 +424,10 @@
+  */
+ VLC_PUBLIC_API void libvlc_video_redraw_rectangle( libvlc_input_t *, const libvlc_rectangle_t *, libvlc_exception_t * );
++VLC_PUBLIC_API void libvlc_video_set_macosx_parent_type( libvlc_instance_t *, libvlc_macosx_drawable_type_t, libvlc_exception_t * );
++
++VLC_PUBLIC_API libvlc_macosx_drawable_type_t libvlc_video_get_macosx_parent_type( libvlc_instance_t *, libvlc_exception_t * );
++
+ /**
+  * Set the default video output parent
+  *  this settings will be used as default for all video outputs
+Index: include/vlc/libvlc_structures.h
+===================================================================
+--- include/vlc/libvlc_structures.h    (revision 20403)
++++ include/vlc/libvlc_structures.h    (working copy)
+@@ -83,12 +83,22 @@
+ /**
+ * Downcast to this general type as placeholder for a platform specific one, such as:
+ *  Drawable on X11,
+-*  CGrafPort on MacOSX,
++*  (libvlc_macosx_drawable_type_t) on MacOSX,
+ *  HWND on win32
+ */
+ typedef int libvlc_drawable_t;
+ /**
++* Type of libvlc_drawable_t on MaxOSX. Available types:
++* - VLCDrawableCGrafPtr
++* - VLCDrawableControlRef
++*/
++typedef enum {
++    VLCDrawableCGrafPtr = 0,
++    VLCDrawableControlRef,
++} libvlc_macosx_drawable_type_t;
++
++/**
+ * Rectangle type for video geometry
+ */
+ typedef struct
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/changelog b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/changelog
new file mode 100644 (file)
index 0000000..dbf4e49
--- /dev/null
@@ -0,0 +1,5 @@
+swarmplayer (1.0.0-1ubuntu3) hardy; urgency=low
+
+  * First release
+
+ -- Tribler <tribler@tribler.org>  Tue, 17 Jun 2008 11:22:05 +0200
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/compat b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/compat
new file mode 100644 (file)
index 0000000..b8626c4
--- /dev/null
@@ -0,0 +1 @@
+4
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/control b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/control
new file mode 100644 (file)
index 0000000..614499d
--- /dev/null
@@ -0,0 +1,16 @@
+Source: swarmplayer 
+Section: net
+Priority: optional
+Maintainer: Arno Bakker <arno@cs.vu.nl>
+Standards-Version: 3.7.2
+Build-Depends: python, debhelper (>= 5.0.37.2), devscripts
+
+Package: swarmplayer 
+Architecture: all
+Depends: python, python-wxgtk2.8, python-m2crypto, python-apsw, vlc, ffmpeg
+Description: Python based Bittorrent/Internet TV viewer. 
+ It allows you to watch BitTorrent-hosted videos on demand and
+ plays live Tribler streams. It is based on the same core as the
+ Tribler TV application.
+ .
+ Homepage: http://www.tribler.org/
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/files b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/files
new file mode 100644 (file)
index 0000000..06b52fd
--- /dev/null
@@ -0,0 +1 @@
+swarmplayer_1.0.0-1ubuntu3_all.deb net optional
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/prerm b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/prerm
new file mode 100644 (file)
index 0000000..082c7fe
--- /dev/null
@@ -0,0 +1,47 @@
+#! /bin/sh
+# prerm script for #PACKAGE#
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+#        * <prerm> `remove'
+#        * <old-prerm> `upgrade' <new-version>
+#        * <new-prerm> `failed-upgrade' <old-version>
+#        * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
+#        * <deconfigured's-prerm> `deconfigure' `in-favour'
+#          <package-being-installed> <version> `removing'
+#          <conflicting-package> <version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+PACKAGE="swarmplayer"
+
+dpkg --listfiles $PACKAGE |
+        awk '$0~/\.py$/ {print $0"c\n" $0"o"}' |
+        xargs rm -f >&2
+
+killall $PACKAGE || :
+
+
+case "$1" in
+    remove|upgrade|deconfigure)
+#       install-info --quiet --remove /usr/info/#PACKAGE#.info.gz
+        ;;
+    failed-upgrade)
+        ;;
+    *)
+        echo "prerm called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/pycompat b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/pycompat
new file mode 100644 (file)
index 0000000..0cfbf08
--- /dev/null
@@ -0,0 +1 @@
+2
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/rules b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/rules
new file mode 100644 (file)
index 0000000..5832e45
--- /dev/null
@@ -0,0 +1,85 @@
+#!/usr/bin/make -f
+# Sample debian/rules that uses debhelper.
+# GNU copyright 1997 to 1999 by Joey Hess.
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+LIBRARYNAME=BaseLib
+
+configure: configure-stamp
+configure-stamp:
+       dh_testdir
+       # Add here commands to configure the package.
+
+       touch configure-stamp
+
+
+build: build-stamp
+
+build-stamp: configure-stamp 
+       dh_testdir
+
+       # Add here commands to compile the package.
+       #$(MAKE)
+       #/usr/bin/docbook-to-man debian/bittorrent.sgml > bittorrent.1
+
+       touch build-stamp
+
+clean:
+       dh_testdir
+       dh_testroot
+       rm -f build-stamp configure-stamp
+
+       # Add here commands to clean up after the build process.
+       #-$(MAKE) clean
+       find . -name '*.pyc' |xargs rm || :
+
+       dh_clean
+
+install: build
+       dh_testdir
+       dh_testroot
+       dh_clean -k
+       dh_installdirs
+
+# Build architecture-independent files here.
+binary-arch: build install
+# We have nothing to do by default.
+
+
+# Build architecture-dependent files here.
+binary-indep: build install
+       dh_testdir
+       dh_testroot
+       dh_installdocs
+       dh_installexamples
+       dh_installmenu
+       dh_installmime
+       dh_installman
+
+       mkdir -p debian/swarmplayer/usr/share/swarmplayer/
+       cp -rf `ls -1d ${LIBRARYNAME}` debian/swarmplayer/usr/share/swarmplayer/
+       rm -rf debian/swarmplayer/usr/share/swarmplayer/${LIBRARYNAME}/Test
+       # add other files
+       mkdir -p debian/swarmplayer/usr/bin
+       cp -f debian/swarmplayer.sh debian/swarmplayer/usr/bin/swarmplayer
+       cp -f ${LIBRARYNAME}/LICENSE.txt debian/copyright
+       # for the menu
+       mkdir -p debian/swarmplayer/usr/share/pixmaps
+       cp -f debian/swarmplayer.xpm debian/swarmplayer/usr/share/pixmaps/
+
+       dh_installchangelogs 
+       dh_installinit -r --no-start -- stop 20 0 6 .
+       dh_install --sourcedir=debian/tmp
+       dh_install debian/swarmplayer.desktop usr/share/applications
+       dh_link
+       dh_compress
+       dh_fixperms
+       dh_installdeb
+       dh_python
+       dh_gencontrol
+       dh_md5sums
+       dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.1 b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.1
new file mode 100644 (file)
index 0000000..f909b4e
--- /dev/null
@@ -0,0 +1,22 @@
+.\" SwarmPlayer: Python based Bittorrent/Internet TV viewer 
+.TH man 1 "12 June 2007" "1.0" "SwarmPlayer man page"
+.SH NAME
+swarmplayer \- Python based Bittorrent/Internet TV viewer
+.SH SYNOPSIS
+.B swarmplayer
+.SH DESCRIPTION
+.B SwarmPlayer 
+is a python-based Bittorrent/Internet TV viewer.
+It allows you to watch BitTorrent-hosted videos on demand and
+plays live Tribler streams. It is based on the same core as the
+Tribler TV application.
+
+Homepage: http://www.tribler.org
+.SH FILES
+.P 
+.I /usr/bin/swarmplayer
+.I /usr/share/swarmplayer
+.SH AUTHOR
+.nf
+Arno Bakker (arno@cs.vu.nl)
+.fi
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.desktop b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.desktop
new file mode 100644 (file)
index 0000000..c3b1833
--- /dev/null
@@ -0,0 +1,8 @@
+[Desktop Entry]
+Name=SwarmPlayer
+GenericName=Bittorrent Video-On-Demand / Live streaming client
+Exec=swarmplayer
+Icon=swarmplayer
+Terminal=false
+Type=Application
+Categories=Application;Network;P2P
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.manpages b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.manpages
new file mode 100644 (file)
index 0000000..acc1bea
--- /dev/null
@@ -0,0 +1 @@
+debian/swarmplayer.1
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.menu b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.menu
new file mode 100644 (file)
index 0000000..1de95ce
--- /dev/null
@@ -0,0 +1,4 @@
+?package(swarmplayer):needs="x11" section="Apps/Net" \
+        title="SwarmPlayer" \
+        icon="/usr/share/pixmaps/swarmplayer.xpm" \
+        command="swarmplayer"
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.postinst.debhelper b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.postinst.debhelper
new file mode 100644 (file)
index 0000000..8637a4e
--- /dev/null
@@ -0,0 +1,5 @@
+# Automatically added by dh_installmenu
+if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then
+       update-menus
+fi
+# End automatically added section
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.postrm.debhelper b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.postrm.debhelper
new file mode 100644 (file)
index 0000000..2b4be4f
--- /dev/null
@@ -0,0 +1,3 @@
+# Automatically added by dh_installmenu
+if [ -x "`which update-menus 2>/dev/null`" ]; then update-menus ; fi
+# End automatically added section
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.sh b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.sh
new file mode 100755 (executable)
index 0000000..b5be675
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/sh
+# Startup script for Ubuntu Linux
+
+# don't care about gtk/x11/whatever. Currently (>= 3.4.0) must be unicode
+WXPYTHONVER24=`ls -1d /usr/lib/python2.4/site-packages/wx-2.8* 2>/dev/null | grep -v ansi | sed -e 's/.*wx-//g' -e 's/-.*//g' | sort -nr | head -1`
+WXPYTHONVER25=`ls -1d /usr/lib/python2.5/site-packages/wx-2.8* 2>/dev/null | grep -v ansi | sed -e 's/.*wx-//g' -e 's/-.*//g' | sort -nr | head -1`
+
+if [ "$WXPYTHONVER24" = "" ] && [ "$WXPYTHONVER25" = "" ];
+then
+    echo "Hmmm... No wxPython unicode package found for python2.4 or 2.5, cannot run Tribler, sorry"
+    exit -1
+fi
+
+if [ "$WXPYTHONVER25" = "" ];
+then
+    PYTHON="python2.4"
+    WXPYTHONVER=$WXPYTHONVER24
+    echo "Using python2.4"
+else
+    PYTHON="python2.5"
+    WXPYTHONVER=$WXPYTHONVER25
+    echo "Using python2.5"
+fi
+
+WXPYTHON=`ls -1d /usr/lib/$PYTHON/site-packages/wx-$WXPYTHONVER* | grep -v ansi | head -1`
+
+PYTHONPATH=/usr/share/swarmplayer/:$WXPYTHON
+export PYTHONPATH
+
+cd /usr/share/swarmplayer
+exec $PYTHON /usr/share/swarmplayer/Tribler/Player/swarmplayer.py "$@" > /tmp/$USER-swarmplayer.log 2>&1
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.xpm b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer.xpm
new file mode 100644 (file)
index 0000000..579b53f
--- /dev/null
@@ -0,0 +1,257 @@
+/* XPM */\r
+static char * swarmplayer_xpm[] = {\r
+"32 32 222 2",\r
+"      c None",\r
+".     c #8F8F90",\r
+"+     c #909191",\r
+"@     c #8F9090",\r
+"#     c #8D8D8E",\r
+"$     c #8B8B8C",\r
+"%     c #8A8A8B",\r
+"&     c #8A8A8A",\r
+"*     c #898A8A",\r
+"=     c #89898A",\r
+"-     c #898989",\r
+";     c #888989",\r
+">     c #888889",\r
+",     c #888888",\r
+"'     c #878888",\r
+")     c #878788",\r
+"!     c #878787",\r
+"~     c #868787",\r
+"{     c #868687",\r
+"]     c #858686",\r
+"^     c #868686",\r
+"/     c #8B8B8B",\r
+"(     c #C0C0C0",\r
+"_     c #EBECEC",\r
+":     c #EEEFEF",\r
+"<     c #EEEEEE",\r
+"[     c #EDEEEE",\r
+"}     c #ECEDED",\r
+"|     c #ECECEC",\r
+"1     c #EAEBEB",\r
+"2     c #C7C7C8",\r
+"3     c #616161",\r
+"4     c #C2E1C2",\r
+"5     c #7BCA7A",\r
+"6     c #E0EAE0",\r
+"7     c #AEDCAE",\r
+"8     c #CEE5CE",\r
+"9     c #E6ECE7",\r
+"0     c #EBEEEC",\r
+"a     c #D0E5D0",\r
+"b     c #3EB93B",\r
+"c     c #83CD82",\r
+"d     c #5AC058",\r
+"e     c #5EC45C",\r
+"f     c #37B234",\r
+"g     c #34B631",\r
+"h     c #99D298",\r
+"i     c #AEDAAE",\r
+"j     c #73C571",\r
+"k     c #19AA15",\r
+"l     c #1BB317",\r
+"m     c #A7D8A7",\r
+"n     c #46B543",\r
+"o     c #51BF4F",\r
+"p     c #A7D5A7",\r
+"q     c #42B93F",\r
+"r     c #5DBE5C",\r
+"s     c #B9DDBA",\r
+"t     c #DCE8DD",\r
+"u     c #DAE9DA",\r
+"v     c #32B530",\r
+"w     c #61C45F",\r
+"x     c #57C255",\r
+"y     c #11B00D",\r
+"z     c #1CAE19",\r
+"A     c #2AB627",\r
+"B     c #57BE54",\r
+"C     c #7AC879",\r
+"D     c #4BBD49",\r
+"E     c #36B934",\r
+"F     c #23B120",\r
+"G     c #33B630",\r
+"H     c #9CD49C",\r
+"I     c #CCE4CC",\r
+"J     c #56C254",\r
+"K     c #52C450",\r
+"L     c #3FBB3C",\r
+"M     c #38B736",\r
+"N     c #0AAD06",\r
+"O     c #15AD11",\r
+"P     c #0FA90B",\r
+"Q     c #1CAF19",\r
+"R     c #27B825",\r
+"S     c #55BC53",\r
+"T     c #1EAF1B",\r
+"U     c #25B421",\r
+"V     c #38BA35",\r
+"W     c #2EB42A",\r
+"X     c #40BC3E",\r
+"Y     c #5EC15C",\r
+"Z     c #84CE83",\r
+"`     c #9BD59B",\r
+" .    c #EDEFEE",\r
+"..    c #616262",\r
+"+.    c #9AD499",\r
+"@.    c #35B831",\r
+"#.    c #1EB11B",\r
+"$.    c #25B322",\r
+"%.    c #27B424",\r
+"&.    c #92D192",\r
+"*.    c #84CB83",\r
+"=.    c #ABDAAB",\r
+"-.    c #D7E7D8",\r
+";.    c #C5E1C5",\r
+">.    c #BCDDBC",\r
+",.    c #DDE9DE",\r
+"'.    c #56C255",\r
+").    c #7DCB7C",\r
+"!.    c #83CE82",\r
+"~.    c #95D595",\r
+"{.    c #C7E3C7",\r
+"].    c #626262",\r
+"^.    c #75CC74",\r
+"/.    c #15AF12",\r
+"(.    c #33B72F",\r
+"_.    c #1BB118",\r
+":.    c #36B532",\r
+"<.    c #6EC16D",\r
+"[.    c #DAE8DA",\r
+"}.    c #42BA3F",\r
+"|.    c #72CA70",\r
+"1.    c #0EAF0A",\r
+"2.    c #2BB428",\r
+"3.    c #B6DBB6",\r
+"4.    c #DEE8DF",\r
+"5.    c #C7E2C7",\r
+"6.    c #1FB51C",\r
+"7.    c #7AC97A",\r
+"8.    c #1AB116",\r
+"9.    c #B7DCB7",\r
+"0.    c #E0E9E1",\r
+"a.    c #31B82E",\r
+"b.    c #4ABD48",\r
+"c.    c #4EBE4C",\r
+"d.    c #6EC86C",\r
+"e.    c #E3E4E4",\r
+"f.    c #D6D7D7",\r
+"g.    c #DBDCDC",\r
+"h.    c #D7D8D8",\r
+"i.    c #DDDEDE",\r
+"j.    c #E7ECE8",\r
+"k.    c #87CD86",\r
+"l.    c #34B732",\r
+"m.    c #1AAC17",\r
+"n.    c #ADDAAD",\r
+"o.    c #9E9F9F",\r
+"p.    c #343434",\r
+"q.    c #252525",\r
+"r.    c #2A2A2A",\r
+"s.    c #525253",\r
+"t.    c #2B2B2B",\r
+"u.    c #6B6B6B",\r
+"v.    c #E9EAEA",\r
+"w.    c #87D086",\r
+"x.    c #5BC259",\r
+"y.    c #5ABF59",\r
+"z.    c #AAD8A9",\r
+"A.    c #EAEAEA",\r
+"B.    c #393939",\r
+"C.    c #515151",\r
+"D.    c #CFD0D0",\r
+"E.    c #C7C8C8",\r
+"F.    c #949494",\r
+"G.    c #DEDEDE",\r
+"H.    c #808080",\r
+"I.    c #D9DADA",\r
+"J.    c #8D8D8D",\r
+"K.    c #B4B5B5",\r
+"L.    c #61C460",\r
+"M.    c #46BC44",\r
+"N.    c #B4DDB4",\r
+"O.    c #3F4040",\r
+"P.    c #3B3B3B",\r
+"Q.    c #969697",\r
+"R.    c #BCBCBD",\r
+"S.    c #DEDFDF",\r
+"T.    c #ECEDEE",\r
+"U.    c #9C9C9C",\r
+"V.    c #B1B1B1",\r
+"W.    c #A5DAA4",\r
+"X.    c #4ABD47",\r
+"Y.    c #6AC768",\r
+"Z.    c #E9EDEA",\r
+"`.    c #BCBCBC",\r
+" +    c #565656",\r
+".+    c #313131",\r
+"++    c #262626",\r
+"@+    c #484848",\r
+"#+    c #DFE0E0",\r
+"$+    c #262727",\r
+"%+    c #282828",\r
+"&+    c #575858",\r
+"*+    c #E5E6E6",\r
+"=+    c #E6EDE7",\r
+"-+    c #82CE81",\r
+";+    c #2EB52B",\r
+">+    c #D4E7D5",\r
+",+    c #E6E8E8",\r
+"'+    c #919292",\r
+")+    c #A9AAAA",\r
+"!+    c #787878",\r
+"~+    c #C5C6C6",\r
+"{+    c #CECFCF",\r
+"]+    c #4EC04C",\r
+"^+    c #9ED79D",\r
+"/+    c #6A6B6B",\r
+"(+    c #505050",\r
+"_+    c #CBCCCC",\r
+":+    c #8D8E8E",\r
+"<+    c #C5E3C6",\r
+"[+    c #AAAAAA",\r
+"}+    c #787979",\r
+"|+    c #646464",\r
+"1+    c #6F7070",\r
+"2+    c #B7B7B7",\r
+"3+    c #AAABAB",\r
+"4+    c #C8C8C8",\r
+"5+    c #D3D4D4",\r
+"6+    c #848484",\r
+"7+    c #838484",\r
+"8+    c #838383",\r
+"9+    c #828282",\r
+"                                                                ",\r
+"      . + @ # $ % & * = - ; > , ' ' ) ! ~ { ] ^ { , * / =       ",\r
+"    ( _ : : : : : : : : : : : : : < [ [ [ } } | _ _ _ _ 1 2     ",\r
+"    } : : : : : : : : : : : : : : : : : : : : : : : : : : [ 3   ",\r
+"    : : : : : : : : : : : : : : : : : : : : : : : : : : : : 3   ",\r
+"    : : : : : : : : : : : : : : : : : : : : : : : : : : : : 3   ",\r
+"    : : : : : : : : 4 5 6 7 8 9 : : : : : : : : : : : : : : 3   ",\r
+"    : : : : : : 0 a b c d e f g h : : : : : : : : : : : : : 3   ",\r
+"    : : : : : i j k l m n o p q r s t : : : : : : : : : : : 3   ",\r
+"    : : : : u v w x y z A B C D E F G H I 0 : : : : : : : : 3   ",\r
+"    : : : : J K L M N O P Q R S T U V W X Y Z `  .: : : : : ..  ",\r
+"    [ : : : +.@.#.$.%.&.*.=.-.;.>.,.'.e ).!.~.{. .: : : : : ].  ",\r
+"    [ : : : ^./.(._.:.<.: : : : : : : : : : : : : : : : : : ].  ",\r
+"    } : : [.}.|.1.2.3.4.: : : : : : : : : : : : : : : : : : ].  ",\r
+"    _ : : 5.6.7.8.9.: : : : : : : : : : : : : : : : : : : : ].  ",\r
+"    _ : : 0.a.b.c.d.: : : : : e.f.g.1 : } h.h.h.i.[ : : : : ].  ",\r
+"    _ : : j.k.l.m.n.: : : : o.p.q.r.s.1 i.q.q.q.t.u.v.: : : ].  ",\r
+"    | : : : w.x.y.z.: : : A.B.C.D.E.F.v.G.q.H.I.J.q.K.: : : ].  ",\r
+"    } : : : -.L.M.N.: : : 1 O.P.Q.R.v.: S.q.* T.U.q.V.: : : ].  ",\r
+"    [ : : : : W.X.Y.Z.: : : `. +.+++@+#+S.q.++$+%+&+*+: : : ].  ",\r
+"    [ : : : : =+-+;+>+: : [ ,+: e.'+q.)+S.q.!+~+{+} : : : : ].  ",\r
+"    [ : : : : : =+]+^+: : 1 @+/+H.(+%+_+S.q.:+: : : : : : : ].  ",\r
+"    : : : : : : : <+e 0 : : [+}+|+1+2+: e.|+3+: : : : : : : ].  ",\r
+"    : : : : : : : : : : : : : : : : : : : : : : : : : : : : ].  ",\r
+"    : : : : : : : : : : : : : : : : : : : : : : : : : : : : ].  ",\r
+"    : : : : : : : : : : : : : : : : : : : : : : : : : : : : ..  ",\r
+"    : : : : : : : : : : : : : : : : : : : : : : : : : : : : ..  ",\r
+"    : : : : : : : : : : : : : : : : : : : : : : : : : : : : ].  ",\r
+"    [ : : : : : : : : : : : : : : : : : : : : : : : : : : :     ",\r
+"    4+: : : : : : : : : : : : : : : : : : : : : : : : : : 5+    ",\r
+"              / ^ 6+7+7+7+8+9+9+9+9+9+9+9+9+9+6+,               ",\r
+"                                                                "};\r
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer_big.xpm b/instrumentation/next-share/BaseLib/Player/Build/Ubuntu/swarmplayer_big.xpm
new file mode 100644 (file)
index 0000000..80af874
--- /dev/null
@@ -0,0 +1,563 @@
+/* XPM */\r
+static char * swarmplayer_big_xpm[] = {\r
+"48 48 512 2",\r
+"      c None",\r
+".     c #8F8F8F",\r
+"+     c #A6A6A6",\r
+"@     c #A4A5A5",\r
+"#     c #A4A4A4",\r
+"$     c #A3A4A4",\r
+"%     c #A2A3A4",\r
+"&     c #A2A2A2",\r
+"*     c #A1A1A2",\r
+"=     c #A0A1A1",\r
+"-     c #A0A0A1",\r
+";     c #A0A0A0",\r
+">     c #9FA0A0",\r
+",     c #9F9FA0",\r
+"'     c #9E9F9F",\r
+")     c #9E9E9F",\r
+"!     c #9E9E9E",\r
+"~     c #9D9E9E",\r
+"{     c #9D9D9E",\r
+"]     c #9C9D9D",\r
+"^     c #9C9C9D",\r
+"/     c #9B9C9C",\r
+"(     c #9B9B9C",\r
+"_     c #9B9B9B",\r
+":     c #9C9D9E",\r
+"<     c #8E8E8F",\r
+"[     c #DDDEDE",\r
+"}     c #F0F0F0",\r
+"|     c #F1F2F2",\r
+"1     c #F0F1F1",\r
+"2     c #EFF0F0",\r
+"3     c #EEEFEF",\r
+"4     c #EDEEEE",\r
+"5     c #ECEDEE",\r
+"6     c #ECEDED",\r
+"7     c #EBECEC",\r
+"8     c #EAEBEB",\r
+"9     c #EAEAEB",\r
+"0     c #E9EAEA",\r
+"a     c #E8E9EA",\r
+"b     c #E8E9E9",\r
+"c     c #E7E8E9",\r
+"d     c #E6E7E8",\r
+"e     c #E6E6E7",\r
+"f     c #E5E6E7",\r
+"g     c #E4E5E6",\r
+"h     c #E3E4E5",\r
+"i     c #E3E3E4",\r
+"j     c #E2E3E4",\r
+"k     c #E2E3E3",\r
+"l     c #D9DADA",\r
+"m     c #A9AAAA",\r
+"n     c #F5F6F6",\r
+"o     c #F4F5F5",\r
+"p     c #F3F4F4",\r
+"q     c #F2F3F3",\r
+"r     c #F1F2F3",\r
+"s     c #F0F1F2",\r
+"t     c #EDEEEF",\r
+"u     c #EBECED",\r
+"v     c #EAEBEC",\r
+"w     c #E9EAEB",\r
+"x     c #DEDFDF",\r
+"y     c #616161",\r
+"z     c #5A5A5A",\r
+"A     c #EFF0F1",\r
+"B     c #EEEFF0",\r
+"C     c #757575",\r
+"D     c #5B5B5B",\r
+"E     c #F1F1F2",\r
+"F     c #F3F4F5",\r
+"G     c #F2F3F4",\r
+"H     c #E3ECE4",\r
+"I     c #C1E1C2",\r
+"J     c #97D397",\r
+"K     c #EAECEC",\r
+"L     c #DFE9E1",\r
+"M     c #D1E5D2",\r
+"N     c #E1E9E2",\r
+"O     c #E7E8E8",\r
+"P     c #E6E7E7",\r
+"Q     c #747474",\r
+"R     c #77CA75",\r
+"S     c #35B532",\r
+"T     c #49BA47",\r
+"U     c #C0E0C1",\r
+"V     c #57C156",\r
+"W     c #38BC35",\r
+"X     c #5CC15A",\r
+"Y     c #6FC66E",\r
+"Z     c #ACD8AC",\r
+"`     c #DCE6DD",\r
+" .    c #E5E7E7",\r
+"..    c #737373",\r
+"+.    c #ECEEED",\r
+"@.    c #DEE9DF",\r
+"#.    c #B2DAB2",\r
+"$.    c #57C155",\r
+"%.    c #31B62E",\r
+"&.    c #C2E0C2",\r
+"*.    c #58BE56",\r
+"=.    c #48BB45",\r
+"-.    c #8CD18C",\r
+";.    c #2AAC27",\r
+">.    c #4FBB4D",\r
+",.    c #12AF0F",\r
+"'.    c #55BD52",\r
+").    c #D0E0D1",\r
+"!.    c #737374",\r
+"~.    c #EAEEEB",\r
+"{.    c #C7E2C7",\r
+"].    c #9BD39A",\r
+"^.    c #37B133",\r
+"/.    c #15A811",\r
+"(.    c #10B00B",\r
+"_.    c #44BD42",\r
+":.    c #CDE2CE",\r
+"<.    c #60BE5F",\r
+"[.    c #2FB12C",\r
+"}.    c #36B933",\r
+"|.    c #7EC97D",\r
+"1.    c #83CB82",\r
+"2.    c #57BF55",\r
+"3.    c #51BA4F",\r
+"4.    c #A7D4A8",\r
+"5.    c #DDE4DF",\r
+"6.    c #E3E5E5",\r
+"7.    c #737474",\r
+"8.    c #C7E1C7",\r
+"9.    c #31B02F",\r
+"0.    c #86CC86",\r
+"a.    c #5CBE5A",\r
+"b.    c #0EAF0A",\r
+"c.    c #13B20F",\r
+"d.    c #44BD41",\r
+"e.    c #72C871",\r
+"f.    c #22AE1F",\r
+"g.    c #7AC679",\r
+"h.    c #71C56F",\r
+"i.    c #C4DDC4",\r
+"j.    c #83CC82",\r
+"k.    c #2BB528",\r
+"l.    c #40B63E",\r
+"m.    c #4BBC49",\r
+"n.    c #61C05F",\r
+"o.    c #7ECA7E",\r
+"p.    c #BBDABB",\r
+"q.    c #E1EAE2",\r
+"r.    c #7ACF78",\r
+"s.    c #15AD13",\r
+"t.    c #2AB426",\r
+"u.    c #A0D69F",\r
+"v.    c #39BA36",\r
+"w.    c #0CAF08",\r
+"x.    c #13B00F",\r
+"y.    c #14A810",\r
+"z.    c #0FB00C",\r
+"A.    c #60C55E",\r
+"B.    c #32B32F",\r
+"C.    c #3CB73A",\r
+"D.    c #93D093",\r
+"E.    c #2DB62A",\r
+"F.    c #2DB72B",\r
+"G.    c #2CB529",\r
+"H.    c #18AF15",\r
+"I.    c #20B21D",\r
+"J.    c #3AB838",\r
+"K.    c #8ACD89",\r
+"L.    c #B4DBB4",\r
+"M.    c #CFE2D1",\r
+"N.    c #E6E8E8",\r
+"O.    c #67C666",\r
+"P.    c #14B510",\r
+"Q.    c #64C863",\r
+"R.    c #23B320",\r
+"S.    c #7DCA7D",\r
+"T.    c #1FB01C",\r
+"U.    c #09AD05",\r
+"V.    c #0BAB08",\r
+"W.    c #0CAB08",\r
+"X.    c #0DA809",\r
+"Y.    c #14AF10",\r
+"Z.    c #19AE16",\r
+"`.    c #12B20F",\r
+" +    c #6DC56D",\r
+".+    c #58BB56",\r
+"++    c #1FAE1B",\r
+"@+    c #1DB11A",\r
+"#+    c #1AB117",\r
+"$+    c #3CB839",\r
+"%+    c #4ABE47",\r
+"&+    c #22B01E",\r
+"*+    c #30B72D",\r
+"=+    c #38B835",\r
+"-+    c #77C875",\r
+";+    c #99D398",\r
+">+    c #C0DFC1",\r
+",+    c #BADDBA",\r
+"'+    c #41BF3E",\r
+")+    c #79CD78",\r
+"!+    c #3CBA3A",\r
+"~+    c #27B425",\r
+"{+    c #1BAF17",\r
+"]+    c #0EB10A",\r
+"^+    c #36B833",\r
+"/+    c #44B942",\r
+"(+    c #1AAA16",\r
+"_+    c #20B31D",\r
+":+    c #5CBF5A",\r
+"<+    c #6ECC6D",\r
+"[+    c #70C76F",\r
+"}+    c #46BC43",\r
+"|+    c #3FB83D",\r
+"1+    c #6DC36B",\r
+"2+    c #7BCD7A",\r
+"3+    c #1DB419",\r
+"4+    c #34B731",\r
+"5+    c #35B732",\r
+"6+    c #4BBE49",\r
+"7+    c #7BCC7A",\r
+"8+    c #46BB44",\r
+"9+    c #41BD3F",\r
+"0+    c #4CBE4A",\r
+"a+    c #88CF88",\r
+"b+    c #E7EBE9",\r
+"c+    c #9BD39B",\r
+"d+    c #34BA31",\r
+"e+    c #25B121",\r
+"f+    c #1FB11C",\r
+"g+    c #23B21F",\r
+"h+    c #25B421",\r
+"i+    c #39B836",\r
+"j+    c #44BC41",\r
+"k+    c #BDDBBE",\r
+"l+    c #9FD29F",\r
+"m+    c #BADBBB",\r
+"n+    c #D3E2D5",\r
+"o+    c #E3E6E5",\r
+"p+    c #E0E5E2",\r
+"q+    c #DAE4DC",\r
+"r+    c #E1E6E3",\r
+"s+    c #E2E7E4",\r
+"t+    c #84CD83",\r
+"u+    c #53C351",\r
+"v+    c #76CB75",\r
+"w+    c #90CF8F",\r
+"x+    c #99D399",\r
+"y+    c #92D291",\r
+"z+    c #BBDFBC",\r
+"A+    c #CEE4CF",\r
+"B+    c #E5EAE6",\r
+"C+    c #A7D7A7",\r
+"D+    c #19B216",\r
+"E+    c #1CAE19",\r
+"F+    c #34B831",\r
+"G+    c #27B323",\r
+"H+    c #15AF11",\r
+"I+    c #42BB40",\r
+"J+    c #34B032",\r
+"K+    c #8ECA8D",\r
+"L+    c #81CD80",\r
+"M+    c #18BA15",\r
+"N+    c #1DB319",\r
+"O+    c #1EB31A",\r
+"P+    c #38BA35",\r
+"Q+    c #10B20C",\r
+"R+    c #4EBD4C",\r
+"S+    c #57BA54",\r
+"T+    c #B0D5B1",\r
+"U+    c #BDDCBE",\r
+"V+    c #48B945",\r
+"W+    c #65C464",\r
+"X+    c #8BCE8A",\r
+"Y+    c #08AE04",\r
+"Z+    c #16AD13",\r
+"`+    c #3BB739",\r
+" @    c #C9DECB",\r
+".@    c #DEE4E0",\r
+"+@    c #A2D4A1",\r
+"@@    c #25B522",\r
+"#@    c #37BD36",\r
+"$@    c #A6D5A6",\r
+"%@    c #16B113",\r
+"&@    c #5AC258",\r
+"*@    c #DBE3DC",\r
+"=@    c #D1E1D3",\r
+"-@    c #2BB929",\r
+";@    c #2BB429",\r
+">@    c #56BA54",\r
+",@    c #27B424",\r
+"'@    c #37BB34",\r
+")@    c #91D291",\r
+"!@    c #E9EBEB",\r
+"~@    c #EEEEEF",\r
+"{@    c #747575",\r
+"]@    c #D2E0D3",\r
+"^@    c #34B832",\r
+"/@    c #29B626",\r
+"(@    c #61C75F",\r
+"_@    c #6BC56A",\r
+":@    c #2DB429",\r
+"<@    c #A3D6A3",\r
+"[@    c #D4D5D6",\r
+"}@    c #C9C9CA",\r
+"|@    c #C9CACA",\r
+"1@    c #E4E5E5",\r
+"2@    c #D8D9D9",\r
+"3@    c #CCCCCD",\r
+"4@    c #CCCDCD",\r
+"5@    c #CDCDCE",\r
+"6@    c #CFD0D0",\r
+"7@    c #D9E3DB",\r
+"8@    c #71C770",\r
+"9@    c #33B630",\r
+"0@    c #33B830",\r
+"a@    c #14A911",\r
+"b@    c #36B733",\r
+"c@    c #CFE1D0",\r
+"d@    c #D3D4D5",\r
+"e@    c #676767",\r
+"f@    c #333333",\r
+"g@    c #252525",\r
+"h@    c #303030",\r
+"i@    c #474747",\r
+"j@    c #B6B7B7",\r
+"k@    c #707070",\r
+"l@    c #272727",\r
+"m@    c #A3A3A3",\r
+"n@    c #E4E6E6",\r
+"o@    c #C0DEC1",\r
+"p@    c #80CC80",\r
+"q@    c #58BF56",\r
+"r@    c #51BE4E",\r
+"s@    c #47BB44",\r
+"t@    c #D1E3D2",\r
+"u@    c #EBEBEC",\r
+"v@    c #686969",\r
+"w@    c #252626",\r
+"x@    c #2E2E2E",\r
+"y@    c #646464",\r
+"z@    c #4F4F4F",\r
+"A@    c #2D2D2D",\r
+"B@    c #717171",\r
+"C@    c #363636",\r
+"D@    c #656666",\r
+"E@    c #666666",\r
+"F@    c #454545",\r
+"G@    c #262626",\r
+"H@    c #C4C5C5",\r
+"I@    c #E5E6E6",\r
+"J@    c #ADD9AD",\r
+"K@    c #47BC44",\r
+"L@    c #4DBA4B",\r
+"M@    c #77C876",\r
+"N@    c #BADCBB",\r
+"O@    c #E1E2E3",\r
+"P@    c #3E3E3E",\r
+"Q@    c #7E7E7F",\r
+"R@    c #C0C1C1",\r
+"S@    c #C7C8C8",\r
+"T@    c #717172",\r
+"U@    c #5B5B5C",\r
+"V@    c #D7D7D8",\r
+"W@    c #3C3C3C",\r
+"X@    c #8E8F8F",\r
+"Y@    c #DFE6E0",\r
+"Z@    c #85CE84",\r
+"`@    c #6AC669",\r
+" #    c #52BC50",\r
+".#    c #4AC048",\r
+"+#    c #DAE7DB",\r
+"@#    c #404041",\r
+"##    c #565656",\r
+"$#    c #B9BABA",\r
+"%#    c #DCDDDE",\r
+"&#    c #717272",\r
+"*#    c #E0E1E1",\r
+"=#    c #424242",\r
+"-#    c #8C8D8D",\r
+";#    c #C2E0C3",\r
+">#    c #5DC65B",\r
+",#    c #3EBC3B",\r
+"'#    c #4EBE4C",\r
+")#    c #A8D8A8",\r
+"!#    c #878788",\r
+"~#    c #282828",\r
+"{#    c #2C2C2C",\r
+"]#    c #535353",\r
+"^#    c #949494",\r
+"/#    c #727272",\r
+"(#    c #838484",\r
+"_#    c #848585",\r
+":#    c #575757",\r
+"<#    c #292929",\r
+"[#    c #E8EAEA",\r
+"}#    c #8AD289",\r
+"|#    c #3EB93B",\r
+"1#    c #8CD08C",\r
+"2#    c #4DBF4B",\r
+"3#    c #E0EAE1",\r
+"4#    c #A5A5A5",\r
+"5#    c #5D5D5D",\r
+"6#    c #3A3A3A",\r
+"7#    c #2B2B2B",\r
+"8#    c #787878",\r
+"9#    c #888989",\r
+"0#    c #D9E7DA",\r
+"a#    c #65C563",\r
+"b#    c #3BB838",\r
+"c#    c #35B832",\r
+"d#    c #C9E4CA",\r
+"e#    c #606060",\r
+"f#    c #727373",\r
+"g#    c #4A4A4A",\r
+"h#    c #B3B4B4",\r
+"i#    c #B4B5B5",\r
+"j#    c #B8B9B9",\r
+"k#    c #EBEDED",\r
+"l#    c #E7EBE8",\r
+"m#    c #7CCC7A",\r
+"n#    c #28B324",\r
+"o#    c #ADDBAD",\r
+"p#    c #959696",\r
+"q#    c #E1E2E2",\r
+"r#    c #E9E9EA",\r
+"s#    c #838383",\r
+"t#    c #DFE0E0",\r
+"u#    c #AFDCAF",\r
+"v#    c #45BE43",\r
+"w#    c #67C865",\r
+"x#    c #E8EEE9",\r
+"y#    c #414242",\r
+"z#    c #4E4E4E",\r
+"A#    c #494949",\r
+"B#    c #2A2A2A",\r
+"C#    c #E7E7E8",\r
+"D#    c #98D597",\r
+"E#    c #3CBA39",\r
+"F#    c #C2E2C2",\r
+"G#    c #9A9B9B",\r
+"H#    c #414141",\r
+"I#    c #313232",\r
+"J#    c #E3E4E4",\r
+"K#    c #7A7A7A",\r
+"L#    c #616262",\r
+"M#    c #ECEFEE",\r
+"N#    c #A7DBA7",\r
+"O#    c #C9E5C9",\r
+"P#    c #EEF0F0",\r
+"Q#    c #F2F4F4",\r
+"R#    c #D1D1D1",\r
+"S#    c #D0D1D1",\r
+"T#    c #EEEEEE",\r
+"U#    c #DCDDDD",\r
+"V#    c #DBDBDB",\r
+"W#    c #DBDCDC",\r
+"X#    c #DADBDB",\r
+"Y#    c #CECFCF",\r
+"Z#    c #929292",\r
+"`#    c #B3B3B3",\r
+" $    c #888888",\r
+".$    c #A5A6A6",\r
+"+$    c #9C9C9C",\r
+"@$    c #989999",\r
+"#$    c #646565",\r
+"$$    c #818181",\r
+"%$    c #686868",\r
+"&$    c #ACACAC",\r
+"*$    c #B5B6B6",\r
+"=$    c #8B8B8B",\r
+"-$    c #C9C9C9",\r
+";$    c #A7A7A7",\r
+">$    c #AEAFAF",\r
+",$    c #8B8C8C",\r
+"'$    c #939393",\r
+")$    c #B0B1B1",\r
+"!$    c #868686",\r
+"~$    c #707171",\r
+"{$    c #949595",\r
+"]$    c #7C7D7D",\r
+"^$    c #9F9F9F",\r
+"/$    c #999A9A",\r
+"($    c #CACBCB",\r
+"_$    c #909191",\r
+":$    c #B2B2B2",\r
+"<$    c #6B6B6B",\r
+"[$    c #CACACA",\r
+"}$    c #858585",\r
+"|$    c #979898",\r
+"1$    c #919292",\r
+"2$    c #BCBDBD",\r
+"3$    c #939494",\r
+"4$    c #C1C2C2",\r
+"5$    c #B2B3B3",\r
+"6$    c #BFC0C0",\r
+"7$    c #C2C3C3",\r
+"8$    c #A7A8A8",\r
+"9$    c #C4C4C4",\r
+"0$    c #ABACAC",\r
+"a$    c #C3C4C4",\r
+"b$    c #B1B2B2",\r
+"c$    c #CDCECE",\r
+"d$    c #BDBEBE",\r
+"e$    c #585858",\r
+"f$    c #797A7A",\r
+"g$    c #979797",\r
+"h$    c #AFAFAF",\r
+"i$    c #9D9D9D",\r
+"j$    c #989898",\r
+"k$    c #969696",\r
+"l$    c #959595",\r
+"m$    c #9A9A9A",\r
+"                                                                                                ",\r
+"                                                                                                ",\r
+"        . + @ # $ % & * = - - ; ; > , , ' ' ) ! ~ ~ { { ] ] ^ ^ / / ( ( _ ( / ] : ~ ) <         ",\r
+"      ! [ } | | | 1 1 2 2 3 3 4 4 5 6 7 7 8 9 0 0 a b c d d e f g g h h i j k j i h h l m       ",\r
+"      [ n n n o o p p q r | s 1 2 2 3 t 4 5 6 u 7 v 8 w a a c c d d f f g g f f d d c c x y     ",\r
+"    z 1 n o o p p q q | | 1 A 2 B 3 t 4 6 u 7 v 8 w 0 a c c d d f f g g f f d d c c b a 0 C     ",\r
+"    D E o F p G q | | 1 1 2 B H I J K L M N v w w a b c O d f f g g f f P d O c c a a 0 w Q     ",\r
+"    z 1 p p q r | s 1 2 2 3 t R S T U V W X Y Z ` c c d e f g g f f  .d d c c a a 0 w 8 v ..    ",\r
+"    z 2 q r | s 1 A 2 3 +.@.#.$.%.&.*.=.-.;.>.,.'.).d f f g g f f d d c c b a 0 w 8 v v 7 !.    ",\r
+"    z 3 | | 1 A 2 B ~.{.].^./.(._.:.<.[.}.|.1.2.3.4.5.6.g f f d d O c b a a w w 8 v 7 u 6 7.    ",\r
+"    z 4 1 1 2 B 3 t 8.9.0.a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.f d d c c a a 0 w 8 v 7 u u 6 5 Q     ",\r
+"    z 5 2 2 3 t 4 q.r.s.t.u.v.w.x.y.z.A.B.C.D.E.F.G.H.I.J.K.L.M.N.a 0 w 8 v 7 7 u 6 5 4 t Q     ",\r
+"    z u B 3 t 4 6 O.P.Q.R.S.T.U.V.W.X.Y.Z.`. +.+++@+#+$+%+&+*+=+-+;+Z >+8 7 u 6 5 4 4 t 3 Q     ",\r
+"    z v t 4 5 6 u ,+'+)+!+~+{+]+^+/+(+_+:+<+[+}+|+1+2+3+4+5+6+7+8+9+0+a+b+6 5 5 4 t 3 B 2 Q     ",\r
+"    z 9 5 6 u 7 v c+d+e+f+g+h+i+j+k+l+m+n+o+o+p+q+r+s+t+u+v+w+x+y+z+A+B+6 5 4 t 3 B 2 2 1 Q     ",\r
+"    z 0 u 7 v 8 w C+D+E+F+G+H+I+J+K+g f f d d c c b a 0 w 8 v v 7 u 6 5 4 t 3 3 B 2 A 1 s Q     ",\r
+"    z b v 8 w 0 a L+M+N+O+P+Q+R+S+T+f d d c c b a a w w 8 v 7 u 6 5 4 4 t 3 B 2 A 1 1 | | Q     ",\r
+"    z c w 0 a b U+V+W+X+Y+Z+`+ @.@d d O c c a a w w 8 v 7 u 6 5 5 4 t 3 B 2 A 1 1 | | r q Q     ",\r
+"    z d a a c c +@@@#@$@%@&@*@f d d c c a a 0 w 8 v 7 u u 6 5 4 t 3 B 2 2 A 1 s | r q q p Q     ",\r
+"    z f c c d d =@-@;@>@,@'@)@d c c b a 0 w !@v v 7 u 6 5 4 t ~@3 B 2 A 1 s | | q q p p F {@    ",\r
+"    z g d d f f ]@^@/@(@_@:@<@c b a a w w 8 a [@}@|@[@1@4 t 3 2@3@4@4@5@6@O q q G p p o o C     ",\r
+"    z h f f g g 7@8@9@0@a@b@c@a a w w 8 v d@e@f@g@g@h@i@j@B 2 k@g@g@g@g@l@i@m@p p o o n n C     ",\r
+"    z i n@g f f f o@p@q@r@s@t@0 w 8 v u@7 v@w@x@D y@z@A@$ 2 A B@g@C@D@E@F@G@A@H@o o n n n C     ",\r
+"    z k I@f f d d J@K@v.L@M@N@w v v 7 u O@P@g@Q@8 3 g R@S@1 s T@g@U@s p V@W@g@X@n n n n o {@    ",\r
+"    z i f d d O c Y@Z@`@ #.#+#v 7 u 6 5 h @#g@##$#%#O A 1 | | &#g@D | p *#=#g@-#n n n o o {@    ",\r
+"    z h d d c c a a ;#>#,#'#)#u u 6 5 4 t !#~#g@{#C@]#^#I@r q /#g@P@(#_#:#l@<#$#n o o o o {@    ",\r
+"    z 1@c c b a 0 w [#}#|#1#2#3#5 4 t 3 B O 4#5#6#7#g@l@8#2 p /#g@g@g@g@G@C@9#q o o o o o {@    ",\r
+"    z I@b a a w w 8 v 0#a#b#c#d#4 3 3 B 2 A 1 s P S@e#g@6#[ p f#g@g#h#i#j#*#o o o o o o p Q     ",\r
+"    z f a w w 8 v 7 u k#l#m#n#o#3 B 2 A a p#R@q#6 r#s#g@W@t#o f#g@D p n o o o o o o p p p Q     ",\r
+"    z P w 8 v 7 u u 6 5 4 u#v#w#x#2 A 1 8 y#{#=#z#A#B#g@k@q n ..g@D q o o o o o o p p p p Q     ",\r
+"    z C#v v 7 u 6 5 4 t 3 ~.D#E#F#1 s | A G#y H#I#I#F@s#J#n n K#I#L#q o o o o p p p p p p Q     ",\r
+"    z c 7 u 6 5 4 4 ~@3 B 2 M#N#O#| | r q G 4 I@*#q#O o n n o b *#I@o o o p p p p p p p q Q     ",\r
+"    z a 6 5 5 4 t 3 B 2 2 1 1 s | r q G p p o o n n n n n o o o o o o p p p p p p p q q q Q     ",\r
+"    z 0 5 4 t 3 B 2 2 1 1 s | | q q p p o o o n n n n o o o o o o o p p p p p p p q q q q Q     ",\r
+"    z 8 t 3 3 P#2 A 1 s | | q q Q#p F o o n n n n o o o o o o o p p p p p p p q q q q q q Q     ",\r
+"    z v 3 B 2 A 1 1 | | r q G p p o o n n n n o o o o o o o p p p p p p p q q q q q q q | Q     ",\r
+"    z 7 2 2 1 1 | | r q q p p o o n n n n n o o o o o o o p p p p p p q q q q q q q | | | Q     ",\r
+"    z 6 1 1 s | | q q J#V@h p 8 0 R#0 S#b 1@T#U#S#b 0 p t#V#J#4 W#6@X#Y#J#q q q | | | | | Q     ",\r
+"    z 5 s | | q q G 1 Z#$ -#|@`# $.$8#+$@$#$$$%$&$ $*$3 =$_#^#-$%$;$>$,$'$2 | | | | | | | Q     ",\r
+"    z 4 | r q G p p p )$!$= ~$'$(#{$%$]$^$/$($%$_$; :$O /#<$[$}$|$1$2$Q 3$1 | | | | | 1 1 Q     ",\r
+"    z 3 q q p p o o o 4$.$b 5$6$7$7 8$l 4$9$*#;$7 t#Z#0$4$a$8 4$a$|$b$c$d$1 | | | | 1 1 1 Q     ",\r
+"    D 3 p p F o o n n n n o o o o o o o p p p p p p q q q q q q q | | | | | | | 1 1 1 1 1 C     ",\r
+"    e$2 p o o n n n n o o o o o o o p p p p p p p q q q q q q q | | | | | | 1 1 1 1 1 1 1 f$    ",\r
+"      I@o n n n n n o o o o o o p p p p p p p q q q q q q q | | | | | | 1 1 1 1 1 1 1 2 8       ",\r
+"      g$6 n n n o o o o o o o p p p p p p q q q q q q q | | | | | | | 1 1 1 1 1 1 2 2 7 h$      ",\r
+"          i$; ^$^$! i$_ j$k$p#p#l$l$l$l$l$l$l$l$l$l$l${${${${${$l$l$l$g$m$+$i$! ~ ! i$          ",\r
+"                                                                                                ",\r
+"                                                                                                "};\r
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Win32/heading.bmp b/instrumentation/next-share/BaseLib/Player/Build/Win32/heading.bmp
new file mode 100644 (file)
index 0000000..7bdbfcd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Player/Build/Win32/heading.bmp differ
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Win32/setuptriblerplay.py b/instrumentation/next-share/BaseLib/Player/Build/Win32/setuptriblerplay.py
new file mode 100644 (file)
index 0000000..068ba57
--- /dev/null
@@ -0,0 +1,52 @@
+# setup.py
+import sys
+import os
+
+try:
+    import py2exe.mf as modulefinder
+    import win32com
+    for p in win32com.__path__[1:]:
+        modulefinder.AddPackagePath("win32com", p)
+    for extra in ["win32com.shell"]:
+        __import__(extra)
+        m = sys.modules[extra]
+        for p in m.__path__[1:]:
+            modulefinder.AddPackagePath(extra, p)
+except ImportError:
+    pass
+
+from distutils.core import setup
+import py2exe
+
+from BaseLib.__init__ import LIBRARYNAME
+
+################################################################
+#
+# Setup script used for py2exe
+#
+# *** Important note: ***
+# Setting Python's optimize flag when building disables
+# "assert" statments, which are used throughout the
+# BitTornado core for error-handling.
+#
+################################################################
+
+mainfile = os.path.join(LIBRARYNAME,'Player','swarmplayer.py')
+progicofile = os.path.join(LIBRARYNAME,'Images','SwarmPlayerIcon.ico')
+
+target_player = {
+    "script": mainfile,
+    "icon_resources": [(1, progicofile)],
+}
+
+
+setup(
+#    (Disabling bundle_files for now -- apparently causes some issues with Win98)
+#    options = {"py2exe": {"bundle_files": 1}},
+#    zipfile = None,
+    options = {"py2exe": {"packages": [LIBRARYNAME+".Core","encodings"],"optimize": 2}},
+    data_files = [("installdir",[])], 
+    windows = [target_player],
+)
+
+#data_files = [("installdir", [manifest, nsifile, progicofile, toricofile, "binary-LICENSE.txt", "readme.txt"])],
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Win32/swarmplayer.exe.manifest b/instrumentation/next-share/BaseLib/Player/Build/Win32/swarmplayer.exe.manifest
new file mode 100644 (file)
index 0000000..78c537e
--- /dev/null
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\r
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">\r
+       <assemblyIdentity\r
+          version="1.0.0.0"\r
+          processorArchitecture="X86"\r
+          name="Microsoft.Winweb.SwarmPlayer"\r
+          type="win32"\r
+       />\r
+       <description>My Manifest Testing application</description>\r
+       <dependency>\r
+          <dependentAssembly>\r
+            <assemblyIdentity\r
+              type="win32"\r
+              name="Microsoft.Windows.Common-Controls"\r
+              version="6.0.0.0"\r
+              processorArchitecture="X86"\r
+              publicKeyToken="6595b64144ccf1df"\r
+              language="*"\r
+            />\r
+          </dependentAssembly>\r
+       </dependency>\r
+</assembly>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Player/Build/Win32/triblerplay.nsi b/instrumentation/next-share/BaseLib/Player/Build/Win32/triblerplay.nsi
new file mode 100644 (file)
index 0000000..e10c485
--- /dev/null
@@ -0,0 +1,218 @@
+!define PRODUCT "SwarmPlayer"\r
+!define VERSION "1.1.0"\r
+!define LIBRARYNAME "BaseLib"\r
+\r
+\r
+!include "MUI.nsh"\r
+\r
+;--------------------------------\r
+;Configuration\r
+\r
+;General\r
+ Name "${PRODUCT} ${VERSION}"\r
+OutFile "${PRODUCT}_${VERSION}.exe"\r
+\r
+;Folder selection page\r
+InstallDir "$PROGRAMFILES\${PRODUCT}"\r
\r
+;Remember install folder\r
+InstallDirRegKey HKCU "Software\${PRODUCT}" ""\r
+\r
+;\r
+; Uncomment for smaller file size\r
+;\r
+SetCompressor "lzma"\r
+;\r
+; Uncomment for quick built time\r
+;\r
+;SetCompress "off"\r
+\r
+CompletedText "Installation completed. Thank you for choosing ${PRODUCT}"\r
+\r
+BrandingText "${PRODUCT}"\r
+\r
+;--------------------------------\r
+;Modern UI Configuration\r
+\r
+!define MUI_ABORTWARNING\r
+!define MUI_HEADERIMAGE\r
+!define MUI_HEADERIMAGE_BITMAP "heading.bmp"\r
+\r
+;--------------------------------\r
+;Pages\r
+\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT "I accept"\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE "I decline"\r
+;   !define MUI_FINISHPAGE_RUN "$INSTDIR\swarmplayer.exe"\r
+\r
+!insertmacro MUI_PAGE_LICENSE "binary-LICENSE.txt"\r
+!insertmacro MUI_PAGE_COMPONENTS\r
+!insertmacro MUI_PAGE_DIRECTORY\r
+!insertmacro MUI_PAGE_INSTFILES\r
+!insertmacro MUI_PAGE_FINISH\r
+\r
+!insertmacro MUI_UNPAGE_CONFIRM\r
+!insertmacro MUI_UNPAGE_INSTFILES\r
+\r
+;!insertmacro MUI_DEFAULT UMUI_HEADERIMAGE_BMP heading.bmp"\r
+\r
+;--------------------------------\r
+;Languages\r
+\r
+!insertmacro MUI_LANGUAGE "English"\r
\r
+;--------------------------------\r
+;Language Strings\r
+\r
+;Description\r
+LangString DESC_SecMain ${LANG_ENGLISH} "Install ${PRODUCT}"\r
+LangString DESC_SecDesk ${LANG_ENGLISH} "Create Desktop Shortcuts"\r
+LangString DESC_SecStart ${LANG_ENGLISH} "Create Start Menu Shortcuts"\r
+LangString DESC_SecDefaultTStream ${LANG_ENGLISH} "Associate .tstream files with ${PRODUCT}"\r
+LangString DESC_SecDefaultTorrent ${LANG_ENGLISH} "Associate .torrent files with ${PRODUCT}"\r
+\r
+;--------------------------------\r
+;Installer Sections\r
+\r
+Section "!Main EXE" SecMain\r
+ SectionIn RO\r
+ SetOutPath "$INSTDIR"\r
+ File *.txt\r
+ File swarmplayer.exe.manifest\r
+ File swarmplayer.exe\r
+ File ffmpeg.exe\r
+ File /r vlc\r
+ File *.bat\r
+ Delete "$INSTDIR\*.pyd"\r
+ File *.pyd\r
+ Delete "$INSTDIR\python*.dll"\r
+ Delete "$INSTDIR\wx*.dll"\r
+ File *.dll\r
+ Delete "$INSTDIR\*.zip"\r
+ File *.zip\r
+ CreateDirectory "$INSTDIR\${LIBRARYNAME}"\r
+ CreateDirectory "$INSTDIR\${LIBRARYNAME}\Core"\r
+ SetOutPath "$INSTDIR\${LIBRARYNAME}\Core"\r
+ File ${LIBRARYNAME}\Core\*.txt\r
+ CreateDirectory "$INSTDIR\${LIBRARYNAME}\Core\Statistics"\r
+ SetOutPath "$INSTDIR\${LIBRARYNAME}\Core\Statistics"\r
+ File ${LIBRARYNAME}\Core\Statistics\*.txt\r
+ File ${LIBRARYNAME}\Core\Statistics\*.sql\r
+ CreateDirectory "$INSTDIR\${LIBRARYNAME}\Images"\r
+ SetOutPath "$INSTDIR\${LIBRARYNAME}\Images"\r
+ File ${LIBRARYNAME}\Images\*.*\r
+ CreateDirectory "$INSTDIR\${LIBRARYNAME}\Video"\r
+ CreateDirectory "$INSTDIR\${LIBRARYNAME}\Video\Images"\r
+ SetOutPath "$INSTDIR\${LIBRARYNAME}\Video\Images"\r
+ File ${LIBRARYNAME}\Video\Images\*.*\r
+ CreateDirectory "$INSTDIR\${LIBRARYNAME}\Lang"\r
+ SetOutPath "$INSTDIR\${LIBRARYNAME}\Lang"\r
+ IfFileExists user.lang userlang\r
+ File ${LIBRARYNAME}\Lang\*.*\r
+ userlang:\r
+ File /x user.lang ${LIBRARYNAME}\Lang\*.*\r
+ SetOutPath "$INSTDIR"\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "DisplayName" "${PRODUCT} (remove only)"\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "UninstallString" "$INSTDIR\Uninstall.exe"\r
+\r
+; Now writing to KHEY_LOCAL_MACHINE only -- remove references to uninstall from current user\r
+ DeleteRegKey HKEY_CURRENT_USER "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+; Remove old error log if present\r
+ Delete "$INSTDIR\swarmplayer.exe.log"\r
+\r
+ WriteUninstaller "$INSTDIR\Uninstall.exe"\r
+\r
+ ; Add an application to the firewall exception list - All Networks - All IP Version - Enabled\r
+ SimpleFC::AddApplication "Tribler" "$INSTDIR\${PRODUCT}.exe" 0 2 "" 1\r
+ ; Pop $0 ; return error(1)/success(0)\r
+\r
+SectionEnd\r
+\r
+\r
+Section "Desktop Icons" SecDesk\r
+   CreateShortCut "$DESKTOP\${PRODUCT}.lnk" "$INSTDIR\${PRODUCT}.exe" ""\r
+SectionEnd\r
+\r
+\r
+Section "Startmenu Icons" SecStart\r
+   CreateDirectory "$SMPROGRAMS\${PRODUCT}"\r
+   CreateShortCut "$SMPROGRAMS\${PRODUCT}\Uninstall.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0\r
+   CreateShortCut "$SMPROGRAMS\${PRODUCT}\${PRODUCT}.lnk" "$INSTDIR\${PRODUCT}.exe" "" "$INSTDIR\${PRODUCT}.exe" 0\r
+SectionEnd\r
+\r
+\r
+Section "Make Default For .tstream" SecDefaultTStream\r
+   WriteRegStr HKCR .tstream "" tstream\r
+   WriteRegStr HKCR .tstream "Content Type" application/x-tribler-stream\r
+   WriteRegStr HKCR "MIME\Database\Content Type\application/x-tribler-stream" Extension .tstream\r
+   WriteRegStr HKCR tstream "" "TSTREAM File"\r
+   WriteRegBin HKCR tstream EditFlags 00000100\r
+   WriteRegStr HKCR "tstream\shell" "" open\r
+   WriteRegStr HKCR "tstream\shell\open\command" "" '"$INSTDIR\${PRODUCT}.exe" "%1"'\r
+   WriteRegStr HKCR "tstream\DefaultIcon" "" "$INSTDIR\${LIBRARYNAME}\Images\SwarmPlayerIcon.ico"\r
+SectionEnd\r
+\r
+\r
+Section /o "Make Default For .torrent" SecDefaultTorrent\r
+   ; Delete ddeexec key if it exists\r
+   DeleteRegKey HKCR "bittorrent\shell\open\ddeexec"\r
+   WriteRegStr HKCR .torrent "" bittorrent\r
+   WriteRegStr HKCR .torrent "Content Type" application/x-bittorrent\r
+   WriteRegStr HKCR "MIME\Database\Content Type\application/x-bittorrent" Extension .torrent\r
+   WriteRegStr HKCR bittorrent "" "TORRENT File"\r
+   WriteRegBin HKCR bittorrent EditFlags 00000100\r
+   WriteRegStr HKCR "bittorrent\shell" "" open\r
+   WriteRegStr HKCR "bittorrent\shell\open\command" "" '"$INSTDIR\${PRODUCT}.exe" "%1"'\r
+   WriteRegStr HKCR "bittorrent\DefaultIcon" "" "$INSTDIR\${LIBRARYNAME}\Images\torrenticon.ico"\r
+SectionEnd\r
+\r
+\r
+\r
+;--------------------------------\r
+;Descriptions\r
+\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecMain} $(DESC_SecMain)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecDesk} $(DESC_SecDesk)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecStart} $(DESC_SecStart)\r
+;!insertmacro MUI_DESCRIPTION_TEXT ${SecLang} $(DESC_SecLang)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecDefaultTStream} $(DESC_SecDefaultTStream)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecDefaultTorrent} $(DESC_SecDefaultTorrent)\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_END\r
+\r
+;--------------------------------\r
+;Uninstaller Section\r
+\r
+Section "Uninstall"\r
+\r
+ RMDir /r "$INSTDIR"\r
+\r
+ Delete "$DESKTOP\${PRODUCT}.lnk"\r
+ Delete "$SMPROGRAMS\${PRODUCT}\*.*"\r
+ RmDir  "$SMPROGRAMS\${PRODUCT}"\r
+\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+\r
+ ; Remove an application from the firewall exception list\r
+ SimpleFC::RemoveApplication "$INSTDIR\${PRODUCT}.exe"\r
+ ; Pop $0 ; return error(1)/success(0)\r
+\r
+SectionEnd\r
+\r
+\r
+;--------------------------------\r
+;Functions Section\r
+\r
+Function .onInit\r
+  System::Call 'kernel32::CreateMutexA(i 0, i 0, t "SwarmPlayer") i .r1 ?e' \r
+\r
+  Pop $R0 \r
+\r
+  StrCmp $R0 0 +3 \r
+\r
+  MessageBox MB_OK "The installer is already running."\r
+\r
+  Abort \r
+FunctionEnd\r
diff --git a/instrumentation/next-share/BaseLib/Player/EmbeddedPlayer4Frame.py b/instrumentation/next-share/BaseLib/Player/EmbeddedPlayer4Frame.py
new file mode 100644 (file)
index 0000000..5077179
--- /dev/null
@@ -0,0 +1,494 @@
+# Written by Fabian van der Werf and Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# EmbeddedPlayerPanel is the panel used in Tribler 5.0\r
+# EmbeddedPlayer4FramePanel is the panel used in the SwarmPlayer / 4.5\r
+# \r
+\r
+import wx\r
+import sys\r
+\r
+import os, shutil\r
+import time\r
+import random\r
+from time import sleep\r
+from tempfile import mkstemp\r
+from threading import currentThread,Event, Thread\r
+from traceback import print_stack,print_exc\r
+from textwrap import wrap\r
+\r
+from BaseLib.__init__ import LIBRARYNAME\r
+from BaseLib.Video.defs import *\r
+from BaseLib.Video.Progress import ProgressSlider, VolumeSlider\r
+from BaseLib.Video.Buttons import PlayerSwitchButton, PlayerButton\r
+from BaseLib.Video.VideoFrame import DelayTimer\r
+\r
+DEBUG = False\r
+\r
+class EmbeddedPlayer4FramePanel(wx.Panel):\r
+    """\r
+    The Embedded Player consists of a VLCLogoWindow and the media controls such \r
+    as Play/Pause buttons and Volume Control.\r
+    """\r
+\r
+    def __init__(self, parent, utility, vlcwrap, logopath):\r
+        wx.Panel.__init__(self, parent, -1)\r
+        self.utility = utility\r
+\r
+        self.estduration = None\r
+\r
+        #self.SetBackgroundColour(wx.WHITE)\r
+        self.SetBackgroundColour(wx.BLACK)\r
+        mainbox = wx.BoxSizer(wx.VERTICAL)\r
+\r
+\r
+        if vlcwrap is None:\r
+            size = (320,64)\r
+        else:\r
+            size = (320,240) \r
+        \r
+        self.vlcwin = VLCLogoWindow(self,size,vlcwrap,logopath, animate = False)\r
+        self.vlcwrap = vlcwrap\r
+\r
+        # Arno: until we figure out how to show in-playback prebuffering info\r
+        self.statuslabel = wx.StaticText(self, -1, 'Loading player...' )\r
+        self.statuslabel.SetForegroundColour(wx.WHITE)\r
+\r
+        if vlcwrap is not None:\r
+            ctrlsizer = wx.BoxSizer(wx.HORIZONTAL)        \r
+            #self.slider = wx.Slider(self, -1)\r
+            self.slider = ProgressSlider(self, self.utility, imgprefix='4frame')\r
+            self.slider.SetRange(0,1)\r
+            self.slider.SetValue(0)\r
+            self.oldvolume = None\r
+            \r
+                            \r
+            self.ppbtn = PlayerSwitchButton(self, os.path.join(self.utility.getPath(), LIBRARYNAME, 'Images'), 'pause', 'play')\r
+            self.ppbtn.Bind(wx.EVT_LEFT_UP, self.PlayPause)\r
+    \r
+            self.volumebox = wx.BoxSizer(wx.HORIZONTAL)\r
+            self.volumeicon = PlayerSwitchButton(self, os.path.join(self.utility.getPath(), LIBRARYNAME, 'Images'), 'volume', 'mute')   \r
+            self.volumeicon.Bind(wx.EVT_LEFT_UP, self.Mute)\r
+            self.volume = VolumeSlider(self, self.utility, imgprefix='4frame')\r
+            self.volume.SetRange(0, 100)\r
+            self.volumebox.Add(self.volumeicon, 0, wx.ALIGN_CENTER_VERTICAL)\r
+            self.volumebox.Add(self.volume, 0, wx.ALIGN_CENTER_VERTICAL, 0)\r
+    \r
+            self.fsbtn = PlayerButton(self, os.path.join(self.utility.getPath(), LIBRARYNAME, 'Images'), 'fullScreen')\r
+            self.fsbtn.Bind(wx.EVT_LEFT_UP, self.FullScreen)\r
+    \r
+            self.save_button = PlayerSwitchButton(self, os.path.join(self.utility.getPath(), LIBRARYNAME, 'Images'), 'saveDisabled', 'save')   \r
+            self.save_button.Bind(wx.EVT_LEFT_UP, self.Save)\r
+            self.save_callback = lambda:None\r
+            \r
+            ctrlsizer.Add(self.ppbtn, 0, wx.ALIGN_CENTER_VERTICAL)\r
+            ctrlsizer.Add(self.slider, 1, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)\r
+            ctrlsizer.Add(self.volumebox, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)\r
+            ctrlsizer.Add(self.fsbtn, 0, wx.ALIGN_CENTER_VERTICAL)\r
+            ctrlsizer.Add(self.save_button, 0, wx.ALIGN_CENTER_VERTICAL)\r
+        \r
+        mainbox.Add(self.vlcwin, 1, wx.EXPAND, 1)\r
+        mainbox.Add(self.statuslabel, 0, wx.EXPAND|wx.LEFT|wx.RIGHT, 30)\r
+        if vlcwrap is not None:\r
+            mainbox.Add(ctrlsizer, 0, wx.ALIGN_BOTTOM|wx.EXPAND, 1)\r
+        self.SetSizerAndFit(mainbox)\r
+        \r
+        self.playtimer = None\r
+        self.update = False\r
+        self.timer = None\r
+        \r
+    def Load(self,url,streaminfo = None):\r
+        if DEBUG:\r
+            print >>sys.stderr,"embedplay: Load:",url,streaminfo,currentThread().getName()\r
+        # Arno: hack: disable dragging when not playing from file.\r
+        #if url is None or url.startswith('http:'):\r
+        #if url is not None and url.startswith('http:'):\r
+        #   self.slider.DisableDragging()\r
+        #else:\r
+        self.slider.EnableDragging()\r
+        self.SetPlayerStatus('')\r
+        if streaminfo is not None:\r
+            self.estduration = streaminfo.get('estduration',None)\r
+\r
+        # Arno, 2008-10-17: If we don't do this VLC gets the wrong playlist somehow\r
+        self.vlcwrap.stop()\r
+        self.vlcwrap.playlist_clear()\r
+             \r
+        self.vlcwrap.load(url,streaminfo=streaminfo)\r
+        \r
+        # Enable update of progress slider\r
+        self.update = True\r
+        wx.CallAfter(self.slider.SetValue,0)\r
+        if self.timer is None:\r
+            self.timer = wx.Timer(self)\r
+            self.Bind(wx.EVT_TIMER, self.UpdateSlider)\r
+        self.timer.Start(200)\r
+        \r
+    def StartPlay(self):\r
+        """ Start playing the new item after VLC has stopped playing the old\r
+        one\r
+        """\r
+        if DEBUG:\r
+            print >>sys.stderr,"embedplay: PlayWhenStopped"\r
+        self.playtimer = DelayTimer(self)\r
+\r
+    def Play(self, evt=None):\r
+        if DEBUG:\r
+            print >>sys.stderr,"embedplay: Play pressed"\r
+\r
+        if self.GetState() != MEDIASTATE_PLAYING:\r
+            self.ppbtn.setToggled(False)\r
+            self.vlcwrap.start()\r
+\r
+    def Pause(self, evt=None):\r
+        """ Toggle between playing and pausing of current item """\r
+        if DEBUG:\r
+            print >>sys.stderr,"embedplay: Pause pressed"\r
+        \r
+        if self.GetState() == MEDIASTATE_PLAYING:\r
+            self.ppbtn.setToggled(True)\r
+            self.vlcwrap.pause()\r
+\r
+\r
+    def PlayPause(self, evt=None):\r
+        """ Toggle between playing and pausing of current item """\r
+        if DEBUG:\r
+            print >>sys.stderr,"embedplay: PlayPause pressed"\r
+        \r
+        if self.GetState() == MEDIASTATE_PLAYING:\r
+            self.ppbtn.setToggled(True)\r
+            self.vlcwrap.pause()\r
+\r
+        else:\r
+            self.ppbtn.setToggled(False)\r
+            self.vlcwrap.resume()\r
+\r
+\r
+    def Seek(self, evt=None):\r
+        if DEBUG:\r
+            print >>sys.stderr,"embedplay: Seek"\r
+        \r
+        oldsliderpos = self.slider.GetValue()\r
+        print >>sys.stderr, 'embedplay: Seek: GetValue returned,',oldsliderpos\r
+        pos = int(oldsliderpos * 1000.0)\r
+        print >>sys.stderr, 'embedplay: Seek: newpos',pos\r
+        \r
+        try:\r
+            if self.GetState() == MEDIASTATE_STOPPED:\r
+                self.vlcwrap.start(pos)\r
+            else:\r
+                self.vlcwrap.set_media_position(pos)\r
+        except:\r
+            print_exc()\r
+            if DEBUG:\r
+                print >> sys.stderr, 'embedplay: could not seek'\r
+            self.slider.SetValue(oldsliderpos)\r
+        self.update = True\r
+        \r
+\r
+    def FullScreen(self,evt=None):\r
+        self.vlcwrap.set_fullscreen(True)\r
+\r
+    def Mute(self, evt = None):\r
+        if self.volumeicon.isToggled():\r
+            if self.oldvolume is not None:\r
+                self.vlcwrap.sound_set_volume(self.oldvolume)\r
+            self.volumeicon.setToggled(False)\r
+        else:\r
+            self.oldvolume = self.vlcwrap.sound_get_volume()\r
+            self.vlcwrap.sound_set_volume(0.0) # mute sound\r
+            self.volumeicon.setToggled(True)\r
+        \r
+    def Save(self, evt = None):\r
+        # save media content in different directory\r
+        if self.save_button.isToggled():\r
+            self.save_callback()\r
+            \r
+    \r
+    def SetVolume(self, evt = None):\r
+        if DEBUG:\r
+            print >> sys.stderr, "embedplay: SetVolume:",self.volume.GetValue()\r
+        self.vlcwrap.sound_set_volume(float(self.volume.GetValue()) / 100)\r
+        # reset mute\r
+        if self.volumeicon.isToggled():\r
+            self.volumeicon.setToggled(False)\r
+\r
+    def Stop(self):\r
+        if DEBUG:\r
+            print >> sys.stderr, "embedplay: Stop"\r
+        self.vlcwrap.stop()\r
+        self.ppbtn.SetLabel(self.utility.lang.get('playprompt'))\r
+        self.slider.SetValue(0)\r
+        if self.timer is not None:\r
+            self.timer.Stop()\r
+\r
+    def GetState(self):\r
+        """ Returns the state of VLC as summarized by Fabian: \r
+        MEDIASTATE_PLAYING, MEDIASTATE_PAUSED, MEDIASTATE_STOPPED """\r
+        if DEBUG:\r
+            print >>sys.stderr,"embedplay: GetState"\r
+            \r
+        status = self.vlcwrap.get_stream_information_status()\r
+        \r
+        import vlc\r
+        if status == vlc.PlayingStatus:\r
+            return MEDIASTATE_PLAYING\r
+        elif status == vlc.PauseStatus:\r
+            return MEDIASTATE_PAUSED\r
+        else:\r
+            return MEDIASTATE_STOPPED\r
+\r
+\r
+    def EnableSaveButton(self, b, callback):\r
+        self.save_button.setToggled(b)\r
+        if b:\r
+            self.save_callback = callback\r
+        else:\r
+            self.save_callback = lambda:None\r
+\r
+    def Reset(self):\r
+        self.DisableInput()\r
+        self.Stop()\r
+        self.UpdateProgressSlider([False])\r
+\r
+    #\r
+    # Control on-screen information\r
+    #\r
+    def UpdateStatus(self,playerstatus,pieces_complete):\r
+        self.SetPlayerStatus(playerstatus)\r
+        if self.vlcwrap is not None:\r
+            self.UpdateProgressSlider(pieces_complete)\r
+    \r
+    def SetPlayerStatus(self,s):\r
+        self.statuslabel.SetLabel(s)\r
+\r
+    def SetContentName(self,s):\r
+        self.vlcwin.set_content_name(s)\r
+\r
+    def SetContentImage(self,wximg):\r
+        self.vlcwin.set_content_image(wximg)\r
+\r
+\r
+    #\r
+    # Internal methods\r
+    #\r
+    def EnableInput(self):\r
+        self.ppbtn.Enable(True)\r
+        self.slider.Enable(True)\r
+        self.fsbtn.Enable(True)\r
+\r
+    def UpdateProgressSlider(self, pieces_complete):\r
+        self.slider.setBufferFromPieces(pieces_complete)\r
+        self.slider.Refresh()\r
+        \r
+    def DisableInput(self):\r
+        return # Not currently used\r
+        \r
+        self.ppbtn.Disable()\r
+        self.slider.Disable()\r
+        self.fsbtn.Disable()\r
+\r
+    def UpdateSlider(self, evt):\r
+        if not self.volumeicon.isToggled():\r
+            self.volume.SetValue(int(self.vlcwrap.sound_get_volume() * 100))\r
+\r
+        if self.update and self.GetState() != MEDIASTATE_STOPPED:\r
+            len = self.vlcwrap.get_stream_information_length()\r
+            if len == -1 or len == 0:\r
+                if self.estduration is None:\r
+                    return\r
+                else:\r
+                    len = int(self.estduration)\r
+            else:\r
+                len /= 1000\r
+\r
+            cur = self.vlcwrap.get_media_position() / 1000\r
+\r
+            self.slider.SetRange(0, len)\r
+            self.slider.SetValue(cur)\r
+            self.slider.SetTimePosition(float(cur), len)\r
+\r
+    def StopSliderUpdate(self, evt):\r
+        self.update = False\r
+\r
+\r
+    def TellLVCWrapWindow4Playback(self):\r
+        if self.vlcwrap is not None:\r
+            self.vlcwin.tell_vclwrap_window_for_playback()\r
+\r
+    def ShowLoading(self):\r
+        pass\r
+    \r
+    \r
+\r
+\r
+class VLCLogoWindow(wx.Panel):\r
+    """ A wx.Window to be passed to the vlc.MediaControl to draw the video\r
+    in (normally). In addition, the class can display a logo, a thumbnail and a \r
+    "Loading: bla.video" message when VLC is not playing.\r
+    """\r
+    \r
+    def __init__(self, parent, size, vlcwrap, logopath, fg=wx.WHITE, bg=wx.BLACK, animate = False, position = (300,300)):\r
+        """ Create the logo/video panel.\r
+\r
+        parent   -- parent wx window\r
+        size     -- initial and minimum size of the panel\r
+        vlcwrap  -- VLC wrapper to attach for playback, or None\r
+        logopath -- path to a still logo image, or to an animated GIF when\r
+                    animate is True; may be None\r
+        fg, position -- NOTE(review): accepted but never used in this body;\r
+                    kept for callers\r
+        """\r
+        wx.Panel.__init__(self, parent, -1, size=size)\r
+        self.parent = parent ##\r
+    \r
+        self.SetMinSize(size)\r
+        self.SetBackgroundColour(bg)\r
+        self.bg = bg\r
+        self.vlcwrap = vlcwrap\r
+        self.animation_running = False\r
+       \r
+        # Note: keyDown is bound to key-UP events (the handler name is misleading).\r
+        self.Bind(wx.EVT_KEY_UP, self.keyDown)\r
+\r
+        print >>sys.stderr,"VLCLogoWindow: logopath is",logopath\r
+\r
+        if logopath is not None and not animate:\r
+            self.logo = wx.BitmapFromImage(wx.Image(logopath),-1)\r
+        else:\r
+            self.logo = None\r
+        self.contentname = None\r
+        self.contentbm = None\r
+        self.Bind(wx.EVT_PAINT, self.OnPaint)\r
+        # On Mac a sizer-based layout is used; elsewhere widgets get fixed positions.\r
+        if sys.platform == 'darwin':\r
+            self.hsizermain = wx.BoxSizer(wx.HORIZONTAL)\r
+            self.vsizer = wx.BoxSizer(wx.VERTICAL)\r
+            self.vsizer.Add((0,70),0,0,0)\r
+        if animate:\r
+            if sys.platform == 'darwin':\r
+                self.agVideo = wx.animate.GIFAnimationCtrl(self, 1, logopath)\r
+            else:\r
+                self.agVideo = wx.animate.GIFAnimationCtrl(self, 1, logopath, pos = (110,70))\r
+            self.agVideo.Hide()\r
+            if sys.platform == 'darwin':\r
+                self.vsizer.Add(self.agVideo,0,wx.ALIGN_CENTRE_HORIZONTAL,0)\r
+                self.vsizer.Add((0,10),0,0,0)\r
+        else:\r
+            self.agVideo = None\r
+\r
+        #self.playbackText = wx.StaticText(self,-1,"Leave Tribler running\n for faster playback",wx.Point(30,140))\r
+        #self.playbackText.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8"))\r
+        #self.playbackText.SetForegroundColour(wx.Colour(255,51,00))\r
+        if sys.platform == 'darwin':\r
+            self.loadingtext = wx.StaticText(self,-1,'')\r
+        else:\r
+            self.loadingtext = wx.StaticText(self,-1,'',wx.Point(0,200),wx.Size(320,30),style=wx.ALIGN_CENTRE)\r
+        if sys.platform == 'darwin':\r
+            self.loadingtext.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8"))\r
+        else:\r
+            self.loadingtext.SetFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD, 0, "UTF-8"))\r
+        self.loadingtext.SetForegroundColour(wx.WHITE)\r
+\r
+        if sys.platform == 'darwin':\r
+            self.vsizer.Add(self.loadingtext,1,wx.ALIGN_CENTRE_HORIZONTAL,0)\r
+            self.hsizermain.Add(self.vsizer,1,wx.ALIGN_CENTRE_HORIZONTAL,0)\r
+            self.SetSizer(self.hsizermain)\r
+            self.SetAutoLayout(1)\r
+            self.Layout()\r
+            self.Refresh()\r
+        if self.vlcwrap is not None:\r
+            # Defer until the window is realized so GetHandle() returns a valid\r
+            # XID (see tell_vclwrap_window_for_playback).\r
+            wx.CallAfter(self.tell_vclwrap_window_for_playback)\r
+        \r
+    def tell_vclwrap_window_for_playback(self):\r
+        """ This method must be called after the VLCLogoWindow has been\r
+        realized, otherwise the self.GetHandle() call that vlcwrap.set_window()\r
+        does, doesn't return a correct XID.\r
+        """\r
+        # NOTE(review): method name transposes "vlc" into "vcl"; callers use it as-is.\r
+        self.vlcwrap.set_window(self)\r
+\r
+    def get_vlcwrap(self):\r
+        """ Return the VLC wrapper passed at construction (may be None). """\r
+        return self.vlcwrap\r
+\r
+    def set_content_name(self,s):\r
+        """ Set the content name that OnPaint draws below the logo, and repaint. """\r
+        if DEBUG:\r
+            print >>sys.stderr,"VLCWin: set_content_name"\r
+        self.contentname = s\r
+        self.Refresh()\r
+    \r
+    def set_content_image(self,wximg):\r
+        """ Set (or clear, with None) the thumbnail that OnPaint draws.\r
+        Note: unlike set_content_name, this does not trigger a Refresh().\r
+        """\r
+        if DEBUG:\r
+            print >>sys.stderr,"VLCWin: set_content_image"\r
+        if wximg is not None:\r
+            self.contentbm = wx.BitmapFromImage(wximg,-1)\r
+        else:\r
+            self.contentbm = None\r
+\r
+    def is_animation_running(self):\r
+        """ Return True while the loading GIF animation is playing. """\r
+        return self.animation_running\r
+\r
+    def setloadingtext(self, text):\r
+        """ Set the text of the "loading" label and repaint. """\r
+        self.loadingtext.SetLabel(text)\r
+        self.Refresh()\r
+\r
+    def show_loading(self):\r
+        """ Show and start the loading animation; no-op when not animated. """\r
+        if self.agVideo:\r
+            self.agVideo.Show()\r
+            self.agVideo.Play()\r
+            self.animation_running = True\r
+            self.Refresh()\r
+\r
+\r
+        \r
+        \r
+    def stop_animation(self):\r
+        """ Stop and hide the loading animation; no-op when not animated. """\r
+        if self.agVideo:\r
+            self.agVideo.Stop()\r
+            self.agVideo.Hide()\r
+            self.animation_running = False\r
+            self.Refresh()\r
+\r
+    def OnPaint(self,evt):\r
+        """ Paint the background, the centered logo, the content name and\r
+        the content thumbnail (when set).\r
+        """\r
+        dc = wx.PaintDC(self)\r
+        dc.Clear()\r
+        dc.BeginDrawing()        \r
+\r
+        x,y,maxw,maxh = self.GetClientRect()\r
+        # Centre of the client area; shifted below to the logo's top-left corner.\r
+        halfx = (maxw-x)/2\r
+        halfy = (maxh-y)/2\r
+        if self.logo is None:\r
+            halfx = 10\r
+            halfy = 10\r
+            lheight = 20\r
+        else:\r
+            halfx -= self.logo.GetWidth()/2\r
+            halfy -= self.logo.GetHeight()/2\r
+            lheight = self.logo.GetHeight()\r
+\r
+        dc.SetPen(wx.Pen(self.bg,0))\r
+        dc.SetBrush(wx.Brush(self.bg))\r
+        if sys.platform == 'linux2':\r
+            # Explicit background fill is only done on Linux here.\r
+            dc.DrawRectangle(x,y,maxw,maxh)\r
+        if self.logo is not None:\r
+            dc.DrawBitmap(self.logo,halfx,halfy,True)\r
+        #logox = max(0,maxw-self.logo.GetWidth()-30)\r
+        #dc.DrawBitmap(self.logo,logox,20,True)\r
+\r
+        dc.SetTextForeground(wx.WHITE)\r
+        dc.SetTextBackground(wx.BLACK)\r
+        \r
+        # Text goes 120px below the logo; wrap back to the top if off-screen.\r
+        lineoffset = 120\r
+        txty = halfy+lheight+lineoffset\r
+        if txty > maxh:\r
+            txty = 0\r
+        if self.contentname is not None:\r
+            txt = self.contentname\r
+            dc.DrawText(txt,30,txty)\r
+            lineoffset += 30\r
+\r
+        #txt = self.getStatus()\r
+        #dc.DrawText(txt,30,halfy+self.logo.GetHeight()+lineoffset)\r
+        \r
+        if self.contentbm is not None:\r
+            # Thumbnail is placed just above the content name, clamped to y >= 20.\r
+            bmy = max(20,txty-20-self.contentbm.GetHeight())\r
+            dc.DrawBitmap(self.contentbm,30,bmy,True)\r
+        \r
+        dc.EndDrawing()\r
+        if evt is not None:\r
+            evt.Skip(True)\r
+\r
+\r
+    def keyDown(self, event):\r
+        # Bound to EVT_KEY_UP in __init__ despite the name.\r
+        # NOTE(review): 'Level' is never used, and StopPropagation() followed\r
+        # by ResumePropagation(10) effectively re-enables propagation up to\r
+        # 10 parent levels -- confirm this is intentional.\r
+        Level = event.StopPropagation()\r
+        event.ResumePropagation(10)\r
+\r
+        event.Skip()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Player/PlayerVideoFrame.py b/instrumentation/next-share/BaseLib/Player/PlayerVideoFrame.py
new file mode 100644 (file)
index 0000000..3ecc453
--- /dev/null
@@ -0,0 +1,96 @@
+# Written by Fabian van der Werf and Arno Bakker\r
+# see LICENSE.txt for license information\r
+\r
+import wx\r
+import sys\r
+\r
+from BaseLib.Video.VideoFrame import VideoBaseFrame\r
+from BaseLib.Player.EmbeddedPlayer4Frame import EmbeddedPlayer4FramePanel
+
+# Module-wide flag enabling stderr tracing in show_videoframe/hide_videoframe.
+DEBUG = False
+
+\r
+\r
+\r
+class VideoFrame(wx.Frame,VideoBaseFrame):\r
+    """ Provides a wx.Frame around an EmbeddedPlayerPanel so the embedded player\r
+    is shown as a separate window. The Embedded Player consists of a VLCLogoWindow\r
+    and the media controls such as Play/Pause buttons and Volume Control.\r
+    """\r
+    \r
+    def __init__(self,parent,utility,title,iconpath,vlcwrap,logopath): ## rm utility\r
+        """ Create the (initially hidden) player frame.\r
+        title may be None, in which case a localized default is used.\r
+        vlcwrap None means no VLC available; a smaller frame is created.\r
+        """\r
+        self.parent = parent    \r
+        self.utility = utility ## parent.utility\r
+        if title is None:\r
+            title = self.utility.lang.get('tb_video_short')\r
+        \r
+        if vlcwrap is None:\r
+            size = (800,150)\r
+        else:\r
+            if sys.platform == 'darwin':\r
+                size = (800,520)\r
+            else:\r
+                # NOTE(review): comment below says 500 but both branches use 520.\r
+                size = (800,520) # Use 16:9 aspect ratio: 500 = (800/16) * 9 + 50 for controls\r
+        wx.Frame.__init__(self, None, -1, title, size=size) \r
+        self.Centre()\r
+        \r
+        self.create_videopanel(vlcwrap,logopath)\r
+\r
+        # Set icons for Frame\r
+        self.icons = wx.IconBundle()\r
+        self.icons.AddIconFromFile(iconpath,wx.BITMAP_TYPE_ICO)\r
+        self.SetIcons(self.icons)\r
+\r
+        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\r
+\r
+    def create_videopanel(self,vlcwrap, logopath):\r
+        """ Build the embedded player panel; the frame stays hidden until\r
+        show_videoframe() is called.\r
+        """\r
+        self.showingframe = False\r
+        self.videopanel = EmbeddedPlayer4FramePanel(self, self.utility, vlcwrap, logopath)\r
+        self.Hide()\r
+\r
+    def show_videoframe(self):\r
+        """ Show and raise the frame, then hand the window to the VLC wrapper. """\r
+        if DEBUG:\r
+            print >>sys.stderr,"videoframe: Swap IN videopanel"\r
+            \r
+        if self.videopanel is not None:\r
+            if not self.showingframe:\r
+                self.showingframe = True\r
+                self.Show()\r
+                \r
+            self.Raise()\r
+            self.SetFocus()\r
+                       \r
+            # H4x0r: We need to tell the VLC wrapper a XID of a\r
+            # window to paint in. Apparently on win32 the XID is only\r
+            # known when the window is shown. We give it the command\r
+            # to show here, so shortly after it should be shown.\r
+            #\r
+            wx.CallAfter(self.videopanel.TellLVCWrapWindow4Playback)\r
+\r
+    \r
+    def hide_videoframe(self):\r
+        """ Reset the player panel and hide (not destroy) the frame. """\r
+        if DEBUG:\r
+            print >>sys.stderr,"videoframe: Swap OUT videopanel"\r
+        if self.videopanel is not None:\r
+            self.videopanel.Reset()\r
+            if self.showingframe:\r
+                self.showingframe = False\r
+                self.Hide()\r
+\r
+    def get_videopanel(self):\r
+        """ Return the embedded player panel (None after delete_videopanel). """\r
+        return self.videopanel\r
+\r
+    def delete_videopanel(self):\r
+        """ Drop the reference to the player panel. """\r
+        self.videopanel = None\r
+\r
+    def get_window(self):\r
+        """ VideoBaseFrame interface: the wx window to use. """\r
+        return self\r
+\r
+\r
+    def OnCloseWindow(self, event = None):\r
+        # Closing only hides the frame (via hide_videoframe); it is not destroyed.\r
+        if sys.platform == 'darwin':\r
+            #self.videopanel.Stop()\r
+            self.videopanel.Reset()\r
+                \r
+        self.hide_videoframe()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Player/Reporter.py b/instrumentation/next-share/BaseLib/Player/Reporter.py
new file mode 100644 (file)
index 0000000..39cbef9
--- /dev/null
@@ -0,0 +1,169 @@
+# Written by Jan David Mol
+# see LICENSE.txt for license information
+
+# Collects statistics about a download/VOD session, and sends it
+# home on a regular interval.
+
+import sys,urllib,zlib,pickle
+import thread
+import threading
+from random import shuffle
+from time import time
+from traceback import print_exc
+from BaseLib.Core.Session import Session
+
+# Master switch: when False, Reporter.phone_home() never contacts the server.
+PHONEHOME = True
+DEBUG = False
+
+class Reporter:
+    """ Old Reporter class used for July 2008 trial. See below for new """
+    
+    def __init__( self, sconfig ):
+        # SessionStartupConfig; only get_listen_port() is used in report_stat().
+        self.sconfig = sconfig
+
+        # time of initialisation
+        self.epoch = time()
+
+        # mapping from peer ids to (shorter) numbers
+        self.peernr = {}
+
+        # remember static peer information, such as IP
+        # self.peerinfo[id] = info string
+        self.peerinfo = {}
+
+        # remember which peers we were connected to in the last report
+        # self.connected[id] = timestamp when last seen
+        self.connected = {}
+
+        # collected reports
+        self.buffered_reports = []
+
+        # whether to phone home to send collected data
+        self.do_reporting = True
+
+        # send data at this interval (seconds)
+        self.report_interval = 30
+
+        # send first report immediately
+        self.last_report_ts = 0
+
+        # record when we started (used as a session id)
+        # NOTE(review): duplicate of the self.epoch assignment above; harmless.
+        self.epoch = time()
+
+    def phone_home( self, report ):
+        """ Report status to a centralised server. """
+
+        #if DEBUG: print >>sys.stderr,"\nreport: ".join(reports)
+
+        # do not actually send if reporting is disabled
+        if not self.do_reporting or not PHONEHOME:
+            return
+
+        # add reports to buffer
+        self.buffered_reports.append( report )
+
+        # only process at regular intervals
+        now = time()
+        if now - self.last_report_ts < self.report_interval:
+            return
+        self.last_report_ts = now
+
+        # send complete buffer
+        s = pickle.dumps( self.buffered_reports )
+        self.buffered_reports = []
+
+        if DEBUG: print >>sys.stderr,"\nreport: phoning home."
+        try:
+            # zlib-compressed, base64-encoded pickle POSTed over plain HTTP
+            data = zlib.compress( s, 9 ).encode("base64")
+            sock = urllib.urlopen("http://swpreporter.tribler.org/reporting/report.cgi",data)
+            result = sock.read()
+            sock.close()
+
+            # server replies 0 (stop reporting) or the next interval in seconds
+            result = int(result)
+
+            if result == 0:
+                # remote server is not recording, so don't bother sending info
+                self.do_reporting = False
+            else:
+                self.report_interval = result
+        except IOError, e:
+            # error contacting server
+            print_exc(file=sys.stderr)
+            self.do_reporting = False
+        except ValueError, e:
+            # page did not obtain an integer
+            print >>sys.stderr,"report: got %s" % (result,)
+            print_exc(file=sys.stderr)
+            self.do_reporting = False
+        except:
+            # any other error
+            print_exc(file=sys.stderr)
+            self.do_reporting = False
+        # NOTE(review): if zlib.compress/encode raised above, 'data' is unbound
+        # here and this line would raise NameError when DEBUG is True.
+        if DEBUG: print >>sys.stderr,"\nreport: succes. reported %s bytes, will report again (%s) in %s seconds" % (len(data),self.do_reporting,self.report_interval)
+
+    def report_stat( self, ds ):
+        """ Build a statistics dict for DownloadState ds (VOD stats, totals and
+        per-peer info) and hand it to phone_home(). Backquotes (`x`) are the
+        Python 2 repr() operator, used to keep infohash/peerid printable.
+        """
+        # one-letter status flags used in the per-peer summary strings below
+        chokestr = lambda b: ["c","C"][int(bool(b))]
+        intereststr = lambda b: ["i","I"][int(bool(b))]
+        optstr = lambda b: ["o","O"][int(bool(b))]
+        protstr = lambda b: ["bt","g2g"][int(bool(b))]
+            
+        now = time()
+        v = ds.get_vod_stats() or { "played": 0, "stall": 0, "late": 0, "dropped": 0, "prebuf": -1, "pieces": {} }
+        # Arno, 2009-09-09: method removed, was unclean
+        vi = ds.get_videoinfo() or { "live": False, "inpath": "(none)", "status": None }
+        vs = vi["status"]
+
+        scfg = self.sconfig
+
+        # totals in KiB and KiB/s, accumulated over the peer list
+        down_total, down_rate, up_total, up_rate = 0, 0.0, 0, 0.0
+        peerinfo = {}
+
+        for p in ds.get_peerlist():
+            down_total += p["dtotal"]/1024
+            down_rate  += p["downrate"]/1024.0
+            up_total   += p["utotal"]/1024
+            up_rate    += p["uprate"]/1024.0
+
+            id = p["id"]
+            peerinfo[id] = {
+                "g2g": protstr(p["g2g"]),
+                "addr": "%s:%s:%s" % (p["ip"],p["port"],p["direction"]),
+                "id": id,
+                "g2g_score": "%s,%s" % (p["g2g_score"][0],p["g2g_score"][1]),
+                "down_str": "%s%s" % (chokestr(p["dchoked"]),intereststr(p["dinterested"])),
+                "down_total": p["dtotal"]/1024,
+                "down_rate": p["downrate"]/1024.0,
+                "up_str": "%s%s%s" % (chokestr(p["uchoked"]),intereststr(p["uinterested"]),optstr(p["optimistic"])),
+                "up_total": p["utotal"]/1024,
+                "up_rate": p["uprate"]/1024.0,
+            }
+
+        if vs:
+            valid_range = vs.download_range()
+        else:
+            valid_range = ""
+
+        stats = {
+            "timestamp":  time(),
+            "epoch":      self.epoch,
+            "listenport": scfg.get_listen_port(),
+            "infohash":   `ds.get_download().get_def().get_infohash()`,
+            "filename":   vi["inpath"],
+            "peerid":     `ds.get_peerid()`,  # Arno, 2009-09-09: method removed, should be Download method
+            "live":       vi["live"],
+            "progress":   100.00*ds.get_progress(),
+            "down_total": down_total,
+            "down_rate":  down_rate,
+            "up_total":   up_total,
+            "up_rate":    up_rate,
+            "p_played":   v["played"],
+            "t_stall":    v["stall"],
+            "p_late":     v["late"],
+            "p_dropped":  v["dropped"],
+            "t_prebuf":   v["prebuf"],
+            "peers":      peerinfo.values(),
+            "pieces":     v["pieces"],
+            "validrange": valid_range,
+        }
+
+        self.phone_home( stats )
diff --git a/instrumentation/next-share/BaseLib/Player/SvcTest.py b/instrumentation/next-share/BaseLib/Player/SvcTest.py
new file mode 100644 (file)
index 0000000..1183e17
--- /dev/null
@@ -0,0 +1,182 @@
+import wx
+import sys
+import time
+from traceback import print_exc
+from BaseLib.Video.utils import svcextdefaults, videoextdefaults
+from BaseLib.Core.API import *
+
+DEBUG = True
+# used to set different download speeds
+# (passed to dcfg.set_max_speed; presumably KB/s -- TODO confirm unit)
+DOWNLOADSPEED = 200
+
+def svc_event_callback(d,event,params):
+    """ VOD event callback for SVC downloads: on VODEVENT_START, drain the
+    layered stream into a local file named "stream" for verification.
+    """
+    if event == VODEVENT_START:
+    
+        stream = params["stream"]
+        length   = params["length"]
+        mimetype = params["mimetype"]
+        
+        # save stream on a temp file for verification
+        f = open("stream","wb")
+
+        while True:
+            # Read data from the resulting stream.
+            # Every stream.read() call will give back the available layers for the
+            # following time slot.
+            # The first 6 Bytes tell us the piece size. Therefore depending on the 
+            # size of the stream, knowing the piece size, we can see how many layers 
+            # are given back for that specific time slot.
+            data = stream.read()
+            print >>sys.stderr,"main: VOD ready callback: reading",type(data)
+            print >>sys.stderr,"main: VOD ready callback: reading",len(data)
+            if len(data) == 0:
+                break
+            f.write(data)
+            time.sleep(2)
+        
+        # Stop the download
+        # NOTE(review): STOP_AFTER is not defined anywhere in this module;
+        # reaching this line raises NameError.
+        if STOP_AFTER:
+            d.stop()
+            
+        f.close()
+    
+        stream.close()
+        
+        
+
+def state_callback(ds):
+    """ Periodic download-state callback: print status, progress and current
+    down/up rates to stderr. Returns (1.0, False): call again in 1 second,
+    without requesting the peer list.
+    """
+    try:
+        d = ds.get_download()
+        p = "%.0f %%" % (100.0*ds.get_progress())
+        dl = "dl %.0f" % (ds.get_current_speed(DOWNLOAD))
+        ul = "ul %.0f" % (ds.get_current_speed(UPLOAD))
+        print >>sys.stderr,dlstatus_strings[ds.get_status() ],p,dl,ul,"====="
+    except:
+        print_exc()
+
+    return (1.0,False)
+
+
+def select_torrent_from_disk(self):
+        # NOTE(review): module-level function that takes 'self' and reads
+        # self.appname -- looks copy-pasted from a method (cf. swarmplayer.py);
+        # calling it as written would fail. Not referenced correctly by
+        # run_test() either (called as self.select_torrent_from_disk()).
+        dlg = wx.FileDialog(None, 
+                            self.appname+': Select torrent to play', 
+                            '', # default dir
+                            '', # default file
+                            'TSTREAM and TORRENT files (*.tstream;*.torrent)|*.tstream;*.torrent', 
+                            wx.OPEN|wx.FD_FILE_MUST_EXIST)
+        if dlg.ShowModal() == wx.ID_OK:
+            filename = dlg.GetPath()
+        else:
+            filename = None
+        dlg.Destroy()
+        return filename
+
+
+def select_file_start_download(self,torrentfilename):
+    # NOTE(review): despite the name, this function only loads the TorrentDef
+    # and picks 'dlfile'; it never starts a download nor returns the choice.
+    # Also takes 'self' at module level -- appears to be a truncated copy of
+    # PlayerApp.select_file_start_download in swarmplayer.py.
+    
+    if torrentfilename.startswith("http") or torrentfilename.startswith(P2PURL_SCHEME):
+        tdef = TorrentDef.load_from_url(torrentfilename)
+    else: 
+        tdef = TorrentDef.load(torrentfilename)
+    print >>sys.stderr,"main: Starting download, infohash is",`tdef.get_infohash()`
+    
+    # Select which video to play (if multiple)
+    videofiles = tdef.get_files(exts=videoextdefaults)
+    print >>sys.stderr,"main: Found video files",videofiles
+    
+    if len(videofiles) == 0:
+        print >>sys.stderr,"main: No video files found! Let user select"
+        # Let user choose any file
+        videofiles = tdef.get_files(exts=None)
+        
+    if len(videofiles) > 1:
+        selectedvideofile = self.ask_user_which_video_from_torrent(videofiles)
+        if selectedvideofile is None:
+            print >>sys.stderr,"main: User selected no video"
+            return False
+        dlfile = selectedvideofile
+    else:
+        dlfile = videofiles[0]
+
+# Ric: check if it as an SVC download. If it is add the enhancement layers to the dlfiles
+def is_svc(dlfile, tdef):
+    """ Return [dlfile, enh1, enh2, ...] when tdef looks like an SVC torrent
+    (heuristic: the first sorted enhancement-layer file has the same length
+    as the base layer), else None.
+    """
+    svcfiles = None
+    
+    if tdef.is_multifile_torrent():
+        enhancement =  tdef.get_files(exts=svcextdefaults)
+        # Ric: order the enhancement layer in the svcfiles list
+        # NOTE(review): raises IndexError if no enhancement files exist.
+        enhancement.sort()
+        if tdef.get_length(enhancement[0]) == tdef.get_length(dlfile):
+            svcfiles = [dlfile]
+            svcfiles.extend(enhancement)
+            
+    return svcfiles
+
+def run_test(params = None):
+    """ Start a session, download the torrent given on the command line and
+    stream it via svc_event_callback, looping forever.
+    """
+    
+    if params is None:
+        params = [""]
+    
+    if len(sys.argv) > 1:
+        params = sys.argv[1:]
+        torrentfilename = params[0]
+    else:
+        # NOTE(review): 'self' is undefined in this module-level function;
+        # this branch raises NameError instead of opening a file dialog.
+        torrentfilename = self.select_torrent_from_disk()
+        if torrentfilename is None:
+            print >>sys.stderr,"main: User selected no file"
+            self.OnExit()
+            return False
+    
+    scfg = SessionStartupConfig()
+    scfg.set_megacache( False )
+    scfg.set_overlay( False )
+
+    s = Session( scfg )
+
+    tdef = TorrentDef.load(torrentfilename)
+    dcfg = DownloadStartupConfig()
+
+    
+    # Select which video to play (if multiple)
+    videofiles = tdef.get_files(exts=videoextdefaults)
+    print >>sys.stderr,"main: Found video files",videofiles
+    
+    if len(videofiles) == 0:
+        print >>sys.stderr,"main: No video files found! Let user select"
+        # Let user choose any file
+        
+    if len(videofiles) > 1:
+        # NOTE(review): 'videofile' is never assigned in this branch, so the
+        # is_svc() call below would raise NameError for multi-video torrents.
+        print >>sys.stderr,"main: More then one video file found!!"
+    else:
+        videofile = videofiles[0]
+
+    # Ric: check for svc
+    if tdef.is_multifile_torrent():
+
+        dlfiles = is_svc(videofile, tdef)
+
+        if dlfiles is not None:
+            print >>sys.stderr,"main: Found SVC video!!"
+            dcfg.set_video_event_callback(svc_event_callback, svc=True)
+            dcfg.set_selected_files(dlfiles)
+    else:
+        # NOTE(review): 'dlfile' is undefined here ('videofile' was intended?).
+        dcfg.set_video_event_callback(svc_event_callback)
+        dcfg.set_selected_files([dlfile])        
+
+    
+    # Ric: Set here the desired download speed
+    dcfg.set_max_speed(DOWNLOAD,DOWNLOADSPEED)
+
+    d = s.start_download( tdef, dcfg )
+
+    d.set_state_callback(state_callback,getpeerlist=False)
+    print >>sys.stderr,"main: Saving content to", d.get_dest_files()
+
+    # NOTE(review): infinite loop -- the two lines after it are unreachable.
+    while True:
+      time.sleep(360)
+    print >>sys.stderr,"Sleeping seconds to let other threads finish"
+    time.sleep(2)
+
+
+
+# Run the SVC streaming test when invoked as a script.
+if __name__ == '__main__':
+    run_test()
diff --git a/instrumentation/next-share/BaseLib/Player/UtilityStub.py b/instrumentation/next-share/BaseLib/Player/UtilityStub.py
new file mode 100644 (file)
index 0000000..8823655
--- /dev/null
@@ -0,0 +1,37 @@
+# Written by ABC authors and Arno Bakker 
+# see LICENSE.txt for license information
+import sys
+import os
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Lang.lang import Lang
+
+################################################################
+#
+# Class: UtilityStub
+#
+################################################################
+class UtilityStub:
+    """ Minimal stand-in for the full Utility class: provides just the paths,
+    language table and config Read() keys that the player code needs.
+    """
+    def __init__(self,installdir,statedir):
+        self.installdir = installdir
+        self.statedir = statedir
+
+        # Self-referencing: code expecting utility.config.Read() ends up here.
+        self.config = self
+
+        # Setup language files
+        self.lang = Lang(self)
+
+        
+        
+    def getConfigPath(self):
+        """ Return the state (config) directory. """
+        return self.statedir
+    
+    def getPath(self):
+        """ Return the install dir decoded with the filesystem encoding. """
+        return self.installdir.decode(sys.getfilesystemencoding())
+
+    def Read(self,key):
+        """ Config lookup stub; returns None for any unknown key. """
+        if key == 'language_file':
+            return os.path.join(self.installdir,LIBRARYNAME,'Lang','english.lang')
+        elif key == 'videoplayerpath':
+            return 'vlc'
+        return None
diff --git a/instrumentation/next-share/BaseLib/Player/__init__.py b/instrumentation/next-share/BaseLib/Player/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Player/swarmplayer-njaal.py b/instrumentation/next-share/BaseLib/Player/swarmplayer-njaal.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/instrumentation/next-share/BaseLib/Player/swarmplayer.py b/instrumentation/next-share/BaseLib/Player/swarmplayer.py
new file mode 100644 (file)
index 0000000..9198c73
--- /dev/null
@@ -0,0 +1,688 @@
+# Written by Arno Bakker, Choopan RATTANAPOKA, Jie Yang
+# see LICENSE.txt for license information
+#
+# This is the main file for the SwarmPlayer V1, which is a standalone P2P-based 
+# video player. SwarmPlayer V2, the transport protocol for use with HTML5 can be
+# found in Transport/SwarmEngine.py (Sharing code with SwarmPlugin and v1,
+# confusing the code a bit).
+#
+#
+# TODO: 
+# * set 'download_slice_size' to 32K, such that pieces are no longer
+#   downloaded in 2 chunks. This particularly avoids a bad case where you
+#   kick the source: you download chunk 1 of piece X
+#   from lagging peer and download chunk 2 of piece X from source. With the piece
+#   now complete you check the sig. As the first part of the piece is old, this
+#   fails and we kick the peer that gave us the completing chunk, which is the 
+#   source.
+#
+#   Note that the BT spec says: 
+#   "All current implementations use 2 15 , and close connections which request 
+#   an amount greater than 2 17." http://www.bittorrent.org/beps/bep_0003.html
+#
+#   So it should be 32KB already. However, the BitTorrent (3.4.1, 5.0.9), 
+#   BitTornado and Azureus all use 2 ** 14 = 16KB chunks.
+#
+# - See if we can use stream.seek() to optimize SwarmPlayer as well (see SwarmPlugin)
+
+# modify the sys.stderr and sys.stdout for safe output
+import BaseLib.Debug.console
+
+import os
+import sys
+import time
+import tempfile
+import shutil
+from traceback import print_exc
+from cStringIO import StringIO
+from threading import Thread
+from base64 import encodestring
+
+if sys.platform == "darwin":
+    # on Mac, we can only load VLC/OpenSSL libraries
+    # relative to the location of tribler.py
+    os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
+try:
+    import wxversion
+    wxversion.select('2.8')
+except:
+    pass
+import wx
+
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.API import *
+from BaseLib.Core.Utilities.unicode import bin2unicode
+from BaseLib.Core.Utilities.timeouturlopen import urlOpenTimeout
+
+from BaseLib.Video.defs import * 
+from BaseLib.Video.VideoPlayer import VideoPlayer, VideoChooser
+from BaseLib.Video.utils import videoextdefaults
+from BaseLib.Utilities.LinuxSingleInstanceChecker import *
+from BaseLib.Utilities.Instance2Instance import Instance2InstanceClient
+
+from BaseLib.Player.PlayerVideoFrame import VideoFrame
+from BaseLib.Player.BaseApp import BaseApp
+
+from BaseLib.Core.Statistics.Status import *
+
+DEBUG = True\r
+ONSCREENDEBUG = False\r
+# When False, a second launch forwards its torrent to the running instance\r
+# via Instance2Instance and exits (see PlayerApp.OnInit).\r
+ALLOW_MULTIPLE = False\r
+\r
+PLAYER_VERSION = '1.1.0'\r
+\r
+# TCP ports: instance-to-instance control, BT listen port, internal HTTP video server.\r
+I2I_LISTENPORT = 57894\r
+PLAYER_LISTENPORT = 8620\r
+VIDEOHTTP_LISTENPORT = 6879\r
+\r
+# Arno, 2010-03-08: declaration here gives warning, can't get rid of it.\r
+START_TIME = 0\r
+
+
+class PlayerApp(BaseApp):
+    def __init__(self, redirectstderrout, appname, appversion, params, single_instance_checker, installdir, i2iport, sport):\r
+        # Set videoFrame before BaseApp.__init__ so code that tests it\r
+        # (e.g. gui_states_callback) never sees a missing attribute.\r
+        self.videoFrame = None\r
+        BaseApp.__init__(self, redirectstderrout, appname, appversion, params, single_instance_checker, installdir, i2iport, sport)\r
+\r
+        # playback-progress reporting state, reset per download\r
+        self.said_start_playback = False\r
+        self.decodeprogress = 0\r
+
+        
+    def OnInit(self):\r
+        """ wx application entry point: forward to a running instance or set up\r
+        the session, video player and first download. Returns False to abort\r
+        startup.\r
+        """\r
+        try:\r
+            # If already running, and user starts a new instance without a URL \r
+            # on the cmd line\r
+            if not ALLOW_MULTIPLE and self.single_instance_checker.IsAnotherRunning():\r
+                print >> sys.stderr,"main: Another instance running, no URL on CMD, asking user"\r
+                torrentfilename = self.select_torrent_from_disk()\r
+                if torrentfilename is not None:\r
+                    # Presumably the client sends the command on construction;\r
+                    # the i2ic object itself is unused. TODO confirm.\r
+                    i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename)\r
+                    return False\r
+\r
+            # Do common initialization\r
+            BaseApp.OnInitBase(self)\r
+        \r
+            # Fire up the VideoPlayer, it abstracts away whether we're using\r
+            # an internal or external video player.\r
+            self.videoplayer = VideoPlayer.getInstance(httpport=VIDEOHTTP_LISTENPORT)\r
+            playbackmode = PLAYBACKMODE_INTERNAL\r
+            self.videoplayer.register(self.utility,preferredplaybackmode=playbackmode)\r
+            \r
+            # Open video window\r
+            self.start_video_frame()\r
+\r
+            # Load torrent\r
+            if self.params[0] != "":\r
+                torrentfilename = self.params[0]\r
+                \r
+                # TEST: just play video file\r
+                #self.videoplayer.play_url(torrentfilename)\r
+                #return True\r
+                \r
+            else:\r
+                torrentfilename = self.select_torrent_from_disk()\r
+                if torrentfilename is None:\r
+                    print >>sys.stderr,"main: User selected no file"\r
+                    self.OnExit()\r
+                    return False\r
+\r
+\r
+            # Start download\r
+            if not self.select_file_start_download(torrentfilename):\r
+                \r
+                self.OnExit()\r
+                return False\r
+\r
+            return True\r
+        \r
+        except Exception,e:\r
+            print_exc()\r
+            self.show_error(str(e))\r
+            self.OnExit()\r
+            return False\r
+
+
+    def start_video_frame(self):\r
+        """ Create and show the player frame, route app close events to it,\r
+        and attach it to the video player.\r
+        """\r
+        self.videoFrame = PlayerFrame(self,self.appname)\r
+        self.Bind(wx.EVT_CLOSE, self.videoFrame.OnCloseWindow)\r
+        self.Bind(wx.EVT_QUERY_END_SESSION, self.videoFrame.OnCloseWindow)\r
+        self.Bind(wx.EVT_END_SESSION, self.videoFrame.OnCloseWindow)\r
+        self.videoFrame.show_videoframe()\r
+\r
+        if self.videoplayer is not None:\r
+            self.videoplayer.set_videoframe(self.videoFrame)\r
+        self.said_start_playback = False\r
+        
+        
+    def select_torrent_from_disk(self):\r
+        """ Ask the user for a .tstream/.torrent file; return its path or None. """\r
+        dlg = wx.FileDialog(None, \r
+                            self.appname+': Select torrent to play', \r
+                            '', # default dir\r
+                            '', # default file\r
+                            'TSTREAM and TORRENT files (*.tstream;*.torrent)|*.tstream;*.torrent', \r
+                            wx.OPEN|wx.FD_FILE_MUST_EXIST)\r
+        if dlg.ShowModal() == wx.ID_OK:\r
+            filename = dlg.GetPath()\r
+        else:\r
+            filename = None\r
+        dlg.Destroy()\r
+        return filename\r
+
+
+    def select_file_start_download(self,torrentfilename):\r
+        """ Load the torrent (from URL or disk), let the user pick a video if\r
+        there are several, show name/thumbnail, and start the download.\r
+        Returns False if the user cancelled, True otherwise.\r
+        """\r
+        \r
+        if torrentfilename.startswith("http") or torrentfilename.startswith(P2PURL_SCHEME):\r
+            tdef = TorrentDef.load_from_url(torrentfilename)\r
+        else: \r
+            tdef = TorrentDef.load(torrentfilename)\r
+        print >>sys.stderr,"main: Starting download, infohash is",`tdef.get_infohash()`\r
+        poa = None\r
+        if tdef.get_cs_keys():\r
+            # This is a closed swarm, try to get a POA\r
+            poa = self._get_poa(tdef)\r
+        \r
+        # Select which video to play (if multiple)\r
+        videofiles = tdef.get_files(exts=videoextdefaults)\r
+        print >>sys.stderr,"main: Found video files",videofiles\r
+        \r
+        if len(videofiles) == 0:\r
+            print >>sys.stderr,"main: No video files found! Let user select"\r
+            # Let user choose any file\r
+            videofiles = tdef.get_files(exts=None)\r
+            \r
+        if len(videofiles) > 1:\r
+            selectedvideofile = self.ask_user_which_video_from_torrent(videofiles)\r
+            if selectedvideofile is None:\r
+                print >>sys.stderr,"main: User selected no video"\r
+                return False\r
+            dlfile = selectedvideofile\r
+        else:\r
+            dlfile = videofiles[0]\r
+\r
+\r
+        # Start video window if not open\r
+        if self.videoFrame is None:\r
+            self.start_video_frame()\r
+        else:\r
+            # Stop playing, reset stream progress info + sliders \r
+            self.videoplayer.stop_playback(reset=True)\r
+            self.said_start_playback = False\r
+        self.decodeprogress = 0\r
+\r
+        # Display name and thumbnail\r
+        cname = tdef.get_name_as_unicode()\r
+        if len(videofiles) > 1:\r
+            cname += u' - '+bin2unicode(dlfile)\r
+        self.videoplayer.set_content_name(u'Loading: '+cname)\r
+        \r
+        try:\r
+            # Thumbnail is best-effort: any failure is logged and ignored.\r
+            [mime,imgdata] = tdef.get_thumbnail()\r
+            if mime is not None:\r
+                f = StringIO(imgdata)\r
+                img = wx.EmptyImage(-1,-1)\r
+                img.LoadMimeStream(f,mime,-1)\r
+                self.videoplayer.set_content_image(img)\r
+            else:\r
+                self.videoplayer.set_content_image(None)\r
+        except:\r
+            print_exc()\r
+\r
+\r
+        # Start actual download\r
+        self.start_download(tdef,dlfile, poa)\r
+        return True\r
+
+
+
+    def ask_user_which_video_from_torrent(self,videofiles):\r
+        """ Show a VideoChooser dialog; return the chosen filename or None. """\r
+        dlg = VideoChooser(self.videoFrame,self.utility,videofiles,title=self.appname,expl='Select which file to play')\r
+        result = dlg.ShowModal()\r
+        if result == wx.ID_OK:\r
+            index = dlg.getChosenIndex()\r
+            filename = videofiles[index]\r
+        else:\r
+            filename = None\r
+        dlg.Destroy()\r
+        return filename\r
+
+
+    # ARNOTODO: see how VideoPlayer manages stopping downloads
+    
+    def sesscb_vod_event_callback(self,d,event,params):\r
+        """ Session VOD event callback; delegates to the VideoPlayer. """\r
+        self.videoplayer.sesscb_vod_event_callback(d,event,params)\r
+        
+        
+    def get_supported_vod_events(self):\r
+        """ Delegate to the VideoPlayer's supported VOD event list. """\r
+        return self.videoplayer.get_supported_vod_events()\r
+
+
+    #
+    # Remote start of new torrents
+    #
+    def i2ithread_readlinecallback(self,ic,cmd):\r
+        """ Called by Instance2Instance thread """\r
+        \r
+        print >>sys.stderr,"main: Another instance called us with cmd",cmd\r
+        ic.close()\r
+        \r
+        if cmd.startswith('START '):\r
+            param = cmd[len('START '):]\r
+            torrentfilename = None\r
+            if param.startswith('http:'):\r
+                # Retrieve from web \r
+                # NOTE(review): NamedTemporaryFile deletes the file on close()\r
+                # (delete=True by default), so f.name may no longer exist when\r
+                # remote_start_download() reads it -- verify on this platform.\r
+                f = tempfile.NamedTemporaryFile()\r
+                n = urlOpenTimeout(param)\r
+                data = n.read()\r
+                f.write(data)\r
+                f.close()\r
+                n.close()\r
+                torrentfilename = f.name\r
+            else:\r
+                torrentfilename = param\r
+                \r
+            # Switch to GUI thread\r
+            wx.CallAfter(self.remote_start_download,torrentfilename)\r
+
+    def remote_start_download(self,torrentfilename):
+        """ Called by GUI thread """
+        # Stop whatever is currently playing, drop unfinished VOD downloads,
+        # then start the newly requested torrent.
+        self.videoplayer.stop_playback(reset=True)
+
+        self.remove_downloads_in_vodmode_if_not_complete()
+        self.select_file_start_download(torrentfilename)
+
+
+    #
+    # Display stats in videoframe
+    #
+    def gui_states_callback(self,dslist,haspeerlist):
+        """ Override BaseApp """
+        # Periodic GUI-thread callback with download states; forwards stats
+        # for the currently playing download to the video frame, if any.
+        
+        (playing_dslist,totalhelping,totalspeed) = BaseApp.gui_states_callback(self,dslist,haspeerlist)
+        
+        # Don't display stats if there is no video frame to show them on.
+        if self.videoFrame is None:
+            return
+        elif len(playing_dslist) > 0:
+            ds = playing_dslist[0] # only single playing Download at the moment in swarmplayer 
+            self.display_stats_in_videoframe(ds,totalhelping,totalspeed)
+
+
+    def display_stats_in_videoframe(self,ds,totalhelping,totalspeed):
+        # Display stats for currently playing Download
+        # Builds status messages via get_status_msgs() (which also updates
+        # self.said_start_playback / self.decodeprogress) and pushes them to
+        # the video player widgets.
+        
+        videoplayer_mediastate = self.videoplayer.get_state()
+        #print >>sys.stderr,"main: Stats: VideoPlayer state",videoplayer_mediastate
+        
+        [topmsg,msg,self.said_start_playback,self.decodeprogress] = get_status_msgs(ds,videoplayer_mediastate,self.appname,self.said_start_playback,self.decodeprogress,totalhelping,totalspeed)
+        # Display helping info on "content name" line.
+        self.videoplayer.set_content_name(topmsg)
+
+        # Update status msg and progress bar
+        self.videoplayer.set_player_status_and_progress(msg,ds.get_pieces_complete())
+        
+        # Toggle save button
+        self.videoplayer.set_save_button(ds.get_status() == DLSTATUS_SEEDING, self.save_video_copy)    
+            
+        # Debug-only peer dump, intentionally disabled with 'if False'.
+        if False: # Only works if the sesscb_states_callback() method returns (x,True)
+            peerlist = ds.get_peerlist()
+            print >>sys.stderr,"main: Connected to",len(peerlist),"peers"
+            for peer in peerlist:
+                print >>sys.stderr,"main: Connected to",peer['ip'],peer['uprate'],peer['downrate']
+
+
+    def videoserver_set_status_guicallback(self,status):
+        """ Override BaseApp """
+        # Forward video-server status text to the player frame, if one exists.
+        if self.videoFrame is not None:
+            self.videoFrame.set_player_status(status)
+
+    #
+    # Save button logic
+    #
+    def save_video_copy(self):
+        # Save a copy of playing download to other location
+        # Spawns a daemon thread that shows the save dialog and copies the
+        # file (see savethread_callback).
+        # NOTE(review): if self.downloads_in_vodmode is empty, 'd' is never
+        # bound and the next line raises NameError — confirm callers
+        # guarantee a playing download exists.
+        for d2 in self.downloads_in_vodmode:
+            # only single playing Download at the moment in swarmplayer
+            d = d2
+        dest_files = d.get_dest_files()  
+        dest_file = dest_files[0] # only single file at the moment in swarmplayer
+        savethread_callback_lambda = lambda:self.savethread_callback(dest_file)
+        
+        t = Thread(target = savethread_callback_lambda)
+        t.setName( self.appname+"Save"+t.getName() )
+        t.setDaemon(True)
+        t.start()
+    
+    def savethread_callback(self,dest_file):
+        
+        # Save a copy of playing download to other location
+        # called by new thread from self.save_video_copy
+        # dest_file is a (dest-dir, path) pair; dest_file[1] is the source
+        # path that gets copied to the user-chosen location.
+        try:
+            if sys.platform == 'win32':
+                # Jelle also goes win32, find location of "My Documents"
+                # see http://www.mvps.org/access/api/api0054.htm
+                from win32com.shell import shell
+                pidl = shell.SHGetSpecialFolderLocation(0,0x05)
+                defaultpath = shell.SHGetPathFromIDList(pidl)
+            else:
+                defaultpath = os.path.expandvars('$HOME')
+        except Exception, msg:
+            defaultpath = ''
+            print_exc()
+
+        dest_file_only = os.path.split(dest_file[1])[1]
+        
+        print >> sys.stderr, 'Defaultpath:', defaultpath, 'Dest:', dest_file
+        dlg = wx.FileDialog(self.videoFrame, 
+                            message = self.utility.lang.get('savemedia'), 
+                            defaultDir = defaultpath, 
+                            defaultFile = dest_file_only,
+                            wildcard = self.utility.lang.get('allfileswildcard') + ' (*.*)|*.*', 
+                            style = wx.SAVE)
+        dlg.Raise()
+        result = dlg.ShowModal()
+        # NOTE(review): the dialog is destroyed here but dlg.GetPath() is
+        # called below — GetPath() should be read before Destroy(); verify
+        # this does not crash/return garbage on some wx builds.
+        dlg.Destroy()
+        
+        if result == wx.ID_OK:
+            path = dlg.GetPath()
+            print >> sys.stderr, 'Path:', path
+            print >> sys.stderr, 'Copy: %s to %s' % (dest_file[1], path)
+            if sys.platform == 'win32':
+                try:
+                    # win32file.CopyFile preserves Windows metadata; fall
+                    # back to shutil if pywin32 is unavailable or it fails.
+                    import win32file
+                    win32file.CopyFile(dest_file[1], path, 0) # do succeed on collision
+                except:
+                    shutil.copyfile(dest_file[1], path)
+            else:
+                shutil.copyfile(dest_file[1], path)
+
+    # On Exit
+
+    def clear_session_state(self):
+        """ Try to fix apps by doing hard reset. Called from systray menu """
+        # Best-effort: stop playback first, ignoring any player errors, then
+        # let the base class wipe the session state.
+        try:
+            self.videoplayer.stop_playback()
+        except:
+            print_exc()
+        BaseApp.clear_session_state(self)
+
+def get_status_msgs(ds,videoplayer_mediastate,appname,said_start_playback,decodeprogress,totalhelping,totalspeed):
+    # Build the two status strings shown in the video frame for download
+    # state ds, and report playback events to the LivingLab instrumentation.
+    # Returns [topmsg, msg, said_start_playback, decodeprogress] so the
+    # caller can persist the updated said_start_playback/decodeprogress.
+
+    intime = "Not playing for quite some time."
+    # (threshold-seconds, message) pairs, checked from largest to smallest.
+    ETA = ((60 * 15, "Playing in less than 15 minutes."),
+           (60 * 10, "Playing in less than 10 minutes."),
+           (60 * 5, "Playing in less than 5 minutes."),
+           (60, "Playing in less than a minute."))
+
+    topmsg = ''
+    msg = ''
+    
+    logmsgs = ds.get_log_messages()
+    logmsg = None
+    if len(logmsgs) > 0:
+        print >>sys.stderr,"main: Log",logmsgs[0]
+        logmsg = logmsgs[-1][1]
+        
+    preprogress = ds.get_vod_prebuffering_progress()
+    playable = ds.get_vod_playable()
+    t = ds.get_vod_playable_after()
+
+    # Instrumentation
+    # Status elements (reported periodically):
+    #   playable: True if playable
+    #   prebuffering: float of percentage full?
+    #
+    # Events:
+    #   failed_after: Failed to play after X seconds (since starting to play)
+    #   playable_in: Started playing after X seconds
+    status = Status.get_status_holder("LivingLab")
+
+    s_play = status.get_or_create_status_element("playable", False)
+    if playable:
+        if preprogress < 1.0:
+            # Was playable but buffer dropped below full: report a failure
+            # event and restart the timing baseline.
+            if s_play.get_value() == True:
+                global START_TIME
+                status.create_and_add_event("failed_after", [time.time() - START_TIME])
+                START_TIME = time.time()
+                
+            s_play.set_value(False)
+
+        elif s_play.get_value() == False:
+            # Transition to playable: report how long it took.
+            s_play.set_value(True)
+            global START_TIME
+            status.create_and_add_event("playable_in", [time.time() - START_TIME])
+            START_TIME = time.time()
+
+    elif preprogress < 1.0:
+        status.get_or_create_status_element("prebuffering").set_value(preprogress)
+    # /Instrumentation
+
+    # Map the estimated time-to-playable t onto a human-readable ETA string;
+    # the loop stops at the first threshold that t exceeds.
+    intime = ETA[0][1]
+    for eta_time, eta_msg in ETA:
+        if t > eta_time:
+            break
+        intime = eta_msg
+    
+    #print >>sys.stderr,"main: playble",playable,"preprog",preprogress
+    #print >>sys.stderr,"main: ETA is",t,"secs"
+    # if t > float(2 ** 30):
+    #     intime = "inf"
+    # elif t == 0.0:
+    #     intime = "now"
+    # else:
+    #     h, t = divmod(t, 60.0*60.0)
+    #     m, s = divmod(t, 60.0)
+    #     if h == 0.0:
+    #         if m == 0.0:
+    #             intime = "%ds" % (s)
+    #         else:
+    #             intime = "%dm:%02ds" % (m,s)
+    #     else:
+    #         intime = "%dh:%02dm:%02ds" % (h,m,s)
+            
+    #print >>sys.stderr,"main: VODStats",preprogress,playable,"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
+
+    # Pick the status line based on download state / playability.
+    if ds.get_status() == DLSTATUS_HASHCHECKING:
+        genprogress = ds.get_progress()
+        pstr = str(int(genprogress*100))
+        msg = "Checking already downloaded parts "+pstr+"% done"
+    elif ds.get_status() == DLSTATUS_STOPPED_ON_ERROR:
+        msg = 'Error playing: '+str(ds.get_error())
+    elif ds.get_progress() == 1.0:
+        msg = ''
+    elif playable:
+        if not said_start_playback:
+            msg = "Starting playback..."
+            
+        if videoplayer_mediastate == MEDIASTATE_STOPPED and said_start_playback:
+            if totalhelping == 0:
+                topmsg = u"Please leave the "+appname+" running, this will help other "+appname+" users to download faster."
+            else:
+                topmsg = u"Helping "+str(totalhelping)+" "+appname+" users to download. Please leave it running in the background."
+                
+            # Display this on status line
+            # TODO: Show balloon in systray when closing window to indicate things continue there
+            msg = ''
+            
+        elif videoplayer_mediastate == MEDIASTATE_PLAYING:
+            said_start_playback = True
+            # It may take a while for VLC to actually start displaying
+            # video, as it is trying to tune in to the stream (finding
+            # I-Frame). Display some info to show that:
+            #
+            cname = ds.get_download().get_def().get_name_as_unicode()
+            topmsg = u'Decoding: '+cname+' '+str(decodeprogress)+' s'
+            decodeprogress += 1
+            msg = ''
+        elif videoplayer_mediastate == MEDIASTATE_PAUSED:
+            # msg = "Buffering... " + str(int(100.0*preprogress))+"%" 
+            msg = "Buffering... " + str(int(100.0*preprogress))+"%. " + intime
+        else:
+            msg = ''
+            
+    elif preprogress != 1.0:
+        pstr = str(int(preprogress*100))
+        npeers = ds.get_num_peers()
+        npeerstr = str(npeers)
+        if npeers == 0 and logmsg is not None:
+            msg = logmsg
+        elif npeers == 1:
+            msg = "Prebuffering "+pstr+"% done (connected to 1 person). " + intime
+        else:
+            msg = "Prebuffering "+pstr+"% done (connected to "+npeerstr+" people). " + intime
+            
+        try:
+            # Warn when the torrent def has no bitrate for the chosen file,
+            # since VOD pacing then cannot be computed reliably.
+            d = ds.get_download()
+            tdef = d.get_def()
+            videofiles = d.get_selected_files()
+            if len(videofiles) >= 1:
+                videofile = videofiles[0]
+            else:
+                videofile = None
+            if tdef.get_bitrate(videofile) is None:
+                msg += ' This video may not play properly because its bitrate is unknown'
+        except:
+            print_exc()
+    else:
+        # msg = "Waiting for sufficient download speed... "+intime
+        msg = 'Waiting for sufficient download speed... ' + intime
+        
+    global ONSCREENDEBUG
+    if msg == '' and ONSCREENDEBUG:
+        # Debug overlay: show raw up/down speeds and helper count.
+        uptxt = "up %.1f" % (totalspeed[UPLOAD])
+        downtxt = " down %.1f" % (totalspeed[DOWNLOAD])
+        peertxt = " peer %d" % (totalhelping)
+        msg = uptxt + downtxt + peertxt
+
+    return [topmsg,msg,said_start_playback,decodeprogress]
+
+
+
+class PlayerFrame(VideoFrame):
+    # Main player window: a VideoFrame that accepts drag-and-dropped
+    # torrent files and coordinates shutdown of playback with its parent
+    # application on close.
+    def __init__(self,parent,appname):
+        VideoFrame.__init__(self,parent,parent.utility,appname+' '+PLAYER_VERSION,parent.iconpath,parent.videoplayer.get_vlcwrap(),parent.logopath)
+        self.parent = parent
+        # Guards against OnCloseWindow running its teardown more than once.
+        self.closed = False
+
+        dragdroplist = FileDropTarget(self.parent)
+        self.SetDropTarget(dragdroplist)
+        
+        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
+    
+    def OnCloseWindow(self, event = None):
+        # Stop playback, drop unfinished VOD downloads and restart the
+        # remaining downloads; idempotent because it can fire multiple times.
+        
+        print >>sys.stderr,"main: ON CLOSE WINDOW"
+
+        # TODO: first event.Skip does not close window, second apparently does
+        # Check how event differs
+
+        # This gets called multiple times somehow
+        if not self.closed:
+            self.closed = True
+            self.parent.videoFrame = None
+
+            self.parent.videoplayer.stop_playback()
+            self.parent.remove_downloads_in_vodmode_if_not_complete()
+            self.parent.restart_other_downloads()
+
+        if event is not None:
+            # Translate known event-type ids to readable names for the log.
+            nr = event.GetEventType()
+            lookup = { wx.EVT_CLOSE.evtType[0]: "EVT_CLOSE", wx.EVT_QUERY_END_SESSION.evtType[0]: "EVT_QUERY_END_SESSION", wx.EVT_END_SESSION.evtType[0]: "EVT_END_SESSION" }
+            if nr in lookup: 
+                nr = lookup[nr]
+            print >>sys.stderr,"main: Closing due to event ",nr
+            event.Skip()
+        else:
+            print >>sys.stderr,"main: Closing untriggered by event"
+
+        print >>sys.stderr,"main: Closing done"
+        # TODO: Show balloon in systray when closing window to indicate things continue there
+
+    def set_player_status(self,s):
+        # Status text is intentionally ignored by this frame.
+        pass
+
+
+class FileDropTarget(wx.FileDropTarget):
+    """ To enable drag and drop of .tstream to window """
+    def __init__(self,app):
+        wx.FileDropTarget.__init__(self) 
+        # app is the player application; it handles the actual start.
+        self.app = app
+      
+    def OnDropFiles(self, x, y, filenames):
+        # Start a download for every dropped file; x/y drop position unused.
+        for filename in filenames:
+            self.app.remote_start_download(filename)
+        return True
+
+
+                
+        
+##############################################################
+#
+# Main Program Start Here
+#
+##############################################################
+def run_playerapp(appname,appversion,params = None):
+    # Application entry point: enforce single-instance (forwarding a torrent
+    # argument to the running instance if any), start the PlayerApp, attach
+    # the LivingLab statistics reporter and run the wx main loop.
+    global START_TIME
+    START_TIME = time.time()
+    
+    if params is None:
+        params = [""]
+    
+    # Command-line arguments override the supplied params.
+    if len(sys.argv) > 1:
+        params = sys.argv[1:]
+    
+    if 'debug' in params:
+        global ONSCREENDEBUG
+        ONSCREENDEBUG=True
+    if 'raw' in params:
+        BaseLib.Video.VideoPlayer.USE_VLC_RAW_INTERFACE = True
+    
+    # Create single instance semaphore
+    # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears
+    # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc()
+    #
+    siappname = appname.lower() # For backwards compatibility
+    if sys.platform != 'linux2':
+        single_instance_checker = wx.SingleInstanceChecker(siappname+"-"+ wx.GetUserId())
+    else:
+        single_instance_checker = LinuxSingleInstanceChecker(siappname)
+
+    #print "[StartUpDebug]---------------- 1", time()-start_time
+    if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning():
+        if params[0] != "":
+            # Hand the torrent over to the already-running instance.
+            torrentfilename = params[0]
+            i2ic = Instance2InstanceClient(I2I_LISTENPORT,'START',torrentfilename)
+            time.sleep(1)
+            return
+        # NOTE(review): when another instance runs but no torrent argument
+        # was given, execution falls through and a second instance is
+        # started anyway — confirm this is intended.
+        
+    arg0 = sys.argv[0].lower()
+    if arg0.endswith('.exe'):
+        installdir = os.path.abspath(os.path.dirname(sys.argv[0]))
+    else:
+        installdir = os.getcwd()  
+
+    # Launch first single instance
+    app = PlayerApp(0, appname, appversion, params, single_instance_checker, installdir, I2I_LISTENPORT, PLAYER_LISTENPORT)
+
+    # Setup the statistic reporter while waiting for proper integration
+    status = Status.get_status_holder("LivingLab")
+    s = Session.get_instance()
+    id = encodestring(s.get_permid()).replace("\n","")
+    #reporter = LivingLabReporter.LivingLabPeriodicReporter("Living lab CS reporter", 300, id) # Report every 5 minutes
+    # NOTE(review): 30s interval is marked ONLY FOR TESTING below — the 5
+    # minute variant above is commented out; confirm before release.
+    reporter = LivingLabReporter.LivingLabPeriodicReporter("Living lab CS reporter", 30, id) # Report every 30 seconds - ONLY FOR TESTING
+    status.add_reporter(reporter)
+
+    app.MainLoop()
+
+    reporter.stop()
+    
+    print >>sys.stderr,"Sleeping seconds to let other threads finish"
+    time.sleep(2)
+    
+    if not ALLOW_MULTIPLE:
+        del single_instance_checker
+
+
+if __name__ == '__main__':
+    run_playerapp("SwarmPlayer","1.1.0")
+
diff --git a/instrumentation/next-share/BaseLib/Player/systray.py b/instrumentation/next-share/BaseLib/Player/systray.py
new file mode 100644 (file)
index 0000000..3a67872
--- /dev/null
@@ -0,0 +1,218 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import os
+import textwrap
+import time
+from traceback import print_exc
+import wx
+
+from BaseLib.Core.API import *
+from BaseLib.Plugin.defs import *
+
+class PlayerTaskBarIcon(wx.TaskBarIcon):
+    # System-tray icon for the player: provides an Options/Exit popup menu,
+    # a tooltip with status text, and opens the web interface on left click.
+    
+    def __init__(self,wxapp,iconfilename):
+        wx.TaskBarIcon.__init__(self)
+        self.wxapp = wxapp
+        
+        self.icons = wx.IconBundle()
+        self.icons.AddIconFromFile(iconfilename,wx.BITMAP_TYPE_ICO)
+        self.icon = self.icons.GetIcon(wx.Size(-1,-1))
+
+        self.Bind(wx.EVT_TASKBAR_LEFT_UP, self.OnLeftClicked)
+
+        if sys.platform != "darwin":
+            # Mac already has the right icon set at startup
+            self.SetIcon(self.icon,self.wxapp.appname)
+        else:
+            # On Mac, provide a File menu with an Exit item instead.
+            menuBar = wx.MenuBar()
+
+            # Setting up the file menu.
+            filemenu = wx.Menu()
+            item = filemenu.Append(-1,'E&xit','Terminate the program')
+            self.Bind(wx.EVT_MENU, self.OnExit, item)
+
+            wx.App.SetMacExitMenuItemId(item.GetId())
+
+    def OnExit(self,e):
+        # Mac File>Exit handler: terminate the wx main loop.
+        self.wxapp.ExitMainLoop()
+        # Close the frame.
+
+    def CreatePopupMenu(self):        
+        # Built fresh by wx each time the tray icon is right-clicked.
+        menu = wx.Menu()
+        
+        mi = menu.Append(-1,"Options...")
+        self.Bind(wx.EVT_MENU, self.OnOptions, id=mi.GetId())
+        menu.AppendSeparator()
+        mi = menu.Append(-1,"Exit")
+        self.Bind(wx.EVT_MENU, self.OnExitClient, id=mi.GetId())
+        return menu
+        
+    def OnOptions(self,event=None):
+        #print >>sys.stderr,"PlayerTaskBarIcon: OnOptions"
+        # Show the modal options dialog.
+        dlg = PlayerOptionsDialog(self.wxapp,self.icons)
+        ret = dlg.ShowModal()
+        #print >>sys.stderr,"PlayerTaskBarIcon: Dialog returned",ret
+        dlg.Destroy()
+
+    def OnExitClient(self,event=None):
+        #print >>sys.stderr,"PlayerTaskBarIcon: OnExitClient"
+        self.wxapp.ExitMainLoop()
+    
+    
+    def set_icon_tooltip(self,txt):
+        # Update the tray icon's tooltip text (no-op on OS X).
+        if sys.platform == "darwin":
+            # no taskbar tooltip on OS/X
+            return
+
+        self.SetIcon(self.icon,txt)
+    
+    def OnLeftClicked(self,event=None):
+        # Left click opens the local web interface in the default browser.
+        import webbrowser
+        url = 'http://127.0.0.1:'+str(self.wxapp.httpport)+URLPATH_WEBIF_PREFIX
+        webbrowser.open_new_tab(url)
+
+    
+class PlayerOptionsDialog(wx.Dialog):
+    # Options dialog: edits the upload-rate limit and (via the Advanced
+    # sub-dialog) the listen port.  Port changes are written to the session
+    # startup config and require an application restart.
+    
+    def __init__(self,wxapp,icons):
+        self.wxapp = wxapp
+        self.icons = icons
+        # Pending new listen port; None means unchanged.
+        self.port = None
+        
+        style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
+        wx.Dialog.__init__(self, None, -1, self.wxapp.appname+' Options', size=(400,200), style=style)
+        self.SetIcons(self.icons)
+
+        mainbox = wx.BoxSizer(wx.VERTICAL)
+        
+        aboutbox = wx.BoxSizer(wx.VERTICAL)
+        aboutlabel1 = wx.StaticText(self, -1, self.wxapp.appname+' is a product of the P2P-Next project')
+        aboutlabel2 = wx.StaticText(self, -1, 'Visit us at www.p2p-next.org!')
+        aboutbox.Add(aboutlabel1, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
+        aboutbox.Add(aboutlabel2, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
+        
+        uploadrate = self.wxapp.get_playerconfig('total_max_upload_rate')
+        
+        uploadratebox = wx.BoxSizer(wx.HORIZONTAL)
+        label = wx.StaticText(self, -1, 'Max upload to others (KB/s)')
+        self.uploadratectrl = wx.TextCtrl(self, -1, str(uploadrate))
+        uploadratebox.Add(label, 1, wx.ALIGN_CENTER_VERTICAL)
+        uploadratebox.Add(self.uploadratectrl)
+
+
+        buttonbox2 = wx.BoxSizer(wx.HORIZONTAL)
+        advbtn = wx.Button(self, -1, 'Advanced...')
+        buttonbox2.Add(advbtn, 0, wx.ALL, 5)
+
+        
+        buttonbox = wx.BoxSizer(wx.HORIZONTAL)
+        okbtn = wx.Button(self, wx.ID_OK, 'OK')
+        buttonbox.Add(okbtn, 0, wx.ALL, 5)
+        cancelbtn = wx.Button(self, wx.ID_CANCEL, 'Cancel')
+        buttonbox.Add(cancelbtn, 0, wx.ALL, 5)
+        applybtn = wx.Button(self, -1, 'Apply')
+        buttonbox.Add(applybtn, 0, wx.ALL, 5)
+
+        mainbox.Add(aboutbox, 1, wx.ALL, 5)
+        mainbox.Add(uploadratebox, 1, wx.EXPAND|wx.ALL, 5)
+        mainbox.Add(buttonbox2, 1, wx.EXPAND, 1)
+        mainbox.Add(buttonbox, 1, wx.EXPAND, 1)
+        self.SetSizerAndFit(mainbox)
+
+        self.Bind(wx.EVT_BUTTON, self.OnAdvanced, advbtn)
+        self.Bind(wx.EVT_BUTTON, self.OnOK, okbtn)
+        #self.Bind(wx.EVT_BUTTON, self.OnCancel, cancelbtn)
+        self.Bind(wx.EVT_BUTTON, self.OnApply, applybtn)
+        #self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
+
+    def OnOK(self,event = None):
+        # OK = Apply + close.
+        self.OnApply(event)
+        self.EndModal(wx.ID_OK)
+        
+    #def OnCancel(self,event = None):
+    #    self.EndModal(wx.ID_CANCEL)
+        
+    def OnApply(self,event = None):
+        # Persist a pending port change to the session startup config, save
+        # the upload-rate setting, and force-exit if the port changed (the
+        # new port only takes effect after restart).
+        print >>sys.stderr,"PlayerOptionsDialog: OnApply",self.port
+        
+        if self.port is not None:
+            session = self.wxapp.s
+            state_dir = session.get_state_dir()
+            cfgfilename = Session.get_default_config_filename(state_dir)
+            scfg = SessionStartupConfig.load(cfgfilename)
+            
+            scfg.set_listen_port(self.port)
+            print >>sys.stderr,"PlayerOptionsDialog: OnApply: Saving SessionStartupConfig to",cfgfilename
+            scfg.save(cfgfilename)
+        
+        # NOTE(review): int() raises ValueError on non-numeric input — no
+        # validation/feedback here; confirm acceptable.
+        uploadrate = int(self.uploadratectrl.GetValue())
+        # Updates value for global rate limiter too
+        self.wxapp.set_playerconfig('total_max_upload_rate',uploadrate)
+        self.wxapp.save_playerconfig()
+         
+        if self.port is not None and self.port != self.wxapp.s.get_listen_port():
+            dlg = wx.MessageDialog(None, "The SwarmPlugin will now exit to change the port. Reload the Web page to restart it", self.wxapp.appname+" Restart", wx.OK|wx.ICON_INFORMATION)
+            result = dlg.ShowModal()
+            dlg.Destroy()
+            self.wxapp.OnExit()
+            # F*cking wx won't exit. Die
+            os._exit(1)
+
+
+    def OnAdvanced(self,event = None):
+        # Open the advanced options sub-dialog; remember the chosen port
+        # only if it was confirmed with OK.
+
+        if self.port is None:
+            self.port = self.wxapp.s.get_listen_port()
+        #destdir = self.wxapp.s.get_dest_dir()
+
+        dlg = PlayerAdvancedOptionsDialog(self.icons,self.port,self.wxapp)
+        ret = dlg.ShowModal()
+        if ret == wx.ID_OK:
+            self.port = dlg.get_port()
+        dlg.Destroy()
+
+
+class PlayerAdvancedOptionsDialog(wx.Dialog):
+    # Advanced options sub-dialog: lets the user edit the listen port and
+    # trigger a full disk-cache clear (which exits the application).
+    
+    def __init__(self,icons,port,wxapp):
+        style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER  # TODO: Add OK+Cancel
+        wx.Dialog.__init__(self, None, -1, 'SwarmPlugin Advanced Options', size=(400,200), style=style)
+        self.wxapp = wxapp
+
+        self.SetIcons(icons)
+
+        mainbox = wx.BoxSizer(wx.VERTICAL)
+        
+        portbox = wx.BoxSizer(wx.HORIZONTAL)
+        label = wx.StaticText(self, -1, 'Port')
+        self.portctrl = wx.TextCtrl(self, -1, str(port))
+        portbox.Add(label, 1, wx.ALIGN_CENTER_VERTICAL) 
+        portbox.Add(self.portctrl)
+
+        button2box = wx.BoxSizer(wx.HORIZONTAL)
+        clearbtn = wx.Button(self, -1, 'Clear disk cache and exit')
+        button2box.Add(clearbtn, 0, wx.ALL, 5)
+        self.Bind(wx.EVT_BUTTON, self.OnClear, clearbtn)
+        
+        buttonbox = wx.BoxSizer(wx.HORIZONTAL)
+        okbtn = wx.Button(self, wx.ID_OK, 'OK')
+        buttonbox.Add(okbtn, 0, wx.ALL, 5)
+        cancelbtn = wx.Button(self, wx.ID_CANCEL, 'Cancel')
+        buttonbox.Add(cancelbtn, 0, wx.ALL, 5)
+
+        mainbox.Add(portbox, 1, wx.EXPAND|wx.ALL, 5)
+        mainbox.Add(button2box, 1, wx.EXPAND, 1)
+        mainbox.Add(buttonbox, 1, wx.EXPAND, 1)
+        self.SetSizerAndFit(mainbox)
+
+    def get_port(self):
+        # NOTE(review): int() raises ValueError on non-numeric input.
+        return int(self.portctrl.GetValue())
+        
+    def OnClear(self,event=None):
+        # Hard-reset the session state; the app exits as part of this.
+        self.wxapp.clear_session_state()
+        
+        
diff --git a/instrumentation/next-share/BaseLib/Plugin/AtomFeedParser.py b/instrumentation/next-share/BaseLib/Plugin/AtomFeedParser.py
new file mode 100644 (file)
index 0000000..50daef4
--- /dev/null
@@ -0,0 +1,126 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+import sys\r
+import time\r
+import xml.etree.ElementTree as etree\r
+\r
+from BaseLib.Core.Search.KeywordSearch import KeywordSearch\r
+from BaseLib.Core.Utilities.timeouturlopen import urlOpenTimeout\r
+\r
+\r
+class MetaFeedParser:\r
+    """ Parse an Atom feed that has Atom feeds as entries """\r
+    \r
+    def __init__(self,metafeedurl):\r
+        self.metafeedurl = metafeedurl\r
+        self.tree = None\r
+        \r
+    def parse(self):\r
+        # Fetch the meta feed (10s timeout) and collect the href of every\r
+        # entry whose link is of type application/atom+xml.\r
+        self.feedurls = []\r
+        stream = urlOpenTimeout(self.metafeedurl,10)\r
+        self.tree = etree.parse(stream)\r
+        entries = self.tree.findall('{http://www.w3.org/2005/Atom}entry')\r
+        for entry in entries:\r
+            titleelement = entry.find('{http://www.w3.org/2005/Atom}title')\r
+            linkelement = entry.find('{http://www.w3.org/2005/Atom}link')\r
+            if linkelement is not None:\r
+                if linkelement.attrib['type'] == 'application/atom+xml':\r
+                    # Got feed\r
+                    feedurl = linkelement.attrib['href']\r
+                    self.feedurls.append(feedurl)\r
+                \r
+    def get_feedurls(self):\r
+        # Valid only after parse() has been called.\r
+        return self.feedurls\r
+\r
+\r
+class FeedParser:\r
+    # Parses a single Atom feed and supports keyword search over the\r
+    # entry titles.\r
+    \r
+    def __init__(self,feedurl):\r
+        self.feedurl = feedurl\r
+        self.tree = None\r
+        \r
+    def parse(self):\r
+        # Fetch the feed (10s timeout) and index entries by their title.\r
+        self.title2entrymap = {}\r
+        print >>sys.stderr,"feedp: Parsing",self.feedurl\r
+        stream = urlOpenTimeout(self.feedurl,10)\r
+        self.tree = etree.parse(stream)\r
+        entries = self.tree.findall('{http://www.w3.org/2005/Atom}entry')\r
+        for entry in entries:\r
+            titleelement = entry.find('{http://www.w3.org/2005/Atom}title')\r
+            #print >> sys.stderr,"feedp: Got title",titleelement.text\r
+            self.title2entrymap[titleelement.text] = entry\r
+                \r
+    def search(self,searchstr):\r
+        """ Use Jelle's smart keyword search """\r
+        # Splits the query on spaces and returns matching entry elements.\r
+        needles = searchstr.strip().split(' ')\r
+        \r
+        haystack = []\r
+        for title,entry in self.title2entrymap.iteritems():\r
+            record = {}\r
+            record['name'] = title\r
+            record['entry'] = entry\r
+            haystack.append(record)\r
+            \r
+        records = KeywordSearch().search(haystack,needles)\r
+        hits = []\r
+        for record in records:\r
+            hits.append(record['entry'])\r
+        return hits\r
+\r
+\r
+def feedhits2atomxml(feedhits,searchstr,urlpathprefix):\r
+    # Wrap a list of Atom entry elements (search hits) into a new Atom\r
+    # feed document and return it serialized as an XML string.\r
+    \r
+    new_feed = etree.Element('{http://www.w3.org/2005/Atom}feed',attrib={'xmlns:rdf':"http://www.w3.org/1999/02/22-rdf-syntax-ns#", 'xmlns:sy':"http://purl.org/rss/1.0/modules/syndication/", 'xmlns:dc':"http://purl.org/dc/elements/1.1/", 'xmlns:p2pnext':"urn:p2pnext:contentfeed:2009", 'xmlns:taxo':"http://purl.org/rss/1.0/modules/taxonomy/"})\r
+    \r
+    title = etree.SubElement(new_feed, 'title')\r
+    title.text = 'Hits for '+searchstr\r
+\r
+    link = etree.SubElement(new_feed, 'link',attrib={'rel':'self','href':urlpathprefix})\r
+    author = etree.SubElement(new_feed, 'author')\r
+    name = etree.SubElement(author, 'name')\r
+    name.text = 'NSSA'\r
+    id = etree.SubElement(new_feed, 'id')\r
+    id.text = 'urn:nssa'\r
+    updated = etree.SubElement(new_feed, 'updated')\r
+    updated.text = now2formatRFC3339()\r
+    #TODO image = etree.SubElement(new_feed,'p2pnext:image',attrib={'src':"http://p2pnextfeed1.rad0.net/images/bbc.png"})\r
+\r
+    for entry in feedhits:\r
+        new_feed.append(entry)\r
+    \r
+    atom = '<?xml version="1.0" encoding="UTF-8"?>\n'\r
+    atom += etree.tostring(new_feed)\r
+    # Parser anomaly / formally correct bla bla\r
+    # Strip ElementTree's auto-generated ns0 prefix from the output.\r
+    atom = atom.replace(":ns0=","=")\r
+    atom = atom.replace("ns0:","") \r
+    return atom\r
+\r
+def now2formatRFC3339():\r
+    # Current UTC time formatted as an RFC 3339 timestamp (e.g.\r
+    # 2010-09-19T08:28:53Z), as required by Atom's <updated> element.\r
+    formatstr = "%Y-%m-%dT%H:%M:%S"\r
+    s = time.strftime(formatstr, time.gmtime())\r
+    s += 'Z'\r
+    return s \r
+\r
+\r
+\r
+if __name__ == '__main__':\r
+    # Manual smoke test: search all BBC sub-feeds for "Episode" and dump\r
+    # the resulting combined Atom feed to stderr.\r
+    searchstr = "Episode"\r
+    \r
+    metafp = MetaFeedParser('http://p2pnextfeed1.rad0.net/content/feed/bbc')\r
+    metafp.parse()\r
+    \r
+    allhits = []\r
+    for feedurl in metafp.get_feedurls():\r
+        feedp = FeedParser(feedurl)\r
+        feedp.parse()\r
+        hits = feedp.search(searchstr)\r
+        allhits.extend(hits)\r
+    \r
+    #print >>sys.stderr,"Got hits",`hits`\r
+    \r
+    for hitentry in allhits:\r
+        titleelement = hitentry.find('{http://www.w3.org/2005/Atom}title')\r
+        print >>sys.stderr,"Got hit",titleelement.text\r
+        \r
+    atomxml = feedhits2atomxml(allhits,searchstr,"http://localhost/bla")\r
+    print >>sys.stderr,"Result feed",atomxml\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/BackgroundProcess.py b/instrumentation/next-share/BaseLib/Plugin/BackgroundProcess.py
new file mode 100644 (file)
index 0000000..bcacc60
--- /dev/null
@@ -0,0 +1,959 @@
+# Written by Arno Bakker, Diego Rabioli
+# see LICENSE.txt for license information
+#
+# Implements the BackgroundProcess, i.e. SwarmEngine for SwarmPlugin and 
+# SwarmTransport=SwarmPlayer v2. See Plugin/SwarmEngine.py and Transport/SwarmEngine.py
+# for main startup.
+#
+# The BackgroundProcess shares a base class BaseApp with the SwarmPlayer v1,
+# which is a standalone P2P-based video player.
+#
+#
+# Notes: 
+# - Implement play while hashcheck?
+#        Not needed when proper shutdown & restart was done.
+# - load_checkpoint with DLSTATUS_DOWNLOADING for Plugin? 
+#        Nah, if we start BG when plugin started we have a video to play soon,
+#        so start others in STOPPED state (rather than switching them all
+#        to off and restart one in VOD mode just after)
+#
+
+# History:
+#
+# NSSA API 1.0.2
+#
+#  1.0.2    Added STOP message to tell plugin to stop playing the current item
+#           (needed to support new behaviour where control conn is not always
+#           shutdown anymore to support input.set_p2ptarget.
+#
+#           Added ERROR message to tell plugin NSSA won't be able to serve the
+#           content requested via START (for <video> support).    
+#
+#  1.0.1    Added INFO message to convey NSSA info to plugin for providing 
+#           feedback to the user.
+#
+# NSPlugin JavaScript API 1.0.2
+#
+#  1.0.2    Added input.set_p2ptarget() method to switch the TorrentDef currently
+#           playing. Released in M24.1
+#
+#  1.0.1    Added input.p2pstatus read-only property giving the latest status as
+#           reported by the NSSA. Released in M24.
+#
+#  1.0.0    Copy of VLC's Javascript interface
+#
+# 
+# modify the sys.stderr and sys.stdout for safe output
+import BaseLib.Debug.console
+
+import os
+import sys
+import time
+import random
+import binascii
+import tempfile
+import urllib
+from cStringIO import StringIO
+from base64 import b64encode, encodestring, decodestring
+from traceback import print_exc,print_stack
+from threading import Thread,currentThread,Lock
+
+if sys.platform == "win32":
+    try:
+        import win32event
+        import win32api
+    except:
+        pass
+
+try:
+    import wxversion
+    wxversion.select('2.8')
+except:
+    pass
+import wx
+
+from BaseLib.Core.API import *
+from BaseLib.Core.osutils import *
+from BaseLib.Core.Utilities.utilities import get_collected_torrent_filename
+from BaseLib.Utilities.LinuxSingleInstanceChecker import *
+from BaseLib.Utilities.Instance2Instance import InstanceConnectionHandler,InstanceConnection, Instance2InstanceClient
+from BaseLib.Utilities.TimedTaskQueue import TimedTaskQueue
+from BaseLib.Player.BaseApp import BaseApp
+from BaseLib.Player.swarmplayer import get_status_msgs
+from BaseLib.Plugin.defs import *
+from BaseLib.Plugin.Search import *
+from BaseLib.Plugin.AtomFeedParser import *
+
+from BaseLib.Video.defs import *
+from BaseLib.Video.utils import videoextdefaults
+from BaseLib.Video.VideoServer import VideoHTTPServer,MultiHTTPServer
+from BaseLib.Video.Ogg import is_ogg,OggMagicLiveStream
+
+from BaseLib.Core.Statistics.Status import Status, LivingLabReporter
+from BaseLib.WebUI.WebUI import WebIFPathMapper
+from BaseLib.Core.ClosedSwarm.ClosedSwarm import InvalidPOAException
+
+
+DEBUG = True
+PHONEHOME = True
+
+ALLOW_MULTIPLE = False
+
+KILLONIDLE = False
+IDLE_BEFORE_SELFKILL = 60.0 # Number of seconds 
+
+
+class BackgroundApp(BaseApp):
+    """ Background process core behind the browser plugin / SwarmTransport.
+    Owns the local video HTTP server plus its search/hits/WebUI path mappers,
+    and tracks which plugin instance connection (IC) currently "uses" each
+    Download via self.dusers (Download -> {'uic': IC-or-None, ...}). """
+
+    def __init__(self, redirectstderrout, appname, appversion, params, single_instance_checker, installdir, i2iport, sport, httpport):
+
+        # Almost generic HTTP server
+        self.videoHTTPServer = VideoHTTPServer(httpport)
+        self.videoHTTPServer.register(self.videoservthread_error_callback,self.videoservthread_set_status_callback)
+
+        BaseApp.__init__(self, redirectstderrout, appname, appversion, params, single_instance_checker, installdir, i2iport, sport)
+        self.httpport = httpport
+        
+        # SEARCH:P2P
+        # Maps a query ID to the original searchstr, timestamp and all hits (local + remote)
+        self.id2hits = Query2HitsMap()
+        
+        # Maps a URL path received by HTTP server to the requested resource,
+        # reading or generating it dynamically.
+        #
+        # For saving .torrents received in hits to P2P searches using
+        # SIMPLE+METADATA queries
+        self.tqueue = TimedTaskQueue(nameprefix="BGTaskQueue")
+        self.searchmapper = SearchPathMapper(self.s,self.id2hits,self.tqueue)
+        self.hits2anypathmapper = Hits2AnyPathMapper(self.s,self.id2hits)
+        
+        self.videoHTTPServer.add_path_mapper(self.searchmapper)
+        self.videoHTTPServer.add_path_mapper(self.hits2anypathmapper)
+
+        # WEB Interface        
+        # Maps a URL path received by HTTP server to the requested resource,
+        # reading or generating it dynamically.
+        self.webIFmapper = WebIFPathMapper(self, self.s)
+        
+        self.videoHTTPServer.add_path_mapper(self.webIFmapper)
+
+        # Generic HTTP server start. Don't add mappers dynamically afterwards!
+        self.videoHTTPServer.background_serve()
+
+        # Maps Downloads to a using InstanceConnection and streaminfo when it 
+        # plays. So it contains the Downloads in VOD mode for which there is
+        # active interest from a plugin.
+        #
+        # At the moment each Download is used/owned by a single IC and a new
+        # request for the same torrent will stop playback to the original IC
+        # and resume it to the new user.
+        #
+        self.dusers = {}   
+        self.approxplayerstate = MEDIASTATE_STOPPED
+
+        self.counter = 0 # counter for the stats reported periodically
+        self.interval = 120 # report interval
+        self.iseedeadpeople = False  # set once the kill-on-idle test has fired
+        
+        if sys.platform == "win32":
+            # If the BG Process is started by the plug-in notify it with an event
+            try:
+                startupEvent = win32event.CreateEvent( None, 0, 0, 'startupEvent' )
+                win32event.SetEvent( startupEvent )
+                win32api.CloseHandle( startupEvent ) # TODO : is it possible to avoid importing win32api just to close an handler?
+            except:
+                pass
+
+    def OnInit(self):
+        # wx application entry point; returns False to abort startup on error.
+        try:
+            # Do common initialization
+            BaseApp.OnInitBase(self)
+            
+            # Arno, 2010-07-15: We try to detect browser presence by looking
+            # at get_speed_info JSON request from Firefox statusbar. However.
+            # these calls are unreliable, i.e., somethings the XmlHTTPRequest
+            # at the client doesn't reach the server, although the server is
+            # capable of replying to the request. Hence, we disable self-destruct
+            # for now.
+            if KILLONIDLE:
+                print >>sys.stderr,"bg: Kill-on-idle test enabled"
+                self.i2is.add_task(self.i2i_kill_on_browser_gone,IDLE_BEFORE_SELFKILL/2)
+            else:
+                print >>sys.stderr,"bg: Kill-on-idle test disabled"
+            
+            print >>sys.stderr,"bg: Awaiting commands"
+            return True
+
+        except Exception,e:
+            print_exc()
+            self.show_error(str(e))
+            self.OnExit()
+            return False
+
+
+    # Arno: SEARCH: disable overlay for now
+    # Also need to ensure that *stats*db SQL scripts are copied along during
+    # build and crap.
+    """
+    def configure_session(self):
+        # Leave buddycast, etc. enabled for SEARCH
+        self.sconfig.set_social_networking(False)
+        self.sconfig.set_bartercast(False)
+        self.sconfig.set_crawler(False) # Arno: Cleanup million stats dbs first
+    """
+
+    #
+    # InstanceConnectionHandler interface. Called by Instance2InstanceThread
+    #
+    def external_connection_made(self,s):
+        # Wrap the new plugin socket in a BGInstanceConnection and register it.
+        ic = BGInstanceConnection(s,self,self.readlinecallback,self.videoHTTPServer)
+        self.singsock2ic[s] = ic
+        if DEBUG:
+            print >>sys.stderr,"bg: Plugin connection_made",len(self.singsock2ic),"++++++++++++++++++++++++++++++++++++++++++++++++"
+          
+        # Arno: Concurrency problems getting SEARCHURL message to work, 
+        # JavaScript can't always read it. TODO  
+        ##ic.searchurl(self.searchurl)
+
+    def connection_lost(self,s):
+        # Called on the I2I thread; defer the real cleanup to the GUI thread.
+        if DEBUG:
+            print >>sys.stderr,"bg: Plugin: connection_lost ------------------------------------------------" 
+
+        ic = self.singsock2ic[s]
+        InstanceConnectionHandler.connection_lost(self,s)
+        wx.CallAfter(self.gui_connection_lost,ic)
+        
+    def gui_connection_lost(self,ic,switchp2ptarget=False):
+        # Detach ic from its Download and schedule delayed removal of that
+        # Download. With switchp2ptarget=True only playback is cleaned up so
+        # the same control connection can START a new torrent.
+        # Find which download ic was interested in
+        d2remove = None
+        for d,duser in self.dusers.iteritems():
+            if duser['uic'] == ic:
+                duser['uic'] = None
+                d2remove = d
+                break
+        
+        # IC may or may not have been shutdown:
+        # Not: sudden browser crashes
+        # Yes: controlled stop via ic.shutdown()
+        try:
+            if switchp2ptarget:
+                ic.cleanup_playback() # idempotent
+            else:
+                ic.shutdown() # idempotent
+        except:
+            print_exc()
+        
+        if d2remove is not None:
+            # For VOD, apply cleanup policy to the Download, but only 
+            # after X seconds so if the plugin comes back with a new 
+            # request for the same stuff we can give it to him pronto. 
+            # This is expected to happen a lot due to page reloads / 
+            # history navigation.
+            #
+            # Arno, 2010-08-01: Restored old behaviour for live. Zapping
+            # more important than extra robustness.
+            #
+            d_delayed_remove_if_lambda = lambda:self.i2ithread_delayed_remove_if_not_complete(d2remove)
+            # h4x0r, abuse Istance2Instance server task queue for the delay
+            self.i2is.add_task(d_delayed_remove_if_lambda,10.0)
+        
+    def i2ithread_delayed_remove_if_not_complete(self,d2remove):
+        # Runs on the I2I thread after the 10s grace period; the state
+        # callback decides whether the Download is actually removed.
+        if DEBUG:
+            print >>sys.stderr,"bg: i2ithread_delayed_remove_if_not_complete"
+        d2remove.set_state_callback(self.sesscb_remove_playing_callback)
+        
+    def remove_playing_download(self,d2remove):
+        """ Called when sesscb_remove_playing_callback has determined that
+        we should remove this Download, because it would take too much
+        bandwidth to download it. However, we must check in another user has not
+        become interested. 
+        """
+        if DEBUG:
+            print >>sys.stderr,"bg: remove_playing_download @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
+        if d2remove in self.dusers:
+            duser = self.dusers[d2remove]
+            if duser['uic'] is None:
+                # No interest
+                if DEBUG:
+                    print >>sys.stderr,"bg: remove_playing_download: Yes, no interest"
+                BaseApp.remove_playing_download(self,d2remove)
+                if 'streaminfo' in duser:
+                    stream = duser['streaminfo']['stream']
+                    stream.close() # Close original stream. 
+                del self.dusers[d2remove]
+            elif DEBUG:
+                print >>sys.stderr,"bg: remove_playing_download: No, someone interested",`duser['uic']`
+
+        
+    def i2ithread_readlinecallback(self,ic,cmd):
+        """ Called by Instance2Instance thread """
+        # Marshal the command onto the GUI thread.
+        wx.CallAfter(self.gui_readlinecallback,ic,cmd)
+
+    def split_params(self, url):
+        """
+        Returns a touple (path, {name:value}) where the map can be empty.
+        Example: "/path?p1=v1&p2=v2" -> ('/path', {"p1":"v1", "p2":"v2"})
+        """
+        params = {}
+        idx = url.find("?")
+        if idx > -1:
+            _params = url[idx+1:].split("&")
+            url = url[:idx]
+            for param in _params:
+                if param.find("=") == -1:
+                    continue # Not a parameter
+                (name, value) = param.split("=", 1)
+                params[name] = value
+        return (url, params)
+        
+    def gui_readlinecallback(self,ic,cmd):
+        """ Receive command from Plugin """
+        
+        if DEBUG:
+            print >>sys.stderr,"bg: Got command:",cmd
+        try:
+            # START command
+            if cmd.startswith( 'START' ):
+                torrenturl = cmd.partition( ' ' )[2]
+                # NOTE(review): str.partition returns '' (not None) when there
+                # is no separator, so this branch looks unreachable — an empty
+                # URL falls through to load_from_url instead. Verify intent.
+                if torrenturl is None:
+                    raise ValueError('bg: Unformatted START command')
+                else:
+                    # SWITCHP2PTARGET: See if already downloading/playing something
+                    items = self.dusers.items() 
+                    for d,duser in items:
+                        if duser['uic'] == ic:
+                            # Stop current
+                            self.gui_connection_lost(ic,switchp2ptarget=True)
+                    
+                    # Here we need to drag the POA off the torrenturl,
+                    # if one is given
+                    (url, params) = self.split_params(torrenturl)
+                    if "poa" in params:
+                        poa_serialized = decodestring(params["poa"])
+                        try:
+                            poa = ClosedSwarm.POA.deserialize(poa_serialized)
+                            poa.verify()
+                        except:
+                            print >>sys.stderr,"Bad POA, ignoring"
+                            poa = None
+                    else:
+                        poa = None
+                        url = torrenturl
+                    self.get_torrent_start_download(ic,url,poa=poa)
+        
+            # SHUTDOWN command
+            elif cmd.startswith('SHUTDOWN'):
+                print >>sys.stderr,"bg: Got SHUTDOWN, sending SHUTDOWN"
+                ic.shutdown()
+            elif cmd.startswith('SUPPORTS'):
+                # Arno, 2010-06-15: only used by SwarmTransport at the moment
+                # to convey it cannot pause.
+                ic.set_supported_vod_events([VODEVENT_START])
+            else:
+                raise ValueError('bg: Unknown command: '+cmd)
+        except Exception,e:
+            print_exc()
+            # Arno, 2010-05-27: Don't kill Control connection, for set_p2ptarget
+            ic.error(str(e))
+            ic.cleanup_playback()
+    
+    def get_torrent_start_download(self,ic,url,poa=None):
+        """ Retrieve torrent file from url and start it in VOD mode, if not already """
+        
+        if url.endswith(".html"):
+            # Search mode, in which case URL is apparently the base URL of the search page.
+            # Just to keep exception trace away.
+            return
+            
+        tdef  = TorrentDef.load_from_url(url)
+        
+        # tdef.input['announce'] = "http://dead.globe.cs.vu.nl:6969/announce"
+        #tdef.metainfo['announce'] = "http://dead.globe.cs.vu.nl:6969/announce"
+        
+        # Select which video to play (if multiple)
+        if tdef.get_live():
+            videofiles = tdef.get_files()
+        else:
+            videofiles = tdef.get_files(exts=videoextdefaults)
+        if len(videofiles) == 1:
+            dlfile = videofiles[0]
+        elif len(videofiles) == 0:
+            raise ValueError("bg: get_torrent_start_download: No video files found! Giving up")
+        elif len(videofiles) > 1:
+            raise ValueError("bg: get_torrent_start_download: Too many files found! Giving up")
+
+        if DEBUG:
+            print >>sys.stderr,"bg: get_torrent_start_download: Found video file",dlfile
+
+        # Closed swarms?
+        if not poa:
+            if tdef.get_cs_keys():
+                # This is a closed swarm, try to get a POA
+                poa = self._get_poa(tdef)
+
+        infohash = tdef.get_infohash()
+        oldd = None
+        for d in self.s.get_downloads():
+            if d.get_def().get_infohash() == infohash:
+                oldd = d
+                break
+        
+        #
+        # Start a new Download, or if it already exists, start playback from
+        # beginning. This means that we don't currently support two ICs
+        # playing the same video. That is, two browser windows cannot play the
+        # same video.
+        #
+        if oldd is None or (oldd not in self.downloads_in_vodmode):
+            # New Download, or Download exists, but not in VOD mode, restart
+          
+            if DEBUG:
+                if oldd is None:
+                    print >>sys.stderr,"bg: get_torrent_start_download: Starting new Download"
+                else:
+                    print >>sys.stderr,"bg: get_torrent_start_download: Restarting old Download in VOD mode"
+            
+            d = self.start_download(tdef,dlfile,poa,ic.get_supported_vod_events())
+            duser = {'uic':ic}
+            self.dusers[d] = duser
+        else:
+            # oldd is already running in VOD mode. If it's a VOD torrent we
+            # don't need to restart, we can just seek(0) on the stream.
+            # If it's a live torrent, we should tell EOF to any old IC and
+            # continue playback to the new IC where it left off.
+            #
+            # NOTE(review): 'd' here is the loop variable left over from the
+            # search above; it equals oldd only because the loop broke on a
+            # match. Fragile — self.dusers[oldd] would be clearer. Confirm.
+            duser = self.dusers[d]
+            olduic = duser['uic']
+            if olduic is not None:
+                # Cleanup like a shutdown, but send STOP
+                print >>sys.stderr,"bg: get_torrent_start_download: Telling old player to stop"
+                olduic.cleanup_playback()
+                olduic.stop()
+            duser['uic'] = ic
+            if 'streaminfo' not in duser:
+                # Hasn't started playing yet, ignore.
+                pass
+            else:
+                # Already playing. Tell previous owner IC to quit, let new IC 
+                # start either from start (VOD) or where previous left off 
+                # (live).
+                if not tdef.get_live():
+                    duser['streaminfo']['stream'].seek(0)
+                ic.set_streaminfo(duser['streaminfo'])
+                
+                ic.start_playback(infohash)
+                
+        duser['said_start_playback'] = False
+        duser['decodeprogress'] = 0
+        
+    #
+    # DownloadStates
+    #
+    def gui_states_callback(self,dslist,haspeerlist):
+        """ Override BaseApp """
+        # Periodic per-Download state tick: report VOD stats and push a
+        # status/info line to each interested plugin connection.
+        #print >>sys.stderr,"bg: gui_states_callback",currentThread().getName()
+
+        (playing_dslist,totalhelping,totalspeed) = BaseApp.gui_states_callback(self,dslist,haspeerlist)
+        try:
+            self.report_periodic_vod_stats(playing_dslist)
+        except:
+            print_exc()
+       
+        for ds in playing_dslist:
+            d = ds.get_download()
+            duser = self.dusers[d]
+            uic = duser['uic']
+            if uic is not None:
+                # Generate info string for all
+                [topmsg,msg,duser['said_start_playback'],duser['decodeprogress']] = get_status_msgs(ds,self.approxplayerstate,self.appname,duser['said_start_playback'],duser['decodeprogress'],totalhelping,totalspeed)
+                info = msg
+                #if DEBUG:
+                #    print >>sys.stderr, 'bg: 4INFO: Sending',info
+                uic.info(info)
+            
+    def sesscb_vod_event_callback( self, d, event, params ):
+        """ Registered by BaseApp. Called by SessionCallbackThread """
+        wx.CallAfter(self.gui_vod_event_callback,d,event,params)
+        
+    def gui_vod_event_callback( self, d, event, params ):
+        # Handle VOD events on the GUI thread: on START, build the stream
+        # (with Ogg / bitrate-pacing wrappers as needed) and hand it to the
+        # interested IC; on PAUSE/RESUME, forward to the IC and track state.
+        if DEBUG:
+            print >>sys.stderr,"bg: gui_vod_event_callback: Event: ", event
+            print >>sys.stderr,"bg: gui_vod_event_callback: Params: ", params
+        if event == VODEVENT_START:
+            if params['filename']:
+                stream = open( params['filename'], "rb" )
+            else:
+                stream = params['stream']
+    
+            # Ric: small hack for the ogg mimetype (just for windows, 
+            # linux thinks it's an audio/ogg file)
+            if params['mimetype'] == 'video/x-ogg':
+                params['mimetype'] = 'application/ogg'
+                
+            # Arno: My Win7 thinks this is 'video/mpeg', so patch for that.  
+            selectedfiles = d.get_selected_files()
+            if selectedfiles is not None and len(selectedfiles) > 0:
+                for fn in selectedfiles:
+                    if is_ogg(fn):
+                        params['mimetype'] = 'application/ogg'
+            else:
+                name = d.get_def().get_name_as_unicode()
+                if is_ogg(name):
+                    params['mimetype'] = 'application/ogg'
+                    
+                    if d.get_def().get_live():
+                        # Live Ogg stream. To support this we need to do
+                        # two things:
+                        # 1. Write Ogg headers (stored in .tstream)
+                        # 2. Find first Ogg page in stream.
+                        stream = OggMagicLiveStream(d.get_def(),stream)
+
+            if not d.get_def().get_live() and not params['filename']:
+                # Arno, < 2010-08-10: Firefox reads aggressively, we just
+                # give it data at bitrate pace such that we know when we
+                # have to fallback to HTTP servers.
+                #
+                # 2010-08-10: not when file complete on disk ;-)
+                stream = AtBitrateStream( stream, params['bitrate'] )
+
+            
+            blocksize = d.get_def().get_piece_length()
+            #Ric: add svc on streaminfo, added bitrate
+            streaminfo = { 'mimetype': params['mimetype'], 'stream': stream, 'length': params['length'], 'blocksize':blocksize, 'svc': d.get_mode() == DLMODE_SVC, 'bitrate': params['bitrate'] }
+
+            duser = self.dusers[d]
+            duser['streaminfo'] = streaminfo
+            if duser['uic'] is not None:
+                # Only if playback wasn't canceled since starting
+                duser['uic'].set_streaminfo(duser['streaminfo'])
+                duser['uic'].start_playback(d.get_def().get_infohash())
+            
+                self.approxplayerstate = MEDIASTATE_PLAYING
+            else:
+                self.approxplayerstate = MEDIASTATE_STOPPED
+            
+        elif event == VODEVENT_PAUSE:
+            duser = self.dusers[d]
+            if duser['uic'] is not None:
+                duser['uic'].pause()
+            self.approxplayerstate = MEDIASTATE_PAUSED
+            
+        elif event == VODEVENT_RESUME:
+            duser = self.dusers[d]
+            if duser['uic'] is not None:
+                duser['uic'].resume()
+            self.approxplayerstate = MEDIASTATE_PLAYING
+
+
+    def get_supported_vod_events(self):
+        # See BGInstanceConnection.set_supported_vod_events() too.
+        return [ VODEVENT_START, VODEVENT_PAUSE, VODEVENT_RESUME ]
+
+    #
+    # VideoServer status/error reporting
+    #
+    def videoservthread_error_callback(self,e,url):
+        """ Called by HTTP serving thread """
+        wx.CallAfter(self.videoserver_error_guicallback,e,url)
+        
+    def videoserver_error_guicallback(self,e,url):
+        # GUI-thread half of the error callback; currently log-only.
+        print >>sys.stderr,"bg: Video server reported error",str(e)
+        #    self.show_error(str(e))
+        pass
+        # ARNOTODO: schedule current Download for removal?
+
+    def videoservthread_set_status_callback(self,status):
+        """ Called by HTTP serving thread """
+        wx.CallAfter(self.videoserver_set_status_guicallback,status)
+
+    def videoserver_set_status_guicallback(self,status):
+        # GUI-thread half of the status callback; intentionally a no-op.
+        #print >>sys.stderr,"bg: Video server sets status callback",status
+        # ARNOTODO: Report status to plugin
+        pass
+
+    #
+    # reports vod stats collected periodically
+    #
+    def report_periodic_vod_stats(self,playing_dslist):
+        # Every self.interval calls, push stall/late/dropped/pos VOD metrics
+        # for each playing download to the LivingLab status reporter.
+        #print >>sys.stderr, "VOD Stats"
+        self.counter += 1
+        if self.counter%self.interval == 0:
+            event_reporter = Status.get_status_holder("LivingLab")
+            if event_reporter is not None:
+                for ds in playing_dslist:
+                    dw = ds.get_download()
+                    b64_infohash = b64encode(dw.get_def().get_infohash())
+                    vod_stats = ds.get_vod_stats()
+                    #if vod_stats_has_key("prebuf"): event_reporter.add_event(b64_infohash, "prebufp:%d" % vod_stats['prebuf']) # prebuffering time that was needed
+                    if vod_stats.has_key("stall"): event_reporter.create_and_add_event("stall", [b64_infohash, vod_stats['stall']]) # time the player stalled
+                    if vod_stats.has_key("late"): event_reporter.create_and_add_event("late", [b64_infohash, vod_stats['late']]) # number of pieces arrived after they were due
+                    if vod_stats.has_key("dropped"): event_reporter.create_and_add_event("dropped", [b64_infohash, vod_stats['dropped']]) # number of pieces lost
+                    if vod_stats.has_key("pos"): event_reporter.create_and_add_event("pos", [b64_infohash, vod_stats['pos']]) # playback position
+
+    def gui_webui_remove_download(self,d2remove):
+        """ Called when user has decided to remove a specific DL via webUI """
+        if DEBUG:
+            print >>sys.stderr,"bg: gui_webui_remove_download"
+        self.gui_webui_halt_download(d2remove,stop=False)
+
+
+    def gui_webui_stop_download(self,d2stop):
+        """ Called when user has decided to stop a specific DL via webUI """
+        if DEBUG:
+            print >>sys.stderr,"bg: gui_webui_stop_download"
+        self.gui_webui_halt_download(d2stop,stop=True)
+        
+        
+    def gui_webui_restart_download(self,d2restart):
+        """ Called when user has decided to restart a specific DL via webUI for sharing """
+        duser = {'uic':None}
+        self.dusers[d2restart] = duser
+        d2restart.restart()
+
+
+    def gui_webui_halt_download(self,d2halt,stop=False):
+        """ Called when user has decided to stop or remove a specific DL via webUI.
+        For stop the Download is not removed. """
+        if d2halt in self.dusers:
+            try:
+                duser = self.dusers[d2halt]
+                olduic = duser['uic'] 
+                if olduic is not None:
+                    print >>sys.stderr,"bg: gui_webui_halt_download: Oops, someone interested, removing anyway"
+                    olduic.shutdown()
+                if 'streaminfo' in duser:
+                    # Download was already playing, clean up.
+                    stream = duser['streaminfo']['stream']
+                    stream.close() # Close original stream.
+            finally: 
+                del self.dusers[d2halt]
+        if stop:
+            BaseApp.stop_playing_download(self,d2halt)
+        else:
+            BaseApp.remove_playing_download(self,d2halt)
+
+
+    def gui_webui_remove_all_downloads(self,ds2remove):
+        """ Called when user has decided to remove all DLs via webUI """
+        if DEBUG:
+            print >>sys.stderr,"bg: gui_webui_remove_all_downloads"
+            
+        for d2remove in ds2remove:
+            self.gui_webui_halt_download(d2remove,stop=False)
+            
+            
+    def gui_webui_stop_all_downloads(self,ds2stop):
+        """ Called when user has decided to stop all DLs via webUI """
+        if DEBUG:
+            print >>sys.stderr,"bg: gui_webui_stop_all_downloads"
+            
+        for d2stop in ds2stop:
+            self.gui_webui_halt_download(d2stop,stop=True)
+
+
+    def gui_webui_restart_all_downloads(self,ds2restart):
+        """ Called when user has decided to restart all DLs via webUI """
+        if DEBUG:
+            print >>sys.stderr,"bg: gui_webui_restart_all_downloads"
+            
+        for d2restart in ds2restart:
+            self.gui_webui_restart_download(d2restart)
+
+    def i2i_kill_on_browser_gone(self):
+        # Self-destruct watchdog: if the WebUI saw no request for
+        # IDLE_BEFORE_SELFKILL seconds, (would) exit the process. The actual
+        # kill calls are commented out — currently log-only (see OnInit note).
+        resched = True
+        try:
+            lastt = self.webIFmapper.lastreqtime
+            
+            print >>sys.stderr,"bg: Test for self destruct: idle",time.time()-lastt,currentThread().getName()
+            
+            if time.time() - IDLE_BEFORE_SELFKILL > lastt:
+                if self.iseedeadpeople:
+                    print >>sys.stderr,"bg: SHOULD HAVE self destructed, hardcore stylie"
+                    resched = False
+                    #os._exit(0)
+                else:
+                    print >>sys.stderr,"bg: SHOULD HAVE self destructed"
+                    self.iseedeadpeople = True
+                    # No sign of life from statusbar, self destruct 
+                    #wx.CallAfter(self.ExitMainLoop)            
+        finally:
+            if resched:
+                self.i2is.add_task(self.i2i_kill_on_browser_gone,IDLE_BEFORE_SELFKILL/2)
+
+
+class BGInstanceConnection(InstanceConnection):
+    
+    def __init__(self,singsock,connhandler,readlinecallback,videoHTTPServer):
+        InstanceConnection.__init__(self, singsock, connhandler, readlinecallback)
+        
+        self.bgapp = connhandler
+        self.videoHTTPServer = videoHTTPServer
+        self.urlpath = None
+        self.cstreaminfo = {}
+        self.shutteddown = False
+        self.supportedvodevents = [VODEVENT_START,VODEVENT_PAUSE,VODEVENT_RESUME]
+
+
+    def set_streaminfo(self,streaminfo):
+        """ Copy streaminfo contents and replace stream with a ControlledStream """
+        """
+        For each IC we create separate stream object and a unique path in the 
+        HTTP server. This avoids nasty thread synchronization with the server
+        when a new IC wants to play the same content. The Tribler Core stream
+        does not allow multiple readers. This means we would have to stop
+        the HTTP server from writing the stream to the old IC, before we
+        can allow the new IC to read.
+        
+        We solved this as follows. The original Tribler Core stream is
+        wrapped in a ControlledStream, one for each IC. When a new IC 
+        wants to play we tell the old IC's ControlledStream to generate
+        an EOF to the HTTP server, and tell the old IC to SHUTDOWN. We
+        then either rewind the Tribler Core stream (VOD) or leave it (live)
+        and tell the new IC to PLAY. The new ControlledStream will then
+        be read by the HTTP server again.
+        """
+        self.cstreaminfo.update(streaminfo)
+        stream = streaminfo['stream']
+        cstream = ControlledStream( stream )
+        self.cstreaminfo['stream'] = cstream
+
+    def start_playback(self,infohash):
+        """ Register cstream with HTTP server and tell IC to start reading """
+        
+        self.urlpath = URLPATH_CONTENT_PREFIX+'/'+infohash2urlpath(infohash)+'/'+str(random.random())
+
+        self.videoHTTPServer.set_inputstream(self.cstreaminfo,self.urlpath)
+        
+        if DEBUG:
+            print >> sys.stderr, "bg: Telling plugin to start playback of",self.urlpath
+        
+        self.write( 'PLAY '+self.get_video_url()+'\r\n' )
+
+    def cleanup_playback(self):
+        """ Make the HTTP server stop serving this IC's stream.
+        Closing the ControlledStream makes the HTTP server thread see EOF
+        without closing the underlying Tribler Core stream. """
+        if DEBUG:
+            print >>sys.stderr,'bg: ic: cleanup'
+        # Cause HTTP server thread to receive EOF on inputstream
+        if len(self.cstreaminfo) != 0:
+            self.cstreaminfo['stream'].close()
+            try:
+                # TODO: get rid of del_inputstream lock
+                # Arno, 2009-12-11: Take this out of critical path on MainThread
+                http_del_inputstream_lambda = lambda:self.videoHTTPServer.del_inputstream(self.urlpath)
+                self.bgapp.tqueue.add_task(http_del_inputstream_lambda,0) 
+            except:
+                print_exc()
+        
+
+    def get_video_url(self):
+        """ Return the local (127.0.0.1) HTTP URL serving the current stream. """
+        return 'http://127.0.0.1:'+str(self.videoHTTPServer.get_port())+self.urlpath
+
+    def pause(self):
+        """ Tell the IC to pause playback. """
+        self.write( 'PAUSE\r\n' )
+        
+    def resume(self):
+        """ Tell the IC to resume playback. """
+        self.write( 'RESUME\r\n' )
+
+    def info(self,infostr):
+        """ Send an informational status message to the IC. """
+        self.write( 'INFO '+infostr+'\r\n' )        
+
+    # Arno, 2010-05-28: Convey the BGprocess won't be able to serve the content
+    def error(self,infostr):
+        """ Tell the IC this background process cannot serve the content. """
+        self.write( 'ERROR '+infostr+'\r\n' )        
+
+    # Arno, 2010-05-27: Stop playback
+    def stop(self):
+        """ Tell the IC to stop playback; unlike shutdown(), the control
+        connection stays open. """
+        # Stop playback
+        self.write( 'STOP\r\n' )
+
+    def shutdown(self):
+        """ Shut this IC down: stop serving its stream, tell the plugin to
+        SHUTDOWN, and close the control connection. The shutteddown flag
+        makes repeated calls a no-op. """
+        # SHUTDOWN Service
+        if DEBUG:
+            print >>sys.stderr,'bg: ic: shutdown'
+        # Run the shutdown sequence at most once.
+        if not self.shutteddown:
+            self.shutteddown = True
+            self.cleanup_playback()
+            
+            self.write( 'SHUTDOWN\r\n' )
+            # Will cause BaseApp.connection_lost() to be called, where we'll
+            # handle what to do about the Download that was started for this
+            # IC.
+            try:
+                self.close()
+            except:
+                print_exc()
+
+    def get_supported_vod_events(self):
+        """ Return the list of VOD events this IC supports. """
+        return self.supportedvodevents
+    
+    def set_supported_vod_events(self,eventlist):
+        """ Record the list of VOD events this IC supports. """
+        self.supportedvodevents = eventlist
+
+
+class ControlledStream:
+    """ A file-like object that throws EOF when closed, without actually closing
+    the underlying inputstream. See BGInstanceConnection.set_streaminfo() for
+    an explanation on how this is used. 
+    """
+    def __init__(self,stream):
+        # The wrapped stream; never closed by this class.
+        self.stream = stream
+        self.done = False # Event() -- True once close() was called
+        
+    def read(self,nbytes=None):
+        # Delegate to the wrapped stream until close() is called, then
+        # return the empty string so readers see end-of-file.
+        if not self.done:
+            return self.stream.read(nbytes)
+        else:
+            return '' # EOF
+
+    def seek(self,pos,whence=os.SEEK_SET):
+        # Pass-through seek; does not reset the EOF state set by close().
+        self.stream.seek(pos,whence)
+        
+    def close(self):
+        # Only mark EOF for this wrapper.
+        self.done = True
+        # DO NOT close original stream
+
+class AtBitrateStream:
+    """ Give from playback position plus a safe margin at video bitrate speed.
+        On seeking resync the playback position and the safe margin.
+    """
+
+    # Give at bitrate speed policy: give from playback position + SAFE_MARGIN_TIME
+    # at bitrate speed during STREAM_STATE_PLAYING, give at full speed during
+    # STREAM_STATE_PREBUFFER. STREAM_STATE_TRANSITION indicates that the playback has
+    # to start or that the user just seeked.
+
+    # Safe buffer size in seconds
+    SAFE_MARGIN_TIME = 10.0  # same as VideoOnDemand.py
+
+    # Increment the bitrate by percentage (give more bandwidth to the player).
+    BITRATE_SPEED_INCREMENT = 1.05 # +5%
+
+    # Streaming status
+    STREAM_STATE_TRANSITION = 0
+    STREAM_STATE_PREBUFFER  = 1
+    STREAM_STATE_PLAYING    = 2
+
+    def __init__( self, stream, bitrate ):
+        # Underlying stream; never closed by this wrapper (see close()).
+        self.stream = stream
+        self.done = False # Event()
+        # bitrate appears to be bytes/second, judging by the safe_bytes and
+        # played_bytes arithmetic below -- TODO confirm at call sites.
+        self.bitrate = bitrate
+        self.safe_bytes = self.SAFE_MARGIN_TIME * bitrate
+        self.stream_state = self.STREAM_STATE_TRANSITION
+        self.last_time = 0.0           # wall-clock time of previous read
+        self.playback = 0.0            # estimated playback position (seconds)
+        self.given_bytes_till = 0      # total bytes handed out so far
+
+    def has_to_sleep( self, nbytes ):
+        """ Account for nbytes being handed out and return how long (seconds)
+        the caller should sleep to stay at approximately bitrate speed. """
+        curr_time = time.time()
+        if self.stream_state is self.STREAM_STATE_TRANSITION:
+            # First call after start or seek: reset the clock, no time elapsed.
+            self.last_time = curr_time
+            elapsed_time = 0.0
+            self.stream_state = self.STREAM_STATE_PREBUFFER
+        else:
+            elapsed_time = curr_time - self.last_time
+            self.last_time = curr_time
+
+        # Advance estimated playback position slightly faster than real time
+        # (+5%) so the player gets a little extra bandwidth.
+        self.playback += elapsed_time * self.BITRATE_SPEED_INCREMENT
+        if self.stream_state is self.STREAM_STATE_PREBUFFER:
+            # Prebuffering: give at full speed until we are safe_bytes ahead
+            # of the estimated playback position.
+            played_bytes = self.playback * self.bitrate
+            if played_bytes + self.safe_bytes <= self.given_bytes_till:
+                self.stream_state = self.STREAM_STATE_PLAYING
+            self.given_bytes_till += nbytes
+            return 0.0
+        else:
+            # Playing: sleep off any time by which the bytes already given out
+            # run ahead of playback position + safe margin; if we fell behind,
+            # drop back to prebuffering (full speed).
+            delta_time = ( self.given_bytes_till / float( self.bitrate ) ) - ( self.playback + self.SAFE_MARGIN_TIME )
+            if delta_time <= 0.0:
+                self.stream_state = self.STREAM_STATE_PREBUFFER
+            self.given_bytes_till += nbytes
+            return max( 0.0, delta_time )
+
+    def read(self,nbytes=None):
+        # Read at most nbytes, then sleep as dictated by the pacing policy.
+        # NOTE(review): nbytes=None would break has_to_sleep()'s arithmetic;
+        # callers presumably always pass a byte count -- verify.
+        if not self.done:
+            to_give = self.stream.read( nbytes )
+            sleep_time = self.has_to_sleep( nbytes )
+            #print >>sys.stderr,"DIEGO DEBUG : SLEEP_time", sleep_time
+            if sleep_time > 0.0:
+                time.sleep( sleep_time )
+            return to_give
+        else:
+            return '' # EOF
+
+    def seek(self,pos,whence=os.SEEK_SET):
+        # Resync pacing state to the new position; the next read restarts
+        # the clock via STREAM_STATE_TRANSITION.
+        self.stream.seek(pos,whence)
+        self.stream_state = self.STREAM_STATE_TRANSITION
+        self.given_bytes_till = pos
+        self.playback = pos / float( self.bitrate )
+        
+    def close(self):
+        # Only mark EOF for this wrapper.
+        self.done = True
+        # DO NOT close original stream
+
+
+##############################################################
+#
+# Main Program Start Here
+#
+##############################################################
+def run_bgapp(appname,appversion,i2iport,sessport,httpport, params = None,killonidle=False):
+    """ Set sys.argv[1] to "--nopause" to inform the Core that the player
+    doesn't support VODEVENT_PAUSE, e.g. the SwarmTransport.
+    """ 
+    # Default params; note that any command-line arguments override them.
+    if params is None:
+        params = [""]
+    
+    if len(sys.argv) > 1:
+        params = sys.argv[1:]
+
+    # Publish the idle-kill policy for the rest of the module.
+    global KILLONIDLE
+    KILLONIDLE = killonidle
+
+    """
+    # Create single instance semaphore
+    # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears
+    # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc()
+    #
+    if sys.platform != 'linux2':
+        single_instance_checker = wx.SingleInstanceChecker(appname+"-"+ wx.GetUserId())
+    else:
+        single_instance_checker = LinuxSingleInstanceChecker(appname)
+    """
+    # Arno, 2010-03-05: This is a vital print that must not be removed, otherwise
+    # the program will just say "15:29:02: Deleted stale lock file '/home/arno/SwarmPlugin-arno'"
+    # and exit after a restart of the instance :-(
+    #
+    print >>sys.stderr,"bg: Test if already running"
+    single_instance_checker = wx.SingleInstanceChecker(appname+"-"+ wx.GetUserId())
+    if single_instance_checker.IsAnotherRunning():
+        print >>sys.stderr,"bg: Already running, exit"
+        os._exit(0)
+
+    # Frozen (py2exe) binary: install dir is next to the .exe; otherwise cwd.
+    arg0 = sys.argv[0].lower()
+    if arg0.endswith('.exe'):
+        installdir = os.path.abspath(os.path.dirname(sys.argv[0]))
+    else:
+        installdir = os.getcwd()  
+
+    # Launch first single instance
+    app = BackgroundApp(0, appname, appversion, params, single_instance_checker, installdir, i2iport, sessport, httpport)
+    s = app.s
+
+    # Enable P2P-Next ULANC logging.
+    if PHONEHOME: 
+        status = Status.get_status_holder("LivingLab")
+        # Base64-encoded permid (newlines stripped) identifies this peer.
+        id = encodestring(s.get_permid()).replace("\n","")
+        reporter = LivingLabReporter.LivingLabPeriodicReporter("Living lab CS reporter", 300, id) # Report every 5 minutes 
+        status.add_reporter(reporter)
+
+    # Blocks until the wx application exits.
+    app.MainLoop()
+
+    if PHONEHOME:
+        reporter.stop()
+
+    print >>sys.stderr,"Sleeping seconds to let other threads finish"
+    time.sleep(2)
+
+    if not ALLOW_MULTIPLE:
+        del single_instance_checker
+        
+    # Ultimate catchall for hanging popen2's and what not
+    os._exit(0)
+
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/SwarmPlugin.inf b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/SwarmPlugin.inf
new file mode 100644 (file)
index 0000000..27bd385
--- /dev/null
@@ -0,0 +1,26 @@
+; Version number and signature of INF file.\r
+; Written by Diego Andres Rabaioli\r
+; see LICENSE.txt for license information\r
+;\r
+[version]\r
+  signature="$CHICAGO$"\r
+  AdvancedINF=2.0\r
+\r
+[Add.Code]\r
+  axvlc.dll=axvlc.dll\r
+  SwarmPlugin_1.1.0.exe=SwarmPlugin_1.1.0.exe\r
+\r
+[axvlc.dll]\r
+  FileVersion=1,1,0,0\r
+  clsid={1800B8AF-4E33-43C0-AFC7-894433C13538}\r
+  RegisterServer=no\r
+  Hook=runinstaller\r
+\r
+[SwarmPlugin_1.1.0.exe]\r
+  FileVersion=1,1,0,0\r
+  file-win32-x86=http://trial.p2p-next.org/download/SwarmPlugin_1.1.0.exe\r
+\r
+[runinstaller]\r
+  run=%EXTRACT_DIR%\SwarmPlugin_1.1.0.exe\r
+\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/SwarmPlugin_IE.inf b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/SwarmPlugin_IE.inf
new file mode 100644 (file)
index 0000000..bc4ff18
--- /dev/null
@@ -0,0 +1,26 @@
+; Version number and signature of INF file.\r
+; Written by Diego Andres Rabaioli\r
+; see LICENSE.txt for license information\r
+;\r
+[version]\r
+  signature="$CHICAGO$"\r
+  AdvancedINF=2.0\r
+\r
+[Add.Code]\r
+  axvlc.dll=axvlc.dll\r
+  SwarmPlugin_1.1.0.exe=SwarmPlugin_IE_1.1.0.exe\r
+\r
+[axvlc.dll]\r
+  FileVersion=1,1,0,0\r
+  clsid={1800B8AF-4E33-43C0-AFC7-894433C13538}\r
+  RegisterServer=no\r
+  Hook=runinstaller\r
+\r
+[SwarmPlugin_IE_1.1.0.exe]\r
+  FileVersion=1,1,0,0\r
+  file-win32-x86=http://trial.p2p-next.org/download/SwarmPlugin_IE_1.1.0.exe\r
+\r
+[runinstaller]\r
+  run=%EXTRACT_DIR%\SwarmPlugin_IE_1.1.0.exe\r
+\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/heading.bmp b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/heading.bmp
new file mode 100644 (file)
index 0000000..7bdbfcd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/heading.bmp differ
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/setupBGexe.py b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/setupBGexe.py
new file mode 100644 (file)
index 0000000..ed810d6
--- /dev/null
@@ -0,0 +1,17 @@
+# Written by Diego Rabioli, Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# Run from console: "python setupBGexe.py py2exe"\r
+import os\r
+\r
+from distutils.core import setup\r
+import py2exe # Not a superfluous import!\r
+\r
+from BaseLib.__init__ import LIBRARYNAME\r
+mainfile = os.path.join(LIBRARYNAME,'Plugin','SwarmEngine.py')\r
+\r
+# Arno: 2009-06-09: changed from console= to make sure py2exe writes\r
+# a BackgroundProcess.exe.log\r
+#\r
+setup(windows=[mainfile]) \r
+\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin.exe.manifest b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin.exe.manifest
new file mode 100644 (file)
index 0000000..1082ce2
--- /dev/null
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\r
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">\r
+       <assemblyIdentity\r
+          version="0.1.0.0"\r
+          processorArchitecture="X86"\r
+          name="Microsoft.Winweb.SwarmPlugin"\r
+          type="win32"\r
+       />\r
+       <description>SwarmPlugin for Internet Explorer</description>\r
+       <dependency>\r
+          <dependentAssembly>\r
+            <assemblyIdentity\r
+              type="win32"\r
+              name="Microsoft.Windows.Common-Controls"\r
+              version="6.0.0.0"\r
+              processorArchitecture="X86"\r
+              publicKeyToken="6595b64144ccf1df"\r
+              language="*"\r
+            />\r
+          </dependentAssembly>\r
+       </dependency>\r
+</assembly>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin.nsi b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin.nsi
new file mode 100644 (file)
index 0000000..e3aa53b
--- /dev/null
@@ -0,0 +1,195 @@
+!define PRODUCT "SwarmPlugin"\r
+!define VERSION "1.1.0"\r
+!define BG "bgprocess"\r
+\r
+!include "MUI.nsh"\r
+\r
+\r
+;--------------------------------\r
+;Configuration\r
+\r
+;General\r
+Name "${PRODUCT} ${VERSION}"\r
+OutFile "${PRODUCT}_${VERSION}.exe"\r
+\r
+;Folder selection page\r
+InstallDir "$PROGRAMFILES\${PRODUCT}"\r
\r
+;Remember install folder\r
+InstallDirRegKey HKCU "Software\${PRODUCT}" ""\r
+\r
+;\r
+; Uncomment for smaller file size\r
+;\r
+SetCompressor "lzma"\r
+;\r
+; Uncomment for quick build time\r
+;\r
+;SetCompress "off"\r
+\r
+CompletedText "Installation completed. Thank you for choosing ${PRODUCT}"\r
+\r
+BrandingText "${PRODUCT}"\r
+\r
+;--------------------------------\r
+;Modern UI Configuration\r
+\r
+!define MUI_ABORTWARNING\r
+!define MUI_HEADERIMAGE\r
+!define MUI_HEADERIMAGE_BITMAP "heading.bmp"\r
+\r
+;--------------------------------\r
+;Pages\r
+\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT "I accept"\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE "I decline"\r
+;   !define MUI_FINISHPAGE_RUN "$INSTDIR\swarmplayer.exe"\r
+\r
+!insertmacro MUI_PAGE_LICENSE "binary-LICENSE.txt"\r
+!insertmacro MUI_PAGE_COMPONENTS\r
+!insertmacro MUI_PAGE_DIRECTORY\r
+!insertmacro MUI_PAGE_INSTFILES\r
+!insertmacro MUI_PAGE_FINISH\r
+\r
+!insertmacro MUI_UNPAGE_CONFIRM\r
+!insertmacro MUI_UNPAGE_INSTFILES\r
+\r
+;!insertmacro MUI_DEFAULT UMUI_HEADERIMAGE_BMP heading.bmp"\r
+\r
+;--------------------------------\r
+;Languages\r
+\r
+!insertmacro MUI_LANGUAGE "English"\r
\r
+;--------------------------------\r
+;Language Strings\r
+\r
+;Description\r
+LangString DESC_SecMain ${LANG_ENGLISH} "Install ${PRODUCT}"\r
+LangString DESC_SecStart ${LANG_ENGLISH} "Create Start Menu Shortcuts"\r
+\r
+;--------------------------------\r
+;Installer Sections\r
+\r
+Section "!Main EXE" SecMain\r
+ SectionIn RO\r
+ SetOutPath "$INSTDIR"\r
+ File *.txt\r
+  ; TODO : add checkbox for IE and Fx\r
+ File activex\axvlc.dll\r
+ File mozilla\npvlc.dll\r
+ File *.dll\r
+ File activex\axvlc.dll.manifest\r
+ File mozilla\npvlc.dll.manifest\r
+ File *.dll.manifest\r
+\r
+ File /r bgprocess\r
+\r
+ File /r plugins\r
+ File /r locale\r
+ File /r osdmenu\r
+ File /r http\r
+\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "BGProcessPath" "$INSTDIR\bgprocess\SwarmEngine.exe"\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "InstallDir" "$INSTDIR"\r
+\r
+ ; Register IE Plug-in\r
+ RegDLL "$INSTDIR\axvlc.dll"\r
+\r
+ ; Register Firefox Plug-in\r
+ !define MozillaPlugin "Software\MozillaPlugins\@P2P-Next.org/swarmplugin,version=${VERSION}"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Description" "SwarmPlugin for Mozilla Firefox"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Path" "$INSTDIR\npvlc.dll"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Product" "SwarmPlugin P2P Multimedia Plug-in"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Vendor" "P2P-Next"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Version" "${VERSION}"\r
+\r
+; Vista Registration\r
+  ; Vista detection\r
+  ReadRegStr $R0 HKLM "SOFTWARE\Microsoft\Windows NT\CurrentVersion" CurrentVersion\r
+  StrCpy $R1 $R0 3\r
+  StrCmp $R1 '6.0' lbl_vista lbl_done\r
+\r
+  ; TODO : look at that\r
+  lbl_vista:\r
+  WriteRegStr HKLM "Software\RegisteredApplications" "${PRODUCT}" "Software\Clients\Media\${PRODUCT}\Capabilities"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationName" "${PRODUCT} media player"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationDescription" "${PRODUCT} - Torrent videostreaming browser plugin"\r
+\r
+  lbl_done:\r
+\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "DisplayName" "${PRODUCT} (remove only)"\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "UninstallString" "$INSTDIR\Uninstall.exe"\r
+\r
+; Now writing to HKEY_LOCAL_MACHINE only -- remove references to uninstall from current user\r
+ DeleteRegKey HKEY_CURRENT_USER "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+; Remove old error log if present\r
+ Delete "$INSTDIR\swarmplayer.exe.log"\r
+\r
+ WriteUninstaller "$INSTDIR\Uninstall.exe"\r
+\r
+\r
+  ; Add an application to the firewall exception list - All Networks - All IP Version - Enabled\r
+  SimpleFC::AddApplication "SwarmEngine" "$INSTDIR\bgprocess\SwarmEngine.exe" 0 2 "" 1\r
+\r
+  ; Pop $0 ; return error(1)/success(0)\r
\r
+SectionEnd\r
+\r
+\r
+Section "Startmenu Icons" SecStart\r
+   SetShellVarContext all\r
+   CreateDirectory "$SMPROGRAMS\${PRODUCT}"\r
+   CreateShortCut "$SMPROGRAMS\${PRODUCT}\Uninstall.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0\r
+SectionEnd\r
+\r
+;--------------------------------\r
+;Descriptions\r
+\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecMain} $(DESC_SecMain)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecStart} $(DESC_SecStart)\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_END\r
+\r
+;--------------------------------\r
+;Uninstaller Section\r
+\r
+Section "Uninstall"\r
+\r
+ UnRegDLL "$INSTDIR\axvlc.dll"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "Software\MozillaPlugins\@P2P-Next.org/swarmplugin,version=${VERSION}"\r
+ RMDir /r "$INSTDIR"\r
+\r
+ SetShellVarContext all\r
+ RMDir "$SMPROGRAMS\${PRODUCT}"\r
+ RMDir /r "$SMPROGRAMS\${PRODUCT}"\r
\r
+\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "Software\Clients\Media\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+\r
+\r
+ ; Remove an application from the firewall exception list\r
+ SimpleFC::RemoveApplication "$INSTDIR\bgprocess\SwarmEngine.exe"\r
+\r
+ ; Pop $0 ; return error(1)/success(0)\r
+\r
+\r
+SectionEnd\r
+\r
+;--------------------------------\r
+;Functions Section\r
+\r
+Function .onInit\r
+  System::Call 'kernel32::CreateMutexA(i 0, i 0, t "SwarmPlugin") i .r1 ?e' \r
+\r
+  Pop $R0 \r
+\r
+  StrCmp $R0 0 +3 \r
+\r
+  MessageBox MB_OK "The installer is already running."\r
+\r
+  Abort \r
+FunctionEnd\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin_FX_only.nsi b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin_FX_only.nsi
new file mode 100644 (file)
index 0000000..bc45cce
--- /dev/null
@@ -0,0 +1,180 @@
+!define PRODUCT "SwarmPlugin"\r
+!define VERSION "1.1.0"\r
+!define BG "bgprocess"\r
+\r
+\r
+!include "MUI.nsh"\r
+\r
+;--------------------------------\r
+;Configuration\r
+\r
+;General\r
+Name "${PRODUCT} ${VERSION}"\r
+OutFile "${PRODUCT}_${VERSION}.exe"\r
+\r
+;Folder selection page\r
+InstallDir "$PROGRAMFILES\${PRODUCT}"\r
\r
+;Remember install folder\r
+InstallDirRegKey HKCU "Software\${PRODUCT}" ""\r
+\r
+;\r
+; Uncomment for smaller file size\r
+;\r
+SetCompressor "lzma"\r
+;\r
+; Uncomment for quick build time\r
+;\r
+;SetCompress "off"\r
+\r
+CompletedText "Installation completed. Thank you for choosing ${PRODUCT}"\r
+\r
+BrandingText "${PRODUCT}"\r
+\r
+;--------------------------------\r
+;Modern UI Configuration\r
+\r
+!define MUI_ABORTWARNING\r
+!define MUI_HEADERIMAGE\r
+!define MUI_HEADERIMAGE_BITMAP "heading.bmp"\r
+\r
+;--------------------------------\r
+;Pages\r
+\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT "I accept"\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE "I decline"\r
+;   !define MUI_FINISHPAGE_RUN "$INSTDIR\swarmplayer.exe"\r
+\r
+!insertmacro MUI_PAGE_INSTFILES\r
+\r
+!insertmacro MUI_UNPAGE_CONFIRM\r
+!insertmacro MUI_UNPAGE_INSTFILES\r
+\r
+;!insertmacro MUI_DEFAULT UMUI_HEADERIMAGE_BMP heading.bmp"\r
+\r
+;--------------------------------\r
+;Languages\r
+\r
+!insertmacro MUI_LANGUAGE "English"\r
\r
+;--------------------------------\r
+;Language Strings\r
+\r
+;Description\r
+LangString DESC_SecMain ${LANG_ENGLISH} "Install ${PRODUCT}"\r
+LangString DESC_SecStart ${LANG_ENGLISH} "Create Start Menu Shortcuts"\r
+\r
+;--------------------------------\r
+;Installer Sections\r
+\r
+Section "!Main EXE" SecMain\r
+ SectionIn RO\r
+ SetOutPath "$INSTDIR"\r
+ File *.txt\r
+ File mozilla\npvlc.dll\r
+ File mozilla\npvlc.dll.manifest\r
\r
+ File *.dll\r
+ File /r bgprocess\r
+\r
+ File /r plugins\r
+ File /r locale\r
+ File /r osdmenu\r
+ File /r http\r
+\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "BGProcessPath" "$INSTDIR\bgprocess\SwarmEngine.exe"\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "InstallDir" "$INSTDIR"\r
+\r
+ ; Register Firefox Plug-in\r
+ !define MozillaPlugin "Software\MozillaPlugins\@P2P-Next.org/swarmplugin,version=${VERSION}"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Description" "SwarmPlugin for Mozilla Firefox"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Path" "$INSTDIR\npvlc.dll"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Product" "SwarmPlugin P2P Multimedia Plug-in"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Vendor" "P2P-Next"\r
+ WriteRegStr HKLM ${MozillaPlugin} "Version" "${VERSION}"\r
+\r
+; Vista Registration\r
+  ; Vista detection\r
+  ReadRegStr $R0 HKLM "SOFTWARE\Microsoft\Windows NT\CurrentVersion" CurrentVersion\r
+  StrCpy $R1 $R0 3\r
+  StrCmp $R1 '6.0' lbl_vista lbl_done\r
+\r
+  ; TODO : look at that\r
+  lbl_vista:\r
+  WriteRegStr HKLM "Software\RegisteredApplications" "${PRODUCT}" "Software\Clients\Media\${PRODUCT}\Capabilities"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationName" "${PRODUCT} media player"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationDescription" "${PRODUCT} - Torrent videostreaming browser plugin"\r
+\r
+  lbl_done:\r
+\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "DisplayName" "${PRODUCT} (remove only)"\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "UninstallString" "$INSTDIR\Uninstall.exe"\r
+\r
+; Now writing to HKEY_LOCAL_MACHINE only -- remove references to uninstall from current user\r
+ DeleteRegKey HKEY_CURRENT_USER "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+; Remove old error log if present\r
+ Delete "$INSTDIR\swarmplayer.exe.log"\r
+\r
+ WriteUninstaller "$INSTDIR\Uninstall.exe"\r
+\r
+  ; Add an application to the firewall exception list - All Networks - All IP Version - Enabled\r
+  SimpleFC::AddApplication "SwarmEngine" "$INSTDIR\bgprocess\SwarmEngine.exe" 0 2 "" 1\r
+  \r
+  ; Pop $0 ; return error(1)/success(0)\r
+\r
+\r
+SectionEnd\r
+\r
+Section "Startmenu Icons" SecStart\r
+   SetShellVarContext all\r
+   CreateDirectory "$SMPROGRAMS\${PRODUCT}"\r
+   CreateShortCut "$SMPROGRAMS\${PRODUCT}\Uninstall.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0\r
+SectionEnd\r
+\r
+;--------------------------------\r
+;Descriptions\r
+\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecMain} $(DESC_SecMain)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecStart} $(DESC_SecStart)\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_END\r
+\r
+;--------------------------------\r
+;Uninstaller Section\r
+\r
+Section "Uninstall"\r
+\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "Software\MozillaPlugins\@P2P-Next.org/swarmplugin,version=${VERSION}"\r
+ RMDir /r "$INSTDIR"\r
+\r
+ SetShellVarContext all\r
+ RMDir "$SMPROGRAMS\${PRODUCT}"\r
+ RMDir /r "$SMPROGRAMS\${PRODUCT}"\r
+\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "Software\Clients\Media\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+\r
+ ; Remove an application from the firewall exception list\r
+ SimpleFC::RemoveApplication "$INSTDIR\bgprocess\SwarmEngine.exe"\r
\r
+ ; Pop $0 ; return error(1)/success(0)\r
+\r
+SectionEnd\r
+\r
+\r
+;--------------------------------\r
+;Functions Section\r
+\r
+Function .onInit\r
+  System::Call 'kernel32::CreateMutexA(i 0, i 0, t "SwarmPlugin") i .r1 ?e' \r
+\r
+  Pop $R0 \r
+\r
+  StrCmp $R0 0 +3 \r
+\r
+  MessageBox MB_OK "The installer is already running."\r
+\r
+  Abort \r
+FunctionEnd\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin_IE_only.nsi b/instrumentation/next-share/BaseLib/Plugin/Build/Win32/swarmplugin_IE_only.nsi
new file mode 100644 (file)
index 0000000..bd46ac0
--- /dev/null
@@ -0,0 +1,178 @@
+!define PRODUCT "SwarmPlugin"\r
+!define VERSION "1.1.0"\r
+!define BG "bgprocess"\r
+\r
+\r
+!include "MUI.nsh"\r
+\r
+;--------------------------------\r
+;Configuration\r
+\r
+;General\r
+Name "${PRODUCT} ${VERSION}"\r
+OutFile "${PRODUCT}_${VERSION}.exe"\r
+\r
+;Folder selection page\r
+InstallDir "$PROGRAMFILES\${PRODUCT}"\r
\r
+;Remember install folder\r
+InstallDirRegKey HKCU "Software\${PRODUCT}" ""\r
+\r
+;\r
+; Uncomment for smaller file size\r
+;\r
+SetCompressor "lzma"\r
+;\r
+; Uncomment for quick build time\r
+;\r
+;SetCompress "off"\r
+\r
+CompletedText "Installation completed. Thank you for choosing ${PRODUCT}"\r
+\r
+BrandingText "${PRODUCT}"\r
+\r
+;--------------------------------\r
+;Modern UI Configuration\r
+\r
+!define MUI_ABORTWARNING\r
+!define MUI_HEADERIMAGE\r
+!define MUI_HEADERIMAGE_BITMAP "heading.bmp"\r
+\r
+;--------------------------------\r
+;Pages\r
+\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT "I accept"\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE "I decline"\r
+;   !define MUI_FINISHPAGE_RUN "$INSTDIR\swarmplayer.exe"\r
+\r
+!insertmacro MUI_PAGE_INSTFILES\r
+\r
+!insertmacro MUI_UNPAGE_CONFIRM\r
+!insertmacro MUI_UNPAGE_INSTFILES\r
+\r
+;!insertmacro MUI_DEFAULT UMUI_HEADERIMAGE_BMP heading.bmp"\r
+\r
+;--------------------------------\r
+;Languages\r
+\r
+!insertmacro MUI_LANGUAGE "English"\r
\r
+;--------------------------------\r
+;Language Strings\r
+\r
+;Description\r
+LangString DESC_SecMain ${LANG_ENGLISH} "Install ${PRODUCT}"\r
+LangString DESC_SecStart ${LANG_ENGLISH} "Create Start Menu Shortcuts"\r
+\r
+;--------------------------------\r
+;Installer Sections\r
+\r
+Section "!Main EXE" SecMain\r
+ SectionIn RO\r
+ SetOutPath "$INSTDIR"\r
+ File *.txt\r
+  ; TODO : add checkbox for IE and Fx\r
+ File activex\axvlc.dll\r
+ File activex\axvlc.dll.manifest\r
+ File *.dll\r
+ File *.dll.manifest\r
\r
+ File /r bgprocess\r
+\r
+ File /r plugins\r
+ File /r locale\r
+ File /r osdmenu\r
+ File /r http\r
+\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "BGProcessPath" "$INSTDIR\bgprocess\SwarmEngine.exe"\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "InstallDir" "$INSTDIR"\r
\r
+ ; Register IE Plug-in\r
+ RegDLL "$INSTDIR\axvlc.dll"\r
+\r
+; Vista Registration\r
+  ; Vista detection\r
+  ReadRegStr $R0 HKLM "SOFTWARE\Microsoft\Windows NT\CurrentVersion" CurrentVersion\r
+  StrCpy $R1 $R0 3\r
+  StrCmp $R1 '6.0' lbl_vista lbl_done\r
+\r
+  ; TODO : look at that\r
+  lbl_vista:\r
+  WriteRegStr HKLM "Software\RegisteredApplications" "${PRODUCT}" "Software\Clients\Media\${PRODUCT}\Capabilities"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationName" "${PRODUCT} media player"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationDescription" "${PRODUCT} - Torrent videostreaming browser plugin"\r
+\r
+  lbl_done:\r
+\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "DisplayName" "${PRODUCT} (remove only)"\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "UninstallString" "$INSTDIR\Uninstall.exe"\r
+\r
+; Now writing to HKEY_LOCAL_MACHINE only -- remove references to uninstall from current user\r
+ DeleteRegKey HKEY_CURRENT_USER "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+; Remove old error log if present\r
+ Delete "$INSTDIR\swarmplayer.exe.log"\r
+\r
+ WriteUninstaller "$INSTDIR\Uninstall.exe"\r
+\r
+  ; Add an application to the firewall exception list - All Networks - All IP Version - Enabled\r
+  SimpleFC::AddApplication "SwarmEngine" "$INSTDIR\bgprocess\SwarmEngine.exe" 0 2 "" 1\r
+\r
+  ; Pop $0 ; return error(1)/success(0)\r
+\r
+\r
+SectionEnd\r
+\r
+Section "Startmenu Icons" SecStart\r
+   SetShellVarContext all\r
+   CreateDirectory "$SMPROGRAMS\${PRODUCT}"\r
+   CreateShortCut "$SMPROGRAMS\${PRODUCT}\Uninstall.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0\r
+SectionEnd\r
+\r
+\r
+;--------------------------------\r
+;Descriptions\r
+\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecMain} $(DESC_SecMain)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecStart} $(DESC_SecStart)\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_END\r
+\r
+;--------------------------------\r
+;Uninstaller Section\r
+\r
+Section "Uninstall"\r
+\r
+ UnRegDLL "$INSTDIR\axvlc.dll"\r
+ RMDir /r "$INSTDIR"\r
+\r
+ SetShellVarContext all\r
+ RMDir "$SMPROGRAMS\${PRODUCT}"\r
+ RMDir /r "$SMPROGRAMS\${PRODUCT}"\r
+\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "Software\Clients\Media\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
\r
+ ; Remove an application from the firewall exception list\r
+ SimpleFC::RemoveApplication "$INSTDIR\bgprocess\SwarmEngine.exe"\r
+\r
+ ; Pop $0 ; return error(1)/success(0)\r
+\r
+SectionEnd\r
+\r
+\r
+;--------------------------------\r
+;Functions Section\r
+\r
+Function .onInit\r
+  System::Call 'kernel32::CreateMutexA(i 0, i 0, t "SwarmPlugin") i .r1 ?e' \r
+\r
+  Pop $R0 \r
+\r
+  StrCmp $R0 0 +3 \r
+\r
+  MessageBox MB_OK "The installer is already running."\r
+\r
+  Abort \r
+FunctionEnd\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/Search.py b/instrumentation/next-share/BaseLib/Plugin/Search.py
new file mode 100644 (file)
index 0000000..9686e24
--- /dev/null
@@ -0,0 +1,933 @@
+# Written by Arno Bakker, Diego Rabioli
+# see LICENSE.txt for license information
+
+#
+# TODO:
+#   - Switch to SIMPLE+METADATA query
+#
+#   - adjust SIMPLE+METADATA such that it returns P2PURLs if possible.
+
+#   - DO NOT SAVE P2PURLs as .torrent, put in 'torrent_file_name' field in DB.
+#
+#   - Implement continuous dump of results to JS. I.e. push sorting and 
+#     rendering to browser.
+#       * One option is RFC5023: Atom Pub Proto, $10.1 "Collecting Partial 
+#       Lists" I.e. return a partial list and add a 
+#            <link rel="next" href="/.../next10"> tag pointing
+#       to the next set. See http://www.iana.org/assignments/link-relations/link-relations.xhtml
+#       for def of next/first/last, etc. link relations.
+#
+#        Arno, 2009-10-10: we currently add such a <link rel="next"> link,
+#        which contains a URL that will give all hits found so far. So
+#        people should poll this URL.
+#
+#  - Background thread to save torrentfiles to localdb.
+#        Arno, 2009-12-03: Now offloaded to a new TimedTaskQueue.
+# 
+#
+#  - garbage collect hits at connection close. 
+#     Not vital, current mechanism will GC. 
+#        
+#  - Support for multifile torrents
+#
+#  - BuddyCast hits: Create LIVE MPEG7 fields for live (i.e., livetimepoint) 
+#    and VOD MPEG7 fields for VOD. 
+#
+#  - Use separate HTTP server, Content-serving one needs to be single-threaded
+#    at the moment to prevent concurrent RANGE queries on same stream from VLC.
+#    Alternative is to put a Condition variable on a content stream.
+#
+#       Arno, 2009-12-4: I've added locks per content URL and made 
+#       VideoHTTPServer multithreaded and it now also serves the search traffic.
+#
+#  - Debug hanging searches on Windows. May be due to "incomplete outbound TCP 
+#    connection" limit, see Encrypter.py :-( I get timeouts opening the feeds
+#    listed in the metafeed, whilst the feed server is responding fast.
+#    Lowering Encrypter's MAX_INCOMPLETE doesn't help. Alt is to periodically
+#    parse the feeds and store the results. 
+#
+#       Arno, 2009-12-4: Problem still exists. Note that TCP limit has been
+#       lifted on Windows > Vista SP2.
+#
+#  - Update VLC plugin-1.0.1 such that it doesn't show a video window when
+#    target is empty.
+#
+#       Arno, 2009-12-4: At the moment, setting the window size to (0,0) and
+#       not providing a URL of a torrent works.
+# 
+# - query_connected_peers() now returns Unicode names, make sure these are
+#   properly handled when generating HTML output.
+
+
+import sys
+import time
+import random
+import urllib
+import urlparse
+import cgi
+import binascii
+import copy
+from cStringIO import StringIO
+from traceback import print_exc,print_stack
+from threading import RLock
+
+from BaseLib.Core.API import *
+from BaseLib.Core.BitTornado.bencode import *
+from BaseLib.Core.Utilities.utilities import get_collected_torrent_filename
+from BaseLib.Video.VideoServer import AbstractPathMapper
+
+
+from BaseLib.Plugin.defs import *
+from BaseLib.Plugin.AtomFeedParser import *
+
+DEBUG = False
+
+
+P2PQUERYTYPE = "SIMPLE"
+
def streaminfo404():
    """ Canonical streaminfo dict for an HTTP 404 Not Found reply. """
    return {'statuscode': 404, 'statusmsg': '404 Not Found'}
+
+
class SearchPathMapper(AbstractPathMapper):
    """ Path mapper hooked into the internal HTTP server that handles
    /search requests. Depending on the requested collection it either
    searches the ATOM feeds listed in a metafeed, or performs a combined
    local-DB + remote P2P query. Hits are stored in a Query2HitsMap under
    a randomly generated query id, to be rendered later by the
    Hits2AnyPathMapper under /hits/<id>. """
    
    def __init__(self,session,id2hits,tqueue):
        # Session: used for DB handlers and query_connected_peers()
        self.session = session
        # Query2HitsMap shared with Hits2AnyPathMapper
        self.id2hits = id2hits
        # TimedTaskQueue: background thread for saving received .torrents
        self.tqueue = tqueue
        
        # Cached metafeed parser + its URL; see check_reload_metafeed()
        self.metafp = None
        self.metafeedurl = None
        
    def get(self,urlpath):
        """
        Possible paths:
        /search<application/x-www-form-urlencoded query>

        Expects query parameters 'q' (search string), 'collection'
        ("metafeed" or P2P) and 'metafeed' (metafeed URL).
        Returns a streaminfo dict for the HTTP server.
        """
        if not urlpath.startswith(URLPATH_SEARCH_PREFIX):
            return streaminfo404()
        
        # Prepend a fake scheme+host so urlparse can split off the query string
        fakeurl = 'http://127.0.0.1'+urlpath
        o = urlparse.urlparse(fakeurl)
        qdict = cgi.parse_qs(o[4])
        if DEBUG:
            print >>sys.stderr,"searchmap: qdict",qdict
        
        # NOTE(review): a request missing 'q'/'collection'/'metafeed' raises
        # KeyError here rather than returning an error streaminfo — confirm callers
        searchstr = qdict['q'][0]
        searchstr = searchstr.strip()
        collection = qdict['collection'][0]
        metafeedurl = qdict['metafeed'][0]
        
        print >>sys.stderr,"\nbg: search: Got search for",`searchstr`,"in",collection
        
        # Garbage collect:
        self.id2hits.garbage_collect_timestamp_smaller(time.time() - HITS_TIMEOUT)

        
        if collection == "metafeed":
            if not self.check_reload_metafeed(metafeedurl):
                return {'statuscode':504, 'statusmsg':'504 MetaFeed server did not respond'}
            return self.process_search_metafeed(searchstr)
        else:
            return self.process_search_p2p(searchstr)


    def process_search_metafeed(self,searchstr):
        """ Search for hits in the ATOM feeds we got from the meta feed """

        allhits = []
        for feedurl in self.metafp.get_feedurls():
            feedp = FeedParser(feedurl)
            try:
                feedp.parse()
            except:
                # TODO: return 504 gateway error if none of the feeds return anything
                print_exc()
            hits = feedp.search(searchstr)
            allhits.extend(hits)
        
        for hitentry in allhits:
            titleelement = hitentry.find('{http://www.w3.org/2005/Atom}title')
            print >>sys.stderr,"bg: search: meta: Got hit",titleelement.text

        
        # Random query id; becomes the /hits/<id> URL component
        id = str(random.random())[2:]
        atomurlpathprefix = URLPATH_HITS_PREFIX+'/'+str(id)
        atomxml = feedhits2atomxml(allhits,searchstr,atomurlpathprefix)
        
        atomstream = StringIO(atomxml)
        atomstreaminfo = { 'statuscode':200,'mimetype': 'application/atom+xml', 'stream': atomstream, 'length': len(atomxml)}

        return atomstreaminfo


    def process_search_p2p(self,searchstr):
        """ Search for hits in local database and perform remote query. 
        EXPERIMENTAL: needs peers with SIMPLE+METADATA query support.
        """
        
        # Initially, searchstr = keywords
        keywords = searchstr.split()

        # Register the query so remote hits arriving later can be added to it
        id = str(random.random())[2:]
        self.id2hits.add_query(id,searchstr,time.time())
        
        # Parallel:  initiate remote query
        q = P2PQUERYTYPE+' '+searchstr
        
        print >>sys.stderr,"bg: search: Send remote query for",q
        # Callback runs on the SessionCallback thread; bind the query id now
        got_remote_hits_lambda = lambda permid,query,remotehits:self.sesscb_got_remote_hits(id,permid,query,remotehits)
        self.st = time.time()
        self.session.query_connected_peers(q,got_remote_hits_lambda,max_peers_to_query=20)
        
        # Query local DB while waiting
        torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
        localdbhits = torrent_db.searchNames(keywords)
        print >>sys.stderr,"bg: search: Local hits",len(localdbhits)
        self.session.close_dbhandler(torrent_db)
        
        # Convert list to dict keyed by infohash
        localhits = localdbhits2hits(localdbhits)
        self.id2hits.add_hits(id,localhits)

        # TODO ISSUE: incremental display of results to user? How to implement this?
        atomurlpathprefix = URLPATH_HITS_PREFIX+'/'+str(id)
        nextlinkpath = atomurlpathprefix  
        
        # Disabled branch kept for reference: serve the ATOM feed inline
        if False: 
            # Return ATOM feed directly
            atomhits = hits2atomhits(localhits,atomurlpathprefix)
            atomxml = atomhits2atomxml(atomhits,searchstr,atomurlpathprefix,nextlinkpath=nextlinkpath)
            
            atomstream = StringIO(atomxml)
            atomstreaminfo = { 'statuscode':200,'mimetype': 'application/atom+xml', 'stream': atomstream, 'length': len(atomxml)}
            
            return atomstreaminfo
        else:
            # Return redirect to ATOM feed URL, this allows us to do a page 
            # page reload to show remote queries that have come in (DEMO)
            streaminfo = { 'statuscode':301,'statusmsg':nextlinkpath }
            return streaminfo
        

    def sesscb_got_remote_hits(self,id,permid,query,remotehits):
        # Called by SessionCallback thread 
        # Adds remote hits to the query's record; with SIMPLE+METADATA the
        # received .torrents are saved on the TimedTaskQueue thread.
        try:
            
            et = time.time()
            diff = et - self.st
            print >>sys.stderr,"bg: search: Got",len(remotehits),"remote hits" # ,"after",diff

            hits = remotehits2hits(remotehits)
            self.id2hits.add_hits(id,hits)
        
            if P2PQUERYTYPE=="SIMPLE+METADATA": 
                bgsearch_save_remotehits_lambda = lambda:self.tqueue_save_remotehits(remotehits) 
                self.tqueue.add_task(bgsearch_save_remotehits_lambda,0)
            
        except:
            print_exc()


    def check_reload_metafeed(self,metafeedurl):
        """ (Re)parse the metafeed if the URL changed since last time.
        Returns False when the metafeed could not be fetched/parsed. """
        if self.metafeedurl is None or self.metafeedurl != metafeedurl:
            self.metafp = MetaFeedParser(metafeedurl)
            try:
                self.metafp.parse() # TODO: offload to separate thread?
                print >>sys.stderr,"bg: search: meta: Found feeds",self.metafp.get_feedurls()
                self.metafeedurl = metafeedurl
            except:
                print_exc()
                return False
            
        return True
                
    def tqueue_save_remote_hits(self,remotehits):
        """ Save .torrents received from SIMPLE+METADATA query on a separate
        thread.
        Run by TimedTaskQueueThread
        """
        torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)        
        extra_info = {'status':'good'}
        
        # Only commit the DB transaction on the last record of the batch
        n = len(remotehits)
        count = 0
        commit = False
        for infohash,remotehit in remotehits.iteritems():
            if count == n-1:
                commit = True
            try:
                torrentpath = self.tqueue_save_collected_torrent(remotehit['metatype'],remotehit['metadata'])
                torrent_db.addExternalTorrent(torrentpath, source='BC', extra_info=extra_info, commit=commit)
            except:
                print_exc()
            count += 1
            
        self.session.close_dbhandler(torrent_db)

    def tqueue_save_collected_torrent(self,metatype,metadata):
        """ Run by TimedTaskQueueThread """
        # Materialize the hit's metadata (P2PURL or bencoded dict) as a
        # TorrentDef and save it in the torrent collecting dir.
        if metatype == URL_MIME_TYPE:
            tdef = TorrentDef.load_from_url(metadata)
        else:
            metainfo = bdecode(metadata)
            tdef = TorrentDef.load_from_dict(metainfo)

        infohash = tdef.get_infohash()
        colldir = self.session.get_torrent_collecting_dir()
        
        filename = get_collected_torrent_filename(infohash)
        # NOTE(review): os is not imported directly by this module; presumably
        # provided via one of the wildcard imports — confirm
        torrentpath = os.path.join(colldir, filename)
        
        print >>sys.stderr,"bg: search: saving remotehit",torrentpath
        tdef.save(torrentpath)
        return torrentpath
+
+
def localdbhits2hits(localdbhits):
    """ Convert a list of local-DB hit records into the common hits format:
    a dict keyed by infohash whose entries carry hittype "localdb" plus all
    original DB fields. """
    hits = {}
    for record in localdbhits:
        entry = {'hittype': "localdb"}
        entry.update(record)
        # key by infohash; also convenient to keep it inside the record
        hits[record['infohash']] = entry
    return hits
+
+
def remotehits2hits(remotehits):
    """ Convert remote-query results (infohash -> raw hit dict) into the
    common hits format, tagging each entry with hittype "remote". Hits
    without metadata get a fabricated default Merkle torrent so they can
    still be served as .tstream files. """
    hits = {}
    for infohash, rawhit in remotehits.iteritems():
        
        #print >>sys.stderr,"remotehit2hits: keys",rawhit.keys()
        
        entry = {'hittype': "remote",
                 'infohash': infohash}  # convenient to also have in record
        #entry['query_permid'] = permid # Bit of duplication, ignore
        entry.update(rawhit)

        # HACK until we use SIMPLE+METADATA: Create fake torrent file
        if not 'metadata' in rawhit:
            entry['metatype'] = TSTREAM_MIME_TYPE
            entry['metadata'] = hack_make_default_merkletorrent(rawhit['content_name'])
        
        hits[infohash] = entry
    return hits
+
+
class Query2HitsMap:
    """ Stores localdb and remotehits in common hits format, i.e., each
    hit has a 'hittype' attribute that tells which type it is (localdb or remote).
    This Query2HitsMap is passed to the Hits2AnyPathMapper, which is connected
    to the internal HTTP server. 
    
    The HTTP server will then forward all "/hits" GET requests to this mapper.
    The mapper then dynamically generates the required contents from the stored
    hits, e.g. an ATOM feed, MPEG7 description, .torrent file and thumbnail
    images from the torrent.

    All access to the internal dict is serialized through an RLock since
    queries, remote-hit callbacks and HTTP requests arrive on different
    threads.
    """

    def __init__(self):
        self.lock = RLock()
        # query id -> {'searchstr':..., 'timestamp':..., 'hitlist': {infohash: hit}}
        self.d = {}

        
    def add_query(self,id,searchstr,timestamp):
        """ Register (or reset) a query record under the given id. """
        if DEBUG:
            print >>sys.stderr,"q2h: lock1",id
        self.lock.acquire()
        try:
            qrec = self.d.get(id,{})
            qrec['searchstr'] = searchstr
            qrec['timestamp'] = timestamp
            # starts empty; hits are merged in later via add_hits()
            qrec['hitlist'] = {}
            self.d[id] = qrec
        finally:
            if DEBUG:
                print >>sys.stderr,"q2h: unlock1"
            self.lock.release()

        
    def add_hits(self,id,hits):
        """ Merge hits (infohash -> hit) into the query's hitlist.
        Raises KeyError if the id is unknown (e.g. already GC'ed). """
        if DEBUG:
            print >>sys.stderr,"q2h: lock2",id,len(hits)
        self.lock.acquire()
        try:
            qrec = self.d[id]
            qrec['hitlist'].update(hits)
        finally:
            if DEBUG:
                print >>sys.stderr,"q2h: unlock2"
            self.lock.release()
            
    def get_hits(self,id):
        """ Return a shallow copy of the query's hitlist, safe to iterate
        outside the lock. """
        if DEBUG:
            print >>sys.stderr,"q2h: lock3",id
        self.lock.acquire()
        try:
            qrec = self.d[id]
            return copy.copy(qrec['hitlist']) # return shallow copy
        finally:
            if DEBUG:
                print >>sys.stderr,"q2h: unlock3"
            self.lock.release()

    def get_searchstr(self,id):
        """ Return the search string the query was registered with. """
        if DEBUG:
            print >>sys.stderr,"q2h: lock4"
        self.lock.acquire()
        try:
            qrec = self.d[id]
            return qrec['searchstr']
        finally:
            if DEBUG:
                print >>sys.stderr,"q2h: unlock4"
            self.lock.release()

    def garbage_collect_timestamp_smaller(self,timethres):
        """ Drop all query records registered before timethres. """
        self.lock.acquire()
        try:
            idlist = []
            for id,qrec in self.d.iteritems():
                if qrec['timestamp'] < timethres:
                    idlist.append(id)
            # delete after iteration; mutating self.d while iterating is unsafe
            for id in idlist:
                del self.d[id]
        finally:
            self.lock.release()
+            
+
+
class Hits2AnyPathMapper(AbstractPathMapper):
    """ See Query2Hits description """
    
    def __init__(self,session,id2hits):
        # Session: used to open/close the torrent DB handler
        self.session = session
        # Query2HitsMap filled by SearchPathMapper
        self.id2hits = id2hits
        
    def get(self,urlpath):
        """ 
        Possible paths:
        /hits/id -> ATOM feed
        /hits/id/infohash.xml  -> MPEG 7
        /hits/id/infohash.tstream -> Torrent file
        /hits/id/infohash.tstream/thumbnail -> Thumbnail

        Returns a streaminfo dict; streaminfo404() for unknown paths.
        """
        if DEBUG:
            print >>sys.stderr,"hitsmap: Got",urlpath
        
        if not urlpath.startswith(URLPATH_HITS_PREFIX):
            return streaminfo404()

        # "/hits/id" splits into ['', 'hits', id]; deeper paths add elements
        paths = urlpath.split('/')
        if len(paths) < 3:
            return streaminfo404()
        
        id = paths[2]
        if len(paths) == 3:
            # ATOM feed
            searchstr = self.id2hits.get_searchstr(id)
            hits = self.id2hits.get_hits(id)

            if DEBUG:
                print >>sys.stderr,"hitsmap: Found",len(hits),"hits"

            
            atomhits = hits2atomhits(hits,urlpath)

            if DEBUG:
                print >>sys.stderr,"hitsmap: Found",len(atomhits),"atomhits"
            
            
            atomxml = atomhits2atomxml(atomhits,searchstr,urlpath)
            
            #if DEBUG:
            #    print >>sys.stderr,"hitsmap: atomstring is",`atomxml`
                
            atomstream = StringIO(atomxml)
            atomstreaminfo = { 'statuscode':200,'mimetype': 'application/atom+xml', 'stream': atomstream, 'length': len(atomxml)}
            return atomstreaminfo
        
        elif len(paths) >= 4:
            # Either NS Metadata, Torrent file, or thumbnail
            urlinfohash = paths[3]
            
            print >>sys.stderr,"hitsmap: path3 is",urlinfohash
            
            if urlinfohash.endswith(URLPATH_TORRENT_POSTFIX):
                # Torrent file, or thumbnail
                coded = urlinfohash[:-len(URLPATH_TORRENT_POSTFIX)]
                infohash = urlpath2infohash(coded)
            else:
                # NS Metadata / MPEG7
                # NOTE(review): assumes the path element ends in
                # URLPATH_NSMETA_POSTFIX; other suffixes mis-slice — confirm
                coded = urlinfohash[:-len(URLPATH_NSMETA_POSTFIX)]
                infohash = urlpath2infohash(coded)
            
            # Check if hit:
            hits = self.id2hits.get_hits(id)
            print >>sys.stderr,"hitsmap: meta: Found",len(hits),"hits"
            
            hit = hits.get(infohash,None)
            if hit is not None:
                if len(paths) == 5:
                    # Thumbnail (5th element is the /thumbnail suffix)
                    return self.get_thumbstreaminfo(infohash,hit)
                
                elif urlinfohash.endswith(URLPATH_TORRENT_POSTFIX):
                    # Torrent file
                    return self.get_torrentstreaminfo(infohash,hit)
                else:
                    # NS Metadata / MPEG7
                    hiturlpathprefix = URLPATH_HITS_PREFIX+'/'+id
                    return self.get_nsmetastreaminfo(infohash,hit,hiturlpathprefix,urlpath)
        return streaminfo404()

    def get_torrentstreaminfo(self,infohash,hit):
        """ Return a streaminfo serving the hit's .torrent: from the
        collected-torrent file for localdb hits, or from the in-memory
        metadata for remote hits. """
        if DEBUG:
            print >>sys.stderr,"hitmap: get_torrentstreaminfo",infohash2urlpath(infohash)
        
        torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
        try:
            if hit['hittype'] == "localdb":
                
                dbhit = torrent_db.getTorrent(infohash,include_mypref=False)
                
                colltorrdir = self.session.get_torrent_collecting_dir()
                filepath = os.path.join(colltorrdir,dbhit['torrent_file_name'])
                # Return stream that contains torrent file
                stream = open(filepath,"rb")
                length = os.path.getsize(filepath)
                torrentstreaminfo = {'statuscode':200,'mimetype':TSTREAM_MIME_TYPE,'stream':stream,'length':length}
                return torrentstreaminfo
            else:
                if hit['metatype'] == URL_MIME_TYPE:
                    # Shouldn't happen, P2PURL should be embedded in atom
                    return streaminfo404()
                else:
                    stream = StringIO(hit['metadata'])
                    length = len(hit['metadata'])
                    torrentstreaminfo = {'statuscode':200,'mimetype':TSTREAM_MIME_TYPE,'stream':stream,'length':length}
                    return torrentstreaminfo
        finally:
            self.session.close_dbhandler(torrent_db)

    def get_thumbstreaminfo(self,infohash,hit):
        """ Return a streaminfo serving the thumbnail embedded in the hit's
        torrent, loading the torrent from disk (localdb) or from the hit's
        metadata (remote). """
        if DEBUG:
            print >>sys.stderr,"hitmap: get_thumbstreaminfo",infohash2urlpath(infohash)
        
        torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
        try:
            if hit['hittype'] == "localdb":
                dbhit = torrent_db.getTorrent(infohash,include_mypref=False)
                
                colltorrdir = self.session.get_torrent_collecting_dir()
                filepath = os.path.join(colltorrdir,dbhit['torrent_file_name'])
                tdef = TorrentDef.load(filepath)
                (thumbtype,thumbdata) = tdef.get_thumbnail()
                return self.create_thumbstreaminfo(thumbtype,thumbdata)
                    
            else:
                if hit['metatype'] == URL_MIME_TYPE:
                    # Shouldn't happen, not thumb in P2PURL
                    return streaminfo404()
                else:
                    if DEBUG:
                        print >>sys.stderr,"hitmap: get_thumbstreaminfo: looking for thumb in remote hit"
                    
                    metainfo = bdecode(hit['metadata'])
                    tdef = TorrentDef.load_from_dict(metainfo)
                    (thumbtype,thumbdata) = tdef.get_thumbnail()
                    return self.create_thumbstreaminfo(thumbtype,thumbdata)
        finally:
            self.session.close_dbhandler(torrent_db)


    def create_thumbstreaminfo(self,thumbtype,thumbdata):
        """ Wrap raw thumbnail bytes in a streaminfo dict; 404 when the
        torrent carried no thumbnail. """
        if thumbtype is None:
            return streaminfo404()
        else:
            # Return stream that contains thumb
            stream = StringIO(thumbdata)
            length = len(thumbdata)
            thumbstreaminfo = {'statuscode':200,'mimetype':thumbtype,'stream':stream,'length':length}
            return thumbstreaminfo

    def get_nsmetastreaminfo(self,infohash,hit,hiturlpathprefix,hitpath):
        """ Render the hit as MPEG-7 NS metadata XML and wrap it in a
        streaminfo dict. """
        colltorrdir = self.session.get_torrent_collecting_dir()
        nsmetahit = hit2nsmetahit(hit,hiturlpathprefix,colltorrdir)
        
        if DEBUG:
            print >>sys.stderr,"hitmap: get_nsmetastreaminfo: nsmetahit is",`nsmetahit`
        
        nsmetarepr = nsmetahit2nsmetarepr(nsmetahit,hitpath)
        nsmetastream = StringIO(nsmetarepr)
        nsmetastreaminfo = { 'statuscode':200,'mimetype': 'text/xml', 'stream': nsmetastream, 'length': len(nsmetarepr)}
        return nsmetastreaminfo
+
+
def infohash2urlpath(infohash):
    """ Encode a raw 20-byte infohash as its 40-character hex form for use
    as a URL path element. Raises ValueError on unexpected lengths. """
    if len(infohash) != 20:
        raise ValueError("infohash len 20 !=" + str(len(infohash)))

    hexed = binascii.hexlify(infohash)
    # sanity check: 20 bytes must hexlify to exactly 40 characters
    if len(hexed) != 40:
        raise ValueError("hex len 40 !=" + str(len(hexed)))

    return hexed
+    
def urlpath2infohash(hex):
    """ Decode a 40-character hex URL path element back into the raw
    20-byte infohash. Raises ValueError on unexpected lengths. """
    if len(hex) != 40:
        raise ValueError("hex len 40 !=" + str(len(hex)) + " " + hex)

    raw = binascii.unhexlify(hex)
    # sanity check: 40 hex chars must decode to exactly 20 bytes
    if len(raw) != 20:
        raise ValueError("infohash len 20 !=" + str(len(raw)))

    return raw
+
+
def hits2atomhits(hits,urlpathprefix):
    """ Map each common-format hit to its ATOM representation, dispatching
    on the hit's 'hittype' (localdb vs remote). """
    atomhits = {}
    for infohash, hit in hits.iteritems():
        if hit['hittype'] == "localdb":
            converter = localdbhit2atomhit
        else:
            converter = remotehit2atomhit
        atomhits[infohash] = converter(hit, urlpathprefix)

    return atomhits
+            
+
def localdbhit2atomhit(dbhit,urlpathprefix):
    """ Build an ATOM hit dict (escaped title/summary plus an optional
    thumbnail URL) from a local database hit. """
    atomhit = {
        'title': htmlfilter(dbhit['name'].encode("UTF-8")),
        'summary': htmlfilter(dbhit['comment'].encode("UTF-8")),
    }
    if dbhit['thumbnail']:
        # thumbnail is served via the torrent URL + /thumbnail suffix
        thumburl = urlpathprefix+'/'+infohash2urlpath(dbhit['infohash'])+URLPATH_TORRENT_POSTFIX+URLPATH_THUMBNAIL_POSTFIX
        atomhit['p2pnext:image'] = thumburl

    return atomhit
+
def remotehit2atomhit(remotehit,urlpathprefix):
    """ Build an ATOM hit dict from a remote-query hit.
    TODO: make RemoteQuery return full DB schema of TorrentDB. """

    #print >>sys.stderr,"remotehit2atomhit: keys",remotehit.keys()

    title = htmlfilter(remotehit['content_name'].encode("UTF-8"))
    summary = "Seeders: "+str(remotehit['seeder'])+" Leechers: "+str(remotehit['leecher'])
    atomhit = {'title': title, 'summary': summary}
    if remotehit['metatype'] != URL_MIME_TYPE:
        # TODO: thumbnail, see if we can detect presence (see DB schema remark).
        # Now we assume it's always there if not P2PURL
        thumburl = urlpathprefix+'/'+infohash2urlpath(remotehit['infohash'])+URLPATH_TORRENT_POSTFIX+URLPATH_THUMBNAIL_POSTFIX
        atomhit['p2pnext:image'] = thumburl

    return atomhit
+
def htmlfilter(s):
    """ Escape characters to which HTML parser is sensitive.
    None is mapped to the empty string. Ampersand is escaped first so
    already-escaped entities are not produced out of order. """
    if s is None:
        return ""
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
+
def atomhits2atomxml(atomhits,searchstr,urlpathprefix,nextlinkpath=None):
    """ Serialize ATOM hit dicts into an Atom feed document (string).
    An optional rel="next" link points at the URL that returns all hits
    found so far (polled by the browser).
    NOTE(review): searchstr and hit fields are interpolated directly; title
    and summary are pre-escaped by htmlfilter in the hit builders, but
    searchstr itself is not — confirm upstream sanitization. """
    
    # TODO: use ElementTree parser here too, see AtomFeedParser:feedhits2atomxml
    atom = ''
    atom += '<?xml version="1.0" encoding="UTF-8"?>\n'
    atom += '<feed xmlns="http://www.w3.org/2005/Atom" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:sy="http://purl.org/rss/1.0/modules/syndication/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:p2pnext="urn:p2pnext:contentfeed:2009" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/">\n'
    atom += '  <title>Hits for '+searchstr+'</title>\n'
    atom += '  <link rel="self" href="'+urlpathprefix+'" />\n'
    if nextlinkpath:
         atom += '  <link rel="next" href="'+nextlinkpath+'" />\n'
    atom += '  <author>\n'
    atom += '  <name>NSSA</name>\n'
    atom += '  </author>\n'
    atom += '  <id>urn:nssa</id>\n'
    atom += '  <updated>'+now2formatRFC3339()+'</updated>\n'
    #atom += '<p2pnext:image src="http://p2pnextfeed1.rad0.net/images/bbc.png" />\n' # TODO

    # One <entry> per hit; each links to its NS metadata (MPEG-7) document
    for infohash,hit in atomhits.iteritems():
        urlinfohash = infohash2urlpath(infohash)
        hitpath = urlpathprefix+'/'+urlinfohash+URLPATH_NSMETA_POSTFIX
        atom += '  <entry>\n'
        atom += '    <title>'+hit['title']+'</title>\n'
        atom += '    <link type="application/xml" href="'+hitpath+'" />\n'
        atom += '    <id>urn:nssa-'+urlinfohash+'</id>\n'
        atom += '    <updated>'+now2formatRFC3339()+'</updated>\n'
        if hit['summary'] is not None:
            atom += '    <summary>'+hit['summary']+'</summary>\n'
        if 'p2pnext:image' in hit:
            atom += '    <p2pnext:image src="'+hit['p2pnext:image']+'" />\n'
        atom += '  </entry>\n'
    
    atom += '</feed>\n'

    return atom
+
+
def hit2nsmetahit(hit,hiturlprefix,colltorrdir):
    """ Convert common hit to the fields required for the MPEG7 NS metadata.

    Loads the hit's TorrentDef from the collected torrent file or P2PURL
    (localdb hits) or from the metadata shipped with a remote hit, then
    extracts title, thumbnail URL, abstract, producer, disseminator,
    copyright string, torrent URL and duration into a flat dict consumed
    by nsmetahit2nsmetarepr().
    """
    # Fixed: debug tag previously read "his2nsmetahit:"
    print >>sys.stderr,"hit2nsmetahit:"

    # Read info from torrent files / P2PURLs
    if hit['hittype'] == "localdb":

        name = hit['name']
        if hit['torrent_file_name'].startswith(P2PURL_SCHEME):
            # Local DB hit that is P2PURL
            torrenturl = hit['torrent_file_name']
            titleimgurl = None
            tdef = TorrentDef.load_from_url(torrenturl)
        else:
            # Local DB hit that is torrent file: serve it via our own
            # /hits/<id>/<infohash>.tstream URL
            torrenturlpath = '/'+infohash2urlpath(hit['infohash'])+URLPATH_TORRENT_POSTFIX
            torrenturl = hiturlprefix + torrenturlpath
            filepath = os.path.join(colltorrdir,hit['torrent_file_name'])
            tdef = TorrentDef.load(filepath)
            (thumbtype,thumbdata) = tdef.get_thumbnail()
            if thumbtype is None:
                titleimgurl = None
            else:
                titleimgurl = torrenturl+URLPATH_THUMBNAIL_POSTFIX

    else:
        # Remote hit
        name = hit['content_name']
        if hit['metatype'] == URL_MIME_TYPE:
            torrenturl = hit['torrent_file_name']
            titleimgurl = None
            tdef = TorrentDef.load_from_url(torrenturl)
        else:
            torrenturlpath = '/'+infohash2urlpath(hit['infohash'])+URLPATH_TORRENT_POSTFIX
            torrenturl = hiturlprefix + torrenturlpath
            metainfo = bdecode(hit['metadata'])
            tdef = TorrentDef.load_from_dict(metainfo)
            (thumbtype,thumbdata) = tdef.get_thumbnail()
            if thumbtype is None:
                titleimgurl = None
            else:
                titleimgurl = torrenturl+URLPATH_THUMBNAIL_POSTFIX


    # Extract info required for NS metadata MPEG7 representation.
    nsmetahit = {}
    nsmetahit['title'] = unicode2iri(name)
    nsmetahit['titleimgurl'] = titleimgurl
    comment = tdef.get_comment()
    if comment is None:
        nsmetahit['abstract'] = None
    else:
        nsmetahit['abstract'] = unicode2iri(comment)
    nsmetahit['producer'] = 'Insert Name Here'
    creator = tdef.get_created_by()
    if creator is None:
        creator = 'Insert Name Here Too'
    nsmetahit['disseminator'] = creator
    nsmetahit['copyrightstr'] = 'Copyright '+creator
    nsmetahit['torrent_url'] = torrenturl
    # TODO: multifile torrents, LIVE
    nsmetahit['duration']  = bitratelength2nsmeta_duration(tdef.get_bitrate(),tdef.get_length())

    return nsmetahit
+
+    
+
def unicode2iri(uni):
    """ Roughly after http://www.ietf.org/rfc/rfc3987.txt Sec 3.1 procedure:
    UTF-8 encode the unicode string, then percent-quote the octets.
    TODO: implement the procedure precisely. """
    utf8 = uni.encode('UTF-8')
    return urllib.quote(utf8)
+
+
+    
def bitratelength2nsmeta_duration(bitrate,length):    
    """ Render an approximate playback duration (length/bitrate seconds)
    as an MPEG-7 mediaDuration string. Format example: PT0H15M0S.

    Falls back to a 1 hour default when the bitrate is unknown (None) or
    zero — previously a bitrate of 0 raised ZeroDivisionError. """
    if not bitrate:
        return 'PT01H00M0S' # 1 hour
    secs = float(length)/float(bitrate)
    hours = float(int(secs / 3600.0))
    secs = secs - hours*3600.0
    mins = float(int(secs / 60.0))
    secs = secs - mins*60.0
    
    return 'PT%02.0fH%02.0fM%02.0fS' % (hours,mins,secs)
+
+
def nsmetahit2nsmetarepr(hit,hitpath):
    """ Serialize an nsmetahit dict (see hit2nsmetahit) into an MPEG-7
    P2P-Next NS metadata XML document (string). hitpath is the /hits URL
    of this document, referenced in the Availability section. """
    
    title = hit['title']
    titleimgurl = hit['titleimgurl']
    abstract = hit['abstract']   
    producer = hit['producer']
    disseminator = hit['disseminator']
    copyrightstr = hit['copyrightstr']
    torrenturl = hit['torrent_url']
    duration  = hit['duration'] # Format example: PT0H15M0S
    livetimepoint = now2formatRFC3339() # Format example: '2009-10-05T00:40:00+01:00' # TODO VOD
    
    # Fixed XML template; fields were IRI/escape-prepared by hit2nsmetahit
    s = ''
    s += '<?xml version="1.0" encoding="UTF-8"?>\n'
    s += '<Mpeg7 xmlns="urn:mpeg:mpeg7:schema:2001" xmlns:p2pnext="urn:p2pnext:metadata:2008" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n'
    s += '  <Description xsi:type="p2pnext:P2PBasicDescriptionType">\n'
    s += '    <CreationInformation>\n'
    s += '     <Creation>\n'
    s += '        <Title type="main" xml:lang="en">'+title+'</Title>\n'
    s += '        <TitleMedia xsi:type="TitleMediaType">\n'
    if titleimgurl:
        s += '         <TitleImage>\n'
        s += '            <MediaUri>'+titleimgurl+'</MediaUri>\n'
        s += '          </TitleImage>\n'
    s += '        </TitleMedia>\n'
    if abstract:
        s += '        <Abstract>\n'
        s += '          <FreeTextAnnotation>'+abstract+'</FreeTextAnnotation>\n'
        s += '        </Abstract>\n'
    s += '        <Creator>\n'
    s += '          <Role href="urn:mpeg:mpeg7:cs:RoleCS:2001:PRODUCER" />\n'
    s += '          <Agent xsi:type="OrganizationType">\n'
    s += '            <Name>'+producer+'</Name>\n'
    s += '          </Agent>\n'
    s += '        </Creator>\n'
    s += '        <Creator>\n'
    s += '          <Role href="urn:mpeg:mpeg7:cs:RoleCS:2001:DISSEMINATOR" />\n'
    s += '          <Agent xsi:type="OrganizationType">\n'
    s += '            <Name>'+disseminator+'</Name>\n'
    s += '          </Agent>\n'
    s += '        </Creator>\n'
    s += '        <CopyrightString>'+copyrightstr+'</CopyrightString>\n'
    s += '      </Creation>\n'
    s += '    </CreationInformation>\n'
    s += '    <p2pnext:IsInteractiveContent>false</p2pnext:IsInteractiveContent>\n'
    s += '    <p2pnext:IsCommercialContent>false</p2pnext:IsCommercialContent>\n'
    s += '    <p2pnext:ContainsCommercialContent>false</p2pnext:ContainsCommercialContent>\n'
    s += '    <p2pnext:P2PData>\n'
    s += '      <p2pnext:Torrent>\n'
    s += '        <MediaUri>'+torrenturl+'</MediaUri>\n'
    s += '      </p2pnext:Torrent>\n'
    s += '      <p2pnext:P2PFragment>offset(0, 1000)</p2pnext:P2PFragment>\n'
    s += '   </p2pnext:P2PData>\n'
    s += '  </Description>\n'
    s += '  <Description xsi:type="ContentEntityType">\n'
    s += '    <MultimediaContent xsi:type="VideoType">\n'
    s += '      <Video>\n'
    s += '        <MediaTime>\n'
    s += '          <MediaTimePoint>T00:00:00</MediaTimePoint>\n'
    s += '          <MediaDuration>'+duration+'</MediaDuration>\n'
    s += '        </MediaTime>\n'
    s += '      </Video>\n'
    s += '    </MultimediaContent>\n'
    s += '  </Description>\n'
    s += '  <Description xsi:type="UsageDescriptionType">\n'
    s += '    <UsageInformation>\n'
    s += '      <Availability>\n'
    s += '        <InstanceRef href="'+hitpath+'" />\n'
    s += '        <AvailabilityPeriod type="live">\n'
    s += '          <TimePoint>'+livetimepoint+'</TimePoint>\n'
    s += '        </AvailabilityPeriod>\n'
    s += '     </Availability>\n'
    s += '    </UsageInformation>\n'
    s += '  </Description>\n'
    s += '</Mpeg7>\n'

    return s
+
+
+
+
+
+def hack_make_default_merkletorrent(title):  # build a bencoded placeholder Merkle-torrent metainfo dict named `title`
+    metainfo = {}
+    metainfo['announce'] = 'http://localhost:0/announce'  # dummy tracker; port 0 makes it unusable by design
+    metainfo['creation date'] = int(time.time())
+    info = {}
+    info['name'] = title
+    info['length'] = 2 ** 30  # fake content size: 1 GiB
+    info['piece length'] = 2 ** 16  # 64 KiB pieces
+    info['root hash'] = '*' * 20  # placeholder 20-byte Merkle root, not a real hash
+    metainfo['info'] = info
+    
+    mdict = {}  # Azureus/Vuze "Content" properties block
+    mdict['Publisher'] = 'Tribler'
+    mdict['Description'] = ''
+    mdict['Progressive'] = 1  # mark content as progressive-download capable
+    mdict['Speed Bps'] = str(2 ** 16)  # NOTE(review): stored as str here; other fields are ints — presumably what Azureus expects, confirm
+    mdict['Title'] = metainfo['info']['name']
+    mdict['Creation Date'] = long(time.time())
+    # Azureus client source code doesn't tell what this is, so just put in random value from real torrent
+    mdict['Content Hash'] = 'PT3GQCPW4NPT6WRKKT25IQD4MU5HM4UY'
+    mdict['Revision Date'] = long(time.time())
+    cdict = {}
+    cdict['Content'] = mdict
+    metainfo['azureus_properties'] = cdict
+    
+    return bencode(metainfo)  # bencode provided elsewhere in this module's imports
+
+
+
+    
+
+"""
+class Infohash2TorrentPathMapper(AbstractPathMapper):
+    Mapper to map in the collection of known torrents files (=collected + started
+    + own) into the HTTP address space of the local HTTP server. In particular,
+    it maps a "/infohash/aabbccdd...zz.tstream" path to a streaminfo dict.
+    
+    Also supported are "/infohash/aabbccdd...zz.tstream/thumbnail" queries, which
+    try to read the thumbnail from the torrent.
+        
+    def __init__(self,urlpathprefix,session):
+        self.urlpathprefix = urlpathprefix
+        self.session = session
+        self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
+        
+    def get(self,urlpath):
+        if not urlpath.startswith(self.urlpathprefix):
+            return None
+        try:
+            wantthumb = False
+            if urlpath.endswith(URLPATH_THUMBNAIL_POSTFIX):
+                wantthumb = True
+                infohashquote = urlpath[len(self.urlpathprefix):-len(URLPATH_TORRENT_POSTFIX+URLPATH_THUMBNAIL_POSTFIX)]
+            else:
+                infohashquote = urlpath[len(self.urlpathprefix):-len(URLPATH_TORRENT_POSTFIX)]
+            infohash = urlpath2infohash(infohashquote)
+            dbhit = self.torrent_db.getTorrent(infohash,include_mypref=False)
+            
+            colltorrdir = self.session.get_torrent_collecting_dir()
+            filepath = os.path.join(colltorrdir,dbhit['torrent_file_name'])
+                                                      
+            if not wantthumb:
+                # Return stream that contains torrent file
+                stream = open(filepath,"rb")
+                length = os.path.getsize(filepath)
+                streaminfo = {'statuscode':200,'mimetype':TSTREAM_MIME_TYPE,'stream':stream,'length':length}
+            else:
+                # Return stream that contains thumbnail
+                tdef = TorrentDef.load(filepath)
+                (thumbtype,thumbdata) = tdef.get_thumbnail()
+                if thumbtype is None:
+                    return None
+                else:
+                    stream = StringIO(thumbdata)
+                    streaminfo = {'statuscode':200,'mimetype':thumbtype,'stream':stream,'length':len(thumbdata)}
+                
+            return streaminfo
+        except:
+            print_exc()
+            return None
+
+"""
diff --git a/instrumentation/next-share/BaseLib/Plugin/SwarmEngine.py b/instrumentation/next-share/BaseLib/Plugin/SwarmEngine.py
new file mode 100644 (file)
index 0000000..4d69d4a
--- /dev/null
@@ -0,0 +1,20 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+\r
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r
+# This is the SwarmEngine.py for the SwarmPlugin which currently doesn't self \r
+# destruct when the browser quits.\r
+#\r
+# So there are two SwarmEngine.py's\r
+#\r
+\r
+from BaseLib.Plugin.BackgroundProcess import run_bgapp\r
+\r
+\r
+I2I_LISTENPORT = 62062\r
+BG_LISTENPORT = 8621\r
+VIDEOHTTP_LISTENPORT = 6878\r
+\r
+\r
+if __name__ == '__main__':\r
+    run_bgapp("SwarmPlugin","1.1.0",I2I_LISTENPORT,BG_LISTENPORT,VIDEOHTTP_LISTENPORT,killonidle=False)\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/__init__.py b/instrumentation/next-share/BaseLib/Plugin/__init__.py
new file mode 100644 (file)
index 0000000..f87d7b4
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/defs.py b/instrumentation/next-share/BaseLib/Plugin/defs.py
new file mode 100644 (file)
index 0000000..66d0d02
--- /dev/null
@@ -0,0 +1,14 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+URLPATH_CONTENT_PREFIX = '/content'
+URLPATH_TORRENT_POSTFIX = '.tstream'
+URLPATH_NSMETA_POSTFIX = '.xml'
+URLPATH_THUMBNAIL_POSTFIX = '/thumbnail'
+URLPATH_HITS_PREFIX = '/hits'
+URLPATH_SEARCH_PREFIX = '/search'
+URLPATH_WEBIF_PREFIX = '/webUI'
+
+# After this time the ATOM feed URL and all links are no longer valid.
+HITS_TIMEOUT = 1800.0
diff --git a/instrumentation/next-share/BaseLib/Plugin/examplepage-firefox.html b/instrumentation/next-share/BaseLib/Plugin/examplepage-firefox.html
new file mode 100644 (file)
index 0000000..4ec164f
--- /dev/null
@@ -0,0 +1,115 @@
+<html>\r
+<head><title>Firefox SwarmPlugin Page 1</title></head>\r
+\r
+<body>\r
+\r
+\r
+<h1>Firefox SwarmPlugin Page 1</h1>\r
+\r
+\r
+<embed type="application/x-ns-stream"\r
+name="vlcName"\r
+id="vlc"\r
+autoplay="yes" loop="yes" width="400" height="300"\r
+target="http://www.cs.vu.nl/~arno/vod/route2.tstream" />\r
+<br>\r
+NSSA reports: <em><p id="p2pstatusline"></p></em>\r
+\r
+\r
+<script language="Javascript">\r
+var vlc = document.getElementById("vlc");\r
+\r
+vlc.log.verbosity = 3;\r
+vlc.audio.volume = 100;\r
+\r
+function debugonclick()\r
+{\r
+    newwindow2=window.open('','name','height=400,width=640,scrollbars=1,resizable=1');\r
+    var tmp = newwindow2.document;\r
+    tmp.write('<html><head><title>popup</title>');\r
+    tmp.write('</head><body><p>this is once again a popup.</p>');\r
+\r
+    tmp.write("Count "+document.vlc.log.messages.count);\r
+\r
+    var iter = document.vlc.log.messages.iterator();\r
+    while(iter.hasNext)\r
+    {\r
+        msg = iter.next();\r
+        tmp.write(msg.message+"<br>");\r
+    }\r
+\r
+    tmp.write('</body></html>');\r
+    tmp.close();\r
+\r
+}\r
+\r
+    \r
+function updatep2pstatus()\r
+{\r
+    line = document.vlc.input.p2pstatus\r
+    var p2pstatusline = document.getElementById("p2pstatusline");\r
+       p2pstatusline.innerHTML = line\r
+}\r
+\r
+function activatePeriodicReport()\r
+{\r
+       setInterval(updatep2pstatus,100);\r
+}\r
+\r
+function seek(percentage)\r
+{\r
+       document.vlc.input.position = percentage\r
+}\r
+\r
+function switchtorrent(url)\r
+{\r
+       document.vlc.playlist.stop();\r
+       document.vlc.playlist.clear();\r
+       document.vlc.playlist.items.clear();\r
+       while (document.vlc.playlist.items.count != 0)\r
+               ;\r
+       \r
+       document.vlc.input.set_p2ptarget(url);\r
+}\r
+\r
+\r
+\r
+function dwrite(line)\r
+{\r
+       var dump = document.getElementById("dump");\r
+       var text = document.createTextNode (line)\r
+       dump.appendChild(text)\r
+}\r
+\r
+</script>\r
+\r
+\r
+<script language="Javascript">\r
+// Somehow calling setInterval only works in Firefox when called from\r
+// a function that is triggered externally???\r
+activatePeriodicReport();\r
+</script>\r
+\r
+<!-- Arno, 2010-05-26: using playlist.play() sometimes plays old items from a playlist that was just cleared?! -->\r
+<input type=button value="Play" onClick='document.vlc.playlist.playItem(0);'>\r
+<input type=button value="Pause" onClick='document.vlc.playlist.togglePause();'>\r
+<input type=button value="Stop" onclick='document.vlc.playlist.stop();'>\r
+<input type=button value="Debug" onclick='debugonclick();'>\r
+<input type=button value="Seek 25%" onclick='seek(.25);'>\r
+<input type=button value="Seek 50%" onclick='seek(.5);'>\r
+\r
+<input type=button value="Switch 2" onClick='switchtorrent("http://www.cs.vu.nl/~arno/vod/BBCeen.tstream");'>\r
+<input type=button value="Switch 3" onClick='switchtorrent("http://www.cs.vu.nl/~arno/vod/star2.tstream");'>\r
+<input type=button value="Switch 2" onClick='switchtorrent("http://www.cs.vu.nl/~arno/vod/BBCtwee.tstream");'>\r
+\r
+<br/>\r
+\r
+<h3>Debugging</h3>\r
+<p id="dump"></p>\r
+\r
+<hr>\r
+\r
+\r
+</body>\r
+</html>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/examplepage.html b/instrumentation/next-share/BaseLib/Plugin/examplepage.html
new file mode 100644 (file)
index 0000000..57a0e8e
--- /dev/null
@@ -0,0 +1,97 @@
+<html>\r
+<head><title>IE SwarmPlugin Page 1</title></head>\r
+\r
+<body>\r
+\r
+\r
+<h1>IE SwarmPlugin Page 1</h1>\r
+\r
+<object classid="clsid:1800B8AF-4E33-43C0-AFC7-894433C13538"\r
+        width="380" height="320" id="vlc" events="True" target=''>\r
+<param name="Src" value="http://www.cs.vu.nl/~arno/vod/route2.tstream" />\r
+<param name="ShowDisplay" value="True" />\r
+<param name="Loop" value="False" />\r
+<param name="AutoPlay" value="True" />\r
+</object>\r
+<br>\r
+NSSA reports: <em><p id="p2pstatusline"></p></em>\r
+\r
+\r
+<script language="Javascript">\r
+var vlc = document.getElementById("vlc");\r
+\r
+vlc.log.verbosity = 3;\r
+\r
+function debugonclick()\r
+{\r
+    newwindow2=window.open('','name','height=400,width=640,scrollbars=1,resizable=1');\r
+    var tmp = newwindow2.document;\r
+    tmp.write('<html><head><title>popup</title>');\r
+    tmp.write('</head><body><p>this is once again a popup.</p>');\r
+\r
+    tmp.write("Count "+document.vlc.log.messages.count);\r
+\r
+    var iter = document.vlc.log.messages.iterator();\r
+    while(iter.hasNext)\r
+    {\r
+        msg = iter.next();\r
+        tmp.write(msg.message+"<br>");\r
+    }\r
+\r
+    tmp.write('</body></html>');\r
+    tmp.close();\r
+\r
+}\r
+\r
+    \r
+function updatep2pstatus()\r
+{\r
+    line = document.vlc.input.p2pstatus\r
+    var p2pstatusline = document.getElementById("p2pstatusline");\r
+       p2pstatusline.innerHTML = line\r
+}\r
+\r
+setInterval(updatep2pstatus,100)\r
+\r
+\r
+function seek(percentage)\r
+{\r
+       document.vlc.input.position = percentage\r
+}\r
+\r
+function switchtorrent(url)\r
+{\r
+       document.vlc.playlist.stop();\r
+       document.vlc.playlist.clear();\r
+       document.vlc.playlist.items.clear();\r
+       while (document.vlc.playlist.items.count != 0)\r
+               ;\r
+       \r
+       document.vlc.input.set_p2ptarget(url);\r
+}\r
+\r
+\r
+</script>\r
+\r
+\r
+<!-- Arno, 2010-05-26: using playlist.play() sometimes plays old items from \r
+     a playlist that was just cleared?! -->\r
+<input type=button value="Play" onClick='document.vlc.playlist.playItem(0);'>\r
+<input type=button value="Pause" onClick='document.vlc.playlist.togglePause();'>\r
+<input type=button value="Stop" onclick='document.vlc.playlist.stop();'>\r
+<input type=button value="Debug" onclick='debugonclick();'>\r
+<input type=button value="Seek 25%" onclick='seek(.25);'>\r
+<input type=button value="Seek 50%" onclick='seek(.5);'>\r
+\r
+<input type=button value="Switch 2" onClick='switchtorrent("http://www.cs.vu.nl/~arno/vod/BBCeen.tstream");'>\r
+<input type=button value="Switch 3" onClick='switchtorrent("http://www.cs.vu.nl/~arno/vod/star2.tstream");'>\r
+\r
+<br/>\r
+\r
+\r
+<hr>\r
+\r
+\r
+</body>\r
+</html>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/pluginemulator-http.py b/instrumentation/next-share/BaseLib/Plugin/pluginemulator-http.py
new file mode 100644 (file)
index 0000000..b309158
--- /dev/null
@@ -0,0 +1,54 @@
+\r
+import sys\r
+import socket\r
+import urlparse\r
+import time\r
+\r
+class PluginEmulator:\r
+    \r
+    def __init__(self,port,cmd,param):\r
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r
+        s.connect(('127.0.0.1',port))\r
+        msg = cmd+' '+param+'\r\n'\r
+        s.send(msg)\r
+        \r
+        while True:\r
+            data = s.recv(1024)\r
+            print >>sys.stderr,"pe: Got BG command",data\r
+            if len(data) == 0:\r
+                print >>sys.stderr,"pe: BG closes IC"\r
+                return\r
+            elif data.startswith("PLAY"):\r
+                \r
+                f = open("bla.bat","wb")\r
+                f.write("\"\\Program Files\\GnuWin32\\bin\\wget.exe\" -S "+data[4:])\r
+                f.close()\r
+                break\r
+\r
+        time.sleep(1000)\r
+        return\r
+\r
+        #url = data[len("PLAY "):-2]\r
+        url = data[len("PLAY "):]\r
+        p = urlparse.urlparse(url)\r
+        path  = p[2]\r
+        \r
+        s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r
+        s2.connect(('127.0.0.1',6878))\r
+        cmd = "GET "+path+" HTTP/1.1\r\nHost: localhost:6878\r\n\r\n\r\n"\r
+        print >>sys.stderr,"SENDING CMD",cmd\r
+        s2.send(cmd)\r
+        for i in range(0,2):\r
+            data = s2.recv(256)\r
+            print >>sys.stderr,"pe: Got HTTP command",`data`\r
+            if len(data) == 0:\r
+                break\r
+        \r
+        print >>sys.stderr,"pe: Sleeping"\r
+        time.sleep(100)\r
+        \r
+            \r
+#pe = PluginEmulator(62062,"START","http://www.cs.vu.nl/~arno/vod/route2.tstream")\r
+pe = PluginEmulator(62062,"START","file:/Build/trans-release-0.1/stroom.ogg.tstream")\r
+     \r
+        \r
diff --git a/instrumentation/next-share/BaseLib/Plugin/pluginemulator.py b/instrumentation/next-share/BaseLib/Plugin/pluginemulator.py
new file mode 100644 (file)
index 0000000..c6c243a
--- /dev/null
@@ -0,0 +1,97 @@
+# BAD CLIENT: SENDS \r
+# GET /path\r\n\r
+# HTTP/1.1\r\n\r
+# Host: localhost:6878\r\n\r
+# \r\n\r
+#\r
+# Then Python HTTP server doesn't correctly send headers.\r
+\r
+import sys\r
+import socket\r
+import urlparse\r
+import time\r
+\r
+class PluginEmulator:\r
+    \r
+    def __init__(self,port,cmd,param):\r
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r
+        s.connect(('127.0.0.1',port))\r
+        msg = cmd+' '+param+'\r\n'\r
+        s.send(msg)\r
+        #s.close()\r
+        while True:\r
+            data = s.recv(1024)\r
+            print >>sys.stderr,"pe: Got BG command",data\r
+            if len(data) == 0:\r
+                print >>sys.stderr,"pe: BG closes IC"\r
+                return\r
+            \r
+            s.send(msg)\r
+            \r
+            """\r
+            elif data.startswith("PLAY"):\r
+                url = data[len("PLAY "):]\r
+                p = urlparse.urlparse(url)\r
+                path  = p[2]\r
+                readbufsize = 100\r
+                break\r
+            """\r
+\r
+        #self.retrieve_path(path,recurse=False)\r
+        \r
+        #s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r
+        #s2.connect(('127.0.0.1',6878))\r
+        #s2.send("GET "+path+"HTTP/1.1\r\nHost: localhost:6878\r\n\r\n")\r
+        #data = s2.recv(100)\r
+        #print >>sys.stderr,"pe: Got HTTP command",data\r
+        time.sleep(10000)\r
+\r
+\r
+    def retrieve_path(self,path,recurse=False):\r
+        readbufsize = 100000\r
+        \r
+        links = []\r
+        print >>sys.stderr,"pe: GET",path\r
+        s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r
+        s2.connect(('127.0.0.1',6878))\r
+        s2.send("GET "+path+" HTTP/1.1\r\nHost: localhost:6878\r\n\r\n")\r
+        while True:\r
+            data = s2.recv(readbufsize)\r
+            if len(data) == 0:\r
+                break\r
+            print >>sys.stderr,"pe: Got HTTP data",`data`\r
+            \r
+            eidx = 0\r
+            while True:\r
+                sidx = data.find("/hits",eidx)\r
+                if sidx != -1:\r
+                    eidx = data.find('"',sidx)\r
+                    if eidx != -1:\r
+                        hitpath = data[sidx:eidx]\r
+                        #print >>sys.stderr,"pe: Found link",hitpath\r
+                        links.append(hitpath)\r
+                else:\r
+                    break\r
+                        \r
+        if recurse:\r
+            for hitpath in links:\r
+                #hitpath = links[2][:-len("/thumbnail")]\r
+                #print >>sys.stderr,"pe: Retrieving link",hitpath,"EOT"\r
+                recurse = hitpath.endswith(".xml")\r
+                \r
+                # My dumb parser hack\r
+                idx = hitpath.find("</MediaUri>")\r
+                if idx != -1:\r
+                    hitpath = hitpath[0:idx]\r
+                print >>sys.stderr,"pe: FINAL link",hitpath,"EOT"\r
+                self.retrieve_path(hitpath,recurse=recurse)\r
+            \r
+            \r
+#pe = PluginEmulator(62062,"START","http://www.cs.vu.nl/~arno/vod/route2.tstream")\r
+#pe = PluginEmulator(62062,"START","http://www.vuze.com/download/XUGIN6PEJJCQ5777C3WUMMBRFI6HYIHJ.torrent?referal=torrentfilelinkcdp&title=Gopher")\r
+\r
+if len(sys.argv) < 2:\r
+    print "Missing URL to play"\r
+    raise SystemExit(1)\r
+\r
+pe =  PluginEmulator(62062,"START",sys.argv[1])\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/searchemulator.py b/instrumentation/next-share/BaseLib/Plugin/searchemulator.py
new file mode 100644 (file)
index 0000000..a8c5f80
--- /dev/null
@@ -0,0 +1,10 @@
+\r
+import sys\r
+import urllib\r
+import socket\r
+\r
+url = 'http://127.0.0.1:6879/search?q=episode&collection=metafeed&metafeed=http%3A%2F%2Fp2pnextfeed1.rad0.net%2Fcontent%2Ffeed%2Fbbc'\r
+print >>sys.stderr,"searchemu: opening"\r
+x = urllib.urlopen(url)\r
+print >>sys.stderr,"searchemu: reading"\r
+print >>sys.stderr, x.read()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Plugin/searchpage-firefox.html b/instrumentation/next-share/BaseLib/Plugin/searchpage-firefox.html
new file mode 100644 (file)
index 0000000..f10fcd7
--- /dev/null
@@ -0,0 +1,96 @@
+<html>\r
+<head><title>Firefox SwarmPlugin Search Page</title></head>\r
+\r
+<body>\r
+\r
+\r
+<h1>Firefox SwarmPlugin Search Page</h1>\r
+\r
+\r
+<h2>Empty Plugin</h2>\r
+\r
+<embed type="application/x-ns-stream"\r
+name="vlcName"\r
+id="vlc"\r
+autoplay="no" loop="yes" width="0" height="0"\r
+target="" />\r
+<br>\r
+NSSA reports: <em><p id="p2pstatusline"></p></em>\r
+\r
+\r
+<script language="Javascript">\r
+var vlc = document.getElementById("vlc");\r
+\r
+vlc.log.verbosity = 3;\r
+\r
+function debugonclick()\r
+{\r
+    newwindow2=window.open('','name','height=400,width=640,scrollbars=1,resizable=1');\r
+    var tmp = newwindow2.document;\r
+    tmp.write('<html><head><title>popup</title>');\r
+    tmp.write('</head><body><p>this is once again a popup.</p>');\r
+\r
+    tmp.write("Count "+document.vlc.log.messages.count);\r
+\r
+    var iter = document.vlc.log.messages.iterator();\r
+    while(iter.hasNext)\r
+    {\r
+        msg = iter.next();\r
+        tmp.write(msg.message+"<br>");\r
+    }\r
+\r
+    tmp.write('</body></html>');\r
+    tmp.close();\r
+\r
+}\r
+\r
+    \r
+function updatep2pstatus()\r
+{\r
+    line = document.vlc.input.p2psearchurl\r
+    var p2pstatusline = document.getElementById("p2pstatusline");\r
+       p2pstatusline.innerHTML = "Sjaak "+line\r
+}\r
+\r
+function activatePeriodicReport()\r
+{\r
+       setInterval(updatep2pstatus,100);\r
+}\r
+\r
+function seek(percentage)\r
+{\r
+       document.vlc.input.position = percentage\r
+}\r
+\r
+function dwrite(line)\r
+{\r
+       var dump = document.getElementById("dump");\r
+       var text = document.createTextNode (line)\r
+       dump.appendChild(text)\r
+}\r
+\r
+</script>\r
+\r
+\r
+<script language="Javascript">\r
+// Somehow calling setInterval only works in Firefox when called from\r
+// a function that is triggered externally???\r
+activatePeriodicReport();\r
+</script>\r
+\r
+\r
+<h2>Search</h2>\r
+\r
+<form method="get" action="http://127.0.0.1:6878/search">\r
+\r
+<input type="text" name="q" size="31" maxlength="255" value="" />\r
+<input type="submit" value="NSSA Search" />\r
+<input type="radio"  name="collection" value="metafeed" /> MetaFeed\r
+<input type="radio"  name="collection" value="buddycast" checked /> BuddyCast <br />\r
+<input type="hidden" name="metafeed" value="http://p2pnextfeed1.rad0.net/content/feed/bbc">\r
+</form>\r
+\r
+<hr>\r
+\r
+</body>\r
+</html>\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/searchpage-ie.html b/instrumentation/next-share/BaseLib/Plugin/searchpage-ie.html
new file mode 100644 (file)
index 0000000..82fec4b
--- /dev/null
@@ -0,0 +1,83 @@
+<html>\r
+<head><title>IE SwarmPlugin Search Page</title></head>\r
+\r
+<body>\r
+\r
+\r
+<h1>IE SwarmPlugin Search Page</h1>\r
+\r
+<h2>Empty Plugin</h2>\r
+<object classid="clsid:1800B8AF-4E33-43C0-AFC7-894433C13538"\r
+        width="0" height="0" id="vlc" events="True" target=''>\r
+<param name="Src" value="" />\r
+<param name="ShowDisplay" value="True" />\r
+<param name="Loop" value="False" />\r
+<param name="AutoPlay" value="False" />\r
+</object>\r
+\r
+<br>\r
+NSSA reports: <em><p id="p2pstatusline"></p></em>\r
+\r
+\r
+<script language="Javascript">\r
+var vlc = document.getElementById("vlc");\r
+\r
+//vlc.log.verbosity = 3;\r
+\r
+function debugonclick()\r
+{\r
+    newwindow2=window.open('','name','height=400,width=640,scrollbars=1,resizable=1');\r
+    var tmp = newwindow2.document;\r
+    tmp.write('<html><head><title>popup</title>');\r
+    tmp.write('</head><body><p>this is once again a popup.</p>');\r
+\r
+    tmp.write("Count "+document.vlc.log.messages.count);\r
+\r
+    var iter = document.vlc.log.messages.iterator();\r
+    while(iter.hasNext)\r
+    {\r
+        msg = iter.next();\r
+        tmp.write(msg.message+"<br>");\r
+    }\r
+\r
+    tmp.write('</body></html>');\r
+    tmp.close();\r
+\r
+}\r
+\r
+    \r
+function updatep2pstatus()\r
+{\r
+    line = document.vlc.input.p2pstatus\r
+    var p2pstatusline = document.getElementById("p2pstatusline");\r
+       p2pstatusline.innerHTML = line\r
+}\r
+\r
+setInterval(updatep2pstatus,100)\r
+\r
+\r
+function seek(percentage)\r
+{\r
+       document.vlc.input.position = percentage\r
+}\r
+\r
+</script>\r
+\r
+\r
+<h2>Search</h2>\r
+\r
+<form method="get" action="http://127.0.0.1:6878/search">\r
+\r
+<input type="text" name="q" size="31" maxlength="255" value="" />\r
+<input type="submit" value="NSSA Search" />\r
+<input type="radio"  name="collection" value="metafeed" /> MetaFeed\r
+<input type="radio"  name="collection" value="buddycast" checked /> BuddyCast <br />\r
+<input type="hidden" name="metafeed" value="http://p2pnextfeed1.rad0.net/content/feed/bbc">\r
+</form>\r
+\r
+<hr>\r
+\r
+\r
+</body>\r
+</html>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Plugin/searchpage.html b/instrumentation/next-share/BaseLib/Plugin/searchpage.html
new file mode 100644 (file)
index 0000000..4874dd2
--- /dev/null
@@ -0,0 +1,19 @@
+<html>\r
+<head><title>NSSA Search Page</title></head>\r
+\r
+<body>\r
+\r
+\r
+<h1><img src="p2p-next_small.png"/>NSSA Search page</h1>\r
+\r
+<form method="get" action="http://127.0.0.1:6879/search">\r
+\r
+<input type="text" name="q" size="31" maxlength="255" value="" />\r
+<input type="submit" value="NSSA Search" />\r
+<input type="radio"  name="collection" value="metafeed" /> MetaFeed\r
+<input type="radio"  name="collection" value="buddycast" checked /> BuddyCast <br />\r
+<input type="hidden" name="metafeed" value="http://p2pnextfeed1.rad0.net/content/feed/bbc">\r
+</form>\r
+\r
+</body>\r
+</html>\r
diff --git a/instrumentation/next-share/BaseLib/Policies/RateManager.py b/instrumentation/next-share/BaseLib/Policies/RateManager.py
new file mode 100644 (file)
index 0000000..e287e28
--- /dev/null
@@ -0,0 +1,301 @@
+# Written by Arno Bakker and ABC authors 
+# see LICENSE.txt for license information
+
+import sys
+from sets import Set
+from threading import RLock
+from traceback import print_exc
+
+
+from BaseLib.Core.simpledefs import *
+
+DEBUG = False
+
+
+class RateManager:  # base class: gathers DownloadStates per round, then redistributes speed limits
+    def __init__(self):
+        self.lock = RLock()  # reentrant: public methods may call each other while held
+        self.statusmap = {}  # DLSTATUS_* -> list of DownloadStates collected this round
+        self.currenttotal = {}  # direction (UPLOAD/DOWNLOAD) -> summed current speed this round
+        self.dset = Set()  # Downloads already seen this round (deduplication)
+        self.clear_downloadstates()
+        
+    def add_downloadstate(self,ds):
+        """ Returns the number of unique states currently stored """
+        if DEBUG:
+            print >>sys.stderr,"RateManager: add_downloadstate",`ds.get_download().get_def().get_infohash()`
+            
+        self.lock.acquire()
+        try:
+            d = ds.get_download()
+            if d not in self.dset:  # ignore duplicate states for the same Download
+                self.statusmap[ds.get_status()].append(ds)
+                for dir in [UPLOAD,DOWNLOAD]:
+                    self.currenttotal[dir] += ds.get_current_speed(dir)
+                self.dset.add(d)
+            return len(self.dset)
+        finally:
+            self.lock.release()
+
+    def add_downloadstatelist(self, dslist):
+        for ds in dslist:
+            self.add_downloadstate(ds)
+            
+    def adjust_speeds(self):
+        """ Adjust speeds for the specified set of downloads and clears the set """
+        self.lock.acquire()
+        try:
+            self.calc_and_set_speed_limits(DOWNLOAD)
+            self.calc_and_set_speed_limits(UPLOAD)
+            self.clear_downloadstates()  # reset for the next measurement round
+        finally:
+            self.lock.release()
+
+
+    def clear_downloadstates(self):
+        self.statusmap[DLSTATUS_ALLOCATING_DISKSPACE] = []
+        self.statusmap[DLSTATUS_WAITING4HASHCHECK] = []
+        self.statusmap[DLSTATUS_HASHCHECKING] = []
+        self.statusmap[DLSTATUS_DOWNLOADING] = []
+        self.statusmap[DLSTATUS_SEEDING] = []
+        self.statusmap[DLSTATUS_STOPPED] = []
+        self.statusmap[DLSTATUS_STOPPED_ON_ERROR] = []
+        self.statusmap[DLSTATUS_REPEXING] = [] # RePEX: needed to prevent KeyError
+        for dir in [UPLOAD,DOWNLOAD]:
+            self.currenttotal[dir] = 0
+        self.dset.clear()
+
+    #
+    # Internal methods
+    #
+    #
+    # The following methods are all called with the lock held
+    #
+
+    def calc_and_set_speed_limits(self,direct):
+        """ Override this method to write your own speed management policy. """
+        pass
+
+
+class UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager(RateManager):
+    """ This class implements a simple rate management policy that:
+    1. If the API user set a desired speed for a particular download,
+       the speed limit for this download is set to the desired value.
+    2. For all torrents for which no desired speeds have been set, 
+       the global limit is equally divided amongst all downloads.
+       (however small the piece of the pie may be).
+    3. There are separate global limits for download speed, upload speed
+       and upload speed when all torrents are seeding. 
+    """
+    def __init__(self):
+        RateManager.__init__(self)
+        self.global_max_speed = {}  # direction -> global cap; 0.0 means unlimited
+        self.global_max_speed[UPLOAD] = 0.0
+        self.global_max_speed[DOWNLOAD] = 0.0
+        self.global_max_seedupload_speed = 0.0  # separate upload cap used when everything is seeding
+
+    def set_global_max_speed(self,direct,speed):
+        self.lock.acquire()
+        self.global_max_speed[direct] = speed
+        self.lock.release()
+        
+    def set_global_max_seedupload_speed(self,speed):
+        self.lock.acquire()
+        self.global_max_seedupload_speed = speed
+        self.lock.release()
+
+    def calc_and_set_speed_limits(self, dir = UPLOAD):  # policy hook, called with self.lock held (see base class)
+        
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits",dir
+        
+        if dir == UPLOAD:
+            workingset = self.statusmap[DLSTATUS_DOWNLOADING]+self.statusmap[DLSTATUS_SEEDING]
+        else:
+            workingset = self.statusmap[DLSTATUS_DOWNLOADING]
+
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits: len workingset",len(workingset)
+
+        # Limit working set to active torrents with connections:
+        newws = []
+        for ds in workingset:
+            if ds.get_num_peers() > 0:
+                newws.append(ds)
+        workingset = newws
+
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits: len active workingset",len(workingset)
+
+        # No active file, no need to calculate
+        if not workingset:
+            return
+        
+        globalmaxspeed = self.get_global_max_speed(dir)
+        # See if global speed settings are set to unlimited
+        if globalmaxspeed == 0:
+            # Unlimited speed: grant each download its own desired max (0.0 = unlimited)
+            for ds in workingset:
+                d = ds.get_download()
+                d.set_max_speed(dir,d.get_max_desired_speed(dir)) 
+            return
+        
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits: globalmaxspeed is",globalmaxspeed,dir
+
+        # User set priority is always granted, ignoring global limit
+        todoset = []
+        for ds in workingset:
+            d = ds.get_download()
+            maxdesiredspeed = d.get_max_desired_speed(dir)
+            if maxdesiredspeed > 0.0:
+                d.set_max_speed(dir,maxdesiredspeed)
+            else:
+                todoset.append(ds)  # no user preference: shares the remaining global budget
+
+        if len(todoset) > 0:
+            # Rest divides globalmaxspeed equally
+            localmaxspeed = globalmaxspeed / float(len(todoset))
+            # if too small, then it's the user's problem
+            
+            if DEBUG:
+                print >>sys.stderr,"RateManager: calc_and_set_speed_limits: localmaxspeed is",localmaxspeed,dir
+
+            for ds in todoset:
+                d = ds.get_download()
+                d.set_max_speed(dir,localmaxspeed)
+
+
+    def get_global_max_speed(self, dir = UPLOAD):
+        if dir == UPLOAD and len(self.statusmap[DLSTATUS_DOWNLOADING]) == 0 and len(self.statusmap[DLSTATUS_SEEDING]) > 0:
+            # Static overall maximum up speed when seeding
+            return self.global_max_seedupload_speed
+        else:
+            return self.global_max_speed[dir]
+           
+
+class UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager(UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager):
+    """ This class implements a simple rate management policy that:
+    1. If the API user set a desired speed for a particular download,
+       the speed limit for this download is set to the desired value.
+    2. For all torrents for which no desired speeds have been set, 
+       the global limit is divided on demand amongst all downloads.
+    3. There are separate global limits for download speed, upload speed
+       and upload speed when all torrents are seeding. 
+       
+    TODO: if vod: give all of global limit? Do this at higher level: stop
+    all dls when going to VOD
+    """
+    def __init__(self):
+        UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager.__init__(self)
+
+        self.ROOM = 5.0 # the amount of speed headroom granted to underutilizing downloads
+
+    def calc_and_set_speed_limits(self, dir = UPLOAD):
+        
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits",dir
+        
+        if dir == UPLOAD:
+            workingset = self.statusmap[DLSTATUS_DOWNLOADING]+self.statusmap[DLSTATUS_SEEDING]
+        else:
+            workingset = self.statusmap[DLSTATUS_DOWNLOADING]
+
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits: len workingset",len(workingset)
+
+        # Limit working set to active torrents with connections:
+        newws = []
+        for ds in workingset:
+            if ds.get_num_peers() > 0:
+                newws.append(ds)
+        workingset = newws
+
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits: len new workingset",len(workingset)
+            for ds in workingset:
+                d = ds.get_download()
+                print >>sys.stderr,"RateManager: calc_and_set_speed_limits: working is",d.get_def().get_name()
+
+        # No active file, not need to calculate
+        if not workingset:
+            return
+        
+        globalmaxspeed = self.get_global_max_speed(dir)
+        # See if global speed settings are set to unlimited
+        if globalmaxspeed == 0:
+            # Unlimited speed
+            for ds in workingset:
+                d = ds.get_download()
+                d.set_max_speed(dir,d.get_max_desired_speed(dir)) 
+            return
+        
+        if DEBUG:
+            print >>sys.stderr,"RateManager: calc_and_set_speed_limits: globalmaxspeed is",globalmaxspeed,dir
+
+        # User set priority is always granted, ignoring global limit
+        todoset = []
+        for ds in workingset:
+            d = ds.get_download()
+            maxdesiredspeed = d.get_max_desired_speed(dir)
+            if maxdesiredspeed > 0.0:
+                d.set_max_speed(dir,maxdesiredspeed)
+            else:
+                todoset.append(ds)
+
+        if len(todoset) > 0:
+            # Rest divides globalmaxspeed based on their demand
+            localmaxspeed = globalmaxspeed / float(len(todoset))
+            # if too small than user's problem
+            
+            if DEBUG:
+                print >>sys.stderr,"RateManager: calc_and_set_speed_limits: localmaxspeed is",localmaxspeed,dir
+
+            # See if underutilizers and overutilizers. If not, just divide equally
+            downloadsatmax = False
+            downloadsunderutil = False
+            for ds in todoset:
+                d = ds.get_download()
+                currspeed = ds.get_current_speed(dir)
+                currmaxspeed = d.get_max_speed(dir)
+                
+                newmaxspeed = currspeed+self.ROOM
+                if currspeed >= (currmaxspeed-3.0): # dl needs more
+                    downloadsatmax = True
+                elif newmaxspeed < localmaxspeed: # dl got quota to spare
+                    downloadsunderutil = True
+
+            if downloadsatmax and downloadsunderutil:
+                totalunused = 0.0
+                todoset2 = []
+                for ds in todoset:
+                    d = ds.get_download()
+                    currspeed = ds.get_current_speed(dir)
+
+                    newmaxspeed = currspeed+self.ROOM
+                    if newmaxspeed < localmaxspeed:
+                        # If unterutilizing:
+                        totalunused += (localmaxspeed-newmaxspeed)
+                        # Give current speed + 5.0 KB/s extra so it can grow
+                        print >>sys.stderr,"RateManager: calc_and_set_speed_limits: Underutil set to",newmaxspeed
+                        d.set_max_speed(dir,newmaxspeed)
+                    else:
+                        todoset2.append(ds)
+    
+                # Divide the unused bandwidth equally amongst others
+                if len(todoset2) > 0:
+                    pie = float(len(todoset2)) * localmaxspeed + totalunused
+                    piece = pie / float(len(todoset2))
+                    for ds in todoset:
+                        d = ds.get_download()
+                        print >>sys.stderr,"RateManager: calc_and_set_speed_limits: Overutil set to",piece
+                        d.set_max_speed(dir,piece)
+                else:
+                    # what the f? No overutilizers now?
+                    print >>sys.stderr,"UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager: Internal error: No overutilizers anymore?"
+            else:
+                # No over and under utilizers, just divide equally
+                for ds in todoset:
+                    d = ds.get_download()
+                    print >>sys.stderr,"RateManager: calc_and_set_speed_limits: Normal set to",piece
+                    d.set_max_speed(dir,localmaxspeed)
diff --git a/instrumentation/next-share/BaseLib/Policies/SeedingManager.py b/instrumentation/next-share/BaseLib/Policies/SeedingManager.py
new file mode 100644 (file)
index 0000000..df38b4d
--- /dev/null
@@ -0,0 +1,273 @@
+# Written by Boxun Zhang
+# see LICENSE.txt for license information
+
+import binascii
+import cPickle
+import os
+import sys
+import time 
+
+from BaseLib.Core.simpledefs import *
+
+DEBUG = False
+
+STORAGE_VERSION_ONE = 1
+STORAGE_VERSION_CURRENT = STORAGE_VERSION_ONE
+
+class GlobalSeedingManager:
+    def __init__(self, Read, storage_dir):
+        # directory where all pickled data must be kept
+        self.storage_dir = storage_dir
+
+        # seeding managers containing infohash:seeding_manager pairs
+        self.seeding_managers = {}
+
+        # callback to read from abc configuration file
+        self.Read = Read
+
+        self.prepare_storage()
+
+    def prepare_storage(self):
+        if not os.path.exists(self.storage_dir):
+            if DEBUG: print >>sys.stderr, "SeedingManager: created storage_dir", self.storage_dir
+            os.mkdir(self.storage_dir)
+
+    def write_all_storage(self):
+        for infohash, seeding_manager in self.seeding_managers.iteritems():
+            self.write_storage(infohash, seeding_manager.get_updated_storage())
+
+    def read_storage(self, infohash):
+        filename = os.path.join(self.storage_dir, binascii.hexlify(infohash) + ".pickle")
+        if os.path.exists(filename):
+            if DEBUG: print >>sys.stderr, "SeedingManager: read_storage", filename
+            storage = cPickle.load(open(filename, "rb"))
+            # Any version upgrading must be done here
+
+            if storage["version"] == STORAGE_VERSION_CURRENT:
+                return storage
+
+        # return new storage confirming to version
+        # STORAGE_VERSION_CURRENT
+        return {"version":STORAGE_VERSION_CURRENT,
+                "total_up":0L,
+                "total_down":0L,
+                "time_seeding":0L}
+
+    def write_storage(self, infohash, storage):
+        filename = os.path.join(self.storage_dir, binascii.hexlify(infohash) + ".pickle")
+        if DEBUG: print >>sys.stderr, "SeedingManager: write_storage", filename
+        cPickle.dump(storage, open(filename, "wb"))
+    
+    def apply_seeding_policy(self, dslist):
+        # Remove stoped seeds
+        for infohash, seeding_manager in self.seeding_managers.items():
+            if not seeding_manager.download_state.get_status() == DLSTATUS_SEEDING:
+                self.write_storage(infohash, seeding_manager.get_updated_storage())
+                del self.seeding_managers[infohash]
+
+        for download_state in dslist:
+
+            if download_state.get_status() == DLSTATUS_SEEDING:
+                infohash = download_state.get_download().get_def().get_infohash()
+                if infohash in self.seeding_managers:
+                    self.seeding_managers[infohash].update_download_state(download_state)
+
+                else:
+                    # apply new seeding manager
+                    seeding_manager = SeedingManager(download_state, self.read_storage(infohash))
+
+                    t4t_option = self.Read('t4t_option', "int")
+                    if t4t_option == 0:
+                        # No Bittorrent leeching, seeding until sharing ratio = 1.0
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: TitForTatRatioBasedSeeding"
+                        seeding_manager.set_t4t_policy(TitForTatRatioBasedSeeding())
+
+                    elif t4t_option == 1:
+                        # Unlimited seeding
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: UnlimitedSeeding (for t4t)"
+                        seeding_manager.set_t4t_policy(UnlimitedSeeding())
+
+                    elif t4t_option == 2:
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: TitForTatTimeBasedSeeding"
+                            # Time based seeding
+                        seeding_manager.set_t4t_policy(TitForTatTimeBasedSeeding(self.Read))
+
+                    else:
+                        # t4t_option == 3, no seeding 
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: NoSeeding (for t4t)"
+                        seeding_manager.set_t4t_policy(NoSeeding())
+
+                    g2g_option = self.Read('g2g_option', "int")
+                    if g2g_option == 0:
+                        # Seeding to peers with large sharing ratio
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: GiveToGetRatioBasedSeeding"
+                        seeding_manager.set_g2g_policy(GiveToGetRatioBasedSeeding(self.Read))
+
+                    elif g2g_option == 1:
+                        # Boost your reputation
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: UnlimitedSeeding (for g2g)"
+                        seeding_manager.set_g2g_policy(UnlimitedSeeding())
+
+                    elif g2g_option == 2:
+                        # Seeding for sometime
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: GiveToGetTimeBasedSeeding"
+                        seeding_manager.set_g2g_policy(GiveToGetTimeBasedSeeding(self.Read))
+
+                    else:
+                        # g2g_option == 3, no seeding
+                        if DEBUG: print >>sys.stderr, "GlobalSeedingManager: NoSeeding (for g2g)"
+                        seeding_manager.set_g2g_policy(NoSeeding())
+                
+                    # Apply seeding manager
+                    download_state.get_download().set_seeding_policy(seeding_manager)
+                    self.seeding_managers[infohash] = seeding_manager
+        
+        # if DEBUG: print >>sys.stderr,"GlobalSeedingManager: current seedings: ", len(self.seeding_managers), "out of", len(dslist), "downloads"
+
+class SeedingManager:
+    def __init__(self, download_state, storage):
+        self.storage = storage
+        self.download_state = download_state
+        self.t4t_policy = None
+        self.g2g_policy = None
+        
+        self.t4t_eligible = True
+        self.g2g_eligible = True
+
+        self.time_start = time.time()
+
+    def get_updated_storage(self):
+        """
+        Returns a new storage object that is updated with the last
+        information from the download_state
+        """
+        return {"version":STORAGE_VERSION_ONE,
+                "total_up":self.storage["total_up"] + self.download_state.get_total_transferred(UPLOAD),
+                "total_down":self.storage["total_down"] + self.download_state.get_total_transferred(DOWNLOAD),
+                "time_seeding":self.storage["time_seeding"] + time.time() - self.time_start}
+
+    def update_download_state(self, download_state):
+        self.download_state = download_state
+    
+    def is_conn_eligible(self, conn):
+        if conn.use_g2g:
+            self.g2g_eligible = self.g2g_policy.apply(conn, self.download_state, self.storage)
+            if DEBUG:
+                if self.g2g_eligible:
+                    print >>sys.stderr,"AllowSeeding to g2g peer: ",self.download_state.get_download().get_dest_files()
+                else:
+                    print >>sys.stderr,"DenySeeding to g2g peer: ",self.download_state.get_download().get_dest_files()
+
+            # stop download when neither t4t_eligible nor g2g_eligible
+            if not (self.t4t_eligible or self.g2g_eligible):
+                if DEBUG: print >>sys.stderr,"Stop seedings: ",self.download_state.get_download().get_dest_files()
+                self.download_state.get_download().stop()
+            
+            return self.g2g_eligible
+            
+        else:
+            self.t4t_eligible = self.t4t_policy.apply(conn, self.download_state, self.storage)
+            
+            if DEBUG:
+                if self.t4t_eligible:
+                    print >>sys.stderr,"AllowSeeding to t4t peer: ",self.download_state.get_download().get_dest_files()
+                else:
+                    print >>sys.stderr,"DenySeeding to t4t peer: ",self.download_state.get_download().get_dest_files()
+            # stop download when neither t4t_eligible nor g2g_eligible
+            if not (self.t4t_eligible or self.g2g_eligible):
+                if DEBUG: print >>sys.stderr,"Stop seedings: ",self.download_state.get_download().get_dest_files()
+                self.download_state.get_download().stop()
+            
+            return self.t4t_eligible
+            
+    
+    def set_t4t_policy(self, policy):
+        self.t4t_policy = policy
+        
+    def set_g2g_policy(self, policy):
+        self.g2g_policy = policy
+
+class SeedingPolicy:
+    # Abstract base class for per-connection seeding policies.
+    def __init__(self):
+        pass
+    
+    def apply(self, _, __, ___):
+        # Override in subclasses.  Arguments are (conn, download_state,
+        # storage); should return True while uploading to the connection
+        # is still allowed.  Base implementation returns None (falsy).
+        pass
+    
+class UnlimitedSeeding(SeedingPolicy):
+    # Policy that always allows seeding, regardless of peer or history.
+    def __init__(self):
+        SeedingPolicy.__init__(self)
+    
+    def apply(self, _, __, ___):
+        return True
+
+
+class NoSeeding(SeedingPolicy):
+    # Policy that never allows seeding.
+    def __init__(self):
+        SeedingPolicy.__init__(self)
+    
+    def apply(self, _, __, ___):
+        return False
+
+class TitForTatTimeBasedSeeding(SeedingPolicy):
+    def __init__(self, Read):
+        SeedingPolicy.__init__(self)
+        self.Read = Read
+        self.begin = time.time()
+    
+    def apply(self, _, __, storage):
+        current = storage["time_seeding"] + time.time() - self.begin
+        limit = long(self.Read('t4t_hours', "int"))*3600 + long(self.Read('t4t_mins', "int"))*60
+        if DEBUG: print >>sys.stderr, "TitForTatTimeBasedSeeding: apply:", current, "/", limit
+        return current <= limit
+
+class GiveToGetTimeBasedSeeding(SeedingPolicy):
+    def __init__(self, Read):
+        SeedingPolicy.__init__(self)
+        self.Read = Read
+        self.begin = time.time()
+    
+    def apply(self, _, __, storage):
+        current = storage["time_seeding"] + time.time() - self.begin
+        limit = long(self.Read('g2g_hours', "int"))*3600 + long(self.Read('g2g_mins', "int"))*60
+        if DEBUG: print >>sys.stderr, "GiveToGetTimeBasedSeeding: apply:", current, "/", limit
+        return current <= limit
+    
+class TitForTatRatioBasedSeeding(SeedingPolicy):
+    def __init__(self):
+        SeedingPolicy.__init__(self)
+        
+    def apply(self, _, download_state, storage):
+        # No Bittorrent leeching (minimal ratio of 1.0)
+        ul = storage["total_up"] + download_state.get_total_transferred(UPLOAD)
+        dl = storage["total_down"] + download_state.get_total_transferred(DOWNLOAD)
+
+        if dl == 0L:
+            # no download will result in no-upload to anyone
+            ratio = 1.0
+        else:
+            ratio = 1.0*ul/dl
+
+        if DEBUG: print >>sys.stderr, "TitForTatRatioBasedSeeding: apply:", dl, ul, ratio
+
+        return ratio < 1.0
+
+class GiveToGetRatioBasedSeeding(SeedingPolicy):
+    def __init__(self, Read):
+        SeedingPolicy.__init__(self)
+        self.Read = Read
+    
+    def apply(self, conn, _, __):
+        # Seeding to peers with large sharing ratio
+        dl = conn.download.measure.get_total()
+        ul = conn.upload.measure.get_total()
+
+        if dl == 0L:
+            # no download will result in no-upload to anyone
+            ratio = 1.0
+        else:
+            ratio = 1.0*ul/dl
+    
+        if DEBUG: print >>sys.stderr, "GiveToGetRatioBasedSeedingapply:", dl, ul, ratio, self.Read('g2g_ratio', "int")/100.0
+        return ratio < self.Read('g2g_ratio', "int")/100.0
+
diff --git a/instrumentation/next-share/BaseLib/Policies/__init__.py b/instrumentation/next-share/BaseLib/Policies/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Subscriptions/__init__.py b/instrumentation/next-share/BaseLib/Subscriptions/__init__.py
new file mode 100644 (file)
index 0000000..86ac17b
--- /dev/null
@@ -0,0 +1,3 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+
diff --git a/instrumentation/next-share/BaseLib/Subscriptions/rss_client.py b/instrumentation/next-share/BaseLib/Subscriptions/rss_client.py
new file mode 100644 (file)
index 0000000..540d933
--- /dev/null
@@ -0,0 +1,639 @@
+# Written by Freek Zindel, Arno Bakker
+# see LICENSE.txt for license information
+#
+#this is a very limited torrent rss reader. 
+#works on some sites, but not on others due to captchas or username/password requirements for downloads.
+
+#usage: make a torrentfeedreader instance and call refresh whenever you would like to check that feed for new torrents. e.g. every 15 minutes.
+#
+# Arno, 2007-05-7: We now store the urls visited on disk and don't recontact them for a certain period
+#       I've added special support for vuze torrents that have the links to the .torrent in the RSS XML
+#       but not as an <link> tag.
+#
+#       In addition, I've set the reader to be conservative for now, it only looks at .torrent files
+#       directly mentioned in the RSS XML, no recursive parsing, that, in case of vuze, visits a lot
+#       of sites unnecessarily and uses Java session IDs (";jsessionid") in the URLs, which renders
+#       our do-not-visit-if-recently-visited useless.
+#
+# 2007-05-08: vuze appears to have added a ;jsessionid to the <enclosure> tag. I now strip that for
+# the URLHistory, but use it in requests. So don't be alarmed by the ;jsessionid in the debug messages.
+#
+# 2008-04-04: vuze appears to have changed format altogether: It no longer
+# adheres to RSS. <item> is called <entry> and <enclosure> is called <content>
+#
+
+import os
+import sys
+import binascii
+import traceback
+from BaseLib.Core.Utilities.timeouturlopen import urlOpenTimeout
+#from BitTornado.zurllib import urlopen
+import re
+import urlparse
+from xml.dom.minidom import parseString
+from xml.parsers.expat import ExpatError
+from threading import Thread,RLock,Event
+import time
+
+from BaseLib.Core.API import *
+
+import sha
+
+from BaseLib.Core.BitTornado.bencode import bdecode,bencode
+from BaseLib.Core.Overlay.permid import permid_for_user,sign_data,verify_data
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin, NULL
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import ChannelCastDBHandler
+from BaseLib.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler
+
+from urllib2 import Request, urlopen, URLError, HTTPError
+
+URLHIST_TIMEOUT = 7*24*3600.0 # Don't revisit links for this time
+RSS_RELOAD_FREQUENCY = 30*60      # reload a rss source every n seconds
+RSS_CHECK_FREQUENCY = 1  # test a potential .torrent in a rss source every n seconds
+
+DEBUG = False
+
+class TorrentFeedThread(Thread):
+    
+    __single = None
+    
+    def __init__(self):
+        if TorrentFeedThread.__single:
+            raise RuntimeError, "TorrentFeedThread is singleton"
+        TorrentFeedThread.__single = self
+        Thread.__init__(self)
+        self.setName( "TorrentFeed"+self.getName())
+        self.setDaemon(True)
+
+        self.urls = {}
+        self.feeds = []
+        self.lock = RLock()
+        self.done = Event()
+
+        # a list containing methods that are called whenever a RSS
+        # feed on ANY of the urls is received
+        self.callbacks = []
+
+        # when rss feeds change, we have to restart the checking
+        self.feeds_changed = Event()
+
+    def getInstance(*args, **kw):
+        if TorrentFeedThread.__single is None:
+            TorrentFeedThread(*args, **kw)
+        return TorrentFeedThread.__single
+    getInstance = staticmethod(getInstance)
+    
+    def register(self,session):
+        self.session = session
+        self.reloadfrequency = RSS_RELOAD_FREQUENCY
+        self.checkfrequency = RSS_CHECK_FREQUENCY
+        
+        self.torrent_dir = self.session.get_torrent_collecting_dir()
+        self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
+        self.channelcast_db = self.session.open_dbhandler(NTFY_CHANNELCAST)
+        
+        self.rtorrent_handler = RemoteTorrentHandler.getInstance()
+        
+        filename = self.getfilename()
+        dirname = os.path.dirname(filename)
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+
+        # read any rss feeds that are currently outstanding
+        self.readfile()
+
+        #self.addURL('http://www.legaltorrents.com/feeds/cat/netlabel-music.rss')
+        #self.addFile('ubuntu-9.04-desktop-i386.iso.torrent')
+
+    def addFile(self, filename):
+        """ This function enables to add individual torrents, instead of a collection of torrents through RSS """
+        try:
+            bdata = open(filename, 'rb').read()
+            torrent_data = bdecode(bdata)
+            infohash = sha.sha(bencode(torrent_data['info'])).digest()
+            if DEBUG: print >>sys.stderr,"subscrip:Adding a torrent in my channel: %s" % torrent_data["info"]["name"]
+            self.save_torrent(infohash, bdata, torrent_data)
+
+            # 01/02/10 Boudewijn: we should use the TorrendDef to read
+            # the .torrent file.  However, we will also write the
+            # torrent file, and the TorrentDef would do all sorts of
+            # checking that will take way to much time here.  So we
+            # won't for now...
+            torrentdef = TorrentDef.load(filename)
+
+            self.channelcast_db.addOwnTorrent(torrentdef)
+            return torrentdef.get_infohash()
+        except:
+            print >> sys.stderr, "Could not add torrent:", filename
+            traceback.print_exc()
+            return None
+
+    def addCallback(self, callback):
+        self.lock.acquire()
+        try:
+            if not callback in self.callbacks:
+                self.callbacks.append(callback)
+        finally:
+            self.lock.release()
+
+    def removeCallback(self, callback):
+        self.lock.acquire()
+        try:
+            self.callbacks.remove(callback)
+        finally:
+            self.lock.release()
+
+#    def setURLCallback(self, url, callback):
+#        self.lock.acquire()
+#      for tup in self.feeds:
+#            if tup[0].feed_url == url:
+#               tup[2] = callback
+#        self.lock.release()
+    
+    def addURL(self, url, dowrite=True, status="active", callback=None):
+        if DEBUG: print >> sys.stderr , "callback", url, callback
+        def on_torrent_callback(rss_url, infohash, torrent_data):
+            # 01/02/10 Boudewijn: we should use the TorrendDef to read
+            # the .torrent file.  However, we will also write the
+            # torrent file, and the TorrentDef would do all sorts of
+            # checking that will take way to much time here.  So we
+            # won't for now...
+            torrentdef = TorrentDef.load_from_dict(torrent_data)
+            if DEBUG: print >>sys.stderr,"subscrip:Adding a torrent in my channel: %s" % torrentdef.get_name_as_unicode()
+            self.channelcast_db.addOwnTorrent(torrentdef)
+
+        self.lock.acquire()
+        if url not in self.urls:
+            self.urls[url] = status
+            if status == "active":
+                feed = TorrentFeedReader(url,self.gethistfilename(url))
+                self.feeds.append([feed, on_torrent_callback, callback])
+                self.feeds_changed.set()
+            if dowrite:
+                self.writefile()
+        if callback:
+            for tup in self.feeds:
+                if tup[0].feed_url == url:
+                   tup[2] = callback
+        self.lock.release()
+
+    def readfile(self):
+        try:
+            filename = self.getfilename()
+            f = open(filename,"rb")
+            for line in f.readlines():
+                for key in ['active', 'inactive']:
+                    if line.startswith(key):
+                        url = line[len(key)+1:-2] # remove \r\n
+                        if DEBUG:
+                            print >>sys.stderr,"subscrip: Add from file URL",url,"EOU"
+                        self.addURL(url,dowrite=False,status=key)
+            f.close()        
+        except:
+            print >>sys.stderr, "rss_client: subscriptions.txt does not yet exist"
+    
+    def writefile(self):
+        filename = self.getfilename()
+        f = open(filename,"wb")
+        for url in self.urls:
+            val = self.urls[url]
+            f.write(val+' '+url+'\r\n')
+        f.close()
+
+    def getfilename(self):
+        return os.path.join(self.getdir(),"subscriptions.txt")
+
+    def gethistfilename(self,url):
+        # TODO: url2pathname or something that gives a readable filename
+        h = sha.sha(url).hexdigest()
+        return os.path.join(self.getdir(),h+'.txt')
+    
+    """    
+    def getdir(self):
+        return os.path.join(self.utility.getConfigPath(),"subscriptions")
+    """
+    
+    def getdir(self):
+        return os.path.join(self.session.get_state_dir(),"subscriptions")
+        
+    def getUrls(self, status="active"):
+        """
+        returns a list with urls matching status
+        """
+        self.lock.acquire()
+        try:
+            return [url for url, url_status in self.urls.iteritems() if url_status == status]
+        finally:
+            self.lock.release()
+    
+    def getURLs(self):
+        return self.urls # doesn't need to be locked
+        
+    def setURLStatus(self,url,newstatus):
+        self.lock.acquire()
+        if DEBUG:
+            print >>sys.stderr,"subscrip: setURLStatus",url,newstatus
+        newtxt = "active"
+        if newstatus == False:
+            newtxt = "inactive"
+        if DEBUG:
+            print >>sys.stderr,"subscrip: setURLStatus: newstatus set to",url,newtxt
+        if url in self.urls:
+            self.urls[url] = newtxt
+            self.writefile()
+        elif DEBUG:
+            print >>sys.stderr,"subscrip: setURLStatus: unknown URL?",url
+        self.lock.release()
+    
+    def deleteURL(self,url):
+        self.lock.acquire()
+        if url in self.urls:
+            del self.urls[url]
+            for i in range(len(self.feeds)):
+                feed = self.feeds[i]
+                if feed[0] == url:
+                    del self.feeds[i]
+                    self.feeds_changed.set()
+                    break
+            self.writefile()
+        self.lock.release()
+    
+       
+        
+    def run(self):
+        time.sleep(10) # Let other Tribler components, in particular, Session startup
+        while not self.done.isSet():
+            self.lock.acquire()
+            cfeeds = self.feeds[:]
+            self.feeds_changed.clear()
+            self.lock.release()
+            
+            # feeds contains (rss_url, generator) pairs
+            feeds = {}
+            for feed, on_torrent_callback, user_callback in cfeeds:
+                try:
+                    sugestion_generator = feed.refresh()
+                except:
+                    pass
+                else:
+                    feeds[feed.feed_url] = sugestion_generator
+
+            # loop through the feeds and try one from each feed at a time
+            while feeds:
+                for (rss_url, generator) in feeds.items():
+                    if rss_url is None or generator is None:
+                        break
+
+                    # are there items left in this generator
+                    try:
+                        title, urlopenobj = generator.next()
+                        if not urlopenobj:
+                            if DEBUG: print >>sys.stderr,"subscrip:urlopenobj NONE: torrent not found", title
+                            continue
+                        elif DEBUG:
+                            print >>sys.stderr,"subscrip:urlopenobj : torrent found", title 
+
+                        bdata = urlopenobj.read()
+                        urlopenobj.close()
+                        torrent_data = bdecode(bdata)
+                        
+                        #tdef = TorrentDef.load_from_dict(torrent_data)
+                        
+                        if 'info' in torrent_data:
+                            infohash = sha.sha(bencode(torrent_data['info'])).digest()
+                            if not self.torrent_db.hasTorrent(infohash):
+                                if DEBUG:
+                                    if "name" in torrent_data["info"]:
+                                        print >>sys.stderr,"subscrip:Injecting", torrent_data["info"]["name"]
+                                    else:
+                                        print >>sys.stderr,"subscrip:Injecting", title
+                                self.save_torrent(infohash, bdata, torrent_data, source=rss_url)
+                                if on_torrent_callback:
+                                    if DEBUG: print >> sys.stderr , "ON TORRENT CALLBACK"
+                                    on_torrent_callback(rss_url, infohash, torrent_data)
+                                if user_callback:
+                                    if DEBUG: print >> sys.stderr , "USER CALLBACK"
+                                    user_callback(rss_url, infohash, torrent_data)
+
+                                # perform all non-url-specific callbacks
+                                self.lock.acquire()
+                                callbacks = self.callbacks[:]
+                                self.lock.release()
+
+                                for callback in callbacks:
+                                    try:
+                                        if DEBUG:
+                                            print >> sys.stderr , "RSS CALLBACK"
+                                        callback(rss_url, infohash, torrent_data)
+                                    except:
+                                        traceback.print_exc()
+
+                    except StopIteration:
+                        # there are no more items in generator
+                        del(feeds[rss_url])
+
+                    except ValueError:
+                        # the bdecode failed
+                        print >>sys.stderr,"subscrip:Bdecode failed: ", rss_url
+                    
+                    except (ExpatError, HTTPError):
+                        print >>sys.stderr,"subscrip:Invalid RSS: ", rss_url 
+
+                    # sleep in between torrent retrievals
+                    #time.sleep(self.intertorrentinterval)
+                    time.sleep(self.checkfrequency)
+
+                    self.lock.acquire()
+                    try:
+                        if self.feeds_changed.isSet():
+                            feeds = None
+                            break
+                    finally:
+                        self.lock.release()
+
+            # sleep for a relatively long time before downloading the
+            # rss feeds again
+            self.feeds_changed.wait(self.reloadfrequency)
+
+    def save_torrent(self,infohash,bdata,torrent_data,source=''):
+        hexinfohash = binascii.hexlify(infohash)
+        if DEBUG:
+            print >>sys.stderr,"subscript: Writing",hexinfohash
+
+        filename = os.path.join(self.torrent_dir, hexinfohash+'.torrent' )
+        f = open(filename,"wb")
+        f.write(bdata)
+        f.close()
+
+        # Arno: hack, make sure these torrents are always good so they show up
+        # in Torrent DBHandler.getTorrents()
+        extra_info = {'status':'good'}
+
+        # 01/02/10 Boudewijn: we should use the TorrendDef to write
+        # the .torrent file.  However, the TorrentDef would do all
+        # sorts of checking that will take way too much time here.  So
+        # we won't for now...
+        extra_info['filename'] = filename
+        torrentdef = TorrentDef.load_from_dict(torrent_data)
+
+        self.torrent_db.addExternalTorrent(torrentdef,source=source,extra_info=extra_info)
+
+        # ARNOCOMMENT: remove later
+        #self.torrent_db.commit()
+
+
+    def shutdown(self):
+        if DEBUG:
+            print >>sys.stderr,"subscrip: Shutting down subscriptions module"
+        self.done.set()
+        self.lock.acquire()
+        cfeeds = self.feeds[:]
+        self.lock.release()
+        for feed, on_torrent_callback, callback in cfeeds:
+            feed.shutdown()
+            
+        # self.utility.session.close_dbhandler(self.torrent_db)
+
+"""
+    def process_statscopy(self,statscopy):
+        today = []
+        yesterday = []
+        now = int(time())
+        sotoday = math.floor(now / (24*3600.0))*24*3600.0
+        soyester = sotoday - (24*3600.0)
+        for rss in statscopy:
+            for url,t in statscopy[rss]:
+                if t > sotoday:
+                    today.append(url)
+"""        
+
+class TorrentFeedReader:
+    def __init__(self,feed_url,histfilename):
+        self.feed_url = feed_url
+        self.urls_already_seen = URLHistory(histfilename)
+        # todo: the self.href_re expression does not take into account that single quotes, escaped quotes, etc. can be used
+        self.href_re = re.compile('href="(.*?)"', re.IGNORECASE) 
+        # the following filter is applied on the xml data because other characters crash the parser
+        self.filter_xml_expression = re.compile("(&\w+;)|([^\w\d\s~`!@#$%^&*()-_=+{}[\]\\|:;\"'<,>.?/])", re.IGNORECASE)
+
+        self.torrent_types = ['application/x-bittorrent','application/x-download']
+
+    def isTorrentType(self,type):
+        return type in self.torrent_types
+
+    def refresh(self):
+        """Returns a generator for a list of (title,urllib2openedurl_to_torrent)
+        pairs for this feed. TorrentFeedReader instances keep a list of
+        torrent urls in memory and will yield a torrent only once.
+        If the feed points to a torrent url with webserver problems,
+        that url will not be retried.
+        urllib2openedurl_to_torrent may be None if there is a webserver problem.
+        """
+        
+        # Load history from disk
+        if not self.urls_already_seen.readed:
+            self.urls_already_seen.read()
+            self.urls_already_seen.readed = True
+
+        while True:
+            try:
+                feed_socket = urlOpenTimeout(self.feed_url,timeout=20)
+                feed_xml = feed_socket.read()
+                feed_socket.close()
+                break
+            except:
+                yield None, None
+
+        # 14/07/08 boudewijn: some special characters and html code
+        # raise a parser exception. We filter out these character
+        # sequences using a regular expression in the filter_xml
+        # function
+        dom = parseString(self._filter_xml(feed_xml))
+        entries = []
+
+        # The following XML will result in three links with the same title.
+        #
+        # <item>
+        # <title>The title</title>
+        # <link>http://frayja.com/torrent/1</link>
+        # <foobar src="frayja.com/torrent/2">Unused title</foobar>
+        # <moomilk url="frayja.com/torrent/3">Unused title</moomilk>
+        # </items>
+        for item in dom.getElementsByTagName("item"): #+ dom.getElementsByTagName("entry"):
+            title = None
+            links = []
+            child = item.firstChild
+            while child:
+                if child.nodeType == 1: # ELEMENT_NODE (according to the DOM standard)
+                    if child.nodeName == "title" and child.firstChild:
+                        title = child.firstChild.data
+
+                    if child.nodeName == "link" and child.firstChild:
+                        links.append(child.firstChild.data)
+
+                    if child.hasAttribute("src"):
+                        links.append(child.getAttribute("src"))
+
+                    if child.hasAttribute("url"):
+                        links.append(child.getAttribute("url"))
+
+                child = child.nextSibling
+
+            if title and links:
+                entries.extend([(title, link) for link in links])
+
+        if DEBUG:
+            print >>sys.stderr,"subscrip: Parse of RSS returned",len(entries),"previously unseen torrents"
+
+        for title,link in entries:
+            # print title,link
+            try:
+                self.urls_already_seen.add(link)
+                if DEBUG:
+                    print >>sys.stderr,"subscrip: Opening",title,link
+                html_or_tor = urlOpenTimeout(link,timeout=20)
+                found_torrent = False
+                tor_type = html_or_tor.headers.gettype()
+                if self.isTorrentType(tor_type):
+                    torrent = html_or_tor
+                    found_torrent = True
+                    if DEBUG:
+                        print >>sys.stderr,"subscrip: torrent1: Yielding",link
+                    yield title,torrent
+                elif False: # 'html' in tor_type:
+                    html = html_or_tor.read()
+                    hrefs = [match.group(1) for match in self.href_re.finditer(html)]
+                          
+                    urls = []
+                    for url in hrefs:
+                        if not self.urls_already_seen.contains(url):
+                            self.urls_already_seen.add(url)
+                            urls.append(urlparse.urljoin(link,url))
+                    for url in urls:
+                        #print url
+                        try:
+                            if DEBUG:
+                                print >>sys.stderr,"subscrip: torrent2: Opening",url
+                            torrent = urlOpenTimeout(url)
+                            url_type = torrent.headers.gettype()
+                            #print url_type
+                            if self.isTorrentType(url_type):
+                                #print "torrent found:",url
+                                found_torrent = True
+                                if DEBUG:
+                                    print >>sys.stderr,"subscrip: torrent2: Yielding",url
+                                yield title,torrent
+                                break
+                            else:
+                                #its not a torrent after all, but just some html link
+                                if DEBUG:
+                                    print >>sys.stderr,"subscrip:%s not a torrent" % url
+                        except:
+                            #url didn't open
+                            if DEBUG:
+                                print >>sys.stderr,"subscrip:%s did not open" % url
+                if not found_torrent:
+                    yield title,None
+            except GeneratorExit:
+                if DEBUG:
+                    print >>sys.stderr,"subscrip:GENERATOREXIT"
+                # the generator is destroyed. we accept this by returning
+                return
+            except Exception, e:
+                print >> sys.stderr, "rss_client:", e
+                yield title,None
+
+    def shutdown(self):
+        self.urls_already_seen.write()
+
+    def _filter_xml_helper(self, match):
+        """helper function to filter invalid xml"""
+        one = match.group(1)
+        if one in ("&gt;", "&lt;", "&quot;", "&amp;"):
+            return one
+        return "?"
+
+    def _filter_xml(self, xml):
+        """filters out characters and tags that crash xml.dom.minidom.parseString"""
+        return self.filter_xml_expression.sub(self._filter_xml_helper, xml)
+
+class URLHistory:
+
+    read_history_expression = re.compile("(\d+(?:[.]\d+)?)\s+(\w+)", re.IGNORECASE)
+    
+    def __init__(self,filename):
+        self.urls = {}
+        self.filename = filename
+        self.readed = False
+        
+    def add(self,dirtyurl):
+        url = self.clean_link(dirtyurl)
+        self.urls[url] = time.time()
+                    
+    def contains(self,dirtyurl):
+        url = self.clean_link(dirtyurl)
+        
+        # Poor man's filter
+        if url.endswith(".jpg") or url.endswith(".JPG"):
+            return True
+        
+        t = self.urls.get(url,None)
+        if t is None:
+            return False
+        else:
+            now = time.time()
+            return not self.timedout(t,now) # no need to delete
+    
+    def timedout(self,t,now):
+        return (t+URLHIST_TIMEOUT) < now
+    
+    def read(self):
+        if DEBUG:
+            print >>sys.stderr,"subscrip: Reading cached",self.filename
+        try:
+            file_handle = open(self.filename, "rb")
+        except IOError:
+            # file not found...
+            # there is no cache available
+            pass
+        else:
+            re_line = re.compile("^\s*(\d+(?:[.]\d+)?)\s+(.+?)\s*$")
+            now = time.time()
+            for line in file_handle.readlines():
+                match = re_line.match(line)
+                if match:
+                    timestamp, url = match.groups()
+                    timestamp = float(timestamp)
+                    if not self.timedout(timestamp, now):
+                        if DEBUG:
+                            print >>sys.stderr, "subscrip: Cached url is",url
+                        self.urls[url] = timestamp
+                    elif DEBUG:
+                        print >>sys.stderr,"subscrip: Timed out cached url is %s" % url                        
+
+            file_handle.close()
+        
+    def write(self):
+        try:
+            file_handle = open(self.filename, "wb")
+        except IOError:
+            # can't write file
+            traceback.print_exc()
+        else:
+            for url, timestamp in self.urls.iteritems():
+                file_handle.write("%f %s\r\n" % (timestamp, url))
+            file_handle.close()        
+
+    def copy(self):
+        return self.urls.copy()
+
+    def clean_link(self,link):
+        """ Special vuze case """
+        idx = link.find(';jsessionid')
+        if idx == -1:
+            return link
+        else:
+            return link[:idx]
+    
+def usercallback(infohash,metadata,filename):
+    pass
diff --git a/instrumentation/next-share/BaseLib/Test/API/contentdir/file.avi b/instrumentation/next-share/BaseLib/Test/API/contentdir/file.avi
new file mode 100644 (file)
index 0000000..08a5020
--- /dev/null
@@ -0,0 +1,1371 @@
+#!/usr/bin/python\r
+\r
+#########################################################################\r
+#\r
+# Author : Choopan RATTANAPOKA, Jie Yang, Arno Bakker\r
+#\r
+# Description : Main ABC [Yet Another Bittorrent Client] python script.\r
+#               you can run from source code by using\r
+#               >python abc.py\r
+#               need Python, WxPython in order to run from source code.\r
+#########################################################################\r
+\r
+# Arno: M2Crypto overrides the method for https:// in the\r
+# standard Python libraries. This causes msnlib to fail and makes Tribler\r
+# freakout when "http://www.tribler.org/version" is redirected to\r
+# "https://www.tribler.org/version/" (which happened during our website\r
+# changeover) Until M2Crypto 0.16 is patched I'll restore the method to the\r
+# original, as follows.\r
+#\r
+# This must be done in the first python file that is started.\r
+#\r
+\r
+import urllib\r
+original_open_https = urllib.URLopener.open_https\r
+import M2Crypto\r
+urllib.URLopener.open_https = original_open_https\r
+\r
+import sys, locale\r
+import os\r
+import wx, commands\r
+from wx import xrc\r
+#import hotshot\r
+\r
+if sys.platform == "darwin":\r
+    # on Mac, we can only load VLC libraries\r
+    # relative to the location of tribler.py\r
+    os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))\r
+\r
+from threading import Thread, Timer, Event,currentThread,enumerate\r
+from time import time, ctime, sleep\r
+from traceback import print_exc, print_stack\r
+from cStringIO import StringIO\r
+import urllib\r
+\r
+from interconn import ServerListener, ClientPassParam\r
+from launchmanycore import ABCLaunchMany\r
+\r
+from ABC.Toolbars.toolbars import ABCBottomBar2, ABCStatusBar, ABCMenuBar, ABCToolBar\r
+from ABC.GUI.menu import ABCMenu\r
+from ABC.Scheduler.scheduler import ABCScheduler\r
+\r
+from webservice import WebListener\r
+\r
+if (sys.platform == 'win32'):\r
+    from Dialogs.regdialog import RegCheckDialog\r
+\r
+from ABC.GUI.list import ManagedList\r
+from Utility.utility import Utility\r
+from Utility.constants import * #IGNORE:W0611\r
+\r
+from Tribler.__init__ import tribler_init, tribler_done\r
+from BitTornado.__init__ import product_name\r
+from safeguiupdate import DelayedInvocation,FlaglessDelayedInvocation\r
+import webbrowser\r
+from Tribler.Dialogs.MugshotManager import MugshotManager\r
+from Tribler.vwxGUI.GuiUtility import GUIUtility\r
+import Tribler.vwxGUI.updateXRC as updateXRC\r
+from Tribler.Video.VideoPlayer import VideoPlayer,return_feasible_playback_modes,PLAYBACKMODE_INTERNAL\r
+from Tribler.Video.VideoServer import VideoHTTPServer\r
+from Tribler.Dialogs.GUIServer import GUIServer\r
+from Tribler.vwxGUI.TasteHeart import set_tasteheart_bitmaps\r
+from Tribler.vwxGUI.perfBar import set_perfBar_bitmaps\r
+from Tribler.Dialogs.BandwidthSelector import BandwidthSelector\r
+from Tribler.Subscriptions.rss_client import TorrentFeedThread\r
+from Tribler.Dialogs.activities import *\r
+from Tribler.DecentralizedTracking import mainlineDHT\r
+from Tribler.DecentralizedTracking.rsconvert import RawServerConverter\r
+from Tribler.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker\r
+\r
+from Tribler.notification import init as notification_init\r
+from Tribler.vwxGUI.font import *\r
+from Tribler.Web2.util.update import Web2Updater\r
+\r
+from Tribler.CacheDB.CacheDBHandler import BarterCastDBHandler\r
+from Tribler.Overlay.permid import permid_for_user\r
+from BitTornado.download_bt1 import EVIL\r
+\r
+DEBUG = False\r
+ALLOW_MULTIPLE = False\r
+start_time = 0\r
+start_time2 = 0\r
+\r
+\r
+################################################################\r
+#\r
+# Class: FileDropTarget\r
+#\r
+# To enable drag and drop for ABC list in main menu\r
+#\r
+################################################################\r
+class FileDropTarget(wx.FileDropTarget): \r
+    def __init__(self, utility):\r
+        # Initialize the wx.FileDropTarget Object \r
+        wx.FileDropTarget.__init__(self) \r
+        # Store the Object Reference for dropped files \r
+        self.utility = utility\r
+      \r
+    def OnDropFiles(self, x, y, filenames):\r
+        for filename in filenames:\r
+            self.utility.queue.addtorrents.AddTorrentFromFile(filename)\r
+        return True\r
+\r
+\r
+##############################################################\r
+#\r
+# Class : ABCList\r
+#\r
+# ABC List class that contains the torrent list\r
+#\r
+############################################################## \r
+class ABCList(ManagedList):\r
+    def __init__(self, parent):\r
+        style = wx.LC_REPORT|wx.LC_VRULES|wx.CLIP_CHILDREN\r
+        \r
+        prefix = 'column'\r
+        minid = 4\r
+        maxid = 26\r
+        exclude = []\r
+        rightalign = [COL_PROGRESS, \r
+                      COL_SIZE, \r
+                      COL_DLSPEED, \r
+                      COL_ULSPEED, \r
+                      COL_RATIO, \r
+                      COL_PEERPROGRESS, \r
+                      COL_DLSIZE, \r
+                      COL_ULSIZE, \r
+                      COL_TOTALSPEED]\r
+\r
+        ManagedList.__init__(self, parent, style, prefix, minid, maxid, exclude, rightalign)\r
+        \r
+        dragdroplist = FileDropTarget(self.utility)\r
+        self.SetDropTarget(dragdroplist)\r
+        \r
+        self.lastcolumnsorted = -1\r
+        self.reversesort = 0\r
+\r
+        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)\r
+        self.Bind(wx.EVT_LIST_COL_CLICK, self.OnColLeftClick)\r
+\r
+        self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnItemSelected)\r
+        \r
+        # Bring up advanced details on left double click\r
+        self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)\r
+        \r
+        # Bring up local settings on middle double click\r
+        self.Bind(wx.EVT_MIDDLE_DCLICK, self.utility.actions[ACTION_LOCALUPLOAD].action)\r
+\r
+    # Do thing when keys are pressed down\r
+    def OnKeyDown(self, event):\r
+        keycode = event.GetKeyCode()\r
+        if event.CmdDown():\r
+            if keycode == ord('a') or keycode == ord('A'):\r
+                # Select all files (CTRL-A)\r
+                self.selectAll()\r
+            elif keycode == ord('x') or keycode == ord('X'):\r
+                # Invert file selection (CTRL-X)\r
+                self.invertSelection()\r
+        elif keycode == wx.WXK_RETURN or keycode == wx.WXK_NUMPAD_ENTER:\r
+            # Open advanced details (Enter)\r
+            self.utility.actions[ACTION_DETAILS].action()\r
+        elif keycode == wx.WXK_SPACE:\r
+            # Open local settings (Space)\r
+            self.utility.actions[ACTION_LOCALUPLOAD].action()\r
+        elif keycode == 399:\r
+            # Open right-click menu (windows menu key)\r
+            self.OnItemSelected()\r
+        \r
+        event.Skip()\r
+        \r
+    def OnColLeftClick(self, event):\r
+        rank = event.GetColumn()\r
+        colid = self.columns.getIDfromRank(rank)\r
+        if colid == self.lastcolumnsorted:\r
+            self.reversesort = 1 - self.reversesort\r
+        else:\r
+            self.reversesort = 0\r
+        self.lastcolumnsorted = colid\r
+        self.utility.queue.sortList(colid, self.reversesort)       \r
+        \r
+    def selectAll(self):\r
+        self.updateSelected(select = range(0, self.GetItemCount()))\r
+\r
+    def updateSelected(self, unselect = None, select = None):\r
+        if unselect is not None:\r
+            for index in unselect:\r
+                self.SetItemState(index, 0, wx.LIST_STATE_SELECTED)\r
+        if select is not None:\r
+            for index in select:\r
+                self.Select(index)\r
+        self.SetFocus()\r
+\r
+    def getTorrentSelected(self, firstitemonly = False, reverse = False):\r
+        queue = self.utility.queue\r
+        \r
+        torrentselected = []\r
+        for index in self.getSelected(firstitemonly, reverse):\r
+            ABCTorrentTemp = queue.getABCTorrent(index = index)\r
+            if ABCTorrentTemp is not None:\r
+                torrentselected.append(ABCTorrentTemp)\r
+        return torrentselected\r
+\r
+    def OnItemSelected(self, event = None):\r
+        selected = self.getTorrentSelected()\r
+        if not selected:\r
+            return\r
+\r
+        popupmenu = ABCMenu(self.utility, 'menu_listrightclick')\r
+\r
+        # Popup the menu.  If an item is selected then its handler\r
+        # will be called before PopupMenu returns.\r
+        if event is None:\r
+            # use the position of the first selected item (key event)\r
+            ABCTorrentTemp = selected[0]\r
+            position = self.GetItemPosition(ABCTorrentTemp.listindex)\r
+        else:\r
+            # use the cursor position (mouse event)\r
+            position = event.GetPosition()\r
+        \r
+        self.PopupMenu(popupmenu, position)\r
+\r
+    def OnLeftDClick(self, event):\r
+        event.Skip()\r
+        try:\r
+            self.utility.actions[ACTION_DETAILS].action()\r
+        except:\r
+            print_exc()\r
+\r
+\r
+##############################################################\r
+#\r
+# Class : ABCPanel\r
+#\r
+# Main ABC Panel class\r
+#\r
+############################################################## \r
+class ABCPanel(wx.Panel):\r
+    def __init__(self, parent):\r
+        style = wx.CLIP_CHILDREN\r
+        wx.Panel.__init__(self, parent, -1, style = style)\r
+\r
+        #Debug Output.\r
+        sys.stdout.write('Preparing GUI.\n');\r
+        \r
+        self.utility    = parent.utility\r
+        self.utility.window = self\r
+        self.queue = self.utility.queue\r
+               \r
+        # List of deleting torrents events that occur when the RateManager is active\r
+        # Such events are processed after the RateManager finishes\r
+        # postponedevents is a list of tuples : each tuple contains the method of ABCPanel to be called to\r
+        # deal with the event and the event.\r
+        self.postponedevents = []\r
+\r
+        #Manual Bittorrent Adding UI\r
+        ##############################\r
+        colSizer = wx.BoxSizer(wx.VERTICAL)\r
+        \r
+        self.list = ABCList(self)\r
+        self.utility.list = self.list\r
+        colSizer.Add(self.list, 1, wx.ALL|wx.EXPAND, 3)\r
+            \r
+        """\r
+        # Add status bar\r
+        statbarbox = wx.BoxSizer(wx.HORIZONTAL)\r
+        self.sb_buttons = ABCStatusButtons(self,self.utility)\r
+        statbarbox.Add(self.sb_buttons, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0)\r
+        self.abc_sb = ABCStatusBar(self,self.utility)\r
+        statbarbox.Add(self.abc_sb, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0)\r
+        colSizer.Add(statbarbox, 0, wx.ALL|wx.EXPAND, 0)\r
+        """\r
+        \r
+        #colSizer.Add(self.contentPanel, 1, wx.ALL|wx.EXPAND, 3)\r
+        self.SetSizer(colSizer)\r
+        self.SetAutoLayout(True)\r
+        \r
+        self.list.SetFocus()\r
+        \r
+        \r
+    def getSelectedList(self, event = None):\r
+        return self.list\r
+\r
+    ######################################\r
+    # Update ABC on-the-fly\r
+    ######################################\r
+    def updateColumns(self, force = False):\r
+        # Update display in column for inactive torrent\r
+        for ABCTorrentTemp in self.utility.torrents["all"]:\r
+            ABCTorrentTemp.updateColumns(force = force)\r
\r
+      \r
+##############################################################\r
+#\r
+# Class : ABCTaskBarIcon\r
+#\r
+# Task Bar Icon\r
+#\r
+############################################################## \r
+class ABCTaskBarIcon(wx.TaskBarIcon):\r
+    def __init__(self, parent):\r
+        wx.TaskBarIcon.__init__(self)\r
+        \r
+        self.utility = parent.utility\r
+        \r
+        self.TBMENU_RESTORE = wx.NewId()\r
+\r
+        # setup a taskbar icon, and catch some events from it\r
+        self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, parent.onTaskBarActivate)\r
+        self.Bind(wx.EVT_MENU, parent.onTaskBarActivate, id = self.TBMENU_RESTORE)\r
+               \r
+        self.updateIcon(False)\r
+        \r
+    def updateIcon(self,iconifying = False):\r
+        remove = True\r
+        \r
+        mintray = self.utility.config.Read('mintray', "int")\r
+        if (mintray >= 2) or ((mintray >= 1) and iconifying):\r
+            remove = False\r
+        \r
+        if remove and self.IsIconInstalled():\r
+            self.RemoveIcon()\r
+        elif not remove and not self.IsIconInstalled():\r
+            self.SetIcon(self.utility.icon, product_name)\r
+        \r
+    def CreatePopupMenu(self):        \r
+        menu = wx.Menu()\r
+        \r
+        self.utility.actions[ACTION_STOPALL].addToMenu(menu, bindto = self)\r
+        self.utility.actions[ACTION_UNSTOPALL].addToMenu(menu, bindto = self)\r
+        menu.AppendSeparator()\r
+        menu.Append(self.TBMENU_RESTORE, self.utility.lang.get('showabcwindow'))\r
+        self.utility.actions[ACTION_EXIT].addToMenu(menu, bindto = self)\r
+        return menu\r
+\r
+\r
+##############################################################\r
+#\r
+# Class : ABColdFrame\r
+#\r
+# Main ABC Frame class that contains menu and menu bar management\r
+# and contains ABCPanel\r
+#\r
+############################################################## \r
+class ABCOldFrame(wx.Frame,FlaglessDelayedInvocation):\r
+    def __init__(self, ID, params, utility):\r
+        self.utility = utility\r
+        #self.utility.frame = self\r
+        \r
+        title = "Old Interface"\r
+        # Get window size and position from config file\r
+        size = (400,400)\r
+        style = wx.DEFAULT_FRAME_STYLE | wx.CLIP_CHILDREN\r
+        \r
+        wx.Frame.__init__(self, None, ID, title, size = size, style = style)\r
+        \r
+        FlaglessDelayedInvocation.__init__(self)\r
+\r
+        self.GUIupdate = True\r
+\r
+        self.window = ABCPanel(self)\r
+        self.Bind(wx.EVT_SET_FOCUS, self.onFocus)\r
+        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\r
+        \r
+        self.tb = ABCToolBar(self) # new Tribler gui has no toolbar\r
+        self.SetToolBar(self.tb)\r
+\r
+            \r
+    def onFocus(self, event = None):\r
+        if event is not None:\r
+            event.Skip()\r
+        self.window.getSelectedList(event).SetFocus()\r
+\r
+    def OnCloseWindow(self, event = None):\r
+        self.Hide()\r
+\r
+# Custom class loaded by XRC\r
+class ABCFrame(wx.Frame, DelayedInvocation):\r
+    def __init__(self, *args):\r
+        if len(args) == 0:\r
+            pre = wx.PreFrame()\r
+            # the Create step is done by XRC.\r
+            self.PostCreate(pre)\r
+            self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate)\r
+        else:\r
+            wx.Frame.__init__(self, args[0], args[1], args[2], args[3])\r
+            self._PostInit()\r
+        \r
+    def OnCreate(self, event):\r
+        self.Unbind(wx.EVT_WINDOW_CREATE)\r
+        wx.CallAfter(self._PostInit)\r
+        event.Skip()\r
+        return True\r
+    \r
+    def _PostInit(self):\r
+        # Do all init here\r
+        self.guiUtility = GUIUtility.getInstance()\r
+        self.utility = self.guiUtility.utility\r
+        self.params = self.guiUtility.params\r
+        self.utility.frame = self\r
+        \r
+        title = self.utility.lang.get('title') + \\r
+                " " + \\r
+                self.utility.lang.get('version')\r
+        \r
+        # Get window size and position from config file\r
+        size, position = self.getWindowSettings()\r
+        style = wx.DEFAULT_FRAME_STYLE | wx.CLIP_CHILDREN\r
+        \r
+        self.SetSize(size)\r
+        self.SetPosition(position)\r
+        self.SetTitle(title)\r
+        tt = self.GetToolTip()\r
+        if tt is not None:\r
+            tt.SetTip('')\r
+        \r
+        #wx.Frame.__init__(self, None, ID, title, position, size, style = style)\r
+        \r
+        self.doneflag = Event()\r
+        DelayedInvocation.__init__(self)\r
+\r
+        dragdroplist = FileDropTarget(self.utility)\r
+        self.SetDropTarget(dragdroplist)\r
+\r
+        self.tbicon = None\r
+\r
+        # Arno: see ABCPanel\r
+        #self.abc_sb = ABCStatusBar(self,self.utility)\r
+        #self.SetStatusBar(self.abc_sb)\r
+\r
+        """\r
+        # Add status bar\r
+        statbarbox = wx.BoxSizer(wx.HORIZONTAL)\r
+        self.sb_buttons = ABCStatusButtons(self,self.utility)\r
+        statbarbox.Add(self.sb_buttons, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0)\r
+        self.abc_sb = ABCStatusBar(self,self.utility)\r
+        statbarbox.Add(self.abc_sb, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0)\r
+        #colSizer.Add(statbarbox, 0, wx.ALL|wx.EXPAND, 0)\r
+        self.SetStatusBar(statbarbox)\r
+        """\r
+        \r
+        \r
+        try:\r
+            self.SetIcon(self.utility.icon)\r
+        except:\r
+            pass\r
+\r
+        # Don't update GUI as often when iconized\r
+        self.GUIupdate = True\r
+\r
+        # Start the scheduler before creating the ListCtrl\r
+        self.utility.queue  = ABCScheduler(self.utility)\r
+        #self.window = ABCPanel(self)\r
+        #self.abc_sb = self.window.abc_sb\r
+        \r
+        \r
+        self.oldframe = ABCOldFrame(-1, self.params, self.utility)\r
+        self.oldframe.Refresh()\r
+        self.oldframe.Layout()\r
+        #self.oldframe.Show(True)\r
+        \r
+        self.window = self.GetChildren()[0]\r
+        self.window.utility = self.utility\r
+        \r
+        """\r
+        self.list = ABCList(self.window)\r
+        self.list.Show(False)\r
+        self.utility.list = self.list\r
+        print self.window.GetName()\r
+        self.window.list = self.list\r
+        self.utility.window = self.window\r
+        """\r
+        #self.window.sb_buttons = ABCStatusButtons(self,self.utility)\r
+        \r
+        self.utility.window.postponedevents = []\r
+        \r
+        # Menu Options\r
+        ############################\r
+        menuBar = ABCMenuBar(self)\r
+        if sys.platform == "darwin":\r
+            wx.App.SetMacExitMenuItemId(wx.ID_CLOSE)\r
+        self.SetMenuBar(menuBar)\r
+        \r
+        #self.tb = ABCToolBar(self) # new Tribler gui has no toolbar\r
+        #self.SetToolBar(self.tb)\r
+        \r
+        self.buddyFrame = None\r
+        self.fileFrame = None\r
+        self.buddyFrame_page = 0\r
+        self.buddyFrame_size = (800, 500)\r
+        self.buddyFrame_pos = None\r
+        self.fileFrame_size = (800, 500)\r
+        self.fileFrame_pos = None\r
+        \r
+        # Menu Events \r
+        ############################\r
+\r
+        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\r
+#        self.Bind(wx.EVT_MENU, self.OnMenuExit, id = wx.ID_CLOSE)\r
+\r
+        # leaving here for the time being:\r
+        # wxMSW apparently sends the event to the App object rather than\r
+        # the top-level Frame, but there seemed to be some possibility of\r
+        # change\r
+        self.Bind(wx.EVT_QUERY_END_SESSION, self.OnCloseWindow)\r
+        self.Bind(wx.EVT_END_SESSION, self.OnCloseWindow)\r
+        \r
+        try:\r
+            self.tbicon = ABCTaskBarIcon(self)\r
+        except:\r
+            pass\r
+        self.Bind(wx.EVT_ICONIZE, self.onIconify)\r
+        self.Bind(wx.EVT_SET_FOCUS, self.onFocus)\r
+        self.Bind(wx.EVT_SIZE, self.onSize)\r
+        self.Bind(wx.EVT_MAXIMIZE, self.onSize)\r
+        #self.Bind(wx.EVT_IDLE, self.onIdle)\r
+        \r
+        # Start up the controller\r
+        self.utility.controller = ABCLaunchMany(self.utility)\r
+        self.utility.controller.start()\r
+        \r
+        # Start up mainline DHT\r
+        # Arno: do this in a try block, as khashmir gives a very funky\r
+        # error when started from a .dmg (not from cmd line) on Mac. In particular\r
+        # it complains that it cannot find the 'hex' encoding method when\r
+        # hstr.encode('hex') is called, and hstr is a string?!\r
+        #\r
+        try:\r
+            rsconvert = RawServerConverter(self.utility.controller.get_rawserver())\r
+            mainlineDHT.init('', self.utility.listen_port, self.utility.getConfigPath(),rawserver=rsconvert)\r
+            # Create torrent-liveliness checker based on DHT\r
+            c = mainlineDHTChecker.getInstance()\r
+            c.register(mainlineDHT.dht)\r
+        except:\r
+            print_exc()\r
+\r
+        # Give GUI time to set up stuff\r
+        wx.Yield()\r
+\r
+        #if server start with params run it\r
+        #####################################\r
+        \r
+        if DEBUG:\r
+            print >>sys.stderr,"abc: wxFrame: params is",self.params\r
+        \r
+        if self.params[0] != "":\r
+            success, msg, ABCTorrentTemp = self.utility.queue.addtorrents.AddTorrentFromFile(self.params[0],caller=CALLER_ARGV)\r
+\r
+        self.utility.queue.postInitTasks(self.params)\r
+\r
+        if self.params[0] != "":\r
+            # Update torrent.list, but after having read the old list of torrents, otherwise we get interference\r
+            ABCTorrentTemp.torrentconfig.writeSrc(False)\r
+            self.utility.torrentconfig.Flush()\r
+\r
+        self.videoFrame = None\r
+        feasible = return_feasible_playback_modes(self.utility.getPath())\r
+        if PLAYBACKMODE_INTERNAL in feasible:\r
+            # This means vlc is available\r
+            from Tribler.Video.EmbeddedPlayer import VideoFrame\r
+            self.videoFrame = VideoFrame(self)\r
+\r
+            #self.videores = xrc.XmlResource("Tribler/vwxGUI/MyPlayer.xrc")\r
+            #self.videoframe = self.videores.LoadFrame(None, "MyPlayer")\r
+            #self.videoframe.Show()\r
+            \r
+            videoplayer = VideoPlayer.getInstance()\r
+            videoplayer.set_parentwindow(self.videoFrame)\r
+        else:\r
+            videoplayer = VideoPlayer.getInstance()\r
+            videoplayer.set_parentwindow(self)\r
+\r
+        sys.stdout.write('GUI Complete.\n')\r
+\r
+        self.Show(True)\r
+        \r
+        \r
+        # Just for debugging: add test permids and display top 5 peers from which the most is downloaded in bartercastdb\r
+        bartercastdb = BarterCastDBHandler()\r
+        mypermid = bartercastdb.my_permid\r
+        \r
+        if DEBUG:\r
+            bartercastdb.incrementItem((mypermid, "testpermid_1"), 'uploaded', 1024)\r
+            bartercastdb.incrementItem((mypermid, "testpermid_1"), 'downloaded', 20000)\r
+                    \r
+            bartercastdb.incrementItem((mypermid, "testpermid_2"), 'uploaded', 40000)\r
+            bartercastdb.incrementItem((mypermid, "testpermid_2"), 'downloaded', 60000)\r
+            \r
+            top = bartercastdb.getTopNPeers(5)['top']\r
+    \r
+            print 'My Permid: ', permid_for_user(mypermid)\r
+            \r
+            print 'Top 5 BarterCast peers:'\r
+            print '======================='\r
+    \r
+            i = 1\r
+            for (permid, up, down) in top:\r
+                print '%2d: %15s  -  %10d up  %10d down' % (i, bartercastdb.getName(permid), up, down)\r
+                i += 1\r
+        \r
+        \r
+        # Check to see if ABC is associated with torrents\r
+        #######################################################\r
+        if (sys.platform == 'win32'):\r
+            if self.utility.config.Read('associate', "boolean"):\r
+                if self.utility.regchecker and not self.utility.regchecker.testRegistry():\r
+                    dialog = RegCheckDialog(self)\r
+                    dialog.ShowModal()\r
+                    dialog.Destroy()\r
+\r
+        self.checkVersion()\r
+\r
+        \r
+    def checkVersion(self):\r
+        t = Timer(2.0, self._checkVersion)\r
+        t.start()\r
+        \r
    def _checkVersion(self):
        # Runs on the Timer thread started by checkVersion(); must not touch
        # the GUI directly (hence upgradeCallback below delegates to MainThread).
        my_version = self.utility.getVersion()
        try:
            # Expected format: first line starts with the latest version string,
            # optional second line carries the download/update URL.
            # NOTE(review): no timeout is set on urlopen; a stalled server can
            # keep this thread alive for a long time -- confirm acceptable.
            curr_status = urllib.urlopen('http://tribler.org/version').readlines()
            line1 = curr_status[0]
            if len(curr_status) > 1:
                self.update_url = curr_status[1].strip()
            else:
                self.update_url = 'http://tribler.org'
            _curr_status = line1.split()
            self.curr_version = _curr_status[0]
            if self.newversion(self.curr_version, my_version):
                # Arno: we are a separate thread, delegate GUI updates to MainThread
                self.upgradeCallback()

            # Also check new version of web2definitions for youtube etc. search
            Web2Updater(self.utility).checkUpdate()
        except Exception,e:
            # Any failure (network, parse) is non-fatal: log and move on.
            print >> sys.stderr, "Tribler: Version check failed", ctime(time()), str(e)
            #print_exc()
+            \r
+    def newversion(self, curr_version, my_version):\r
+        curr = curr_version.split('.')\r
+        my = my_version.split('.')\r
+        if len(my) >= len(curr):\r
+            nversion = len(my)\r
+        else:\r
+            nversion = len(curr)\r
+        for i in range(nversion):\r
+            if i < len(my):\r
+                my_v = int(my[i])\r
+            else:\r
+                my_v = 0\r
+            if i < len(curr):\r
+                curr_v = int(curr[i])\r
+            else:\r
+                curr_v = 0\r
+            if curr_v > my_v:\r
+                return True\r
+            elif curr_v < my_v:\r
+                return False\r
+        return False\r
+\r
    def upgradeCallback(self):
        # Called from the version-check thread: marshal OnUpgrade onto the
        # GUI (main) thread instead of touching widgets here.
        self.invokeLater(self.OnUpgrade)
        # TODO: warn multiple times?
+    \r
    def OnUpgrade(self, event=None):
        # GUI thread: surface "new version available" in the activity field.
        self.setActivity(ACT_NEW_VERSION)
+            \r
    def onFocus(self, event = None):
        # Focus handler: nothing to do except let the event propagate.
        if event is not None:
            event.Skip()
        #self.window.getSelectedList(event).SetFocus()
+        \r
+    def setGUIupdate(self, update):\r
+        oldval = self.GUIupdate\r
+        self.GUIupdate = update\r
+        \r
+        if self.GUIupdate and not oldval:\r
+            # Force an update of all torrents\r
+            for torrent in self.utility.torrents["all"]:\r
+                torrent.updateColumns()\r
+                torrent.updateColor()\r
+\r
+\r
    def taskbarCallback(self):
        # Invoked from the task-bar icon (non-GUI context); marshal the
        # restore action onto the GUI thread.
        self.invokeLater(self.onTaskBarActivate,[])
+\r
+\r
+    #######################################\r
+    # minimize to tray bar control\r
+    #######################################\r
    def onTaskBarActivate(self, event = None):
        """Restore the window from the tray/minimized state and resume GUI updates."""
        self.Iconize(False)
        self.Show(True)
        self.Raise()

        if self.tbicon is not None:
            # Switch the tray icon back to the "not minimized" state.
            self.tbicon.updateIcon(False)

        #self.window.list.SetFocus()

        # Resume updating GUI
        self.setGUIupdate(True)
+\r
+    def onIconify(self, event = None):\r
+        # This event handler is called both when being minimalized\r
+        # and when being restored.\r
+        if DEBUG:\r
+            if event is not None:\r
+                print  >> sys.stderr,"abc: onIconify(",event.Iconized()\r
+            else:\r
+                print  >> sys.stderr,"abc: onIconify event None"\r
+        if event.Iconized():                                                                                                               \r
+            if (self.utility.config.Read('mintray', "int") > 0\r
+                and self.tbicon is not None):\r
+                self.tbicon.updateIcon(True)\r
+                self.Show(False)\r
+\r
+            # Don't update GUI while minimized\r
+            self.setGUIupdate(False)\r
+        else:\r
+            self.setGUIupdate(True)\r
+        if event is not None:\r
+            event.Skip()\r
+\r
+    def onSize(self, event = None):\r
+        # Arno: On Windows when I enable the tray icon and then change\r
+        # virtual desktop (see MS DeskmanPowerToySetup.exe)\r
+        # I get a onIconify(event.Iconized()==True) event, but when\r
+        # I switch back, I don't get an event. As a result the GUIupdate\r
+        # remains turned off. The wxWidgets wiki on the TaskBarIcon suggests\r
+        # catching the onSize event. \r
+        \r
+        if DEBUG:\r
+            if event is not None:\r
+                print  >> sys.stderr,"abc: onSize:",self.GetSize()\r
+            else:\r
+                print  >> sys.stderr,"abc: onSize: None"\r
+        self.setGUIupdate(True)\r
+        if event is not None:\r
+            if event.GetEventType() == wx.EVT_MAXIMIZE:\r
+                self.window.SetClientSize(self.GetClientSize())\r
+            event.Skip()\r
+        \r
+\r
+        # Refresh subscreens\r
+        self.refreshNeeded = True\r
+        self.guiUtility.refreshOnResize()\r
+        \r
    def onIdle(self, event = None):
        """
        Only refresh screens (especially detailsPanel) when resizes are finished.
        This gives less flickering, but doesn't look pretty, so the EVT_IDLE
        binding was commented out (see the Bind calls earlier in this class).
        """
        if self.refreshNeeded:
            self.guiUtility.refreshOnResize()
            self.refreshNeeded = False
+        \r
+    def getWindowSettings(self):\r
+        width = self.utility.config.Read("window_width")\r
+        height = self.utility.config.Read("window_height")\r
+        try:\r
+            size = wx.Size(int(width), int(height))\r
+        except:\r
+            size = wx.Size(710, 400)\r
+        \r
+        x = self.utility.config.Read("window_x")\r
+        y = self.utility.config.Read("window_y")\r
+        if (x == "" or y == ""):\r
+            position = wx.DefaultPosition\r
+        else:\r
+            position = wx.Point(int(x), int(y))\r
+            \r
+        return size, position     \r
+        \r
+    def saveWindowSettings(self):\r
+        width, height = self.GetSizeTuple()\r
+        x, y = self.GetPositionTuple()\r
+        self.utility.config.Write("window_width", width)\r
+        self.utility.config.Write("window_height", height)\r
+        self.utility.config.Write("window_x", x)\r
+        self.utility.config.Write("window_y", y)\r
+\r
+        self.utility.config.Flush()\r
+       \r
+    ##################################\r
+    # Close Program\r
+    ##################################\r
+               \r
    def OnCloseWindow(self, event = None):
        """Orderly application shutdown.

        Bound to EVT_CLOSE / EVT_QUERY_END_SESSION / EVT_END_SESSION and also
        callable directly (event=None). Optionally asks for confirmation,
        then stops the web server and scheduler, saves window geometry,
        destroys auxiliary frames and the tray icon, and finally destroys
        this frame. Statement order here is deliberate (see Arno's notes
        below) -- do not reorder casually.
        """
        if event != None:
            nr = event.GetEventType()
            # Map the numeric event-type id to a readable name for logging.
            lookup = { wx.EVT_CLOSE.evtType[0]: "EVT_CLOSE", wx.EVT_QUERY_END_SESSION.evtType[0]: "EVT_QUERY_END_SESSION", wx.EVT_END_SESSION.evtType[0]: "EVT_END_SESSION" }
            if nr in lookup: nr = lookup[nr]
            print "Closing due to event ",nr
            print >>sys.stderr,"Closing due to event ",nr
        else:
            print "Closing untriggered by event"

        # Don't do anything if the event gets called twice for some reason
        if self.utility.abcquitting:
            return

        # Check to see if we can veto the shutdown
        # (might not be able to in case of shutting down windows)
        if event is not None:
            try:
                if event.CanVeto() and self.utility.config.Read('confirmonclose', "boolean") and not event.GetEventType() == wx.EVT_QUERY_END_SESSION.evtType[0]:
                    dialog = wx.MessageDialog(None, self.utility.lang.get('confirmmsg'), self.utility.lang.get('confirm'), wx.OK|wx.CANCEL)
                    result = dialog.ShowModal()
                    dialog.Destroy()
                    if result != wx.ID_OK:
                        event.Veto()
                        return
            except:
                data = StringIO()
                print_exc(file = data)
                sys.stderr.write(data.getvalue())
                pass

        # Point of no return: mark quitting and stop GUI refreshes.
        self.utility.abcquitting = True
        self.GUIupdate = False

        self.guiUtility.guiOpen.clear()

        # Close the Torrent Maker
        self.utility.actions[ACTION_MAKETORRENT].closeWin()

        try:
            self.utility.webserver.stop()
        except:
            data = StringIO()
            print_exc(file = data)
            sys.stderr.write(data.getvalue())
            pass

        try:
            # tell scheduler to close all active thread
            self.utility.queue.clearScheduler()
        except:
            data = StringIO()
            print_exc(file = data)
            sys.stderr.write(data.getvalue())
            pass

        try:
            # Restore the window before saving size and position
            # (Otherwise we'll get the size of the taskbar button and a negative position)
            self.onTaskBarActivate()
            self.saveWindowSettings()
        except:
            #print_exc(file=sys.stderr)
            print_exc()

        # Destroy secondary frames; best-effort, failures are ignored.
        try:
            if self.buddyFrame is not None:
                self.buddyFrame.Destroy()
            if self.fileFrame is not None:
                self.fileFrame.Destroy()
            if self.videoFrame is not None:
                self.videoFrame.Destroy()
        except:
            pass

        self.oldframe.Destroy()

        try:
            if self.tbicon is not None:
                self.tbicon.RemoveIcon()
                self.tbicon.Destroy()
            self.Destroy()
        except:
            data = StringIO()
            print_exc(file = data)
            sys.stderr.write(data.getvalue())
            pass

        # Arno: at the moment, Tribler gets a segmentation fault when the
        # tray icon is always enabled. This SEGV occurs in the wx mainloop
        # which is entered as soon as we leave this method. Hence I placed
        # tribler_done() here, so the database are closed properly
        # before the crash.
        #
        # Arno, 2007-02-28: Preferably this should be moved to the main
        # run() method below, that waits a while to allow threads to finish.
        # Ideally, the database should still be open while they finish up.
        # Because of the crash problem with the icontray this is the safer
        # place.
        #
        # Arno, 2007-08-10: When a torrentfile is passed on the command line,
        # the client will crash just after this point due to unknown reasons
        # (it even does it when we don't look at the cmd line args at all!)
        # Hence, for safety, I close the DB here already.
        #if sys.platform == 'linux2':
        #

        #tribler_done(self.utility.getConfigPath())

        if DEBUG:
            print >>sys.stderr,"abc: OnCloseWindow END"

        if DEBUG:
            # List any threads still alive to help diagnose hangs at exit.
            ts = enumerate()
            for t in ts:
                print >>sys.stderr,"abc: Thread still running",t.getName(),"daemon",t.isDaemon()
+\r
+\r
+\r
    def onWarning(self,exc):
        """Show a modal dialog for a non-fatal startup error `exc`."""
        msg = self.utility.lang.get('tribler_startup_nonfatalerror')
        msg += str(exc.__class__)+':'+str(exc)
        dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING)
        result = dlg.ShowModal()
        dlg.Destroy()
+\r
+    def onUPnPError(self,upnp_type,listenport,error_type,exc=None,listenproto='TCP'):\r
+\r
+        if error_type == 0:\r
+            errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error1')\r
+        elif error_type == 1:\r
+            errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error2')+unicode(str(exc))+self.utility.lang.get('tribler_upnp_error2_postfix')\r
+        elif error_type == 2:\r
+            errormsg = unicode(' UPnP mode '+str(upnp_type)+' ')+self.utility.lang.get('tribler_upnp_error3')\r
+        else:\r
+            errormsg = unicode(' UPnP mode '+str(upnp_type)+' Unknown error')\r
+\r
+        msg = self.utility.lang.get('tribler_upnp_error_intro')\r
+        msg += listenproto+' '\r
+        msg += str(listenport)\r
+        msg += self.utility.lang.get('tribler_upnp_error_intro_postfix')\r
+        msg += errormsg\r
+        msg += self.utility.lang.get('tribler_upnp_error_extro') \r
+\r
+        dlg = wx.MessageDialog(None, msg, self.utility.lang.get('tribler_warning'), wx.OK|wx.ICON_WARNING)\r
+        result = dlg.ShowModal()\r
+        dlg.Destroy()\r
+\r
    def onReachable(self,event=None):
        """ Called by GUI thread """
        # Our listen port was confirmed reachable from outside: flip the
        # firewall-status indicator and update its tooltip accordingly.
        if self.firewallStatus is not None:
            self.firewallStatus.setToggled(True)
            tt = self.firewallStatus.GetToolTip()
            if tt is not None:
                tt.SetTip(self.utility.lang.get('reachable_tooltip'))
+\r
+\r
    def setActivity(self,type,msg=u''):
        """Display an activity line (translated prefix + optional msg) in the
        status message field. Must be called on the GUI main thread; a
        warning plus stack trace is printed otherwise."""
        if currentThread().getName() != "MainThread":
            print  >> sys.stderr,"abc: setActivity thread",currentThread().getName(),"is NOT MAIN THREAD"
            print_stack()

        # NOTE(review): if `type` matches none of these ACT_* constants,
        # `prefix` stays unbound and the code below raises NameError --
        # verify all callers pass a known activity type.
        if type == ACT_NONE:
            prefix = u''
            msg = u''
        elif type == ACT_UPNP:
            prefix = self.utility.lang.get('act_upnp')
        elif type == ACT_REACHABLE:
            prefix = self.utility.lang.get('act_reachable')
        elif type == ACT_GET_EXT_IP_FROM_PEERS:
            prefix = self.utility.lang.get('act_get_ext_ip_from_peers')
        elif type == ACT_MEET:
            prefix = self.utility.lang.get('act_meet')
        elif type == ACT_GOT_METADATA:
            prefix = self.utility.lang.get('act_got_metadata')
        elif type == ACT_RECOMMEND:
            prefix = self.utility.lang.get('act_recommend')
        elif type == ACT_DISK_FULL:
            prefix = self.utility.lang.get('act_disk_full')
        elif type == ACT_NEW_VERSION:
            prefix = self.utility.lang.get('act_new_version')
        if msg == u'':
            text = prefix
        else:
            text = unicode( prefix+u' '+msg)

        if DEBUG:
            print  >> sys.stderr,"abc: Setting activity",`text`,"EOT"
        self.messageField.SetLabel(text)
+\r
+\r
class TorThread(Thread):
    """Daemon thread that launches the bundled Tor binary and blocks on its
    output pipe until the process exits. Output itself is discarded via a
    shell redirect to the platform's null device."""

    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(True)
        self.setName("TorThread"+self.getName())
        # Pipes returned by os.popen2 in run(); NOTE(review): os.popen2
        # returns (child_stdin, child_stdout), so 'child_out' below is
        # actually the pipe TO the child and 'child_in' the pipe FROM it --
        # the names look swapped but the reads below use child_in correctly.
        self.child_out = None
        self.child_in = None

    def run(self):
        # Start tor and keep reading until EOF (i.e. until tor terminates).
        try:
            if DEBUG:
                print >>sys.stderr,"TorThread starting",currentThread().getName()
            if sys.platform == "win32":
                # Not "Nul:" but "nul" is /dev/null on Win32
                cmd = 'tor.exe'
                sink = 'nul'
            elif sys.platform == "darwin":
                cmd = 'tor.mac'
                sink = '/dev/null'
            else:
                cmd = 'tor'
                sink = '/dev/null'

            (self.child_out,self.child_in) = os.popen2( "%s --log err-err > %s 2>&1" % (cmd,sink), 'b' )
            while True:
                if DEBUG:
                    print >>sys.stderr,"TorThread reading",currentThread().getName()

                # read() blocks until the pipe closes; an empty result means
                # the tor process has exited.
                msg = self.child_in.read()
                if DEBUG:
                    print >>sys.stderr,"TorThread: tor said",msg
                if len(msg) == 0:
                    break
                sleep(1)
        except:
            print_exc()

    def shutdown(self):
        # Close both pipe ends; closing the child's stdin signals tor to stop.
        if self.child_out is not None:
            self.child_out.close()
        if self.child_in is not None:
            self.child_in.close()
+            \r
+\r
+##############################################################\r
+#\r
+# Class : ABCApp\r
+#\r
+# Main ABC application class that contains ABCFrame Object\r
+#\r
+##############################################################\r
class ABCApp(wx.App,FlaglessDelayedInvocation):
    """Main wx application object.

    Shows the splash screen in OnInit, then (via wx.CallAfter) runs PostInit,
    which builds the Tribler session, background services and the XRC-based
    main frame. Fatal startup errors are funneled through onError().
    """
    def __init__(self, x, params, single_instance_checker, abcpath):
        global start_time, start_time2
        start_time2 = time()
        #print "[StartUpDebug]----------- from ABCApp.__init__ ----------Tribler starts up at", ctime(start_time2), "after", start_time2 - start_time
        self.params = params
        self.single_instance_checker = single_instance_checker
        self.abcpath = abcpath
        self.error = None
        self.torthread = None
        wx.App.__init__(self, x)

    def OnInit(self):
        # Minimal synchronous init: Utility, locale, splash screen. The heavy
        # lifting is deferred to PostInit so the splash appears immediately.
        try:
            self.utility = Utility(self.abcpath)
            # Set locale to determine localisation
            locale.setlocale(locale.LC_ALL, '')

            sys.stdout.write('Client Starting Up.\n')
            sys.stdout.write('Build: ' + self.utility.lang.get('build') + '\n')

            bm = wx.Bitmap(os.path.join(self.utility.getPath(),'icons','splash.jpg'),wx.BITMAP_TYPE_JPEG)
            #s = wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN
            #s = wx.SIMPLE_BORDER|wx.FRAME_NO_TASKBAR|wx.FRAME_FLOAT_ON_PARENT
            self.splash = wx.SplashScreen(bm, wx.SPLASH_CENTRE_ON_SCREEN|wx.SPLASH_TIMEOUT, 1000, None, -1)

            wx.CallAfter(self.PostInit)
            return True

        except Exception,e:
            print_exc()
            self.error = e
            self.onError()
            return False


    def PostInit(self):
        # Full startup: core session, singletons, optional web server/Tor,
        # then the XRC GUI. Runs on the GUI thread after OnInit returns.
        try:
            tribler_init(self.utility.getConfigPath(),self.utility.getPath(),self.db_exception_handler)
            self.utility.setTriblerVariables()
            self.utility.postAppInit()

            # Singleton for executing tasks that are too long for GUI thread and
            # network thread
            self.guiserver = GUIServer.getInstance()
            self.guiserver.register()

            # Singleton for management of user's mugshots (i.e. icons/display pictures)
            self.mm = MugshotManager.getInstance()
            self.mm.register(self.utility.getConfigPath(),self.utility.getPath())

            # H4x0r a bit
            set_tasteheart_bitmaps(self.utility.getPath())
            set_perfBar_bitmaps(self.utility.getPath())

            # Put it here so an error is shown in the startup-error popup
            self.serverlistener = ServerListener(self.utility)

            # Check webservice for autostart webservice
            #######################################################
            WebListener(self.utility)
            if self.utility.webconfig.Read("webautostart", "boolean"):
                self.utility.webserver.start()

            # Start single instance server listener
            ############################################
            self.serverthread   = Thread(target = self.serverlistener.start)
            self.serverthread.setDaemon(True)
            self.serverthread.setName("SingleInstanceServer"+self.serverthread.getName())
            self.serverthread.start()

            self.videoplayer = VideoPlayer.getInstance()
            self.videoplayer.register(self.utility)
            self.videoserver = VideoHTTPServer.getInstance()
            self.videoserver.background_serve()

            notification_init( self.utility )

            # Change config when experiment ends, before ABCLaunchMany is created
            global EVIL
            if EVIL and time() > 1190099288.0:
                EVIL = False
                end = self.utility.config.Read('lure_ended', "boolean")
                if not end:
                    self.utility.config.Write('lure_ended', 1, "boolean")
                    self.utility.config.Write('tor_enabled', 0, "boolean")
                    self.utility.config.Write('ut_pex_max_addrs_from_peer', 16)

                    msg = "The Tribler download accelerator using the TOR network has been turned off. For more information visit http://TV.seas.Harvard.edu/"
                    dlg = wx.MessageDialog(None, msg, "Tribler Warning", wx.OK|wx.ICON_INFORMATION)
                    result = dlg.ShowModal()
                    dlg.Destroy()

            enabled = self.utility.config.Read('tor_enabled', "boolean")
            if enabled:
                self.torthread = TorThread()
                self.torthread.start()

            #
            # Read and create GUI from .xrc files
            #
            #self.frame = ABCFrame(-1, self.params, self.utility)
            self.guiUtility = GUIUtility.getInstance(self.utility, self.params)
            updateXRC.main([os.path.join(self.utility.getPath(),'Tribler','vwxGUI')])
            self.res = xrc.XmlResource(os.path.join(self.utility.getPath(),'Tribler','vwxGUI','MyFrame.xrc'))
            self.guiUtility.xrcResource = self.res
            self.frame = self.res.LoadFrame(None, "MyFrame")
            self.guiUtility.frame = self.frame
            self.guiUtility.scrollWindow = xrc.XRCCTRL(self.frame, "level0")
            self.guiUtility.mainSizer = self.guiUtility.scrollWindow.GetSizer()
            self.frame.topBackgroundRight = xrc.XRCCTRL(self.frame, "topBG3")
            self.guiUtility.scrollWindow.SetScrollbars(1,1,1024,768)
            self.guiUtility.scrollWindow.SetScrollRate(15,15)
            self.frame.mainButtonPersons = xrc.XRCCTRL(self.frame, "mainButtonPersons")


            self.frame.numberPersons = xrc.XRCCTRL(self.frame, "numberPersons")
            numperslabel = xrc.XRCCTRL(self.frame, "persons")
            self.frame.numberFiles = xrc.XRCCTRL(self.frame, "numberFiles")
            numfileslabel = xrc.XRCCTRL(self.frame, "files")
            self.frame.messageField = xrc.XRCCTRL(self.frame, "messageField")
            self.frame.firewallStatus = xrc.XRCCTRL(self.frame, "firewallStatus")
            tt = self.frame.firewallStatus.GetToolTip()
            if tt is not None:
                tt.SetTip(self.utility.lang.get('unknownreac_tooltip'))

            if sys.platform == "linux2":
                self.frame.numberPersons.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE))
                self.frame.numberFiles.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE))
                self.frame.messageField.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE))
                numperslabel.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE))
                numfileslabel.SetFont(wx.Font(9,FONTFAMILY,FONTWEIGHT,wx.NORMAL,False,FONTFACE))
            """
            searchfilebut = xrc.XRCCTRL(self.frame, "bt257cC")
            searchfilebut.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked)
            searchpersbut = xrc.XRCCTRL(self.frame, "bt258cC")
            searchpersbut.Bind(wx.EVT_LEFT_UP, self.guiUtility.buttonClicked)     
            
            self.frame.searchtxtctrl = xrc.XRCCTRL(self.frame, "tx220cCCC")
            """

            #self.frame.Refresh()
            #self.frame.Layout()
            self.frame.Show(True)
#===============================================================================
#            global start_time2
#            current_time = time()
#            print "\n\n[StartUpDebug]-----------------------------------------"
#            print "[StartUpDebug]"
#            print "[StartUpDebug]----------- from ABCApp.OnInit ----------Tribler frame is shown after", current_time-start_time2
#            print "[StartUpDebug]"
#            print "[StartUpDebug]-----------------------------------------\n\n"
#===============================================================================

            # GUI start
            # - load myFrame 
            # - load standardGrid
            # - gui utility > button mainButtonFiles = clicked


            self.Bind(wx.EVT_QUERY_END_SESSION, self.frame.OnCloseWindow)
            self.Bind(wx.EVT_END_SESSION, self.frame.OnCloseWindow)


            # NOTE(review): upload-bandwidth dialog is disabled by hardcoding
            # asked = True; the original config lookup is kept commented out.
            #asked = self.utility.config.Read('askeduploadbw', 'boolean')
            asked = True
            if not asked:
                dlg = BandwidthSelector(self.frame,self.utility)
                result = dlg.ShowModal()
                if result == wx.ID_OK:
                    ulbw = dlg.getUploadBandwidth()
                    self.utility.config.Write('maxuploadrate',ulbw)
                    self.utility.config.Write('maxseeduploadrate',ulbw)
                    self.utility.config.Write('askeduploadbw','1')
                dlg.Destroy()

            # Arno, 2007-05-03: wxWidgets 2.8.3.0 and earlier have the MIME-type for .bmp 
            # files set to 'image/x-bmp' whereas 'image/bmp' is the official one.
            try:
                bmphand = None
                hands = wx.Image.GetHandlers()
                for hand in hands:
                    #print "Handler",hand.GetExtension(),hand.GetType(),hand.GetMimeType()
                    if hand.GetMimeType() == 'image/x-bmp':
                        bmphand = hand
                        break
                #wx.Image.AddHandler()
                if bmphand is not None:
                    bmphand.SetMimeType('image/bmp')
            except:
                # wx < 2.7 don't like wx.Image.GetHandlers()
                print_exc()

            # Must be after ABCLaunchMany is created
            self.torrentfeed = TorrentFeedThread.getInstance()
            self.torrentfeed.register(self.utility)
            self.torrentfeed.start()

            #print "DIM",wx.GetDisplaySize()
            #print "MM",wx.GetDisplaySizeMM()

            wx.CallAfter(self.startWithRightView)

        except Exception,e:
            print_exc()
            self.error = e
            self.onError()
            return False

        return True

    def onError(self,source=None):
        """Show a fatal-error dialog for self.error; `source` is an optional
        prefix describing where the error came from."""
        # Don't use language independence stuff, self.utility may not be
        # valid.
        msg = "Unfortunately, Tribler ran into an internal error:\n\n"
        if source is not None:
            msg += source
        msg += str(self.error.__class__)+':'+str(self.error)
        msg += '\n'
        msg += 'Please see the FAQ on www.tribler.org on how to act.'
        dlg = wx.MessageDialog(None, msg, "Tribler Fatal Error", wx.OK|wx.ICON_ERROR)
        result = dlg.ShowModal()
        print_exc()
        dlg.Destroy()

    def MacOpenFile(self,filename):
        # Mac OS X "open with" hook: queue the torrent file for download.
        self.utility.queue.addtorrents.AddTorrentFromFile(filename)

    def OnExit(self):
        # Shut down background services started in PostInit.
        self.torrentfeed.shutdown()
        if self.torthread is not None:
            self.torthread.shutdown()
        mainlineDHT.deinit()

        if not ALLOW_MULTIPLE:
            del self.single_instance_checker
        ClientPassParam("Close Connection")
        return 0

    def db_exception_handler(self,e):
        """Callback for database-layer errors; filters known non-fatal and
        duplicate errors, then reports the rest on the GUI thread."""
        if DEBUG:
            print >> sys.stderr,"abc: Database Exception handler called",e,"value",e.args,"#"
        try:
            # NOTE(review): assumes e.args has at least 2 elements; the
            # except below swallows IndexError for exceptions that don't.
            if e.args[1] == "DB object has been closed":
                return # We caused this non-fatal error, don't show.
            if self.error is not None and self.error.args[1] == e.args[1]:
                return # don't repeat same error
        except:
            print >> sys.stderr, "abc: db_exception_handler error", e, type(e)
            print_exc()
            #print_stack()
        self.error = e
        self.invokeLater(self.onError,[],{'source':"The database layer reported: "})

    def getConfigPath(self):
        # Expose the session config path (used by run() after MainLoop).
        return self.utility.getConfigPath()

    def startWithRightView(self):
        # If a torrent was passed on the command line, open the Library view.
        if self.params[0] != "":
            self.guiUtility.standardLibraryOverview()
+    \r
+        \r
class DummySingleInstanceChecker:
    """Linux stand-in for wx.SingleInstanceChecker (which mangles stderr on
    wxPython 2.8.1.1 -- see run() below). Detects other instances with pgrep."""

    def __init__(self,basename):
        # basename is accepted for interface compatibility but unused.
        pass

    def IsAnotherRunning(self):
        "Uses pgrep to find other tribler.py processes"
        # If no pgrep available, it will always start tribler
        # (the error text then forms a single output line, so the count is 1).
        progressInfo = commands.getoutput('pgrep -fl tribler.py | grep -v pgrep')
        numProcesses = len(progressInfo.split('\n'))
        if DEBUG:
            print 'ProgressInfo: %s, num: %d' % (progressInfo, numProcesses)
        # More than one matching line means another instance exists.
        return numProcesses > 1
+                \r
+        \r
+##############################################################\r
+#\r
+# Main Program Start Here\r
+#\r
+##############################################################\r
def run(params = None):
    """Program entry point.

    Enforces single-instance operation (passing any torrent argument to the
    already-running instance), otherwise creates the ABCApp and runs the wx
    main loop until shutdown.
    """
    global start_time
    start_time = time()
    if params is None:
        params = [""]

    # Command-line arguments (e.g. a torrent file) override the defaults.
    if len(sys.argv) > 1:
        params = sys.argv[1:]

    # Create single instance semaphore
    # Arno: On Linux and wxPython-2.8.1.1 the SingleInstanceChecker appears
    # to mess up stderr, i.e., I get IOErrors when writing to it via print_exc()
    #
    # TEMPORARILY DISABLED on Linux
    if sys.platform != 'linux2':
        single_instance_checker = wx.SingleInstanceChecker("tribler-" + wx.GetUserId())
    else:
        single_instance_checker = DummySingleInstanceChecker("tribler-")

    #print "[StartUpDebug]---------------- 1", time()-start_time
    if not ALLOW_MULTIPLE and single_instance_checker.IsAnotherRunning():
        #Send  torrent info to abc single instance
        ClientPassParam(params[0])
        #print "[StartUpDebug]---------------- 2", time()-start_time
    else:
        abcpath = os.path.abspath(os.path.dirname(sys.argv[0]))
        # Arno: don't chdir to allow testing as other user from other dir.
        #os.chdir(abcpath)

        # Launch first abc single instance
        app = ABCApp(0, params, single_instance_checker, abcpath)
        configpath = app.getConfigPath()
#        print "[StartUpDebug]---------------- 3", time()-start_time
        app.MainLoop()

        print "Client shutting down. Sleeping for a few seconds to allow other threads to finish"
        sleep(4)

        # This is the right place to close the database, unfortunately Linux has
        # a problem, see ABCFrame.OnCloseWindow
        #
        #if sys.platform != 'linux2':
        #    tribler_done(configpath)
        # Hard exit: skips atexit handlers so lingering threads can't hang us.
        os._exit(0)
+\r
# Allow running this module directly as the application entry point.
if __name__ == '__main__':
    run()
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/API/contentdir/file.txt b/instrumentation/next-share/BaseLib/Test/API/contentdir/file.txt
new file mode 100644 (file)
index 0000000..646d5c3
--- /dev/null
@@ -0,0 +1,5 @@
+
+
+Slightly Bigger
+Test file
+
diff --git a/instrumentation/next-share/BaseLib/Test/API/ec.pem b/instrumentation/next-share/BaseLib/Test/API/ec.pem
new file mode 100644 (file)
index 0000000..08094ad
--- /dev/null
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MG0CAQEEHVrPzIQwJStZ6MU/RO6dqen9HQo1IekEfp7YGGtdoAcGBSuBBAAaoUAD
+PgAEACFQOtu9k8A6l5+jHWpeao9AOc/mGxUXOMom4yoHANFgI1vQoKwUZCdLBo24
+QpToY7CS3EblruEri5gk
+-----END EC PRIVATE KEY-----
diff --git a/instrumentation/next-share/BaseLib/Test/API/ecpub.pem b/instrumentation/next-share/BaseLib/Test/API/ecpub.pem
new file mode 100644 (file)
index 0000000..4a0d05f
--- /dev/null
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACFQOtu9k8A6l5+jHWpeao9AOc/mGxUX
+OMom4yoHANFgI1vQoKwUZCdLBo24QpToY7CS3EblruEri5gk
+-----END PUBLIC KEY-----
diff --git a/instrumentation/next-share/BaseLib/Test/API/file.wmv b/instrumentation/next-share/BaseLib/Test/API/file.wmv
new file mode 100644 (file)
index 0000000..ff5f772
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/API/file.wmv differ
diff --git a/instrumentation/next-share/BaseLib/Test/API/file2.wmv b/instrumentation/next-share/BaseLib/Test/API/file2.wmv
new file mode 100644 (file)
index 0000000..4097171
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/API/file2.wmv differ
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_api.bat b/instrumentation/next-share/BaseLib/Test/API/test_api.bat
new file mode 100644 (file)
index 0000000..e793c91
--- /dev/null
@@ -0,0 +1,7 @@
+set PYTHONPATH=..\..\..;.\r
+\r
+rem Run the API test suite on Windows. The seeding tests take the test\r
+rem method name as argument because each method must run in a fresh\r
+rem Python interpreter (singleton classes cannot be re-created in-process).\r
+python test_tdef.py\r
+python test_seeding.py test_normal_torrent\r
+python test_seeding.py test_merkle_torrent\r
+python test_tracking.py\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_api.sh b/instrumentation/next-share/BaseLib/Test/API/test_api.sh
new file mode 100755 (executable)
index 0000000..da571cf
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/sh -x
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../../..:"$PYTHONPATH"
+export PYTHONPATH
+
+#python test_tdef.py
+python test_seeding.py
+
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_remote_torrent.py b/instrumentation/next-share/BaseLib/Test/API/test_remote_torrent.py
new file mode 100644 (file)
index 0000000..b2d1387
--- /dev/null
@@ -0,0 +1,396 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# Like test_secure_overlay, we start a new python interpreter for each test.
+# Although we don't have the singleton problem here, we do need to do this as the
+# HTTPServer that MyTracker uses won't relinquish the listen socket, causing 
+# "address in use" errors in the next test. This is probably due to the fact that
+# MyTracker has a thread mixed in, as a listensocket.close() normally releases it
+# (according to lsof).
+#
+
+import unittest
+import os
+import sys
+import time
+from traceback import print_exc
+import socket
+
+from BaseLib.Test.test_as_server import TestAsServer
+from btconn import BTConnection
+from olconn import OLConnection
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BitTornado.BT1.convert import toint
+
+DEBUG=True
+
+class TestDownloadHelp(TestAsServer):
+    """ 
+    Testing download helping (the "2fast" cooperative-download protocol):
+    this test poses as a coordinator peer and walks the Tribler instance
+    under test through the ASK_FOR_HELP, METADATA, RESERVE_PIECES /
+    PIECES_RESERVED and STOP_DOWNLOAD_HELP overlay-message sequence,
+    feeding it both well-formed and deliberately malformed messages.
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        # Fixed sleep: the server side has no ready-signal we can wait on.
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+
+
+
+        
+    def setUpMyListenSockets(self):
+        # Start our server side, to which Tribler will try to connect.
+        # myss on 4810 plays the fake seeder; myss2 on 3726 accepts the
+        # data connection Tribler opens back to us as coordinator.
+        self.mylistenport = 4810
+        self.myss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss.bind(('', self.mylistenport))
+        self.myss.listen(1)
+
+        self.mylistenport2 = 3726
+        self.myss2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss2.bind(('', self.mylistenport2))
+        self.myss2.listen(1)
+
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())  
+        
+        # This is the infohash of the torrent in test/extend_hs_dir
+        self.infohash = '\xccg\x07\xe2\x9e!]\x16\xae{\xb8\x10?\xf9\xa5\xf9\x07\xfdBk'
+        self.torrentfile = os.path.join('extend_hs_dir','dummydata.merkle.torrent')
+
+        # TEMPARNO: retrieve this from Session
+        # NOTE(review): TorrentDBHandler is not in this module's visible
+        # imports -- presumably pulled in via a wildcard import; verify,
+        # otherwise this line raises NameError.
+        self.torrent_db = TorrentDBHandler.getInstance()
+        
+        # Add two torrents that will match our query and one that shouldn't
+        # NOTE(review): get_default_torrent() is not defined in this file --
+        # assumed to come from TestAsServer; TODO confirm.
+        torrent = self.get_default_torrent('Hallo S01E10')
+        ih = 'b' * 20
+        self.torrent_db.addTorrent(ih,torrent)
+              
+
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+        # NOTE(review): self.mytracker is never assigned in this file --
+        # presumably created by the base class or a missing setUp step;
+        # confirm, otherwise tearDown itself raises AttributeError.
+        self.mytracker.shutdown()
+        self.tearDownMyListenSockets()
+
+
+    def tearDownMyListenSockets(self):
+        # Close both fake-peer listen sockets opened in setUpMyListenSockets().
+        self.myss.close()
+        self.myss2.close()
+
+
+    #
+    # Good 2fast
+    #
+    def singtest_good_2fast(self):
+        # Happy path: every message generator in the dict is well-formed.
+        genresdict = self.get_genresdict()
+        print >>sys.stderr,"test: good ASK_FOR_HELP"
+        self._test_2fast(genresdict)
+    
+
+    def get_genresdict(self):
+        # Maps message ID -> (generator function, expect-success flag).
+        # Bad-case tests override individual entries with malformed generators.
+        genresdict = {}
+        genresdict[ASK_FOR_HELP] = (self.create_good_dlhelp,True)
+        genresdict[METADATA] = (self.create_good_metadata,True)
+        genresdict[PIECES_RESERVED] = (self.create_good_pieces_reserved,True)
+        genresdict[STOP_DOWNLOAD_HELP] = (self.create_good_stop_dlhelp,True)
+        return genresdict
+
+    #
+    # Bad 2fast
+    #
+    def singtest_bad_2fast_dlhelp(self):
+        genresdict = self.get_genresdict()
+        genresdict[ASK_FOR_HELP] = (self.create_bad_dlhelp_not_infohash,False)
+        print >>sys.stderr,"test: bad dlhelp"
+        self._test_2fast(genresdict)
+        
+    def singtest_bad_2fast_metadata_not_bdecodable(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_bdecodable,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_not_dict1(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict1,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_not_dict2(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict2,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    def singtest_bad_2fast_metadata_empty_dict(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_empty_dict,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_wrong_dict_keys(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_wrong_dict_keys,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_bad_torrent1(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent1,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    def singtest_bad_2fast_metadata_bad_torrent2(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent2,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_bad_torrent3(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent3,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    
+    def _test_2fast(self,genresdict):
+        """ 
+            test ASK_FOR_HELP, METADATA, PIECES_RESERVED and STOP_DOWNLOAD_HELP
+            sequence. For each step, a (generator,good) pair from genresdict
+            is used; on a "bad" step the peer is expected to close the
+            connection (empty recv) and the test returns early.
+        """
+        # 1. Establish overlay connection to Tribler
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,mylistenport=self.mylistenport2)
+        
+        (func,good) = genresdict[ASK_FOR_HELP]
+        msg = func()
+        s.send(msg)
+        if good:
+            resp = s.recv()
+            self.assert_(resp[0] == GET_METADATA)
+            self.check_get_metadata(resp[1:])
+            print >>sys.stderr,"test: Got GET_METADATA for torrent, good"
+        else:
+            # Peer must drop the connection on a malformed message.
+            resp = s.recv()
+            self.assert_(len(resp)==0)
+            s.close()
+            return
+
+        (func,good) = genresdict[METADATA]
+        msg = func()
+        s.send(msg)
+
+        if good:
+            # 2. Accept the data connection Tribler wants to establish with us, the coordinator
+            # NOTE(review): self.myid2 is never assigned in this file --
+            # presumably set by TestAsServer; TODO confirm.
+            self.myss2.settimeout(10.0)
+            conn, addr = self.myss2.accept()
+            s3 = BTConnection('',0,conn,user_infohash=self.infohash,myid=self.myid2)
+            s3.read_handshake_medium_rare()
+            
+            msg = UNCHOKE
+            s3.send(msg)
+            print >>sys.stderr,"test: Got data connection to us, as coordinator, good"
+        else:
+            resp = s.recv()
+            self.assert_(len(resp)==0)
+            s.close()
+            return
+
+        # 3. Our tracker says there is another peer (also us) on port 4810
+        # Now accept a connection on that port and pretend we're a seeder
+        self.myss.settimeout(10.0)
+        conn, addr = self.myss.accept()
+        options = '\x00\x00\x00\x00\x00\x00\x00\x00'
+        # NOTE(review): self.myid is never assigned in this file either; see above.
+        s2 = BTConnection('',0,conn,user_option_pattern=options,user_infohash=self.infohash,myid=self.myid)
+        s2.read_handshake_medium_rare()
+        
+        numpieces = 10 # must correspond to the torrent in test/extend_hs_dir
+        # NOTE(review): Bitfield is not in this module's visible imports
+        # (needs BaseLib.Core.BitTornado.bitfield.Bitfield) -- verify it is
+        # supplied by one of the wildcard imports.
+        b = Bitfield(numpieces)
+        for i in range(numpieces):
+            b[i] = True
+        self.assert_(b.complete())
+        msg = BITFIELD+b.tostring()
+        s2.send(msg)
+        msg = UNCHOKE
+        s2.send(msg)
+        print >>sys.stderr,"test: Got BT connection to us, as fake seeder, good"
+
+        # 4. Await a RESERVE_PIECES message on the overlay connection
+        resp = s.recv()
+        self.assert_(resp[0] == RESERVE_PIECES)
+        pieces = self.check_reserve_pieces(resp[1:])
+        print >>sys.stderr,"test: Got RESERVE_PIECES, good"
+
+        (func,good) = genresdict[PIECES_RESERVED]
+        
+        # 5. Reply with PIECES_RESERVED
+        msg = func(pieces)
+        s.send(msg)
+        
+        if good:
+            # 6. Await REQUEST on fake seeder
+            while True:
+                resp = s2.recv()
+                self.assert_(len(resp) > 0)
+                print "test: Fake seeder got message",getMessageName(resp[0])
+                if resp[0] == REQUEST:
+                    self.check_request(resp[1:],pieces)
+                    print >>sys.stderr,"test: Fake seeder got REQUEST for reserved piece, good"
+                    break
+        else:
+            resp = s.recv()
+            self.assert_(len(resp)==0)
+            s.close()
+            return
+
+        (func,good) = genresdict[STOP_DOWNLOAD_HELP]
+        # 5. Reply with STOP_DOWNLOAD_HELP
+        msg = func()
+        s.send(msg)
+
+        # the other side should close the connection, whether the msg was good or bad
+        resp = s.recv()
+        self.assert_(len(resp)==0)
+        s.close()
+        
+
+    def create_good_dlhelp(self):
+        # Well-formed ASK_FOR_HELP: message ID followed by the 20-byte infohash.
+        return ASK_FOR_HELP+self.infohash
+
+    def check_get_metadata(self,data):
+        infohash = bdecode(data) # is bencoded for unknown reason, can't change it
+        self.assert_(infohash == self.infohash)
+
+    def create_good_metadata(self):
+        # Wrap the real torrent file from disk in a METADATA reply dict.
+        f = open(self.torrentfile,"rb")
+        data = f.read()
+        f.close() 
+        
+        d = self.create_good_metadata_dict(data)
+        bd = bencode(d)
+        return METADATA+bd
+
+    def create_good_metadata_dict(self,data):
+        # Minimal set of keys the receiver expects in a METADATA payload.
+        d = {}
+        d['torrent_hash'] = self.infohash 
+        d['metadata'] = data
+        d['leecher'] = 1
+        d['seeder'] = 1
+        d['last_check_time'] = int(time.time())
+        d['status'] = 'good'
+        return d
+
+    def check_reserve_pieces(self,data):
+        # torrent_hash + 1-byte all_or_nothing + bencode([piece num,...])
+        self.assert_(len(data) > 21)
+        infohash = data[0:20]
+        allflag = data[20]
+        plist = bdecode(data[21:])
+        
+        self.assert_(infohash == self.infohash)
+        # NOTE(review): ListType is not in this module's visible imports
+        # (needs "from types import ListType") -- verify a wildcard import
+        # supplies it, otherwise this raises NameError.
+        self.assert_(type(plist) == ListType)
+        return plist
+
+    def create_good_pieces_reserved(self,pieces):
+        payload = self.infohash + bencode(pieces)
+        return PIECES_RESERVED + payload
+
+    def check_request(self,data,pieces):
+        # First 4 bytes of a REQUEST are the piece index; it must be one we reserved.
+        piece = toint(data[0:4])
+        self.assert_(piece in pieces)
+
+    def create_good_stop_dlhelp(self):
+        return STOP_DOWNLOAD_HELP+self.infohash
+
+
+    #
+    # Bad ASK_FOR_HELP
+    #    
+
+    def create_bad_dlhelp_not_infohash(self):
+        # Payload shorter than the 20-byte infohash the receiver requires.
+        return ASK_FOR_HELP+"481"
+
+    #
+    # Bad METADATA
+    #
+
+    def create_bad_metadata_not_bdecodable(self):
+        return METADATA+"bla"
+
+    def create_bad_metadata_not_dict1(self):
+        d  = 481
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_not_dict2(self):
+        d  = []
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_empty_dict(self):
+        d = {}
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_wrong_dict_keys(self):
+        d = {}
+        d['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        d['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_bad_torrent1(self):
+        # Correct outer dict, but 'metadata' is not a valid torrent at all.
+        d = self.create_good_metadata_dict(None)
+        d['metadata'] = '\x12\x34' * 100 # random data
+        bd = bencode(d)
+        return METADATA+bd
+
+    def create_bad_metadata_bad_torrent2(self):
+        # 'metadata' bdecodes, but to an empty dict (no 'info' key).
+        torrent = {}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+    def create_bad_metadata_bad_torrent3(self):
+        # 'info' key present but holds an int instead of a dict.
+        torrent = {'info':481}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_dl.py <method name>"
+    else:
+        suite.addTest(TestDownloadHelp(sys.argv[1]))
+    
+    return suite
+
+def main():
+    # Strip extra argv entries so unittest runs test_suite() instead of
+    # trying to interpret the method name as a test spec itself.
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_seeding.py b/instrumentation/next-share/BaseLib/Test/API/test_seeding.py
new file mode 100644 (file)
index 0000000..8f08bd8
--- /dev/null
@@ -0,0 +1,172 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import unittest
+import os
+import sys
+import time
+import socket
+import tempfile
+
+from BaseLib.Test.test_as_server import TestAsServer
+from BaseLib.Test.btconn import BTConnection
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.TorrentDef import *
+from BaseLib.Core.DownloadConfig import *
+from BaseLib.Core.Session import *
+from BaseLib.Core.simpledefs import *
+
+DEBUG=True
+
+class TestSeeding(TestAsServer):
+    """ 
+    Testing seeding via new tribler API: seed a single file (normal and
+    Merkle torrents), verify via a raw BT connection that the session is
+    seeding, then download it through a second Session and byte-compare
+    the result against the source file.
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        # Fixed sleep: no ready-signal is available from the Session.
+        print >>sys.stderr,"test: Giving Session time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: Session should have started up"
+    
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        
+        self.config.set_megacache(False)        
+        self.config.set_internal_tracker(True)
+        #self.config.set_tracker_nat_check(0)
+        
+        # Listen port for the second (downloading) session.
+        self.mylistenport = 4810
+
+    def setUpPostSession(self):
+        pass
+    
+    def setup_seeder(self,merkle):
+        # Build a torrent for file.wmv (Merkle or normal depending on the
+        # flag), announce it at the internal tracker and start seeding it.
+        self.tdef = TorrentDef()
+        self.sourcefn = os.path.join(os.getcwd(),"file.wmv")
+        self.tdef.add_content(self.sourcefn)
+        self.tdef.set_create_merkle_torrent(merkle)
+        self.tdef.set_tracker(self.session.get_internal_tracker_url())
+        self.tdef.finalize()
+
+        self.torrentfn = os.path.join(self.session.get_state_dir(),"gen.torrent")
+        self.tdef.save(self.torrentfn)
+        
+        print >>sys.stderr,"test: setup_seeder: name is",self.tdef.metainfo['info']['name']
+
+        self.dscfg = DownloadStartupConfig()
+        self.dscfg.set_dest_dir(os.getcwd())
+        d = self.session.start_download(self.tdef,self.dscfg)
+        
+        d.set_state_callback(self.seeder_state_callback)
+        
+        print >>sys.stderr,"test: Giving Download time to startup"
+        time.sleep(5)
+
+
+    def seeder_state_callback(self,ds):
+        # Progress logger; (1.0,False) = call again in 1s, no detailed stats.
+        d = ds.get_download()
+        print >>sys.stderr,"test: seeder:",`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress()
+        return (1.0,False)
+
+
+    def test_normal_torrent(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        self.setup_seeder(False)
+        self.subtest_is_seeding()
+        self.subtest_download()
+
+    def test_merkle_torrent(self):
+        # Same flow as test_normal_torrent but with a Merkle torrent.
+        self.setup_seeder(True)
+        self.subtest_is_seeding()
+        self.subtest_download()
+
+    def subtest_is_seeding(self):
+        # Open a raw BT connection; any reply (an EXTEND handshake) shows
+        # the session accepts connections for this infohash.
+        infohash = self.tdef.get_infohash()
+        print >> sys.stderr,"test: Connect to see if seeding this infohash"
+        myid = '*' * 20
+        s = BTConnection('localhost',self.hisport,myid=myid,user_infohash=infohash)
+        s.read_handshake_medium_rare()
+        
+        s.send(CHOKE)
+        try:
+            s.s.settimeout(10.0)
+            print >> sys.stderr,"test: Receive to see if seeding this infohash"
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            self.assert_(resp[0] == EXTEND)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, peer didn't reply"
+            self.assert_(False)
+        s.close()
+        
+        
+    def subtest_download(self):
+        """ Now download the file via another Session """
+        
+        self.config2 = self.config.copy() # not really necessary
+        self.config_path2 = tempfile.mkdtemp()
+        self.config2.set_state_dir(self.config_path2)
+        self.config2.set_listen_port(self.mylistenport)
+        self.session2 = Session(self.config2,ignore_singleton=True)
+        
+        # Allow session2 to start
+        print >>sys.stderr,"test: Sleeping 3 secs to let Session2 start"
+        time.sleep(3)
+        
+        tdef2 = TorrentDef.load(self.torrentfn)
+
+        dscfg2 = DownloadStartupConfig()
+        dscfg2.set_dest_dir(self.config_path2)
+        
+        d = self.session2.start_download(tdef2,dscfg2)
+        d.set_state_callback(self.downloader_state_callback)
+        # NOTE(review): timing-based -- the download must reach SEEDING
+        # (and so trigger the byte-compare below) within these 20 seconds,
+        # otherwise the test silently passes without checking anything.
+        time.sleep(20)
+    
+    def downloader_state_callback(self,ds):
+        d = ds.get_download()
+        print >>sys.stderr,"test: download:",`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress()
+        
+        if ds.get_status() == DLSTATUS_SEEDING:
+            # File is in
+            destfn = os.path.join(self.config_path2,"file.wmv")
+            f = open(destfn,"rb")
+            realdata = f.read()
+            f.close()
+            f = open(self.sourcefn,"rb")
+            expdata = f.read()
+            f.close()
+            
+            # Downloaded copy must be byte-identical to the seeded source.
+            self.assert_(realdata == expdata)
+            return (1.0,True)
+        
+        return (1.0,False)
+
+        
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_seeding.py <method name>"
+    else:
+        suite.addTest(TestSeeding(sys.argv[1]))
+    
+    return suite
+
+def main():
+    # Strip extra argv entries so unittest runs test_suite() instead of
+    # trying to interpret the method name as a test spec itself.
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_seeding_live.py b/instrumentation/next-share/BaseLib/Test/API/test_seeding_live.py
new file mode 100644 (file)
index 0000000..f89ded3
--- /dev/null
@@ -0,0 +1,207 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import unittest
+import os
+import sys
+import time
+import socket
+import tempfile
+from traceback import print_exc
+
+from BaseLib.Test.test_as_server import TestAsServer
+from BaseLib.Test.btconn import BTConnection
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.TorrentDef import *
+from BaseLib.Core.DownloadConfig import *
+from BaseLib.Core.Session import *
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+
+DEBUG=True
+
+class TestSeeding(TestAsServer):
+    """ 
+    Testing seeding via new tribler API: live-streaming variant. A live
+    torrent is seeded from an infinite blank source, downloaded by a
+    second Session, and the downloader's BITFIELD is inspected to check
+    the live sliding window wraps around.
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        # Fixed sleep: no ready-signal is available from the Session.
+        print >>sys.stderr,"test: Giving Session time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: Session should have started up"
+    
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        
+        self.config.set_overlay(False)
+        self.config.set_internal_tracker(True)
+        
+        # Listen port for the second (downloading) session.
+        self.mylistenport = 4810
+
+    def setUpPostSession(self):
+        pass
+
+
+    def test_live_torrent(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        self.setup_seeder()
+        time.sleep(10)
+        #self.subtest_connect2downloader()
+        self.subtest_download()
+
+    
+    def setup_seeder(self):
+        self.tdef = TorrentDef()
+        # semi automatic
+        self.bitrate = 6144
+        piecesize = 32768
+        self.npieces = 12
+        # Playback duration derived from window size and bitrate so the
+        # 12-piece live window covers the whole advertised playtime.
+        playtime = ((self.npieces-1)*piecesize)/self.bitrate
+        playtimestr = '0:'+str(playtime) # DON'T WORK IF > 60 secs
+        self.tdef.create_live("Test Live",self.bitrate,playtimestr)
+        self.tdef.set_tracker(self.session.get_internal_tracker_url())
+        self.tdef.set_piece_length(piecesize)
+        self.tdef.finalize()
+
+        self.torrentfn = os.path.join(self.session.get_state_dir(),"gen.torrent")
+        self.tdef.save(self.torrentfn)
+        
+        print >>sys.stderr,"test: setup_seeder: name is",self.tdef.metainfo['info']['name']
+
+        self.dscfg = DownloadStartupConfig()
+        self.dscfg.set_dest_dir(os.getcwd())
+        
+        # File source
+        # Feed the live stream from an endless supply of blank pieces.
+        source = InfiniteSource(piecesize)
+        self.dscfg.set_video_ratelimit(self.bitrate)
+        self.dscfg.set_video_source(source)
+        
+        d = self.session.start_download(self.tdef,self.dscfg)
+        
+        d.set_state_callback(self.seeder_state_callback)
+        
+    def seeder_state_callback(self,ds):
+        # Progress logger; (1.0,False) = call again in 1s, no detailed stats.
+        d = ds.get_download()
+        print >>sys.stderr,"test: seeder:",dlstatus_strings[ds.get_status()],ds.get_progress()
+        return (1.0,False)
+
+        
+    def subtest_download(self):
+        """ Now download the file via another Session """
+        
+        self.config2 = self.config.copy() # not really necessary
+        self.config_path2 = tempfile.mkdtemp()
+        self.config2.set_state_dir(self.config_path2)
+        self.config2.set_listen_port(self.mylistenport)
+        self.session2 = Session(self.config2,ignore_singleton=True)
+        
+        # Allow session2 to start
+        print >>sys.stderr,"test: downloader: Sleeping 3 secs to let Session2 start"
+        time.sleep(3)
+        
+        tdef2 = TorrentDef.load(self.torrentfn)
+
+        dscfg2 = DownloadStartupConfig()
+        dscfg2.set_dest_dir(self.config_path2)
+        dscfg2.set_video_event_callback(self.downloader_vod_ready_callback)
+        
+        d = self.session2.start_download(tdef2,dscfg2)
+        d.set_state_callback(self.downloader_state_callback)
+        
+        time.sleep(40)
+        # To test if BITFIELD is indeed wrapping around.
+        self.subtest_connect2downloader()
+        time.sleep(80)
+    
+    def downloader_state_callback(self,ds):
+        d = ds.get_download()
+        print >>sys.stderr,"test: download:",dlstatus_strings[ds.get_status()],ds.get_progress()
+        
+        return (1.0,False)
+
+    def downloader_vod_ready_callback(self,d,event,params):
+        """ Called by SessionThread """
+        if event == VODEVENT_START:
+            stream = params["stream"]
+            while True:
+                # Fake video playback
+                # Consume one second's worth of data per second; live
+                # streams never EOF, so this loops until shutdown.
+                data = stream.read(self.bitrate)
+                if len(data) == 0:
+                    break
+                time.sleep(1)
+
+
+    def subtest_connect2downloader(self):
+        
+        print >> sys.stderr,"test: verifier: Connecting to seeder to check bitfield"
+        
+        # NOTE(review): despite the log line, this connects to
+        # self.mylistenport, i.e. the *downloader* session -- presumably
+        # intentional, since it is the downloader's wrapped BITFIELD that
+        # is being verified; confirm.
+        infohash = self.tdef.get_infohash()
+        s = BTConnection('localhost',self.mylistenport,user_infohash=infohash)
+        s.read_handshake_medium_rare()
+        
+        try:
+            s.s.settimeout(10.0)
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            print >> sys.stderr,"test: verifier: Got message",getMessageName(resp[0])
+            self.assert_(resp[0] == EXTEND)
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            print >> sys.stderr,"test: verifier: Got 2nd message",getMessageName(resp[0])
+            self.assert_(resp[0] == BITFIELD)
+            b = Bitfield(self.npieces,resp[1:])
+            print >> sys.stderr,"test: verifier: Bitfield is",`b.toboollist()`
+
+            # Advertise that we have only piece 0, then linger briefly.
+            b2 = Bitfield(self.npieces)
+            b2[0] = True
+            msg = BITFIELD+b2.tostring()
+            s.send(msg)
+            
+            time.sleep(5)
+            
+        except socket.timeout:
+            print >> sys.stderr,"test: verifier: Timeout, peer didn't reply"
+            self.assert_(False)
+        s.close()
+        
+        
+class InfiniteSource:
+    """ Endless video source for the live seeder: every read() returns
+        blank (space-filled) data, so the stream never ends. """
+    def __init__(self, piece_length):
+        # One pre-built blank piece, sliced on every read().
+        self.emptypiece = " " * piece_length
+
+    def read(self,len):
+        # NOTE(review): parameter name 'len' shadows the builtin len()
+        # within this method; kept as-is to preserve the signature.
+        return self.emptypiece[:len]
+
+    def close(self):
+        pass
+
+
+        
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_seeding.py <method name>"
+    else:
+        suite.addTest(TestSeeding(sys.argv[1]))
+    
+    return suite
+
+def main():
+    # Strip extra argv entries so unittest runs test_suite() instead of
+    # trying to interpret the method name as a test spec itself.
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_seeding_vod.py b/instrumentation/next-share/BaseLib/Test/API/test_seeding_vod.py
new file mode 100644 (file)
index 0000000..5361f78
--- /dev/null
@@ -0,0 +1,161 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import unittest
+import os
+import sys
+import time
+import socket
+import tempfile
+
+from BaseLib.Test.test_as_server import TestAsServer
+from BaseLib.Test.btconn import BTConnection
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.TorrentDef import *
+from BaseLib.Core.DownloadConfig import *
+from BaseLib.Core.Session import *
+from BaseLib.Core.simpledefs import *
+
+DEBUG=True
+
+class TestSeeding(TestAsServer):
+    """ 
+    Testing seeding via new tribler API: video-on-demand variant. A file
+    with a playtime is seeded, downloaded in VOD mode by a second Session,
+    and tearDown asserts that the VODEVENT_START callback fired.
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        # Fixed sleep: no ready-signal is available from the Session.
+        print >>sys.stderr,"test: Giving Session time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: Session should have started up"
+        # Set by downloader_vod_ready_callback; checked in tearDown.
+        self.vod_started = False
+    
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        
+        self.config.set_overlay(False)
+        self.config.set_internal_tracker(True)
+        
+        # Listen port for the second (downloading) session.
+        self.mylistenport = 4810
+
+    def setUpPostSession(self):
+        pass
+    
+    def tearDown(self):
+        TestAsServer.tearDown(self)
+        # The real pass/fail check: VOD playback must have started within
+        # the sleep window of subtest_download().
+        self.assert_(self.vod_started)
+    
+    def setup_seeder(self,merkle):
+        # Build a VOD torrent for file2.wmv with a declared playtime,
+        # announce it at the internal tracker and start seeding.
+        self.tdef = TorrentDef()
+        self.sourcefn = os.path.join(os.getcwd(),"file2.wmv")
+        self.tdef.add_content(self.sourcefn,playtime='1:00') # 60 secs
+        self.tdef.set_create_merkle_torrent(merkle)
+        self.tdef.set_tracker(self.session.get_internal_tracker_url())
+        self.tdef.finalize()
+
+        self.torrentfn = os.path.join(self.session.get_state_dir(),"gen.torrent")
+        self.tdef.save(self.torrentfn)
+        
+        print >>sys.stderr,"test: setup_seeder: name is",self.tdef.metainfo['info']['name']
+
+        self.dscfg = DownloadStartupConfig()
+        self.dscfg.set_dest_dir(os.getcwd())
+        d = self.session.start_download(self.tdef,self.dscfg)
+        
+        d.set_state_callback(self.seeder_state_callback)
+        
+    def seeder_state_callback(self,ds):
+        # Progress logger; (1.0,False) = call again in 1s, no detailed stats.
+        d = ds.get_download()
+        print >>sys.stderr,"test: seeder:",`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress()
+        return (1.0,False)
+
+
+    def test_normal_torrent(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        self.setup_seeder(False)
+        #self.subtest_is_seeding()
+        self.subtest_download()
+
+    def test_merkle_torrent(self):
+        self.setup_seeder(True)
+        self.subtest_is_seeding()
+        self.subtest_download()
+
+    def subtest_is_seeding(self):
+        # Open a raw BT connection; an EXTEND reply shows the session
+        # accepts connections for this infohash.
+        infohash = self.tdef.get_infohash()
+        s = BTConnection('localhost',self.hisport,user_infohash=infohash)
+        s.read_handshake_medium_rare()
+        
+        s.send(CHOKE)
+        try:
+            s.s.settimeout(10.0)
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            self.assert_(resp[0] == EXTEND)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, peer didn't reply"
+            self.assert_(False)
+        s.close()
+        
+        
+    def subtest_download(self):
+        """ Now download the file via another Session """
+        
+        self.config2 = self.config.copy() # not really necessary
+        self.config_path2 = tempfile.mkdtemp()
+        self.config2.set_state_dir(self.config_path2)
+        self.config2.set_listen_port(self.mylistenport)
+        self.session2 = Session(self.config2,ignore_singleton=True)
+        
+        # Allow session2 to start
+        print >>sys.stderr,"test: Sleeping 3 secs to let Session2 start"
+        time.sleep(3)
+        
+        tdef2 = TorrentDef.load(self.torrentfn)
+
+        dscfg2 = DownloadStartupConfig()
+        dscfg2.set_dest_dir(self.config_path2)
+        dscfg2.set_video_event_callback(self.downloader_vod_ready_callback)
+        
+        d = self.session2.start_download(tdef2,dscfg2)
+        d.set_state_callback(self.downloader_state_callback)
+        # NOTE(review): timing-based -- VODEVENT_START must fire within
+        # these 20 seconds or tearDown's assert on vod_started fails.
+        time.sleep(20)
+    
+    def downloader_state_callback(self,ds):
+        d = ds.get_download()
+        print >>sys.stderr,"test: download:",`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress()
+        
+        return (1.0,False)
+
+    def downloader_vod_ready_callback(self,d,event,params):
+        # Record that VOD playback reached the start event; see tearDown.
+        if event == VODEVENT_START:
+            self.vod_started = True
+
+        
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_seeding.py <method name>"
+    else:
+        suite.addTest(TestSeeding(sys.argv[1]))
+    
+    return suite
+
+def main():
+    # Strip extra argv entries so unittest runs test_suite() instead of
+    # trying to interpret the method name as a test spec itself.
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_tdef.py b/instrumentation/next-share/BaseLib/Test/API/test_tdef.py
new file mode 100644 (file)
index 0000000..4d3a0f0
--- /dev/null
@@ -0,0 +1,362 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# TODO:
+#
+
+import sys
+import unittest
+import os
+import tempfile
+
+from BaseLib.Core.API import TorrentDef
+from BaseLib.Core.BitTornado.bencode import bdecode
+from BaseLib.Core.Utilities.utilities import isValidTorrentFile
+from BaseLib.Core.Overlay.permid import verify_torrent_signature
+
+DEBUG = False
+
+TRACKER = 'http://www.tribler.org/announce'
+PLAYTIME = "1:02" # 1 min 2 secs. Coordinate with size of file.wmv to get > 1 bitrate
+PLAYTIME_SECS = 62 # PLAYTIME in seconds
+
+class TestTorrentDef(unittest.TestCase):
+    """ 
+    Testing TorrentDef version 0
+    """
+    
+    def setUp(self):
+        pass
+        
+    def tearDown(self):
+        pass
+
+    def test_add_content_file(self):
+        self.subtest_add_content_file(merkle=False)
+        self.subtest_add_content_file(merkle=True)
+
+    def test_add_content_dir(self):
+        self.subtest_add_content_dir(merkle=False)
+        self.subtest_add_content_dir(merkle=True)
+
+    def test_add_content_dir_and_file(self):
+        self.subtest_add_content_dir_and_file(merkle=False)
+        self.subtest_add_content_dir_and_file(merkle=True)
+
+    def test_add_content_file_playtime(self):
+        self.subtest_add_content_file_playtime(merkle=False)
+        self.subtest_add_content_file_playtime(merkle=True)
+
+    def test_add_content_dir_playtime(self):
+        self.subtest_add_content_dir_playtime(merkle=False)
+        self.subtest_add_content_dir_playtime(merkle=True)
+
+    def test_add_content_file_thumbnail(self):
+        self.subtest_add_content_file_thumbnail(merkle=False)
+        self.subtest_add_content_file_thumbnail(merkle=True)
+
+    def test_add_content_announce_list(self):
+        self.subtest_add_content_announce_list(merkle=False)
+        self.subtest_add_content_announce_list(merkle=True)
+
+    def test_add_content_httpseeds(self):
+        self.subtest_add_content_httpseeds(merkle=False)
+        self.subtest_add_content_httpseeds(merkle=True)
+
+    def test_add_content_torrentsig(self):
+        self.subtest_add_content_torrentsig(merkle=False)
+        self.subtest_add_content_torrentsig(merkle=True)
+
+    def test_add_content_piece_length(self):
+        self.subtest_add_content_piece_length(merkle=False)
+        self.subtest_add_content_piece_length(merkle=True)
+
+    def test_add_content_file_save(self):
+        self.subtest_add_content_file_save(merkle=False)
+        self.subtest_add_content_file_save(merkle=True)
+
+    def test_ns_metadata(self):
+        dummydata = "HalloWereld"
+        t = TorrentDef()
+        t.set_metadata(dummydata)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn)
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        [handle,filename] = tempfile.mkstemp()
+        os.close(handle)
+        t.save(filename)
+        
+        t2 = TorrentDef.load(filename)
+        self.assert_(t2.get_metadata() == dummydata)
+
+
+    def subtest_add_content_file(self,merkle=True):
+        """ Add a single file to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn)
+        t.set_tracker(TRACKER)
+        t.finalize()
+        
+        s = os.path.getsize("file.wmv")
+        
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        
+        self.assert_(metainfo['info']['name'] == "file.wmv")
+        self.assert_(metainfo['info']['length'] == s)
+        
+        """
+        bdata = bencode(t.get_metainfo())
+        f = open("gen.torrent","wb")
+        f.write(bdata)
+        f.close()
+        """
+
+    def subtest_add_content_dir(self,merkle=True):
+        """ Add a single dir to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        dn = os.path.join(os.getcwd(),"contentdir")
+        t.add_content(dn,"dirintorrent")
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        exps = 0L
+        for f in os.listdir("contentdir"):
+            if f.startswith('.'):
+                continue
+            p = os.path.join("contentdir",f)
+            s = os.path.getsize(p)
+            exps += s
+            print "test: expected size",f,s
+
+        print "test: expected total size of files in torrent",exps
+
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        
+        self.assert_(metainfo['info']['name'] == 'dirintorrent')
+        reals = 0L
+        for file in metainfo['info']['files']:
+            s = file['length']
+            print "test: real size",file['path'],s
+            reals += s
+            
+        print "test: real size of files in torrent",reals
+            
+        self.assert_(exps == reals)
+
+
+    def subtest_add_content_dir_and_file(self,merkle=True):
+        """ Add a single dir and single file to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        
+        dn = os.path.join(os.getcwd(),"contentdir")
+        t.add_content(dn,"dirintorrent")
+        
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn,os.path.join("dirintorrent","file.wmv"))
+        
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        # Check
+        exps = os.path.getsize(fn)
+        for f in os.listdir("contentdir"):
+            if f.startswith('.'):
+                continue
+            p = os.path.join("contentdir",f)
+            exps += os.path.getsize(p) 
+
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        self.assert_(metainfo['info']['name'] == 'dirintorrent')
+        
+        reals = 0L
+        for file in metainfo['info']['files']:
+            reals += file['length']
+        self.assert_(exps == reals)
+
+
+    def subtest_add_content_file_playtime(self,merkle=True):
+        """ Add a single file with playtime to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn,playtime=PLAYTIME)
+        t.set_tracker(TRACKER)
+        t.finalize()
+        
+        s = os.path.getsize("file.wmv")
+        
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        self.assert_(metainfo['info']['playtime']==PLAYTIME)
+        azprop = metainfo['azureus_properties']
+        content = azprop['Content']
+        realspeedbps = content['Speed Bps']
+        expspeedbps = s/PLAYTIME_SECS
+        self.assert_(realspeedbps == expspeedbps)
+
+
+    def subtest_add_content_dir_playtime(self,merkle=True):
+        """ Add a single dir to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn1 = os.path.join(os.getcwd(),"contentdir","file.avi")
+        fn2 = os.path.join(os.getcwd(),"contentdir","file.txt")
+        t.add_content(fn1,os.path.join("dirintorrent","file.avi"),playtime=PLAYTIME)
+        t.add_content(fn2,os.path.join("dirintorrent","file.txt"))
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        self.assert_(metainfo['info']['name'] == 'dirintorrent')
+
+        s = os.path.getsize(fn1)
+        
+        files = metainfo['info']['files']
+        for file in files:
+            if file['path'][0] == "file.avi":
+                self.assert_(file['playtime']==PLAYTIME)
+        
+        azprop = metainfo['azureus_properties']
+        content = azprop['Content']
+        realspeedbps = content['Speed Bps']
+        expspeedbps = s/PLAYTIME_SECS
+        self.assert_(realspeedbps == expspeedbps)
+
+
+    def subtest_add_content_file_thumbnail(self,merkle=True):
+        """ Add a single file with thumbnail to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        thumbfn = os.path.join(os.getcwd(),"thumb.jpg")
+        t.add_content(fn)
+        t.set_thumbnail(thumbfn)
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        f = open(thumbfn,"rb")
+        expthumb = f.read()
+        f.close()
+        
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        azprop = metainfo['azureus_properties']
+        content = azprop['Content']
+        realthumb = content['Thumbnail']
+        self.assert_(realthumb == expthumb)
+
+
+    def subtest_add_content_announce_list(self,merkle=True):
+        """ Add a single file with announce-list to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn)
+        t.set_tracker(TRACKER)
+        exphier = [[TRACKER],['http://tracker1.tribler.org:6969/announce','http://tracker2.tribler.org:7070/ann'],['http://www.cs.vu.nl','http://www.st.ewi.tudelft.nl','http://www.vuze.com']]
+        t.set_tracker_hierarchy(exphier)
+        t.finalize()
+
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        realhier = metainfo['announce-list']
+        self.assert_(realhier == exphier)
+
+
+    def subtest_add_content_httpseeds(self,merkle=True):
+        """ Add a single file with BitTornado httpseeds to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn)
+        t.set_tracker(TRACKER)
+        expseeds = ['http://www.cs.vu.nl/index.html','http://www.st.ewi.tudelft.nl/index.html']
+        t.set_httpseeds(expseeds)
+        t.finalize()
+
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        realseeds = metainfo['httpseeds']
+        self.assert_(realseeds == expseeds)
+
+    def subtest_add_content_torrentsig(self,merkle=True):
+        """ Add a single file to a TorrentDef and sign it """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        kpfn = os.path.join(os.getcwd(),"ec.pem")
+        t.add_content(fn)
+        t.set_signature_keypair_filename(kpfn)
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        ret = verify_torrent_signature(metainfo)
+        self.assert_(ret == True)
+
+
+    def subtest_add_content_piece_length(self,merkle=True):
+        """ Add a single file with piece length to a TorrentDef """
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn)
+        t.set_piece_length(2 ** 16)
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        self.assert_(metainfo['info']['piece length'] == 2 ** 16)
+
+
+    def subtest_add_content_file_save(self,merkle=True):
+        """ Add a single file to a TorrentDef and save the latter"""
+        t = TorrentDef()
+        t.set_create_merkle_torrent(merkle)
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn)
+        t.set_tracker(TRACKER)
+        t.finalize()
+
+        tfn = os.path.join(os.getcwd(),"gen.torrent")
+        t.save(tfn)
+        
+        f = open(tfn,"rb")
+        bdata = f.read()
+        f.close()
+        os.remove(tfn)
+        
+        data = bdecode(bdata)
+        metainfo = t.get_metainfo()
+        self.general_check(metainfo)
+        self.assert_(metainfo == data)
+
+
+    def general_check(self,metainfo):
+        self.assert_(isValidTorrentFile(metainfo))
+        self.assert_(metainfo['announce'] == TRACKER)
+        
+    
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestTorrentDef))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_tracking.py b/instrumentation/next-share/BaseLib/Test/API/test_tracking.py
new file mode 100644 (file)
index 0000000..c9b597f
--- /dev/null
@@ -0,0 +1,143 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import unittest
+import os
+import sys
+import time
+import urllib
+#from urllib2 import urlopen # urllib blocks on reading, HTTP/1.1 persist conn problem?
+from traceback import print_exc
+import urlparse
+
+from BaseLib.Core.BitTornado.zurllib import urlopen
+from BaseLib.Core.simpledefs import STATEDIR_ITRACKER_DIR
+from BaseLib.Test.test_as_server import TestAsServer
+from BaseLib.Test.btconn import BTConnection
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.API import *
+
+DEBUG=True
+
+class TestTracking(TestAsServer):
+    """ 
+    Testing seeding via new tribler API:
+    """
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        
+        self.config.set_megacache(False)
+        self.config.set_internal_tracker(True)
+     
+     
+    def test_all(self):
+        self.subtest_add_remove_torrent()
+        self.subtest_tlookup1()
+        self.subtest_tlookup2()
+   
+        
+    def subtest_add_remove_torrent(self):
+        tdef = TorrentDef()
+        sourcefn = os.path.join(os.getcwd(),"file.wmv")
+        tdef.add_content(sourcefn)
+        tdef.set_tracker(self.session.get_internal_tracker_url())
+        tdef.finalize()
+
+        torrentfn = os.path.join(self.session.get_state_dir(),"gen.torrent")
+        tdef.save(torrentfn)
+        infohash = tdef.get_infohash()
+        hexinfohash = binascii.hexlify(infohash)
+        
+        self.session.add_to_internal_tracker(tdef)
+        time.sleep(1)
+        self.check_http_presence(hexinfohash,True)
+        
+        self.session.remove_from_internal_tracker(tdef)
+        print >> sys.stderr,"test: Give network thread running tracker time to detect we removed the torrent file"
+        time.sleep(2)
+        
+        self.check_http_presence(hexinfohash,False)
+        self.check_disk_presence(hexinfohash,False)
+
+    def check_http_presence(self,hexinfohash,present):
+        print >> sys.stderr,"test: infohash is",hexinfohash
+        url = 'http://127.0.0.1:'+str(self.session.get_listen_port())+'/'
+        print >> sys.stderr,"test: tracker lives at",url
+        f = urlopen(url)
+        data = f.read()
+        f.close()
+        
+        # WARNING: this test depends on the output of the tracker. If that
+        # is changed, also change this.
+        print >> sys.stderr,"test: tracker returned:",data
+        if present:
+            self.assert_(data.find(hexinfohash) != -1)
+        else:
+            self.assert_(data.find(hexinfohash) == -1)
+
+    def check_disk_presence(self,hexinfohash,present):
+        itrackerdir = os.path.join(self.session.get_state_dir(),STATEDIR_ITRACKER_DIR)
+        for filename in os.listdir(itrackerdir):
+            if filename.startswith(hexinfohash):
+                if present:
+                    self.assert_(True)
+                else:
+                    self.assert_(False)
+         
+         
+    #
+    # /tlookup?url
+    #
+    def subtest_tlookup1(self):
+        httpseeds = []
+        httpseeds.append('http://www.example.com/file.wmv')
+        self._test_tlookup(httpseeds)
+
+    def subtest_tlookup2(self):
+        httpseeds = []
+        httpseeds.append('http://www.example.com/file.wmv')
+        httpseeds.append('http://icecast.freezer.com/file.wmv')
+        self._test_tlookup(httpseeds)
+
+
+    def _test_tlookup(self,httpseedlist):
+        t = TorrentDef()
+        fn = os.path.join(os.getcwd(),"file.wmv")
+        t.add_content(fn)
+        t.set_tracker(self.session.get_internal_tracker_url())
+        t.set_urllist(httpseedlist)
+        t.finalize()
+        wantdata = bencode(t.get_metainfo())
+        
+        self.session.add_to_internal_tracker(t)
+        #time.sleep(30)
+        
+        (scheme, netloc, path, pars, query, fragment) = urlparse.urlparse(self.session.get_internal_tracker_url())
+        urlprefix = scheme+'://'+netloc+'/tlookup?'
+        for httpseed in httpseedlist:
+            quoted = urllib.quote(httpseed)
+            url = urlprefix+quoted
+            #url = "http://www.cs.vu.nl/~arno/index.html"
+            print >>sys.stderr,"test: Asking tracker for",url
+            # F*ing BitTornado/Python crap: using normal urlopen here results in
+            # an infinitely hanging read (even if read(1024))
+            conn = urlopen(url)
+            gotdata = conn.read()
+            print >>sys.stderr,"test: Tracker sent",len(gotdata)
+            conn.close()
+            self.assertEquals(wantdata,gotdata)
+            
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestTracking))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/API/test_vod.py b/instrumentation/next-share/BaseLib/Test/API/test_vod.py
new file mode 100644 (file)
index 0000000..6fccf7e
--- /dev/null
@@ -0,0 +1,64 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+
+from  threading import currentThread
+
+from BaseLib.Core.API import *
+from BaseLib.Video.VideoServer import VideoHTTPServer
+
+
+def state_callback(d,ds):
+    print >>sys.stderr,"main: Stats",dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error()
+
+def vod_ready_callback(d,event,params):
+    print >>sys.stderr,"main: VOD ready callback called",currentThread().getName(),"###########################################################",params["mimetype"]
+
+    """
+    f = open("video.avi","wb")
+    while True:
+        data = stream.read()
+        print >>sys.stderr,"main: VOD ready callback: reading",type(data)
+        print >>sys.stderr,"main: VOD ready callback: reading",len(data)
+        if len(data) == 0:
+            break
+        f.write(data)
+    f.close()
+    stream.close()
+    """
+
+    videoserv = VideoHTTPServer.getInstance()
+    videoserv.set_inputstream('video/mpeg',params["stream"],None)
+    
+
+if __name__ == "__main__":
+    
+    videoserv = VideoHTTPServer.getInstance() # create
+    videoserv.background_serve()
+    
+    s = Session()
+    
+    if sys.platform == 'win32':
+        tdef = TorrentDef.load('bla.torrent')
+    else:
+        tdef = TorrentDef.load('/tmp/bla.torrent')
+    dcfg = DownloadStartupConfig.get_copy_of_default()
+    #dcfg.set_saveas('/arno')
+    dcfg = DownloadStartupConfig.get_copy_of_default()
+    dcfg.set_video_start_callback(vod_ready_callback)
+    #dcfg.set_selected_files('MATRIX-XP_engl_L.avi') # play this video
+    #dcfg.set_selected_files('field-trip-west-siberia.avi')
+    
+    d = s.start_download(tdef,dcfg)
+    d.set_state_callback(state_callback,1)
+    #d.set_max_upload(100)
+    
+    time.sleep(10)
+    
+    """    
+    d.stop()
+    print "After stop"
+    time.sleep(5)
+    d.restart()
+    """
+    time.sleep(2500)
+    
diff --git a/instrumentation/next-share/BaseLib/Test/API/thumb.jpg b/instrumentation/next-share/BaseLib/Test/API/thumb.jpg
new file mode 100644 (file)
index 0000000..f8ef6db
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/API/thumb.jpg differ
diff --git a/instrumentation/next-share/BaseLib/Test/Core/CacheDB/SimpleMetadataDB.py b/instrumentation/next-share/BaseLib/Test/Core/CacheDB/SimpleMetadataDB.py
new file mode 100644 (file)
index 0000000..16a066e
--- /dev/null
@@ -0,0 +1,76 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+import sqlite3
+import os.path
+
+RES_DIR = os.path.join('..','..','subtitles_test_res')
+CREATE_SQL_FILE = "schema_sdb_v5.sql"
+
+
+class SimpleMetadataDB(object):
+    '''
+    Mimics the SQLiteCacheDB, to be used fot testing
+    '''
+    
+
+
+    def __init__(self, sql_create, destination_db = ":memory:", createDB = True):
+        '''
+        Constructor
+        '''
+        self._connection = sqlite3.connect(destination_db)
+        
+        #init
+        if createDB:
+            path = os.path.join(RES_DIR,CREATE_SQL_FILE)
+            with open(path, "rb") as sql_script:
+                script = sql_script.readlines()
+        
+                script = "".join(script)
+        
+                cursor = self._connection.cursor()
+                cursor.executescript(script)
+    
+    
+    def fetchall(self,sql,args=None):
+        cursor = self._connection.cursor()
+        if args is None:
+            args = ()
+        cursor.execute(sql,args)
+        sqliteRows = cursor.fetchall()
+        
+        returnlist = []
+        for row in sqliteRows:
+            templist = []
+            for elem in row:
+                if isinstance(elem, unicode):
+                    elem = str(elem)
+                templist.append(elem)
+            returnlist.append(templist)
+                
+        return returnlist
+    
+    def execute_write(self,sql,args=None,commit=True):
+        cursor = self._connection.cursor()
+        if args is None:
+            args = ()
+        cursor.execute(sql,args)
+        if commit :
+            self._connection.commit()
+            
+    def commit(self):
+        self._connection.commit()
+    
+    def close(self):
+        self._connection.close()
+        
+    
+        
+if __name__ == '__main__':
+    #db = SimpleMetadataDB("res/create.sql")
+    #db.execute_write("INSERT INTO Subtitles VALUES (1,'arg','a','a');")
+    #res = db.fetchall("SELECT * FROM Subtitles;")
+    pass
+        
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/CacheDB/__init__.py b/instrumentation/next-share/BaseLib/Test/Core/CacheDB/__init__.py
new file mode 100644 (file)
index 0000000..bdef2ba
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Test/Core/CacheDB/test_MetadataDBHandler.py b/instrumentation/next-share/BaseLib/Test/Core/CacheDB/test_MetadataDBHandler.py
new file mode 100644 (file)
index 0000000..8d552c6
--- /dev/null
@@ -0,0 +1,778 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+from BaseLib.Core.CacheDB.MetadataDBHandler import MetadataDBHandler
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str
+from BaseLib.Test.Core.CacheDB.SimpleMetadataDB import SimpleMetadataDB
+from BaseLib.Core.Overlay.permid import generate_keypair
+import copy
+import hashlib
+import logging
+import random
+import unittest
+import codecs
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import MetadataDBException
+import time
+import os.path
+
+RES_DIR = os.path.join('..','..','subtitles_test_res')
+
+CREATE_SQL_FILE = os.path.join(RES_DIR,'schema_sdb_v5.sql')
+SQL_DB = ":memory:"#"res/test.sdb"
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+class TestMetadataDBHandler(unittest.TestCase):
+    _keypair1 = generate_keypair()
+    aPermId = str(_keypair1.pub().get_der())
+    _keypair2 = generate_keypair()
+    anotherPermId = str(_keypair2.pub().get_der())
+
+    def setUp(self):
+        #createDB = not os.path.isfile(SQL_DB)
+        self.db = SimpleMetadataDB(CREATE_SQL_FILE, SQL_DB)
+        self.underTest = MetadataDBHandler(self.db)
+    
+    def tearDown(self):
+        self.db.close()
+        #if os.path.isfile(SQL_DB) :
+            #os.remove(SQL_DB)
+        
+        
+    
+    def testInitHandler(self):
+        self.assertTrue(self.underTest is not None)
+        
+    def testSingleton(self):
+        
+        instance1 = MetadataDBHandler.getInstance()
+        instance2 = MetadataDBHandler.getInstance()
+        self.assertTrue(instance1 is instance2)
+        
+    def testInsertNewMetadataSubs(self):
+        metadataDTO = MockMetadataDTO(["nld","ita"])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        testquery = "SELECT * FROM Metadata WHERE publisher_id=?" \
+            + " AND infohash=?;" 
+        results = self.db.fetchall(testquery, (bin2str(metadataDTO.channel),bin2str(metadataDTO.infohash)))
+        
+        self.assertTrue(len(results) == 1)
+        tupl = results[0] 
+        self.assertTrue(tupl[0] is not None and isinstance(tupl[0], int))
+        self.assertEquals(bin2str(metadataDTO.channel),tupl[1])
+        self.assertEquals(bin2str(metadataDTO.infohash),tupl[2])
+        self.assertEquals(metadataDTO.description, tupl[3])
+        self.assertEquals(metadataDTO.timestamp, tupl[4])
+        self.assertEquals(bin2str(metadataDTO.signature), tupl[5])
+        
+        subtitlesQuery = "SELECT * FROM Subtitles WHERE metadata_id_fk=?;"
+        
+        subtitles = self.db.fetchall(subtitlesQuery, (tupl[0],))
+        self.assertEquals(2,len(subtitles))
+        
+        for lang in ("ita", "nld"):
+            found = False
+            foundSub = None
+            for subtuple in subtitles:
+                if subtuple[1] == lang:
+                    found = True
+                    foundSub = subtuple
+                    break
+                
+            self.assertTrue(found)
+            self.assertEquals(bin2str(metadataDTO.getSubtitle(lang).checksum), foundSub[3])
+            
+    
+    def testGetMetadataInstance(self):
+        metadataDTO = MockMetadataDTO(["nld","ita"])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        retrievedMetadata = self.underTest.getMetadata(metadataDTO.channel,
+                                                       metadataDTO.infohash)
+        
+        self.assertFalse(retrievedMetadata is None)
+        self.assertFalse(retrievedMetadata is metadataDTO)
+        self.assertEquals(metadataDTO,retrievedMetadata)
+        
+        
+            
+    
+    def testInsertNewMetadataNoSubs(self):
+        metadataDTO = MockMetadataDTO([])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        testquery = "SELECT * FROM Metadata WHERE publisher_id=?" \
+            + " AND infohash=?;" 
+            
+        channel = bin2str(metadataDTO.channel)
+        infohash = bin2str(metadataDTO.infohash)
+        results = self.db.fetchall(testquery, (channel,infohash))
+        
+        self.assertTrue(len(results) == 1)
+        tupl = results[0] 
+        self.assertTrue(tupl[0] is not None and isinstance(tupl[0], int))
+        self.assertEquals(channel,tupl[1])
+        self.assertEquals(infohash,tupl[2])
+        self.assertEquals(metadataDTO.description, tupl[3])
+        self.assertEquals(metadataDTO.timestamp, tupl[4])
+        self.assertEquals(bin2str(metadataDTO.signature), tupl[5])
+        
+        subtitlesQuery = "SELECT * FROM Subtitles WHERE metadata_id_fk=?;"
+        
+        subtitles = self.db.fetchall(subtitlesQuery, (tupl[0],))
+        self.assertEquals(0,len(subtitles))
+    
+    def testUpdateExistingWithOlder(self):
+        
+        
+        metadataDTO = MockMetadataDTO(["nld", "ita"])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        olderMetadataDTO = copy.copy(metadataDTO)
+        olderMetadataDTO.timestamp = 1 #*really* old
+        olderMetadataDTO.sign(olderMetadataDTO._keypair)
+    
+        self.underTest.insertMetadata(olderMetadataDTO)
+        
+        #assert the the older did not replace the newer
+        testquery = "SELECT * FROM Metadata WHERE publisher_id=?" \
+            + " AND infohash=?;" 
+        channel = bin2str(metadataDTO.channel)
+        infohash = bin2str(metadataDTO.infohash)
+        results = self.db.fetchall(testquery, (channel,infohash))
+        
+        self.assertTrue(len(results) == 1)
+        tupl = results[0] 
+        self.assertTrue(tupl[0] is not None and isinstance(tupl[0], int))
+        self.assertEquals(channel,tupl[1])
+        self.assertEquals(infohash,tupl[2])
+        self.assertEquals(metadataDTO.description, tupl[3])
+        self.assertEquals(metadataDTO.timestamp, tupl[4])
+        self.assertEquals(bin2str(metadataDTO.signature), tupl[5])
+        
+        subtitlesQuery = "SELECT * FROM Subtitles WHERE metadata_id_fk=?;"
+        
+        subtitles = self.db.fetchall(subtitlesQuery, (tupl[0],))
+        self.assertEquals(2,len(subtitles))
+        
+        for lang in ("ita", "nld"):
+            found = False
+            foundSub = None
+            for subtuple in subtitles:
+                if subtuple[1] == lang:
+                    found = True
+                    foundSub = subtuple
+                    break
+                
+            self.assertTrue(found)
+            self.assertEquals(bin2str(metadataDTO.getSubtitle(lang).checksum), foundSub[3])
+        
+        
+    def testUpdateExistingWithNewerSameSub(self):
+        metadataDTO = MockMetadataDTO(["nld", "ita"])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        newerMetadataDTO = copy.copy(metadataDTO)
+        newerMetadataDTO.description = u"I'm newer!"
+        newerMetadataDTO.timestamp = newerMetadataDTO.timestamp +1 #newer 
+        newerMetadataDTO.sign(newerMetadataDTO._keypair)
+        
+        
+        self.underTest.insertMetadata(newerMetadataDTO)
+        
+        #assert the the older has been replaced
+        testquery = "SELECT * FROM Metadata WHERE publisher_id=?" \
+            + " AND infohash=?;" 
+        
+        channel = bin2str(metadataDTO.channel)
+        infohash = bin2str(metadataDTO.infohash)
+        results = self.db.fetchall(testquery, (channel,infohash))
+        
+        self.assertTrue(len(results) == 1)
+        tupl = results[0] 
+        self.assertTrue(tupl[0] is not None and isinstance(tupl[0], int))
+        self.assertEquals(channel,tupl[1])
+        self.assertEquals(infohash,tupl[2])
+        self.assertEquals(newerMetadataDTO.description, tupl[3])
+        self.assertEquals(newerMetadataDTO.timestamp, tupl[4])
+        self.assertEquals(bin2str(newerMetadataDTO.signature), tupl[5])
+        
+        #testing subtitles with the old once since they are not changed
+        subtitlesQuery = "SELECT * FROM Subtitles WHERE metadata_id_fk=?;"
+        
+        subtitles = self.db.fetchall(subtitlesQuery, (tupl[0],))
+        self.assertEquals(2,len(subtitles))
+        
+        for lang in ("ita", "nld"):
+            found = False
+            foundSub = None
+            for subtuple in subtitles:
+                if subtuple[1] == lang:
+                    found = True
+                    foundSub = subtuple
+                    break
+                
+            self.assertTrue(found)
+            self.assertEquals(bin2str(metadataDTO.getSubtitle(lang).checksum), foundSub[3])
+    
+    
+    
+    def testUpdateExistingWithNewerNewSubs(self):
+        """A newer entry for the same (channel, infohash) replaces the old one,
+        and its enlarged subtitle set ("eng" added) replaces the old subtitles."""
+        metadataDTO = MockMetadataDTO(["nld", "ita"])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        newerMetadataDTO = MockMetadataDTO(["nld","ita","eng"])
+        newerMetadataDTO.channel = metadataDTO.channel
+        newerMetadataDTO.infohash = metadataDTO.infohash
+        newerMetadataDTO._keypair = metadataDTO._keypair
+        newerMetadataDTO.timestamp = metadataDTO.timestamp +1 #newer 
+        newerMetadataDTO.sign(newerMetadataDTO._keypair)
+        
+        
+        self.underTest.insertMetadata(newerMetadataDTO)
+        
+        #assert that the older entry has been replaced
+        testquery = "SELECT * FROM Metadata WHERE publisher_id=?" \
+            + " AND infohash=?;" 
+            
+        channel = bin2str(metadataDTO.channel)
+        infohash = bin2str(metadataDTO.infohash)
+        results = self.db.fetchall(testquery, (channel,infohash))
+        
+        self.assertTrue(len(results) == 1)
+        tupl = results[0] 
+        self.assertTrue(tupl[0] is not None and isinstance(tupl[0], int))
+        self.assertEquals(channel,tupl[1])
+        self.assertEquals(infohash,tupl[2])
+        self.assertEquals(newerMetadataDTO.description, tupl[3])
+        self.assertEquals(newerMetadataDTO.timestamp, tupl[4])
+        self.assertEquals(bin2str(newerMetadataDTO.signature), tupl[5])
+        
+        subtitlesQuery = "SELECT * FROM Subtitles WHERE metadata_id_fk=?;"
+        
+        subtitles = self.db.fetchall(subtitlesQuery, (tupl[0],))
+        self.assertEquals(3,len(subtitles))
+        
+        for lang in ("ita", "nld","eng"):
+            found = False
+            foundSub = None
+            for subtuple in subtitles:
+                if subtuple[1] == lang:
+                    found = True
+                    foundSub = subtuple
+                    break
+                
+            self.assertTrue(found)
+            self.assertEquals(bin2str(newerMetadataDTO.getSubtitle(lang).checksum), foundSub[3])
+    
+    def testUpdateExistingWithNewerSubsDeleted(self):
+        """A newer entry whose subtitle set drops a language ("ita") replaces
+        both the metadata row and the old subtitle rows."""
+        metadataDTO = MockMetadataDTO(["nld", "ita"])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        newerMetadataDTO = MockMetadataDTO(["nld","eng"])
+        newerMetadataDTO.channel = metadataDTO.channel
+        newerMetadataDTO.infohash = metadataDTO.infohash
+        newerMetadataDTO._keypair = metadataDTO._keypair
+        newerMetadataDTO.timestamp = metadataDTO.timestamp +1 #newer 
+        newerMetadataDTO.sign(newerMetadataDTO._keypair)
+        
+        
+        self.underTest.insertMetadata(newerMetadataDTO)
+        
+        #assert that the older entry has been replaced
+        testquery = "SELECT * FROM Metadata WHERE publisher_id=?" \
+            + " AND infohash=?;" 
+        channel = bin2str(metadataDTO.channel)
+        infohash = bin2str(metadataDTO.infohash)
+        results = self.db.fetchall(testquery, (channel,infohash))
+        
+        self.assertTrue(len(results) == 1)
+        tupl = results[0] 
+        self.assertTrue(tupl[0] is not None and isinstance(tupl[0], int))
+        self.assertEquals(channel,tupl[1])
+        self.assertEquals(infohash,tupl[2])
+        self.assertEquals(newerMetadataDTO.description, tupl[3])
+        self.assertEquals(newerMetadataDTO.timestamp, tupl[4])
+        self.assertEquals(bin2str(newerMetadataDTO.signature), tupl[5])
+        
+        subtitlesQuery = "SELECT * FROM Subtitles WHERE metadata_id_fk=?;"
+        
+        subtitles = self.db.fetchall(subtitlesQuery, (tupl[0],))
+        self.assertEquals(2,len(subtitles))
+        
+        for lang in ("nld","eng"):
+            found = False
+            foundSub = None
+            for subtuple in subtitles:
+                if subtuple[1] == lang:
+                    found = True
+                    foundSub = subtuple
+                    break
+                
+            self.assertTrue(found)
+            self.assertEquals(bin2str(newerMetadataDTO.getSubtitle(lang).checksum), foundSub[3])
+    
+    def testGetAllMetadataForInfohashEmtpy(self):
+        """getAllMetadataForInfohash on an unknown infohash returns no results.
+
+        NOTE(review): "Emtpy" in the method name is a typo for "Empty"; kept
+        as-is since unittest only requires the "test" prefix for discovery.
+        """
+        metadataDTO = MockMetadataDTO(["nld", "ita"])
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        
+        otherinfohash = _generateFakeInfohash()
+        
+        results = self.underTest.getAllMetadataForInfohash(otherinfohash)
+        self.assertTrue(len(results)==0)
+    
+    def testGetAllMetadataForInfohashNotEmpty(self):
+        """getAllMetadataForInfohash returns exactly the entries (from any
+        channel) that reference the given infohash."""
+        infohash = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["nld", "ita"],infohash)
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        #different channels since the channel is automatically
+        #generated by MockMetadataDTO
+        metadataDTO2 = MockMetadataDTO(["rus", "eng"],infohash)
+        metadataDTO2.sign(metadataDTO2._keypair)
+        self.underTest.insertMetadata(metadataDTO2)
+        
+        #a 3rd instance with different channel and infohash
+        metadataDTO3 = MockMetadataDTO(["rus", "spa", "jpn"])
+        metadataDTO3.sign(metadataDTO3._keypair)
+        self.underTest.insertMetadata(metadataDTO3)
+        
+        results = self.underTest.getAllMetadataForInfohash(infohash)
+        self.assertTrue(len(results)==2)
+        
+        #'in' checks for equality, not reference equality
+        self.assertTrue(metadataDTO1 in results)
+        self.assertTrue(metadataDTO2 in results)
+        self.assertFalse(metadataDTO3 in results)
+        
+        
+        
+    def testDeleteSubtitle(self):
+        """_deleteSubtitleByChannel removes only the named language; deleting
+        a language that is not stored is a harmless no-op."""
+        infohash = _generateFakeInfohash()
+        metadataDTO = MockMetadataDTO(["eng","kor"], infohash)
+        
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        res = self.underTest.getAllSubtitles(metadataDTO.channel, infohash)
+        self.assertTrue("eng" in res and "kor" in res)
+        
+        #delete a subtitle that does not exist
+        self.underTest._deleteSubtitleByChannel(metadataDTO.channel, infohash, "ita")
+        res = self.underTest.getAllSubtitles(metadataDTO.channel, infohash)
+        self.assertTrue("eng" in res and "kor" in res)
+        
+        self.underTest._deleteSubtitleByChannel(metadataDTO.channel, infohash, "eng")
+        res = self.underTest.getAllSubtitles(metadataDTO.channel, infohash)
+        self.assertTrue("kor" in res and not "eng" in res)
+        
+        
+    def testSelectLocalSubtitles(self):
+        """getAllLocalSubtitles returns only subtitles that have a local
+        path set, keyed by channel and then infohash."""
+        infohash1 = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["eng","kor"], infohash1)
+           
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        # no subtitle of metadataDTO1 has a path: nothing is "local" yet
+        res = self.underTest.getAllLocalSubtitles()
+        
+        self.assertTrue(len(res) == 0)
+        
+        infohash2 = _generateFakeInfohash()
+        metadataDTO2 = MockMetadataDTO(["nld","spa"], infohash2)
+        
+        metadataDTO2.getSubtitle("nld").path = "/bla/bla"
+        
+        metadataDTO2.sign(metadataDTO2._keypair)
+        self.underTest.insertMetadata(metadataDTO2)
+        
+        res = self.underTest.getAllLocalSubtitles()
+        
+        self.assertTrue(len(res) == 1)
+        
+        self.assertTrue(metadataDTO2.channel in res)
+        
+        self.assertTrue(infohash2 in res[metadataDTO2.channel])
+        self.assertEquals(1, len(res[metadataDTO2.channel][infohash2]))
+        
+        self.assertEquals(metadataDTO2.getSubtitle("nld"), res[metadataDTO2.channel][infohash2][0])
+        
+    def testSelectLocalSubtitles2(self):
+        """getLocalSubtitles for one (channel, infohash) returns only the
+        languages whose subtitle has a local path."""
+        infohash1 = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["eng","kor", "nld"], infohash1)
+        
+        metadataDTO1.getSubtitle("nld").path = "/bla/bla"
+        metadataDTO1.getSubtitle("eng").path = "/bla/bla"
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        infohash2 = _generateFakeInfohash()
+        metadataDTO2 = MockMetadataDTO(["ita","spa"], infohash2)
+        metadataDTO2.getSubtitle("ita").path = "/a/b"
+        metadataDTO2.getSubtitle("spa").path = "/c/d"
+        metadataDTO2.sign(metadataDTO2._keypair)
+        self.underTest.insertMetadata(metadataDTO2)
+        
+        
+        res = self.underTest.getLocalSubtitles(metadataDTO1.channel, infohash1)
+        self.assertEquals(2, len(res))
+
+        self.assertTrue("eng" in res)
+        self.assertEquals(metadataDTO1.getSubtitle("eng"), res["eng"])
+        
+        self.assertTrue("nld" in res)
+        self.assertEquals(metadataDTO1.getSubtitle("nld"), res["nld"])
+        
+        # "kor" has no local path, so it must not be reported
+        self.assertFalse("kor" in res)
+        
+    def testUpdateSubtitlesWithNonePathValue(self):
+        """updateSubtitlePath with a None path (and commit=True) clears the
+        stored local path for that subtitle."""
+        
+        infohash1 = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["eng","kor"], infohash1)
+        
+        metadataDTO1.getSubtitle("eng").path = os.path.abspath(os.path.join("bla","bla"))
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        sub = self.underTest.getSubtitle(metadataDTO1.channel, infohash1, "eng")
+        self.assertEquals(os.path.abspath(os.path.join("bla","bla")), sub.path)
+        
+        self.underTest.updateSubtitlePath(metadataDTO1.channel, infohash1,
+                                          "eng", None, True)
+        
+        sub = self.underTest.getSubtitle(metadataDTO1.channel, infohash1, "eng")
+        self.assertEquals(None, sub.path)
+        
+        
+    def testUpdateSubtitles(self):
+        """updateSubtitlePath (commit=False) followed by an explicit commit()
+        persists the new local paths for the subtitles."""
+        sub1path= os.path.join(RES_DIR,"fake0.srt")
+        sub2path=os.path.join(RES_DIR,"fake1.srt")
+        infohash = _generateFakeInfohash()
+        metadataDTO = MockMetadataDTO([], infohash)
+        sub1 = SubtitleInfo("ita", None, _computeSHA1(sub1path))
+        sub2 = SubtitleInfo("eng",None,_computeSHA1(sub2path))
+        
+        metadataDTO.addSubtitle(sub1)
+        metadataDTO.addSubtitle(sub2)
+        metadataDTO.sign(metadataDTO._keypair)
+        self.underTest.insertMetadata(metadataDTO)
+        
+        res1 = self.underTest.getSubtitle(metadataDTO.channel, infohash,"ita")
+        self.assertEquals(sub1,res1)
+        
+        res2 = self.underTest.getSubtitle(metadataDTO.channel, infohash, "eng")
+        self.assertEquals(sub2,res2)
+        
+        sub1bis = copy.copy(sub1)
+        sub1bis.path = sub1path
+        sub2bis = copy.copy(sub2)
+        sub2bis.path = sub2path
+        
+        self.underTest.updateSubtitlePath(metadataDTO.channel, infohash, 
+                                      sub1bis.lang, sub1bis.path, False)
+        self.underTest.updateSubtitlePath(metadataDTO.channel, infohash, 
+                                      sub2bis.lang, sub2bis.path , False)
+        
+        
+        self.underTest.commit()
+        
+        #after the explicit commit the db must return the updated paths
+        #(SubtitleInfo equality apparently ignores path — the lang/checksum
+        # still match while the paths differ; verify against SubtitleInfo.__eq__)
+        res1 = self.underTest.getSubtitle(metadataDTO.channel, infohash,"ita")
+        self.assertTrue(sub1== res1 and sub1.path != res1.path)
+        self.assertTrue(sub1bis == res1 and sub1bis.path == res1.path)
+        
+        res2 = self.underTest.getSubtitle(metadataDTO.channel, infohash, "eng")
+        self.assertTrue(sub2 == res2 and sub2.path != res2.path)
+        self.assertTrue(sub2bis == res2 and sub2bis.path == res2.path)
+        
+        
+    # 30-05-2010 Testing of the new added table (SubtitlesHave) manipulation
+    # methods.
+    
+    def testInsertAndGetHaveMask(self):
+        """insertHaveMask validates the 32 bit range, rejects duplicates for
+        the same peer, and getHaveMask returns None for unknown keys."""
+       
+        
+        infohash = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["nld","spa"], infohash)
+        channel = metadataDTO1.channel
+        
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        peer_id = TestMetadataDBHandler.anotherPermId
+        
+        #inserting a negative mask has to be refused
+        havemask = -1
+        funcToTest =\
+            lambda : self.underTest.insertHaveMask(channel, infohash, 
+                                                   peer_id, havemask)
+        
+        self.assertRaises(MetadataDBException, funcToTest)
+        
+        #also a bitmask must be smaller than 2**32
+        havemask = 2**32
+        
+        funcToTest =\
+            lambda : self.underTest.insertHaveMask(channel, infohash, 
+                                                   peer_id, havemask)
+        
+        self.assertRaises(MetadataDBException, funcToTest)
+        
+        
+        #now it's time for a correct value
+        havemask1=0x80000001
+        self.underTest.insertHaveMask(channel, infohash, peer_id, havemask1)
+        
+        mask = self.underTest.getHaveMask(channel, infohash,peer_id)
+        self.assertEqual(mask,havemask1)
+        
+        #duplicate insertions should raise an error
+        havemask2=0xffffffff
+        funcToTest = \
+           lambda : self.underTest.insertHaveMask(channel, infohash, 
+                                                  peer_id, havemask2)
+        
+        self.assertRaises(MetadataDBException, funcToTest)
+        
+        #insertion for another peer should go fine
+        self.underTest.insertHaveMask(channel, infohash, channel, havemask2)
+        
+        mask1 = self.underTest.getHaveMask(channel, infohash,peer_id)
+        self.assertEqual(mask1,havemask1)
+        mask2 = self.underTest.getHaveMask(channel, infohash,channel)
+        self.assertEqual(mask2,havemask2)
+        
+        #getting a have mask for a nonexistent (channel, infohash) shall
+        #return None
+        mask1 = \
+            self.underTest.getHaveMask(channel, _generateFakeInfohash(),peer_id)
+        self.assertTrue(mask1 is None)
+        
+        #as it should when asking for a nonexistent peer_id
+        mask1 = self.underTest.getHaveMask(channel, infohash,
+                                           TestMetadataDBHandler.aPermId)
+        self.assertTrue(mask1 is None)
+        
+    def testUpdateHaveMask(self):
+        """updateHaveMask overwrites a previously inserted mask for a peer."""
+        infohash = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["nld","spa"], infohash)
+        channel = metadataDTO1.channel
+        
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        peer_id = TestMetadataDBHandler.anotherPermId
+        
+        
+        #adding an have mask to the db
+        havemask1=0x80000001
+        self.underTest.insertHaveMask(channel, infohash, peer_id, havemask1)
+        
+        mask = self.underTest.getHaveMask(channel, infohash,peer_id)
+        self.assertEqual(mask,havemask1)
+        
+        #updating it to a different value
+        new_havemask = 0x1111ffff
+        self.underTest.updateHaveMask(channel, infohash, peer_id, 
+                                      new_havemask)
+        mask = self.underTest.getHaveMask(channel, infohash,peer_id)
+        self.assertEqual(mask,new_havemask)
+        
+        #trying to update a non existing row should cause an error
+        # -- currently this doesn't happen
+        # implementing this beahaviour would slow down the db
+        #funcToTest = \
+        #    lambda: self.underTest.updateHaveMask(channel, infohash, 
+        #                                         channel, new_havemask)
+        # self.assertRaises(MetadataDBException, funcToTest)
+    
+    
+    def testDeleteHaveEntry(self):
+        """deleteHaveEntry removes only the targeted peer's mask; deleting a
+        non-existing entry leaves the db unchanged."""
+        infohash = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["nld","spa"], infohash)
+        channel = metadataDTO1.channel
+        
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        peer_id = TestMetadataDBHandler.anotherPermId
+        
+        
+        #adding an have mask to the db
+        havemask1=0x80000001
+        self.underTest.insertHaveMask(channel, infohash, peer_id, havemask1)
+        
+        havemask2=0x02324123
+        self.underTest.insertHaveMask(channel, infohash, channel, havemask2)
+        
+        self.underTest.deleteHaveEntry(channel, infohash, peer_id)
+        
+        mask = self.underTest.getHaveMask(channel, infohash, peer_id)
+        self.assertTrue(mask is None)
+        
+        mask = self.underTest.getHaveMask(channel, infohash, channel)
+        self.assertEquals(havemask2,mask)
+        
+        # deleting an entry that does not exist should leave
+        # the db unchanged
+        self.underTest.deleteHaveEntry(channel, infohash, peer_id)
+        
+        mask = self.underTest.getHaveMask(channel, infohash, channel)
+        self.assertEquals(havemask2,mask)
+        
+    
+    def testGetAllHaveEntries(self):
+        """getHaveEntries returns (peer, mask, timestamp) tuples ordered
+        newest-first."""
+        infohash = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["nld","spa"], infohash)
+        channel = metadataDTO1.channel
+        
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        peer_id = TestMetadataDBHandler.anotherPermId
+        
+        
+        #adding an have mask to the db
+        havemask1=0x80000001
+        self.underTest.insertHaveMask(channel, infohash, peer_id, havemask1)
+        
+        time.sleep(1) # otherwise they would have the same timestamp
+        havemask2=0x02324123
+        self.underTest.insertHaveMask(channel, infohash, channel, havemask2)
+        
+        d = self.underTest.getHaveEntries(channel, infohash)
+        
+        #the second inserted havemask has to be returned first
+        # since it is newer
+        firstTuple = d[0]
+        self.assertEquals(channel, firstTuple[0])
+        self.assertEquals(havemask2,firstTuple[1])
+        self.assertTrue(firstTuple[2] is not None)
+        
+        self.assertEquals(peer_id, d[1][0])
+        self.assertEquals(havemask1,d[1][1])
+        self.assertTrue(d[1][2] is not None)
+        
+        
+    
+    def testCleanUpAllHave(self):
+        """cleanupOldHave drops have-masks older than the given timestamp,
+        except the one published by the channel owner itself."""
+        infohash1 = _generateFakeInfohash()
+        metadataDTO1 = MockMetadataDTO(["nld","spa"], infohash1)
+        channel1 = metadataDTO1.channel
+        
+        metadataDTO1.sign(metadataDTO1._keypair)
+        self.underTest.insertMetadata(metadataDTO1)
+        
+        infohash2 = _generateFakeInfohash()
+        metadataDTO2 = MockMetadataDTO(["nld","spa"], infohash2)
+        channel2 = metadataDTO2.channel
+        
+        metadataDTO2.sign(metadataDTO2._keypair)
+        self.underTest.insertMetadata(metadataDTO2)
+        
+        
+        peer_id1 = TestMetadataDBHandler.anotherPermId
+        peer_id2 = TestMetadataDBHandler.aPermId
+        
+        #inserting some data: 4 have masks for each of the two channels with
+        # custom timestamps
+        # older than 1275295300
+        self.underTest.insertHaveMask(channel1, infohash1, channel1, 0x42, 1275295290)
+        self.underTest.insertHaveMask(channel1, infohash1, peer_id1, 0x42, 1275295291)
+        # newer than 1275295300
+        self.underTest.insertHaveMask(channel1, infohash1, peer_id2, 0x42, 1275295300)
+        self.underTest.insertHaveMask(channel1, infohash1, channel2, 0x42, 1275295301)
+        
+        
+        # older than 1275295300
+        self.underTest.insertHaveMask(channel2, infohash2, channel1, 0x42, 1275295290)
+        self.underTest.insertHaveMask(channel2, infohash2, peer_id1, 0x42, 1275295291)
+        
+        # newer than 1275295300
+        self.underTest.insertHaveMask(channel2, infohash2, peer_id2, 0x42, 1275295300)
+        self.underTest.insertHaveMask(channel2, infohash2, channel2, 0x42, 1275295301)
+        
+        self.underTest.cleanupOldHave(1275295300)
+        # channel1's own (old) entry survives the cleanup; peer entries don't
+        haveForEntry1 = self.underTest.getHaveEntries(channel1, infohash1)
+        expectedList1 = [(channel2,0x42,1275295301), (peer_id2, 0x42, 1275295300), 
+                         (channel1, 0x42, 1275295290)]
+        self.assertEquals(expectedList1, haveForEntry1)
+        
+        haveForEntry2 = self.underTest.getHaveEntries(channel2, infohash2)
+        expectedList2 = [(channel2, 0x42, 1275295301),(peer_id2, 0x42, 1275295300)]
+        self.assertEquals(expectedList2,haveForEntry2)
+        
+        
+        
+        
+    
+    
+def _generateFakeInfohash():
+    """Return a pseudo-random 20-byte SHA1 digest usable as a fake infohash."""
+    hasher = hashlib.sha1()
+    hasher.update(str(random.randint(0,65535)))
+    return hasher.digest()
+
+def _computeSHA1(path):
+    hasher = hashlib.sha1()
+    with codecs.open(path, "rb", "utf-8") as file:
+        contents = file.read()
+    
+    hasher.update(contents)
+    return hasher.digest()
+        
+        
+
+
+class MockMetadataDTO(MetadataDTO):
+  
+    
+    def __init__(self, availableLangs, infohash = None):
+        
+        self._keypair = generate_keypair()
+        
+        self._permId = str(self._keypair.pub().get_der())
+        
+        if infohash == None :
+            hasher = hashlib.sha1()
+            hasher.update(self._permId + "a")
+            infohash = hasher.digest()
+        
+        self.channel = self._permId
+        self.infohash = infohash
+        self.description = u""
+        self.resetTimestamp()
+        self._subtitles = {}
+        
+        hasher = hashlib.sha1() #fake checksums for subs
+        
+        for lang in availableLangs:
+            hasher.update(lang + "123")
+            checksum = hasher.digest()
+            self.addSubtitle(SubtitleInfo(lang, None, checksum))
+            
+def suite():
+    """Build a TestSuite holding every TestMetadataDBHandler test."""
+    return unittest.TestLoader().loadTestsFromTestCase(TestMetadataDBHandler)
+            
+        
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.testInitHandler']
+    unittest.main()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/__init__.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/__init__.py
new file mode 100644 (file)
index 0000000..284cc10
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_Langugages.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_Langugages.py
new file mode 100644 (file)
index 0000000..e169f3a
--- /dev/null
@@ -0,0 +1,109 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+import unittest
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import *
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages \
+import _loadLanguages
+import os.path
+
+RES_DIR = os.path.join('..','..','..','subtitles_test_res')
+
+
+class LanguagesTest(unittest.TestCase):
+
+    #32 correct language mappings
+    PATH_TO_TEST_LANGUAGES = 'subs_languages.csv'
+    #one description is missing in the csv file
+    PATH_TO_TEST_LANGUAGES_WRONG1 = 'wrong_subs_languages.1.csv'
+    #one character code is invalid in the csv file
+    PATH_TO_TEST_LANGUAGES_WRONG2 = "wrong_subs_languages.2.csv"
+
+    def test_loadLangugas(self):
+        listOfLanguages = _loadLanguages(os.path.join(RES_DIR, LanguagesTest.PATH_TO_TEST_LANGUAGES))
+        self.assertTrue(len(listOfLanguages) == 32)
+        for key, val in listOfLanguages.iteritems():
+            self.assertTrue(len(key) == 3)
+            self.assertTrue(val is not None)
+        
+        self.assertRaises(ValueError,_loadLanguages, \
+            os.path.join(RES_DIR, LanguagesTest.PATH_TO_TEST_LANGUAGES_WRONG1))
+        
+        self.assertRaises(ValueError,_loadLanguages, \
+            os.path.join(RES_DIR, LanguagesTest.PATH_TO_TEST_LANGUAGES_WRONG2))
+        
+    def testLanguageInstance(self):
+        langInstance = Languages()
+        self.assertTrue(len(langInstance.supportedLanguages) == 32)
+        self.assertTrue(len(langInstance.langMappings) == 32)
+        #check if the mappings are all distinct values
+        bitmasksSet = set(langInstance.langMappings.values())
+        self.assertTrue(len(bitmasksSet) == 32)
+        
+    def testCorrectMapping(self):
+        langInstance = Languages()
+        #checking if they are distinct is not sufficient since  they
+        #can be assigned wrong values
+        fullMask = 0
+        for mask in langInstance.langMappings.values():
+            fullMask = fullMask ^ mask
+        
+        #since I know there are 32 different language masks
+        self.assertEqual(0xFFFFFFFF, fullMask)
+        
+    def testLangCodesToMask(self):
+        langInstance = Languages()
+         
+        codes = ["eng", "nld", "ita"]
+        # eng is 0x40
+        # nld is 0x80000
+        # ita is 0x2000
+        mask = langInstance.langCodesToMask(codes)
+        self.assertEquals((0x40 | 0x80000 | 0x2000), mask)
+    
+    def testLangCodesToMaskEmpty(self):
+        langInstance = Languages()
+        codes = []
+        mask = langInstance.langCodesToMask(codes)
+        self.assertEquals(0,mask)
+
+        
+        
+    def testInvalidLangCodesToMask(self):
+        langInstance = Languages()
+        
+        #gne is an invalid language code 
+        codes = ["eng", "nld", "gne"]
+        
+        self.assertRaises(ValueError, langInstance.langCodesToMask, codes)
+    
+    def testMaskToLangCodes(self):
+        langInstance = Languages()
+      
+        eng, nld, ita = 0x40, 0x80000, 0x2000
+        mask = eng | nld | ita
+        
+        codes = langInstance.maskToLangCodes(mask)
+        
+        self.assertEquals(set(codes), set(["eng","nld","ita"]))
+        
+        remask = 0
+        for code in codes:
+            remask = remask | langInstance.langMappings[code]
+        
+        self.assertEquals(mask, remask)
+        
+    def testMaskToLangCodesLongerMask(self):
+        langInstance = Languages()
+        mask = 0x1FFFFFFFF #36 bits!
+        
+        self.assertRaises(AssertionError, langInstance.maskToLangCodes,(mask,))
+        
+
+
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(LanguagesTest)
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.test_loadLangugas']
+    unittest.main()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_MetadataDTO.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_MetadataDTO.py
new file mode 100644 (file)
index 0000000..ed086f9
--- /dev/null
@@ -0,0 +1,192 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+import unittest
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
+import BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO as MDUtil
+from BaseLib.Core.Overlay.permid import generate_keypair
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str
+import time
+from BaseLib.Core.BitTornado.bencode import bdecode
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import LanguagesProvider
+from BaseLib.Core.Utilities.utilities import uintToBinaryString,\
+    binaryStringToUint, str2bin
+import os.path
+
+RES_DIR = os.path.join('..','..','..','subtitles_test_res')
+
+# Module-level keypair shared by all tests; its DER-encoded public key
+# doubles as the test channel/perm id.
+test_keypair = generate_keypair()
+test_perm_id = str(test_keypair.pub().get_der())
+
+
+class TestMetadataDTO(unittest.TestCase):
+    """Tests for MetadataDTO construction, bencoding, signing and
+    (de)serialization."""
+
+    def setUp(self):
+        # lang code -> path of a fixture .srt file used as subtitle content
+        self._srtSubs = {"eng": os.path.join(RES_DIR, "fake.srt"),"ita": os.path.join(RES_DIR,"fake1.srt"), "rus" : os.path.join(RES_DIR, "fake2.srt")}
+    def testMetadataDTOInit(self):
+        """A fresh DTO carries channel/infohash, a ~now timestamp, empty
+        description and subtitles, and no signature."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        self.assertFalse(dto is None)
+        self.assertEqual(test_perm_id,dto.channel)
+        self.assertEquals(badInfohash,dto.infohash)
+        current = time.time()
+        self.assertTrue(current -1 <= int(dto.timestamp) <= current)
+        self.assertEquals("",dto.description)
+        self.assertEquals({}, dto._subtitles)
+        self.assertTrue(dto.signature is None)
+        
+    def test_packData(self):
+        """_packData bencodes (channel, infohash, description, timestamp,
+        empty language bitmask, empty checksum list)."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        dto.description = u"Sample Description\u041f"
+        
+        bla = dto._packData()
+        decoded = bdecode(bla)
+        
+        self.assertTrue(len(decoded) == 6)
+        decodedChannelId = decoded[0]
+        decodedInfohash = decoded[1]
+        decodedDescription = decoded[2].decode("utf-8")
+        decodedTimestamp = decoded[3]
+        bin_decodedBitmask = decoded[4]
+        decodedBitmask = binaryStringToUint(bin_decodedBitmask)
+        self.assertEquals(dto.channel, decodedChannelId)
+        self.assertEquals(dto.infohash, decodedInfohash)
+        self.assertEquals(dto.description,decodedDescription)
+        self.assertAlmostEquals(dto.timestamp,decodedTimestamp)
+        self.assertEquals(0,decodedBitmask)
+        self.assertEquals(0,len(decoded[5]))
+        
+    def test_packDataWithSubs(self):
+        """With subtitles attached, _packData encodes the language bitmask
+        and one checksum per subtitle, ordered by language code."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        
+        subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
+        
+        for sub in subtitles :
+            sub.computeChecksum()
+            dto.addSubtitle(sub)
+        
+        packed = dto._packData()
+        decoded = bdecode(packed)
+        
+        self.assertTrue(len(decoded) == 6)
+        decodedChannelId = decoded[0]
+        decodedInfohash = decoded[1]
+        decodedDescription = decoded[2]
+        decodedTimestamp = decoded[3]
+        decodedBitmask = decoded[4]
+        checksums = decoded[5]
+        
+        expectedMask = \
+          LanguagesProvider.getLanguagesInstance().langCodesToMask(self._srtSubs.keys())
+          
+        binaryExpexted = uintToBinaryString(expectedMask)
+        
+        self.assertEquals(dto.channel, decodedChannelId)
+        self.assertEquals(dto.infohash, decodedInfohash)
+        self.assertEquals(dto.description,decodedDescription)
+        self.assertAlmostEquals(dto.timestamp,decodedTimestamp)
+        self.assertEquals(binaryExpexted,decodedBitmask)
+        self.assertEquals(3,len(checksums))
+        
+        # checksums must appear in sorted-language order
+        subs = dto.getAllSubtitles()
+        i=0
+        for key in sorted(subs.iterkeys()):
+            self.assertEquals(subs[key].checksum, checksums[i])
+            i += 1
+            
+
+    
+    def testSignature(self):
+        """A signature verifies until any signed field (timestamp) changes."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        
+        dto.sign(test_keypair)
+        self.assertTrue(dto.verifySignature())
+        dto.timestamp = 2
+        ok = dto.verifySignature()
+        self.assertFalse(ok)
+    
+    def testSignatureOnChecksums(self):
+        """Subtitle checksums are covered by the signature: tampering with
+        one invalidates it."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        
+        subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
+        
+        for sub in subtitles :
+            sub.computeChecksum()
+            dto.addSubtitle(sub)
+        
+        
+        dto.sign(test_keypair)
+        self.assertTrue(dto.verifySignature())
+        
+        dto.getSubtitle("rus").checksum = "ABCDEFGHILMOPQRS"
+        
+        self.assertFalse(dto.verifySignature())
+    
+    def testSerialize(self):
+        """serialize() yields 7 fields; the 7th is the signature."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        dto.description = u"Sample Description"
+        dto.sign(test_keypair)
+        
+        serialized = dto.serialize()
+        self.assertEquals(7, len(serialized))
+        signature = serialized[6]
+        self.assertEquals(dto.signature,signature)
+        #the rest is tested with test_packData
+    
+    def testSerializeWithSubs(self):
+        """serialize() with subtitles still yields 7 fields + signature."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        
+        subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
+        
+        for sub in subtitles :
+            sub.computeChecksum()
+            dto.addSubtitle(sub)
+        dto.sign(test_keypair)
+        
+        serial = dto.serialize()
+        # NOTE(review): 'decoded' is a pointless alias of 'serial' —
+        # serialize() apparently returns the already-decoded sequence
+        decoded = serial
+        self.assertEquals(7, len(decoded))
+        signature = decoded[6]
+        self.assertEquals(dto.signature,signature)
+        #the rest is tested with test_packDataWithSubs
+        
+        
+    def testDesrialize(self):
+        """deserialize(serialize(dto)) round-trips to an equal DTO.
+        (Method name has a typo — "Desrialize" — kept for stability.)"""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        dto.description = u"Sample Description"
+        dto.sign(test_keypair)
+        
+        serialized = dto.serialize()
+        newDto = MDUtil.deserialize(serialized)
+        self.assertEquals(dto,newDto)
+        
+    def testDeserializeWithSubs(self):
+        """Round-trip equality also holds when subtitles are attached."""
+        badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
+        dto = MetadataDTO(test_perm_id, badInfohash)
+        
+        subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
+        
+        for sub in subtitles :
+            sub.computeChecksum()
+            dto.addSubtitle(sub)
+        dto.sign(test_keypair)
+        
+        serial = dto.serialize()
+        newDto = MDUtil.deserialize(serial)
+        self.assertEquals(dto,newDto)
+        
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(TestMetadataDTO)
+
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.testMetadataDTOInit']
+    unittest.main()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_Subtitle.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/MetadataDomainObjects/test_Subtitle.py
new file mode 100644 (file)
index 0000000..0427be1
--- /dev/null
@@ -0,0 +1,56 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+import unittest
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+import codecs
+import hashlib
+import os
+
+RES_DIR = os.path.join('..','..','..','subtitles_test_res')
+
+PATH_TO_SRT = "fake.srt"
+
+class SubtitlesTest(unittest.TestCase):
+    # Unit tests for SubtitleInfo: construction, checksum computation and
+    # verification, and existence checks against a file on disk.
+    def testInitialization(self):
+        sub = SubtitleInfo("eng","fakepath")
+        self.assertFalse(sub is None)
+        self.assertFalse(sub.subtitleExists())
+        self.assertRaises(AssertionError, sub.computeChecksum)  # no file on disk yet
+        
+    def testChecksums(self):
+        path = os.path.join(RES_DIR,PATH_TO_SRT)
+        sub = SubtitleInfo("ita",path)
+        #I know from the outside that the correct sha1 is
+        # eb8ada2a2094675ea047c27207e449fbfce04e85
+        sha1Hasher = hashlib.sha1()
+        with codecs.open(path, "rb", "utf-8") as subFile:  # NOTE(review): "rb" combined with a codec is unusual -- confirm intent
+            sha1Hasher.update(subFile.read())
+        expectedChecksum = sha1Hasher.digest()
+        
+        # SubtitleInfo must compute the same digest over the same file
+        sub.computeChecksum()
+        
+        self.assertEquals(expectedChecksum,
+                          sub.checksum)
+        
+        self.assertTrue(sub.verifyChecksum())
+    
+    def testSubsExists(self):
+        path = os.path.join(RES_DIR,PATH_TO_SRT)
+        sub = SubtitleInfo("rus","fakepath")
+        self.assertFalse(sub.subtitleExists())
+        sub.path = os.path.abspath(path)  # point at a real file; now it exists
+        self.assertTrue(sub.subtitleExists())
+        
+    
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(SubtitlesTest)  # every test in this module
+
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.testInitialization']
+    unittest.main()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/__init__.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/__init__.py
new file mode 100644 (file)
index 0000000..bdef2ba
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/test_DiskManager.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/test_DiskManager.py
new file mode 100644 (file)
index 0000000..c6276c9
--- /dev/null
@@ -0,0 +1,239 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+import unittest
+from BaseLib.Core.Subtitles.SubtitleHandler.DiskManager import DiskManager, DISK_FULL_DELETE_SOME, DELETE_OLDEST_FIRST, DELETE_NEWEST_FIRST,\
+    DISK_FULL_REJECT_WRITES
+import os
+import codecs
+from BaseLib.Core import osutils
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import DiskManagerException
+
+RES_DIR = os.path.join('..','..','..','subtitles_test_res')
+TEST_DIR1 = "test1"
+TEST_DIR2 = "test2"
+
+class TestDiskManager(unittest.TestCase):
+
+    def setUp(self):
+        self.undertest = DiskManager()
+        self.path1 = os.path.abspath(os.path.join(RES_DIR,TEST_DIR1))
+        if not os.path.isdir(self.path1):
+            os.mkdir(self.path1)
+        
+        self.path2 = os.path.abspath(os.path.join(RES_DIR,TEST_DIR2))
+        if not os.path.isdir(self.path2):
+            os.mkdir(self.path2)
+    
+    def tearDown(self):
+        if os.path.isdir(self.path1):
+            for xfile in os.listdir(self.path1):
+                os.remove(os.path.join(self.path1,xfile))
+        if os.path.isdir(self.path2):
+            for xfile in os.listdir(self.path2):
+                os.remove(os.path.join(self.path2,xfile))
+        
+
+    def testDiskManagerOneClientEasy(self):
+        
+        self.undertest.registerDir(self.path1) #using default config
+        content1 = "Some Content\n\taaa\n"
+        res =self.undertest.writeContent(self.path1, "test1.txt", content1)
+        self.assertTrue(res)
+        expectedPath =os.path.join(self.path1,"test1.txt")
+        self.assertTrue(os.path.isfile(expectedPath))
+        
+        with codecs.open(expectedPath, "rb", "utf-8") as toRead:
+            readContent = toRead.read()
+        
+        self.assertEquals(content1,readContent)
+        
+        cont = self.undertest.readContent(self.path1, "test1.txt")
+        self.assertTrue(isinstance(cont, basestring))
+        self.assertEquals(content1,cont)
+    
+    def testMinAvailableSpaceReject(self):
+        totalFreeSpace = osutils.getfreespace(self.path1) / 1024.0
+        minAvailableSpace = totalFreeSpace - 16.0 #enough space for the first content
+        self.undertest = DiskManager(minAvailableSpace, RES_DIR)
+        self.undertest.registerDir(self.path1) #using default config
+        content1 = "Some Content\n\taaa\n"
+        
+        self.undertest.writeContent(self.path1, "test1.txt", content1)
+        
+        expectedPath =os.path.join(self.path1,"test1.txt")
+        self.assertTrue(os.path.isfile(expectedPath))
+        
+        with codecs.open(expectedPath, "rb", "utf-8") as toRead:
+            readContent = toRead.read()
+        
+        self.assertEquals(content1,readContent)
+        
+        acc = []
+        for i in range(10*2**10):
+            acc.append("aaaaaaaaaa") 
+        content2 = "".join(acc) #a 100K string
+        #the next write should be rejected
+        self.assertRaises(DiskManagerException, self.undertest.writeContent,
+                          self.path1, "test2.txt", content2)
+        unexpectedPath =os.path.join(self.path1,"test2.txt")
+        self.assertFalse(os.path.exists(unexpectedPath))
+    
+    def testMinAvailableSpaceDeleteOldest(self):
+        # with DELETE_OLDEST_FIRST, a write past the space limit evicts old files
+        acc = []
+        for i in range(30*2**10):
+            acc.append("aaaaaaaaaa") 
+        content1 = "".join(acc) #a 300K string
+        
+        totalFreeSpace = osutils.getfreespace(self.path1) / 1024.0
+        minAvailableSpace = totalFreeSpace - 316 #enough space for the first content
+        self.undertest = DiskManager(minAvailableSpace, RES_DIR)
+        
+        config = {"maxDiskUsage" : -1, 
+                  "diskPolicy" : DISK_FULL_DELETE_SOME | DELETE_OLDEST_FIRST,
+                  "encoding" : "utf-8"}
+        
+        self.undertest.registerDir(self.path1,config)
+        
+        self.undertest.writeContent(self.path1, "test1.txt", content1)
+        
+        expectedPath =os.path.join(self.path1,"test1.txt")
+        self.assertTrue(os.path.isfile(expectedPath))
+
+
+        content2 = "".join(acc[0:(6*2**10)-1]) #60KB string
+        #the next write should evict the oldest file (test1.txt) and then succeed
+        res = self.undertest.writeContent(self.path1, "test2.txt", content2)
+        self.assertTrue(res)
+        unexpectedPath =os.path.join(self.path1,"test1.txt")
+        self.assertFalse(os.path.exists(unexpectedPath))
+        expectedPath = os.path.join(self.path1,"test2.txt")
+        self.assertTrue(os.path.isfile(expectedPath))
+        
+    def testMinAvailableSpaceDeleteOldest2(self):
+        '''
+        Warning: this method is really system dependent, and not fully
+        deterministic. A bad test method, overall.
+        '''
+#        acc = []
+#        for i in range(30*2**10):
+#            acc.append("aaaaaaaaaa") 
+#        content1 = "".join(acc) #a 300K string
+#        
+#        totalFreeSpace = osutils.getfreespace(self.path1) / 1024.0
+#        minAvailableSpace = totalFreeSpace - 608 #enough space for two 300 K contents
+#        self.undertest = DiskManager(minAvailableSpace, BASE_DIR)
+#        
+#        config = {"maxDiskUsage" : -1, 
+#                  "diskPolicy" : DISK_FULL_DELETE_SOME | DELETE_OLDEST_FIRST,
+#                  "encoding" : "utf-8"}
+#        
+#        self.undertest.registerDir(self.path1,config)
+#        
+#        self.undertest.writeContent(self.path1, "test1.txt", content1)
+#        
+#        
+#        expectedPath =os.path.join(self.path1,"test1.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+#        
+#        self.undertest.writeContent(self.path1, "test2.txt", content1)
+#        
+#        
+#        expectedPath =os.path.join(self.path1,"test2.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+#
+#
+#        content2 = "".join(acc[0:(6*2**10)-1]) #60KB string
+#        #the next write should be rejected
+#        res = self.undertest.writeContent(self.path1, "test3.txt", content2)
+#        self.assertTrue(res)
+#        unexpectedPath =os.path.join(self.path1,"test1.txt")
+#        self.assertFalse(os.path.exists(unexpectedPath))
+#        expectedPath = os.path.join(self.path1,"test2.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+#        expectedPath = os.path.join(self.path1,"test3.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+        pass
+        
+    def testMinAvailableSpaceDeleteNewest(self):
+        '''
+        Warning: this method is really system dependent, and not fully
+        deterministic. A bad test method, overall.
+        '''
+#        acc = []
+#        for i in range(30*2**10):
+#            acc.append("aaaaaaaaaa") 
+#        content1 = "".join(acc) #a 300K string
+#        
+#        totalFreeSpace = osutils.getfreespace(self.path1) / 1024.0
+#        minAvailableSpace = totalFreeSpace - 608 #enough space for two 300 K contents
+#        self.undertest = DiskManager(minAvailableSpace, BASE_DIR)
+#        
+#        config = {"maxDiskUsage" : -1, 
+#                  "diskPolicy" : DISK_FULL_DELETE_SOME | DELETE_NEWEST_FIRST,
+#                  "encoding" : "utf-8"}
+#        
+#        self.undertest.registerDir(self.path1,config)
+#        
+#        self.undertest.writeContent(self.path1, "test1.txt", content1)
+#        
+#        
+#        expectedPath =os.path.join(self.path1,"test1.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+#        
+#        self.undertest.writeContent(self.path1, "test2.txt", content1)
+#        
+#        
+#        expectedPath =os.path.join(self.path1,"test2.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+#
+#
+#        content2 = "".join(acc[0:(6*2**10)-1]) #60KB string
+#        #the next write should be rejected
+#        res = self.undertest.writeContent(self.path1, "test3.txt", content2)
+#        self.assertTrue(res)
+#        unexpectedPath =os.path.join(self.path1,"test2.txt")
+#        self.assertFalse(os.path.exists(unexpectedPath))
+#        expectedPath = os.path.join(self.path1,"test1.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+#        expectedPath = os.path.join(self.path1,"test3.txt")
+#        self.assertTrue(os.path.isfile(expectedPath))
+        pass
+    
+    def testMaxOccupiedSpaceRejectWrites(self):
+        acc = []
+        for i in range(30*2**10):
+            acc.append("aaaaaaaaaa") 
+        content1 = "".join(acc) #a 300K string
+        
+        self.undertest = DiskManager(0, RES_DIR)
+        
+        config = {"maxDiskUsage" : 320, #sufficient only for the first write 
+                  "diskPolicy" : DISK_FULL_REJECT_WRITES,
+                  "encoding" : "utf-8"}
+        
+        self.undertest.registerDir(self.path1,config)
+        
+        self.undertest.writeContent(self.path1, "test1.txt", content1)
+        
+        
+        expectedPath =os.path.join(self.path1,"test1.txt")
+        self.assertTrue(os.path.isfile(expectedPath))
+        
+        content2 = "".join(acc[0:(6*2**10)-1]) #60KB string
+        #the next write should be rejected
+        self.assertRaises(DiskManagerException,self.undertest.writeContent,
+                          self.path1, "test2.txt", content2)
+
+        unexpectedPath =os.path.join(self.path1,"test2.txt")
+        self.assertFalse(os.path.exists(unexpectedPath))
+
+
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(TestDiskManager)
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.testName']
+    unittest.main()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/test_SubtitleMsgHandlerIsolation.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/SubtitleHandler/test_SubtitleMsgHandlerIsolation.py
new file mode 100644 (file)
index 0000000..f2889b6
--- /dev/null
@@ -0,0 +1,385 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+import unittest
+import logging
+import time
+from BaseLib.Core.Subtitles.SubtitleHandler.SubsMessageHandler import SubsMessageHandler
+from BaseLib.Test.Core.Subtitles.simple_mocks import MockOverlayBridge,\
+    MockTokenBucket, MockMsgListener
+from BaseLib.Core.Overlay.permid import generate_keypair
+from BaseLib.Core.Utilities.Crypto import sha
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import LanguagesProvider
+from BaseLib.Core.BitTornado.BT1.MessageID import GET_SUBS, SUBS
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.Utilities.utilities import uintToBinaryString
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FOURTEENTH
+
+
+logging.basicConfig(level=logging.DEBUG)
+_keypairs = (generate_keypair(), generate_keypair(), generate_keypair())
+testChannelId = str(_keypairs[0].pub().get_der())
+testDestPermId = str(_keypairs[1].pub().get_der())
+testMyPermId = str(_keypairs[2].pub().get_der())
+
+testInfohash = sha("yoman!").digest()
+
+class TestSubtitlesMsgHandlerIsolation(unittest.TestCase):
+    
+    def setUp(self):
+        self.ol_bridge = MockOverlayBridge()
+        self.tokenBucket = MockTokenBucket()
+        self.underTest = SubsMessageHandler(self.ol_bridge,self.tokenBucket,1000000)
+        
+    def test_addToRequestedSubtitles(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        bitmask1 = langUtil.langCodesToMask(["nld"])
+        
+        self.underTest._addToRequestedSubtitles(testChannelId,
+                                                testInfohash, bitmask1)
+        
+        key = "".join((testChannelId, testInfohash))
+        self.assertEquals(bitmask1,
+                          self.underTest.requestedSubtitles[
+                                                            key
+                                                            ].cumulativeBitmask)
+        
+        bitmask2 = langUtil.langCodesToMask(["jpn", "ita"])
+        self.underTest._addToRequestedSubtitles(testChannelId,
+                                                testInfohash, bitmask2)
+        
+        self.assertEquals(bitmask1 | bitmask2,
+                          self.underTest.requestedSubtitles[
+                                                            key
+                                                            ].cumulativeBitmask)
+        
+        removeBitmask = langUtil.langCodesToMask(["nld", "ita"])
+        self.underTest._removeFromRequestedSubtitles(testChannelId,
+                                                       testInfohash,
+                                                       removeBitmask)
+        
+        codes = langUtil.maskToLangCodes(self.underTest.requestedSubtitles[
+                                                            key
+                                                            ].cumulativeBitmask)
+        
+        self.assertEquals(["jpn"], codes)
+        
+    def testSendSubtitlesRequestConnected(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        request = {}
+        request['channel_id'] = testChannelId
+        request['infohash'] = testInfohash
+        request['languages'] = ["kor"]
+        self.underTest.sendSubtitleRequest(testDestPermId, request, None, None, OLPROTO_VER_FOURTEENTH)
+        
+        self.assertEquals(0, self.ol_bridge.connect_count) #selversion already known: no connect() needed
+        self.assertEquals(1, self.ol_bridge.send_count) #send called one time 
+        # the GET_SUBS message must carry (channel, infohash, language bitmask)
+        binaryBitmask = uintToBinaryString(langUtil.langCodesToMask(["kor"]))
+        expectedMsg = GET_SUBS + \
+                      bencode((
+                              testChannelId,
+                              testInfohash,
+                              binaryBitmask
+                              ))
+        passedParameters = self.ol_bridge.sendParametersHistory[0]
+        self.assertEquals(testDestPermId, passedParameters[0])
+        self.assertEquals(expectedMsg, passedParameters[1])
+        
+    def testSendSubtitlesRequestNotConnected(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        
+        request = {}
+        request['channel_id'] = testChannelId
+        request['infohash'] = testInfohash
+        request['languages'] = ["kor"]
+        
+        self.underTest.sendSubtitleRequest(testDestPermId, request)
+        
+        self.assertEquals(1, self.ol_bridge.connect_count) #selversion was -1
+
+        self.assertEquals(1, self.ol_bridge.send_count) #send called one time 
+        
+        binaryBitmask = uintToBinaryString(langUtil.langCodesToMask(["kor"]))
+        expectedMsg = GET_SUBS + \
+                      bencode((
+                              testChannelId,
+                              testInfohash,
+                              binaryBitmask
+                              ))
+        passedParameters = self.ol_bridge.sendParametersHistory[0]
+        self.assertEquals(testDestPermId, passedParameters[0])
+        self.assertEquals(expectedMsg, passedParameters[1])
+        
+        
+    def test_decodeGETSUBSMessage(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        binaryBitmask = uintToBinaryString(langUtil.langCodesToMask(["kor", "spa"]))
+        
+        bencodedMessage = GET_SUBS + \
+                      bencode((
+                              testChannelId,
+                              testInfohash,
+                              binaryBitmask
+                              ))
+        channel_id, infohash, languages = \
+            self.underTest._decodeGETSUBSMessage(bencodedMessage)
+            
+        self.assertEquals(testChannelId, channel_id)
+        self.assertEquals(testInfohash, infohash)
+        self.assertEquals(["kor", "spa"], languages)
+        
+    def test_decodeGETSUBSMessageInvalid(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        
+        binaryBitmask = uintToBinaryString(langUtil.langCodesToMask(["kor", "spa"]))
+        invalidTypeMsg = chr(25) + \
+                      bencode((
+                              testChannelId,
+                              testInfohash,
+                              binaryBitmask
+                              ))
+        
+        self.assertRaises(AssertionError, self.underTest._decodeGETSUBSMessage,
+                           (invalidTypeMsg,))
+        
+        invalidMsgField = GET_SUBS + \
+                         bencode((
+                              42,
+                              testChannelId,
+                              testInfohash,
+                              binaryBitmask
+                              ))
+              
+        decoded = \
+            self.underTest._decodeGETSUBSMessage(invalidMsgField)
+        #when something in the body is wrong returns None
+        self.assertTrue(decoded is None)
+        
+        invalidBitamsk = uintToBinaryString(0xFFFFFFFF11, 5)
+        invalidMsgField = GET_SUBS + \
+                         bencode((
+                              testChannelId,
+                              testInfohash,
+                              invalidBitamsk #40 bit bitmask!)
+                              ))
+        
+        decoded = \
+            self.underTest._decodeGETSUBSMessage(invalidMsgField)
+            
+        #when something in the body is wrong returns None
+        self.assertTrue(decoded is None)
+        
+            
+    def test_createSingleResponseMessage(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        data = {
+                'permid' : testDestPermId,
+                'channel_id' : testChannelId,
+                'infohash' : testInfohash,
+                'subtitles' : {"eng" : "This is content 1", "nld": "This is content  2",
+                               "ita" : "This is content 3"},
+                'selversion' : OLPROTO_VER_FOURTEENTH
+                }
+        langs = data['subtitles'].keys()
+        
+        bitmask = langUtil.langCodesToMask(langs)
+        binaryBitmask = uintToBinaryString(bitmask, length=4)
+        expextedMessage = SUBS + \
+                            bencode((
+                                    data['channel_id'],
+                                    data['infohash'],
+                                    binaryBitmask,
+                                    [data['subtitles']['eng'], data['subtitles']['ita'],
+                                     data['subtitles']['nld']]
+                                     ))
+        msg = self.underTest._createSingleResponseMessage(data)
+        decoded = bdecode(msg[1:])
+        
+        self.assertEquals(expextedMessage, msg)
+        
+        
+            
+    def test_receivedGETSUBSSimple(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        bitmask = langUtil.langCodesToMask(["eng", "rus"])
+        binaryBitmask = uintToBinaryString(bitmask, length=4)
+        request = GET_SUBS + \
+                      bencode((
+                              testChannelId,
+                              testInfohash,
+                              binaryBitmask
+                              ))
+        
+        
+        
+        list = MockMsgListener()
+        
+        self.underTest.registerListener(list)
+        self.underTest.handleMessage(testDestPermId, OLPROTO_VER_FOURTEENTH, request)
+        
+        self.assertEquals(1,list.receivedCount)
+        self.assertEquals(testDestPermId, list.receivedParams[0][0])
+        self.assertEquals(OLPROTO_VER_FOURTEENTH,list.receivedParams[0][2])
+        self.assertEquals((testChannelId,testInfohash,["eng","rus"]),list.receivedParams[0][1])
+        
+    def test_receivedGETSUBSInvalid1(self):
+        bitmask = -1
+        request = GET_SUBS + \
+                      bencode((
+                              testChannelId,
+                              testInfohash,
+                              bitmask
+                              ))
+        
+        
+        
+        list = MockMsgListener()
+        
+        self.underTest.registerListener(list)
+        val = self.underTest.handleMessage(testDestPermId, OLPROTO_VER_FOURTEENTH, request)
+        
+        self.assertFalse(val)
+        self.assertEquals(0,list.receivedCount) #the invalid msg has been dropped
+        
+    def test_receivedGETSUBSInvalid2(self):
+        bitmask = -1
+        request = GET_SUBS + \
+                      bencode((
+                              testChannelId,
+                              testInfohash,
+                              bitmask
+                              ))
+        
+        
+        
+        list = MockMsgListener()
+        
+        self.underTest.registerListener(list)
+        val = self.underTest.handleMessage(testDestPermId, 13,request)
+        
+        self.assertFalse(val)
+        self.assertEquals(0,list.receivedCount) #the invalid msg has been dropped
+        
+    def test_receivedSUBSSimpleNoRequest(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        data = {
+                'permid' : testDestPermId,
+                'channel_id' : testChannelId,
+                'infohash' : testInfohash,
+                'subtitles' : {"eng" : "This is content 1", "nld": "This is content  2",
+                               "ita" : "This is content 3"},
+                'selversion' : OLPROTO_VER_FOURTEENTH
+                }
+        langs = data['subtitles'].keys()
+        
+        bitmask = langUtil.langCodesToMask(langs)
+        binaryBitmask = uintToBinaryString(bitmask, length=4)
+        expextedMessage = SUBS + \
+                            bencode((
+                                    data['channel_id'],
+                                    data['infohash'],
+                                    binaryBitmask,
+                                    [data['subtitles']['eng'], data['subtitles']['ita'],
+                                     data['subtitles']['nld']]
+                                     ))
+        
+        list = MockMsgListener()
+        self.underTest.registerListener(list)                    
+        val = self.underTest.handleMessage(testDestPermId, OLPROTO_VER_FOURTEENTH, expextedMessage)
+        # never had a request for this message should be dropped
+        self.assertFalse(val)
+        self.assertEquals(0,list.subsCount)
+        
+    def test_receivedSUBSOtherRequest(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        data = {
+                'permid' : testDestPermId,
+                'channel_id' : testChannelId,
+                'infohash' : testInfohash,
+                'subtitles' : {"eng" : "This is content 1", "nld": "This is content  2",
+                               "ita" : "This is content 3"},
+                'selversion' : OLPROTO_VER_FOURTEENTH
+                }
+        langs = data['subtitles'].keys()
+        
+        bitmask = langUtil.langCodesToMask(langs)
+        binaryBitmask = uintToBinaryString(bitmask, length=4)
+        expextedMessage = SUBS + \
+                            bencode((
+                                    data['channel_id'],
+                                    data['infohash'],
+                                    binaryBitmask,
+                                    [data['subtitles']['eng'], data['subtitles']['ita'],
+                                     data['subtitles']['nld']]
+                                     ))
+        
+        list = MockMsgListener()
+        self.underTest.registerListener(list) 
+        
+        #register a request for the COMPLEMENT bitmask: no overlap with the received languages
+        self.underTest._addToRequestedSubtitles(testChannelId, testInfohash, int(0xFFFFFFFF & ~bitmask), None)                   
+        
+        val = self.underTest.handleMessage(testDestPermId, OLPROTO_VER_FOURTEENTH, expextedMessage)
+        # request bitmask does not match the message: it must be dropped
+        self.assertFalse(val)
+        self.assertEquals(0,list.subsCount)
+        
+    def test_receivedSUBSSomeRequest(self):
+        langUtil = LanguagesProvider.getLanguagesInstance()
+        data = {
+                'permid' : testDestPermId,
+                'channel_id' : testChannelId,
+                'infohash' : testInfohash,
+                'subtitles' : {"eng" : "This is content 1", "nld": "This is content  2",
+                               "ita" : "This is content 3"},
+                'selversion' : OLPROTO_VER_FOURTEENTH
+                }
+        langs = data['subtitles'].keys()
+        
+        bitmask = langUtil.langCodesToMask(langs)
+        binaryBitmask = uintToBinaryString(bitmask, length=4)
+        
+        expextedMessage = SUBS + \
+                            bencode((
+                                    data['channel_id'],
+                                    data['infohash'],
+                                    binaryBitmask,
+                                    [data['subtitles']['eng'], data['subtitles']['ita'],
+                                     data['subtitles']['nld']]
+                                     ))
+        
+        list = MockMsgListener()
+        self.underTest.registerListener(list) 
+        
+        #request only "ita": the handler must accept the message but keep just that language
+        self.underTest._addToRequestedSubtitles(testChannelId, testInfohash, langUtil.langCodesToMask(["ita"]), None)                   
+        
+        val = self.underTest.handleMessage(testDestPermId, OLPROTO_VER_FOURTEENTH, expextedMessage)
+        # a matching request exists, so the message is accepted
+        self.assertTrue(val)
+        self.assertEquals(1,list.subsCount)
+        # only the requested language reaches the listener
+        params = list.subsParams[0]
+        channel_id, infohash, contentsDictionary = params[1]
+        self.assertEquals(testChannelId,channel_id)
+        self.assertEquals(testInfohash, infohash)
+        contentKeys = contentsDictionary.keys()
+        self.assertEquals(["ita"],contentKeys)
+        
+    def test_cleanSUSRequests(self):
+        # expired pending requests must be removed by the cleanup routine
+        self.underTest._requestValidityTime = 0.001 #ds -- NOTE(review): unit unclear, confirm against SubsMessageHandler
+        self.underTest._addToRequestedSubtitles(testChannelId, testInfohash, 3, None)
+        self.assertEquals(1,len(self.underTest.requestedSubtitles))
+        time.sleep(1.2)  # let the request expire
+        self.underTest._cleanUpRequestedSubtitles()
+        self.assertEquals(0,len(self.underTest.requestedSubtitles))
+        
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(TestSubtitlesMsgHandlerIsolation)     
+        
+        
+        
+        
+    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/__init__.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/__init__.py
new file mode 100644 (file)
index 0000000..284cc10
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/simple_mocks.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/simple_mocks.py
new file mode 100644 (file)
index 0000000..48b90bf
--- /dev/null
@@ -0,0 +1,287 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+import logging
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
+import time
+import os.path
+from BaseLib.Core.Utilities.Crypto import sha
+
+RES_DIR = os.path.join('..','..','subtitles_test_res')
+
+
+
+
+log = logging.getLogger(__name__)
+
class MockOverlayBridge(object):
    """Test double for the overlay bridge that records every call.

    Each operation increments a counter and archives its arguments so
    tests can assert on how the bridge was used.  Callbacks are invoked
    immediately, simulating an always-successful overlay.
    """

    def __init__(self):
        self.send_count = 0
        self.sendParametersHistory = []
        self.connect_count = 0
        self.connectParametersHistory = []
        self.add_task_count = 0
        self.add_taskParametersHistory = []

    def send(self, permid, msg, callback):
        """Record the send and immediately report success to the callback."""
        log.debug("MockBridge: Message " + msg + " sent to " +
                  show_permid_short(permid))
        self.send_count += 1
        self.sendParametersHistory.append((permid, msg, callback))
        callback(None, permid)

    def connect(self, permid, callback):
        """Record the connect and report success at overlay version 14."""
        log.debug("MockBridge: Connected to " +
                  show_permid_short(permid))
        self.connect_count += 1
        self.connectParametersHistory.append((permid, callback))
        callback(None, None, permid, 14)

    def add_task(self, task, t=0):
        """Record a scheduled task instead of actually scheduling it."""
        log.debug("MockBridge: added task %s to be scheduled in %d seconds" %
                  (str(task), t))
        self.add_task_count += 1
        self.add_taskParametersHistory.append((task, t))
+
+
+
class ConnectedMockOverlayBridge(MockOverlayBridge):
    """MockOverlayBridge variant that also mirrors outgoing messages onto
    an attached overlay connection (when one is set).

    Fixes over the original:
    - ``__init__`` now chains to ``MockOverlayBridge.__init__`` so the
      call counters and histories exist; previously they were never
      initialised, so the first inherited call raised AttributeError.
    - ``send`` no longer passes ``self`` twice when delegating to the
      superclass (``super(...).send(self, permid, ...)`` shifted every
      argument by one).
    """

    def __init__(self):
        super(ConnectedMockOverlayBridge, self).__init__()
        # Optional overlay connection; when set, messages are forwarded
        # to it before being recorded by the base class.
        self.ol_connection = None

    def send(self, permid, msg, callback):
        """Mirror the message on the attached connection, then record it."""
        if self.ol_connection is not None:
            self.ol_connection.send(msg)
        super(ConnectedMockOverlayBridge, self).send(permid, msg, callback)
+    
class MockTokenBucket:
    """Token-bucket stand-in whose consume() outcome is scripted via the
    ``sufficientTokens`` flag."""

    def __init__(self):
        # Flip to False to simulate an exhausted bucket.
        self.sufficientTokens = True

    def consume(self, howManyTokens):
        """Pretend to consume tokens; the requested amount is ignored."""
        return self.sufficientTokens
+
+
class MockMetadataDBHandler(object):
    """In-memory stand-in for the rich-metadata database handler.

    Every public method bumps a call counter and archives its arguments
    so tests can assert on usage; query methods return canned
    SubtitleInfo / MetadataDTO fixtures instead of hitting a database.
    """
        
    def __init__(self):
        
        # When False, getMetadata() simulates "no metadata stored".
        self.returnMetadata = True
        self.getAllSubsCount = 0
        self.getAllSubsParametersHistory = list()
        
        self.getMetadataCount = 0
        # NB: attribute name keeps the original "Parametes" typo because
        # tests elsewhere read it by this name.
        self.getMetadataParametesHistory = list()
        
        self.updateSubtitleCount = 0
        self.updateSubtitleParameterHistory = list()
        
        self.commitCount = 0
        
        # Canned subtitles (checksummed from fixture files under
        # RES_DIR) served by getAllSubtitles().
        s1 = SubtitleInfo("eng", os.path.join(RES_DIR,"fake0.srt"))
        s1.computeChecksum()
        s2 = SubtitleInfo("rus", os.path.join(RES_DIR,"fake1.srt"))
        s2.computeChecksum()
        
        self.returnSubs = {
                            "eng" : s1,
                            "rus" : s2
                            }
        
        self.insertMetadataCount = 0
        self.insertMetadataParameters = list()
        # Keypair used by getSomeMetadata() to sign generated metadata;
        # when None a bogus "fake" signature is used instead.
        self.nextKeypair = None
        
    
    def getAllSubtitles(self, channel_id, infohash):
        """Record the call and return the canned subtitles dictionary."""
        self.getAllSubsCount += 1
        self.getAllSubsParametersHistory.append((channel_id, infohash))
        return self.returnSubs
    
    def getMetadata(self,channel_id,infohash):
        """Record the call; return canned metadata, or None when the
        ``returnMetadata`` flag is cleared."""
        self.getMetadataCount += 1
        self.getMetadataParametesHistory.append((channel_id, infohash))
        if self.returnMetadata:
            return self.getSomeMetadata(channel_id,infohash)
        else:
            return None
        
        
    def getSomeMetadata(self, channel_id, infohash):
        """Build a MetadataDTO carrying two subtitles ("eng", "rus")
        whose checksums match self.content1 / self.content2."""
            
        s1 = SubtitleInfo("eng", None)
        s2 = SubtitleInfo("rus", None)
        
        self.content1 = u"Subtitle Content 1"
        self.content2 = u"Subtitle Content 2"
        
        hasher = sha()
        hasher.update(self.content1)
        s1.checksum = hasher.digest()
        
        hasher = sha()
        hasher.update(self.content2)
        s2.checksum = hasher.digest()
            
        metadata = MetadataDTO(channel_id, infohash, time.time(),
                               "", {"eng":s1, "rus":s2})
        
        # Sign with the scripted keypair when available; otherwise use
        # an intentionally invalid signature.
        if self.nextKeypair is None:
            metadata.signature = "fake"
        else:
            metadata.sign(self.nextKeypair)
   
        return metadata
    
    def updateSubtitlePath(self, channel_id, infohash, lang, newPath, commitNow):
        """Record the path update and report success."""
        self.updateSubtitleCount += 1
        self.updateSubtitleParameterHistory.append((channel_id,infohash,
                                                    lang, newPath, commitNow))
        return True
    
    def commit(self):
        """Count commit requests (nothing is persisted)."""
        self.commitCount += 1
        
    def insertMetadata(self,metadataDTO):
        """Record the inserted DTO and report success."""
        self.insertMetadataCount += 1
        self.insertMetadataParameters.append((metadataDTO,))
        return True
+        
+        
+
class MockVoteCastHandler(object):
    """Vote-cast DB stand-in: getVote() returns a scripted value and
    records its arguments."""

    def __init__(self):
        # Value handed back by every getVote() call.
        self.nextVoteValue = 0

        self.getVoteCount = 0
        self.getVoteParametersHistory = []

    def getVote(self, channel, permid):
        """Record the lookup and return the scripted vote value."""
        self.getVoteCount += 1
        self.getVoteParametersHistory.append((channel, permid))
        return self.nextVoteValue
+
class MockSubtitlesHandler(object):
    """Subtitle-handler stand-in that merely records request and
    retrieval calls."""

    def __init__(self):
        self.sendSubReqCount = 0
        self.sendSubReqParametersHistory = []

        self.retrieveMultipleCount = 0
        self.retrieveMultipleParams = []

    def sendSubtitleRequest(self, dest, channel, infohash, lang, callback=None):
        """Record a subtitle request without sending anything."""
        self.sendSubReqCount += 1
        self.sendSubReqParametersHistory.append(
            (dest, channel, infohash, lang, callback))

    def retrieveMultipleSubtitleContents(self, channel, infohash,
                                         listOfSubInofs, callback=None):
        """Record a bulk subtitle retrieval without performing it."""
        self.retrieveMultipleCount += 1
        self.retrieveMultipleParams.append(
            (channel, infohash, listOfSubInofs, callback))
+        
+
+        
+        
class MockLaunchMany(object):
    """LaunchMany stand-in: counts set_activity() calls and exposes a
    mock rich-metadata DB handler."""

    def __init__(self):
        self.set_act_count = 0
        self.set_act_count_param_history = []
        self.richmetadataDbHandler = MockMetadataDBHandler()

    def set_activity(self, activity, param):
        """Record the reported activity instead of surfacing it."""
        self.set_act_count += 1
        self.set_act_count_param_history.append((activity, param))
+        
+        
class MockMsgListener(object):
    """Listener stand-in that counts and archives received subtitle
    request/response notifications."""

    def __init__(self):
        self.receivedCount = 0
        self.receivedParams = []
        self.subsCount = 0
        self.subsParams = []

    def receivedSubsRequest(self, permid, decoded, selversion):
        """Record an incoming subtitle request notification."""
        self.receivedCount += 1
        self.receivedParams.append((permid, decoded, selversion))

    def receivedSubsResponse(self, permid, decoded, callbacks, selversion):
        """Record an incoming subtitle response notification."""
        self.subsCount += 1
        self.subsParams.append((permid, decoded, callbacks, selversion))
+        
class MockSubsMsgHander(object):
    """Subtitle message-handler stand-in recording outgoing requests and
    responses.  (Class name keeps the original "Hander" spelling for
    compatibility with existing callers.)"""

    def __init__(self):
        self.sendReqCount = 0
        self.sendReqParams = []

        self.sendResCount = 0
        self.sendResParams = []

    def sendSubtitleRequest(self, dest_permid, requestDetails,
                            msgSentCallback=None, userCallback=None,
                            selversion=-1):
        """Record an outgoing subtitle request."""
        self.sendReqCount += 1
        self.sendReqParams.append(
            (dest_permid, requestDetails, msgSentCallback, userCallback,
             selversion))

    def sendSubtitleResponse(self, destination, response_params, selversion=-1):
        """Record an outgoing subtitle response."""
        self.sendResCount += 1
        self.sendResParams.append((destination, response_params, selversion))
+        
class MockPeersHaveMngr(object):
    """Peers-have manager stand-in: records calls and returns canned data.

    Bug fixed: getPeersHaving() previously called
    ``self.getPeersHavingPars.pars(...)`` — a non-existent attribute and
    method — which raised AttributeError on every call and never recorded
    the parameters.  It now appends to ``getPeersHavingParams``.
    """

    def __init__(self):
        self.getPeersHavingCount = 0
        self.getPeersHavingParams = list()

        self.newHaveCount = 0
        self.newHaveParams = list()

        self.retrieveCount = 0
        self.retrieveParams = list()

        self.cleanupCount = 0

    def getPeersHaving(self, channel, infohash, bitmask, limit=5):
        """Record the query and return a fixed list of peer permids."""
        self.getPeersHavingCount += 1
        self.getPeersHavingParams.append((channel, infohash, bitmask, limit))

        return ["permid1", "permid2", "permid3"]

    def newHaveReceived(self, channel, infohash, peer_id, havemask):
        """Record an incoming have-mask announcement."""
        self.newHaveCount += 1
        self.newHaveParams.append((channel, infohash, peer_id, havemask))

    def retrieveMyHaveMask(self, channel, infohash):
        """Record the lookup and return a fixed have mask."""
        self.retrieveCount += 1
        self.retrieveParams.append((channel, infohash))

        return 42

    def startupCleanup(self):
        """Record that startup cleanup was requested."""
        self.cleanupCount += 1
+        
class MockSession(object):
    """Session stand-in exposing only the config getters the subtitle
    code reads."""

    def get_state_dir(self):
        # State lives under the shared test-resource directory.
        return os.path.join(RES_DIR, 'state')

    def get_subtitles_collecting_dir(self):
        # Relative name; joined onto the state dir by the handler —
        # presumably, confirm against SubtitlesHandler.register().
        return "subtitles_collecting_dir"

    def get_subtitles_upload_rate(self):
        return 1024
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/test_RichMetadataInterceptor.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/test_RichMetadataInterceptor.py
new file mode 100644 (file)
index 0000000..5ec0299
--- /dev/null
@@ -0,0 +1,179 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+
+import unittest
+import logging
+from BaseLib.Core.Overlay.permid import generate_keypair, sign_data
+from random import randint
+import hashlib
+import time
+from BaseLib.Core.BitTornado.bencode import bencode
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
+from BaseLib.Core.Subtitles.RichMetadataInterceptor import RichMetadataInterceptor
+from BaseLib.Test.Core.Subtitles.simple_mocks import MockMetadataDBHandler,\
+    MockSubtitlesHandler, MockVoteCastHandler, MockPeersHaveMngr
+    
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+CHANNELCAST_NUM_OF_ENTRIES = 2
+
+    
+    
+
+
class TestRichMetadataInterceptor(unittest.TestCase):
    """Unit tests for RichMetadataInterceptor using the simple_mocks
    doubles: enrichment of channelcast messages with rich-metadata
    entries, splitting them back out, and handling received metadata."""

    def setUp(self):
        
        # Recorded-call mock collaborators (see simple_mocks).
        self.metadataDBHandler = MockMetadataDBHandler()
        self.voteCastDBHandler = MockVoteCastHandler()
        self.subSupp = MockSubtitlesHandler()
        self.my_permid_and_keypair = generatePermIds(1)[0]
        self.advertisedChannelIds = generatePermIds(CHANNELCAST_NUM_OF_ENTRIES)
        self.advertisedInfohash =  generateInfohashes(CHANNELCAST_NUM_OF_ENTRIES)
        self.peersHaveMngr = MockPeersHaveMngr()
        
        # Fake channelcast message: maps entry signature -> entry dict.
        self.channelcastMsg = dict()
        for i in range(CHANNELCAST_NUM_OF_ENTRIES):
    
            signature, msg = generateChannelCastEntry(self.advertisedChannelIds[i][0], 
                                                      self.advertisedInfohash[i], 
                                                      self.advertisedChannelIds[i][1])
            
            self.channelcastMsg[signature] = msg
            
        self.undertest = RichMetadataInterceptor(self.metadataDBHandler, self.voteCastDBHandler, 
                                                 self.my_permid_and_keypair[0], self.subSupp,
                                                 self.peersHaveMngr)
        
        # Sign the canned metadata with an advertised channel's keypair
        # so signature validation can succeed.
        self.metadataDBHandler.nextKeypair = self.advertisedChannelIds[0][1]
            


    def testAddRMDContentNoContent(self):
        """With no stored metadata the message must pass through unchanged,
        though the DB is still queried once per entry."""
        self.metadataDBHandler.returnMetadata = False
        newMessage = self.undertest.addRichMetadataContent(self.channelcastMsg)
        #message should be left untouched
        self.assertEquals(self.channelcastMsg,newMessage)
        #check that the db handler is called
        self.assertEquals(CHANNELCAST_NUM_OF_ENTRIES,self.metadataDBHandler.getMetadataCount)
        
        for i in range(CHANNELCAST_NUM_OF_ENTRIES):
            self.assertTrue(
                            (self.advertisedChannelIds[i][0], self.advertisedInfohash[i])
                              in self.metadataDBHandler.getMetadataParametesHistory) 
    
    def testAddRichMetadataContentSomeContent(self):
        """With stored metadata every entry gains a 6-field
        'rich_metadata' tuple."""
        self.metadataDBHandler.returnMetadata = True
        newMessage = self.undertest.addRichMetadataContent(self.channelcastMsg)
        #message should have been changed
        self.assertNotEquals(self.channelcastMsg,newMessage)
        
        for item in newMessage.itervalues():
            #check the contents of the modified message
            self.assertTrue('rich_metadata' in item.keys())
            #description,bitmask,timestamp,listofchecksums, signature
            self.assertEquals(6, len(item['rich_metadata']))
        
        


        
        
    
    def test_splitChnAndRmdNoContent(self):
        """Splitting a message without rich metadata yields empty lists."""
        self.metadataDBHandler.returnMetadata = False
        newMessage = self.undertest.addRichMetadataContent(self.channelcastMsg)
        
        listOfmetadata = \
            self.undertest._splitChannelcastAndRichMetadataContents(newMessage)
        
        self.assertEquals(([],[]),listOfmetadata)
    
    
    def test_splitChnAndRmdSomeContent(self):
        """Splitting an enriched message yields one MetadataDTO per entry."""
        self.metadataDBHandler.returnMetadata = True
        newMessage = self.undertest.addRichMetadataContent(self.channelcastMsg)
        
        listOfmetadata = \
            self.undertest._splitChannelcastAndRichMetadataContents(newMessage)
    
        listOfmetadata = listOfmetadata[0]
        self.assertEquals(2,len(listOfmetadata))
        for dto in listOfmetadata:
            self.assertTrue(isinstance(dto[0], MetadataDTO))

    def testHandleRMetadata(self):
        """Received metadata from a subscribed channel is inserted into the
        DB and its subtitle contents are requested."""
        self.metadataDBHandler.returnMetadata = True
        newMessage = self.undertest.addRichMetadataContent(self.channelcastMsg)
        
        #it will result that i am a subscriber for any channel
        self.voteCastDBHandler.nextVoteValue = 2
        
        
        self.undertest.handleRMetadata(self.advertisedChannelIds[0][0], 
                                       newMessage)
        
        self.assertEquals(2, self.metadataDBHandler.insertMetadataCount)
        self.assertEquals(2, self.subSupp.retrieveMultipleCount)
        
        pass
+        
+    
+
+
+
+
def generatePermIds(numOfPermids):
    """Return ``numOfPermids`` (permid, keypair) pairs.

    A single keypair is generated and reused for every entry, so all the
    returned permids are identical — the original code did this on
    purpose ("two equal permids for ease of testing") but ignored its
    parameter and always returned exactly 2 entries.  This version keeps
    the equal-permid behaviour while honouring the requested count.
    """
    keypair = generate_keypair()
    permid = str(keypair.pub().get_der())
    return [(permid, keypair) for _ in range(numOfPermids)]
+
def generateInfohashes(num):
    """Return ``num`` pseudo-random 20-byte SHA-1 infohashes.

    The hasher is deliberately reused across iterations, so each digest
    also depends on all previous seeds and consecutive digests differ
    even when randint() repeats a seed.

    Fix: hashlib.update() requires bytes on Python 3; the seed string is
    now ASCII-encoded, which is a no-op on Python 2.
    """
    infohashes = list()
    hasher = hashlib.sha1()
    for _ in range(num):
        seed = randint(0, 1000)
        hasher.update(str(seed).encode('ascii'))
        infohashes.append(hasher.digest())

    return infohashes
+
def generateChannelCastEntry(channel, infohash, keypair):
    """Build one channelcast entry dict plus its signature.

    Returns ``(signature, msg)`` where ``signature`` is computed with
    ``keypair`` over the bencoding of ``msg``.
    """
    entry = dict()
    entry['publisher_id'] = str(channel)
    entry['publisher_name'] = u'channel-' + unichr(randint(0, 255))
    entry['infohash'] = str(infohash)
    entry['torrentname'] = u'torrent-' + unichr(randint(0, 255))
    entry['timestamp'] = int(time.time())

    signature = sign_data(bencode(entry), keypair)

    return signature, entry
+
+    
+
def suite():
    """Collect the TestRichMetadataInterceptor tests into a suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestRichMetadataInterceptor)
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.testName']
+    unittest.main()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/Core/Subtitles/test_SubtitlesHandler.py b/instrumentation/next-share/BaseLib/Test/Core/Subtitles/test_SubtitlesHandler.py
new file mode 100644 (file)
index 0000000..942d8de
--- /dev/null
@@ -0,0 +1,197 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+from BaseLib.Core.Overlay.permid import generate_keypair
+from BaseLib.Test.Core.Subtitles.simple_mocks import  \
+    MockOverlayBridge, MockSubsMsgHander, MockMetadataDBHandler, MockSession
+from BaseLib.Core.Utilities.Crypto import sha
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import LanguagesProvider
+import logging
+import os
+import unittest
+import codecs
+from BaseLib.Core.Subtitles.SubtitlesHandler import SubtitlesHandler,\
+    getSubtitleFileRelativeName
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FOURTEENTH
+
+
+
+logging.basicConfig(level=logging.DEBUG)
+_keypairs = (generate_keypair(), generate_keypair(), generate_keypair())
+testChannelId = str(_keypairs[0].pub().get_der())
+testDestPermId = str(_keypairs[1].pub().get_der())
+testMyPermId = str(_keypairs[2].pub().get_der())
+
+testInfohash = sha("yoman!").digest()
+
+RES_DIR = os.path.join('..','..','subtitles_test_res')
+
class TestSubtitlesHandler(unittest.TestCase):
    """Tests for SubtitlesHandler: registration, GETSUBS/SUBS message
    handling, and on-disk subtitle storage."""

    def setUp(self):
        
        self._session = MockSession()
        self.ol_bridge = MockOverlayBridge()
        self.rmdDBHandler = MockMetadataDBHandler()
        self.underTest = SubtitlesHandler()
    
    def tearDown(self):
        """Remove any .srt files the tests left in the collecting dir."""
        self.ol_bridge = None
        #cleanup the mess in collected dir
        if self.underTest.subs_dir is not None:
            toDelete = [ os.path.join(self.underTest.subs_dir, entry) for entry in os.listdir(self.underTest.subs_dir)\
                        if entry.endswith(".srt")]
        
            for xfile in toDelete:
                if os.path.isfile(xfile) :
                    os.remove(xfile)

    def testRegisterStuff(self):
        """register() wires up bridge, DB and session.  Also invoked by
        the other tests as a setup step; it additionally installs a mock
        message handler on the handler under test."""
        self.underTest.register(self.ol_bridge, self.rmdDBHandler, self._session)
        self.assertTrue(self.underTest.registered)
        self.assertTrue(self.ol_bridge is self.underTest.overlay_bridge)
        self.assertTrue(self.rmdDBHandler is self.underTest.subtitlesDb)
        expectedPath = os.path.join(os.path.abspath(os.path.curdir), self._session.get_state_dir(),
                                    "subtitles_collecting_dir")
        self.assertEqual(os.path.normpath(expectedPath), self.underTest.subs_dir)
        # preparing the mock msg handler
        
        self.mockMsgHandler = MockSubsMsgHander()
        self.underTest._subsMsgHndlr = self.mockMsgHandler
    
    def testGetSubtitlesFileRelativeName(self):
        """The subtitle filename is sha1(channel_id|infohash|lang) + '.srt'."""
        #subtitles filenames are built from the sha1 hash
        #of the triple (channel_id, infohash, langCode)
        name = getSubtitleFileRelativeName(testChannelId, testInfohash, "rus")
        hasher = sha()
        for value in (testChannelId, testInfohash, "rus"):
            hasher.update(value)
        
        self.assertEquals(hasher.hexdigest() + ".srt", name)
        
    def testSendSubtitleRequestSimple(self):
        """A subtitle request is forwarded to the message handler once."""
        self.testRegisterStuff()
        

        self.underTest.sendSubtitleRequest(testDestPermId, testChannelId, testInfohash,
                                           ["zho","rus"], None, OLPROTO_VER_FOURTEENTH)
        
        self.assertEquals(1,self.mockMsgHandler.sendReqCount)

    def testReceibedGETSUBSNoSubs(self):
        """No SUBS response is sent when no matching subtitles exist.
        (Method name keeps its original 'Receibed' typo.)"""
        self.testRegisterStuff()
        
        self.underTest.receivedSubsRequest(testDestPermId, 
                                           (testDestPermId,testChannelId,["ita","nld"]), OLPROTO_VER_FOURTEENTH)
        
        self.assertEquals(0,self.mockMsgHandler.sendResCount)
        
    def testReceivedGETSUBSTwoSubs(self):
        """A GETSUBS for two available languages yields one SUBS response
        carrying both subtitle contents."""
        self.testRegisterStuff()
        self.underTest.receivedSubsRequest(testDestPermId, 
                                           (testChannelId,testInfohash,["eng","rus"]), OLPROTO_VER_FOURTEENTH)
        
        # Expected contents of the fixture files fake0.srt / fake1.srt
        # referenced by MockMetadataDBHandler — TODO(review): confirm.
        eng = u"this is a fake srt\n\nonly needed for testing\n\ncheers :)\n\n"
        rus = eng + \
                u"asfasgb sagas gba\n\nasfasfas 24214 a \nThe checksum is different yuppy!\n\n"
        
        
        self.assertEquals(1,self.mockMsgHandler.sendResCount)
        destination, response, selversion = self.mockMsgHandler.sendResParams[0]
        self.assertEquals(testDestPermId,destination)
        self.assertEquals(OLPROTO_VER_FOURTEENTH,selversion)
        channelId, infohash, contentsList = response
        self.assertEquals(testChannelId,channelId)
        self.assertEquals(testInfohash,infohash)
        self.assertEquals(contentsList,{"eng":eng,"rus":rus})
        
    
    def testReceivedSUBSMessage(self):
        """A SUBS response is written to disk and the registered callback
        is scheduled through the overlay bridge."""
        self.testRegisterStuff()
        languages = ["eng","rus"]
        zho = u"Subtitle Content 1"
        kor = u"Subtitle Content 2"
        contentsDict = {"eng":zho, "rus":kor}
        
        msg = (testChannelId,
               testInfohash, contentsDict)
        
        
        simpleCallback = lambda x : x

        
        bitmask = LanguagesProvider.getLanguagesInstance().langCodesToMask(["eng","rus"])
        self.underTest.receivedSubsResponse(testDestPermId, msg, [(simpleCallback,bitmask)], OLPROTO_VER_FOURTEENTH)
        
        #self.assertEquals(languages,callbackParams)
        expectedFilename1 = getSubtitleFileRelativeName(testChannelId, testInfohash, "eng")
        expectedFilename2 = getSubtitleFileRelativeName(testChannelId, testInfohash, "rus")
        expectedPath1 = os.path.join(self._session.get_state_dir(),self.underTest.subs_dir,expectedFilename1)
        expectedPath2 = os.path.join(self._session.get_state_dir(),self.underTest.subs_dir,expectedFilename2)
        self.assertTrue(os.path.isfile(expectedPath1))
        self.assertTrue(os.path.isfile(expectedPath2))
        
        with codecs.open(expectedPath1,"rb","utf-8") as file1:
            content1 = file1.read()
            
        self.assertEquals(zho,content1)
        
        with codecs.open(expectedPath2,"rb","utf-8") as file2:
            content2 = file2.read()
            
        self.assertEquals(kor,content2)
        
        self.assertEquals(1, self.ol_bridge.add_task_count)
        params = self.ol_bridge.add_taskParametersHistory[0]
        #calling the lambda scheduled on the bridge: it must yield the
        #list of received languages
        val = params[0]()
        self.assertEquals(languages,val)
        
        
    def test_saveSubsOnDisk(self):
        """_saveSubOnDisk writes the subtitle file and overwrites any
        previous version for the same (channel, infohash, lang)."""
        self.testRegisterStuff()
        subContent = u"Test Content\nFor a pseudo subtitle file\n\nYo!\n"
        self.underTest._saveSubOnDisk(testChannelId, testInfohash,
                                      "eng", subContent)
        expectedFilename = getSubtitleFileRelativeName(testChannelId, 
                                                       testInfohash, "eng")
        expectedPath = os.path.join(self.underTest.subs_dir, expectedFilename)
        self.assertTrue(os.path.isfile(expectedPath))
        
        #check the contents
        with codecs.open(expectedPath, "rb", "utf-8") as file:
            cont = file.read()
        
        self.assertEquals(subContent,cont)
        
        ##now the file exists. If a new subtitle is saved for the same
        # channel, infohash, lang but with a different content
        # the old one should be overwritten
        newContent = u"I'm the new content! I shall win over the old one!"
        self.underTest._saveSubOnDisk(testChannelId, testInfohash,
                                      "eng", newContent)
        self.assertTrue(os.path.isfile(expectedPath))
        #check the contents
        with codecs.open(expectedPath, "rb", "utf-8") as file:
            cont = file.read()
        
        self.assertEquals(newContent,cont)
+
+        
+        
+        
def suite():
    """Collect the TestSubtitlesHandler tests into a suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestSubtitlesHandler)
+        
+        
+        
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.testName']
+    unittest.main()
+    
+    
+
+    
diff --git a/instrumentation/next-share/BaseLib/Test/Core/__init__.py b/instrumentation/next-share/BaseLib/Test/Core/__init__.py
new file mode 100644 (file)
index 0000000..284cc10
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/TESTSUITE.txt b/instrumentation/next-share/BaseLib/Test/TESTSUITE.txt
new file mode 100644 (file)
index 0000000..4d5c74c
--- /dev/null
@@ -0,0 +1,93 @@
+TESTSUITE FOR TRIBLER
+=====================
+
+The test suite technically consists of two groups: tests that can be run
+by a single python interpreter, one after another, and tests that require
+a fresh python interpreter to prevent problems with singleton classes,
+threads and listen ports. The former group is tested in one blow by running 
+test_all_single_python.py (see below), for the other groups there are
+separate shell scripts, and you'll see "Run XXX" instructions for those.
+
+Description of tests
+--------------------
+
+Feature: Simple Merkle Hashes
+
+       Create various hashtrees. For each tree, check that the 
+       set of hashes returned is correct for each piece.
+       
+       Create a Merkle torrent. Test that our code correctly reads it.
+
+Feature: PermIDs
+
+       Start a Tribler client, open a socket to it, and test whether
+       it accepts various valid and invalid CHALLENGE messages.
+       
+       Test RESPONSE1
+       
+       Test RESPONSE2
+
+
+Feature: Megacaches/CacheDB
+
+       Jie already made a test suite for this.
+
+Feature: SecureOverlay
+
+       Write a client and a server that use the SecureOverlay API. 
+       Let them exchange messages. In particular:
+       * Test what happens when connections are dropped. Do messages get lost?
+       * Test what happens when the server is down and the client sends.
+
+Feature: Download Helper
+
+       Start a Tribler client, connect to it via the SecureOverlay API.
+       * Test whether it accepts valid and invalid ASK_FOR_HELP,
+       RESERVE_PIECES, PIECES_RESERVED and STOP_DOWNLOAD_HELP messages.
+       * Test whether the client always stops helping on a STOP_DOWNLOAD_HELP
+       in various situations.
+
+  Run
+       $ test_dlhelp.sh
+
+
+Feature: BuddyCast
+
+       Start a Tribler client, connect to it via the SecureOverlay API.
+       * Test whether it accepts valid and invalid BUDDYCAST messages.
+       
+       Jie already has a test suite for testing the Buddycast algorithm.
+       
+       
+Feature: SecureOverlay V3
+
+       Run 
+               $ test_secure_overlay.sh
+               
+Feature: Dialback Message
+
+       Run
+               $ test_dialback_reply_active.sh
+               $ test_dialback_conn_handler.sh         
+
+Feature: Remote Query
+
+    Run 
+        $ test_rquery_reply_active.sh
+        
+Feature: uTorrent Peer Exchange
+
+    Part of test_all_single_python.py
+
+
+Running
+-------
+
+To run the full test suite, run
+
+$ test.sh
+
+from this test dir. This calls test_all_single_python.py 
+and all separate shell scripts.
+
+Arno Bakker, 2007-08-15
diff --git a/instrumentation/next-share/BaseLib/Test/__init__.py b/instrumentation/next-share/BaseLib/Test/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Test/bak_tribler_sdb.py b/instrumentation/next-share/BaseLib/Test/bak_tribler_sdb.py
new file mode 100644 (file)
index 0000000..fe6c3cd
--- /dev/null
@@ -0,0 +1,46 @@
+import sys\r
+import os\r
+from traceback import print_exc\r
+from shutil import copy as copyFile, move\r
+\r
+DB_FILE_NAME = 'tribler.sdb'\r
+DB_DIR_NAME = None\r
+FILES_DIR = os.path.abspath(os.path.join('extend_db_dir'))\r
+TRIBLER_DB_PATH = os.path.join(FILES_DIR, 'tribler.sdb')\r
+TRIBLER_DB_PATH_BACKUP = os.path.join(FILES_DIR, 'bak_tribler.sdb')\r
+STATE_FILE_NAME_PATH = os.path.join(FILES_DIR, 'tribler.sdb-journal')\r
+\r
def init_bak_tribler_sdb():
    """Put a pristine copy of the Tribler sample DB in place.

    Restores tribler.sdb from its backup copy — extracting the backup
    from bak_tribler.tar.gz on first use (exits if the archive is
    missing) — and removes any stale sqlite journal file.
    """
    if not os.path.isfile(TRIBLER_DB_PATH_BACKUP):
        got = extract_db_files(FILES_DIR, 'bak_tribler.tar.gz')
        if not got:
            print >> sys.stderr, "Missing bak_tribler.tar.gz"
            sys.exit(1)

    if os.path.isfile(TRIBLER_DB_PATH_BACKUP):
        copyFile(TRIBLER_DB_PATH_BACKUP, TRIBLER_DB_PATH)
        #print "refresh sqlite db", TRIBLER_DB_PATH

    if os.path.exists(STATE_FILE_NAME_PATH):
        os.remove(STATE_FILE_NAME_PATH)
        print "remove journal file"
+\r
+\r
+\r
def extract_db_files(file_dir, file_name):
    """Extract every member of the gzipped tarball file_dir/file_name
    under ``file_dir``; return True on success, False on any error.

    Members are first extracted into the current working directory and
    then moved under ``file_dir``.
    """
    try:
        import tarfile
        tar=tarfile.open(os.path.join(file_dir, file_name), 'r|gz')
        for member in tar:
            print "extract file", member
            tar.extract(member)
            dest = os.path.join(file_dir,member.name)
            dest_dir = os.path.dirname(dest)
            if not os.path.isdir(dest_dir):
                os.makedirs(dest_dir)
            move(member.name, dest)
        tar.close()
        return True
    except:
        # NOTE(review): bare except deliberately swallows everything and
        # just reports failure; callers treat False as "no backup".
        print_exc()
        return False
diff --git a/instrumentation/next-share/BaseLib/Test/btconn.py b/instrumentation/next-share/BaseLib/Test/btconn.py
new file mode 100644 (file)
index 0000000..314708a
--- /dev/null
@@ -0,0 +1,169 @@
+# Written by Arno Bakker, Jie Yang
+# see LICENSE.txt for license information
+
+import socket
+import sys
+from binascii import b2a_hex
+from struct import pack,unpack
+from StringIO import StringIO
+
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_CURRENT
+
+# Set to True for verbose handshake/read tracing on stderr.
+DEBUG=False
+
+# Overlay-protocol version range advertised inside the synthesized peer ID
+# (see BTConnection.__init__ below).
+current_version = OLPROTO_VER_CURRENT
+lowest_version = 2
+
+protocol_name = "BitTorrent protocol"
+# Enable Tribler extensions:
+# Left-most bit = Azureus Enhanced Messaging Protocol (AEMP)
+# Left+42 bit = Tribler Simple Merkle Hashes extension v0. Outdated, but still sent for compatibility.
+# Left+43 bit = Tribler Overlay swarm extension
+# Right-most bit = BitTorrent DHT extension
+tribler_option_pattern = '\x00\x00\x00\x00\x00\x30\x00\x00'
+# All-zero 20-byte infohash identifies the Tribler overlay swarm.
+overlay_infohash = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+
+def toint(s):
+    # Big-endian byte string -> (long) integer, via its hex representation.
+    return long(b2a_hex(s), 16)
+
+def tobinary(i):
+    # Integer -> 4-byte big-endian string (the BT message length prefix).
+    return (chr(i >> 24) + chr((i >> 16) & 0xFF) + 
+        chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
+class BTConnection:
+    """Minimal test-side BitTorrent connection.
+
+    Performs the BT handshake (with the Tribler extension bits) over a new
+    or supplied socket and provides length-prefixed send/recv helpers for
+    the test suite.
+    """
+    def __init__(self,hostname,port,opensock=None,user_option_pattern=None,user_infohash=None,myid=None,mylistenport=None,myoversion=None):
+        # hostname/port: peer to connect to (dialing is skipped when
+        #   opensock, an already-connected socket, is given; port is still
+        #   recorded as self.hisport for the handshake check).
+        # user_option_pattern: 8-byte reserved-bits override
+        #   (default: tribler_option_pattern).
+        # user_infohash: 20-byte infohash (default: overlay_infohash).
+        # myid: full 20-byte peer ID; if None, one is synthesized below.
+        # mylistenport/myoversion: fake listen port and overlay version
+        #   encoded into the synthesized ID.
+        assert user_option_pattern is None or isinstance(user_option_pattern, str)
+        assert user_option_pattern is None or len(user_option_pattern) == 8
+        assert user_infohash is None or isinstance(user_infohash, str)
+        assert user_infohash is None or len(user_infohash) == 20
+        assert myid is None or isinstance(myid, str)
+        assert myid is None or len(myid) == 20
+        self.hisport = port
+        self.buffer = StringIO()
+        if mylistenport is None:
+            self.myport = 481
+        else:
+            self.myport = mylistenport
+        if myid is None:
+            # Synthesize a 20-byte ID: bytes 14:16 carry our fake listen
+            # port, 16:18 the lowest and 18:20 the current overlay protocol
+            # version, each packed little-endian ('<H').
+            self.myid = "".zfill(20)
+            if myoversion is None:
+                myoversion = current_version
+            self.myid = self.myid[:16] + pack('<H', lowest_version) + pack('<H', myoversion)
+            self.myid = self.myid[:14] + pack('<H', self.myport) + self.myid[16:]
+        else:
+            self.myid = myid
+        self.hisid = None
+
+        if opensock:
+            self.s = opensock
+        else:
+            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self.s.connect((hostname, port))
+        # Build and send the 68-byte BT handshake:
+        # <pstrlen><pstr><8 reserved bytes><infohash><peer id>
+        handshake = chr(len(protocol_name))
+        handshake += protocol_name
+        if user_option_pattern is None:
+            handshake += tribler_option_pattern
+        else:
+            handshake += user_option_pattern
+        
+        if user_infohash is None:
+            self.expected_infohash = overlay_infohash
+        else:
+            self.expected_infohash = user_infohash
+        handshake += self.expected_infohash
+        handshake += self.myid
+        if DEBUG:
+            print >>sys.stderr,"btconn: Sending handshake len",len(handshake)
+        self.s.send(handshake)
+
+    def get_my_id(self):
+        # Our 20-byte peer ID as sent in the handshake.
+        return self.myid
+        
+    def get_his_id(self):
+        # Peer's 20-byte ID; None until a read_handshake* call succeeds.
+        return self.hisid
+
+    def get_my_fake_listen_port(self):
+        # The listen port encoded into our synthesized peer ID.
+        return self.myport
+
+    def read_handshake(self):
+        # Read the peer's 68-byte handshake and assert every field,
+        # including the port/version fields encoded in his peer ID.
+        data = self._readn(68)
+        assert(data[0] == chr(len(protocol_name)))
+        assert(data[1:20] == protocol_name)
+        assert(data[20:28] == tribler_option_pattern)
+        assert(data[28:48] == self.expected_infohash)
+        self.hisid = data[48:68]
+        hisport = unpack('<H', self.hisid[14:16])[0]
+        assert(hisport == self.hisport)
+        low_ver = unpack('<H', self.hisid[16:18])[0]
+        assert(low_ver == lowest_version)
+        cur_ver = unpack('<H', self.hisid[18:20])[0]
+        #if DEBUG:
+        #    print >> sys.stderr, "btconn: his cur_ver: ", cur_ver
+        #    print >> sys.stderr, "btconn: my curr_ver: ", current_version
+        assert(cur_ver == current_version)
+
+    def read_handshake_medium_rare(self,close_ok = False):
+        # Like read_handshake, but skips the port/version checks and, when
+        # close_ok is True, tolerates the peer closing before handshaking.
+        data = self._readn(68)
+        if len(data) == 0:
+            if close_ok:
+                return
+            else:
+                assert(len(data) > 0)
+        assert(data[0] == chr(len(protocol_name)))
+        assert(data[1:20] == protocol_name)
+        assert(data[20:28] == tribler_option_pattern)
+        assert(data[28:48] == self.expected_infohash)
+        self.hisid = data[48:68]
+        # don't check encoded fields
+
+    def close(self):
+        self.s.close()
+
+    def send(self,data):
+        """ send length-prefixed message """
+        self.s.send(tobinary(len(data)))
+        self.s.send(data)
+
+    def recv(self):
+        """ received length-prefixed message """
+        size_data = self._readn(4)
+        if len(size_data) == 0:
+            return size_data
+        size = toint(size_data)
+        if DEBUG and size > 10000:
+            print >> sys.stderr,"btconn: waiting for message size",size
+        if size == 0:
+            # BT keep alive message, don't report upwards
+            return self.recv()
+        else:
+            return self._readn(size)
+
+    def _readn(self,n):
+        """ read n bytes from socket stream """
+        # Loops until n bytes are buffered; returns '' on EOF or on a
+        # Windows connection reset (WSAECONNRESET converted to EOF).
+        nwant = n
+        while True:
+            try:
+                data = self.s.recv(nwant)
+            except socket.error, e:
+                if e[0] == 10035: 
+                    # WSAEWOULDBLOCK on Windows
+                    continue
+                elif e[0] == 10054: 
+                    # WSAECONNRESET on Windows
+                    print >>sys.stderr,"btconn:",e,"converted to EOF"
+                    return '' # convert to EOF
+                else:
+                    raise e
+            if DEBUG:
+                print >> sys.stderr,"btconn: _readn got",len(data),"bytes"
+            if len(data) == 0:
+                #raise socket.error(ECONNRESET,'arno says connection closed')
+                return data
+            nwant -= len(data)
+            self.buffer.write(data)
+            if nwant == 0:
+                break
+        self.buffer.seek(0)
+        data = self.buffer.read(n)
+        self.buffer.seek(0)
+        return data
diff --git a/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_multiple.torrent b/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_multiple.torrent
new file mode 100644 (file)
index 0000000..2c7d514
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_multiple.torrent differ
diff --git a/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_single.torrent b/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_single.torrent
new file mode 100644 (file)
index 0000000..217c0cb
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_single.torrent differ
diff --git a/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_tribler.tar.gz b/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_tribler.tar.gz
new file mode 100644 (file)
index 0000000..eb53814
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/extend_db_dir/bak_tribler.tar.gz differ
diff --git a/instrumentation/next-share/BaseLib/Test/extend_db_dir/superpeer120070902sp7001.log b/instrumentation/next-share/BaseLib/Test/extend_db_dir/superpeer120070902sp7001.log
new file mode 100644 (file)
index 0000000..c63a16f
--- /dev/null
@@ -0,0 +1,560 @@
+# Tribler Overlay Log Version 2
+# BUCA_STA: nRound   nPeer nPref nTorrent   nBlockSendList nBlockRecvList   nConnectionsInSecureOver nConnectionsInBuddyCast  nTasteConnectionList nRandomConnectionList nUnconnectableConnectionList
+# BUCA_STA: Rd  Pr Pf Tr  Bs Br  SO Co  Ct Cr Cu
+1188691201.5 CONN_ADD 120.180.54.112 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f 6 
+1188691201.5 RECV_MSG 195.186.224.9 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 1, 'npeers': 21, 'random peers': [{'ip': '8.54.51.63', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'ip': '2.172.150.234', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '36.252.137.244', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYFXFYpL4PrXTXZQsFzltKDbxejCl6vIfiDVr1y2AHM/VEZFr0A9nkNFGk0nwmV9ItrmByIu1+ftQedb', 'port': 7763, 'similarity': 0}, {'ip': '110.222.153.179', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'DarKMacBookPro.local'} 
+1188691201.8 SEND_MSG 213.64.250.144 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2100, 'random peers': [{'ip': '201.180.187.251', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '224.78.209.106', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '45.3.81.143', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '26.113.136.54', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '23.90.25.224', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '241.86.45.98', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/O5b+hTW3Nrf2o1JAIaKomB7Z94R802h3MG2m9Ae7QXzTmoey3oodDZXjwaK39hu1wJcyH3sLWc5EK', 'ip': '163.150.231.192', 'similarity': 0, 'port': 7774, 'nfiles': 3891, 'oversion': 6}, {'ip': '14.98.234.87', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '184.151.65.140', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'port': 7771, 'similarity': 0}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691205.8 CONN_ADD 72.148.52.96 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcvEGJQwrFOXmAtZ1yhtIg9Dl9gXf9nuSM0G+t8gAVf0xmoycIXRnUp1gTWG5KFp6nWFTfT98ju23u6B 5 
+1188691205.8 CONN_DEL 216.177.89.232 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAd2XK70Zd5riUEWzW8ypYZ1hSNPZarDuPZ0OLN8MAUNno9lWsmV6el2nf3Z8L2azIqHej4w617Us2Xc5 overlayswarm:connection lost 
+1188691205.8 RECV_MSG 2.157.77.181 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcvEGJQwrFOXmAtZ1yhtIg9Dl9gXf9nuSM0G+t8gAVf0xmoycIXRnUp1gTWG5KFp6nWFTfT98ju23u6B 5 PASSIVE_BC {'preferences': [], 'connectable': 0, 'taste buddies': [], 'random peers': [{'ip': '252.96.197.251', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk', 'port': 7002, 'similarity': 0}, {'ip': '123.236.81.108', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc', 'port': 7007, 'similarity': 0}, {'ip': '43.23.212.136', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}], 'collected torrents': [], 'name': 'PerryPC'} 
+1188691206.2 SEND_MSG 64.62.243.125 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcvEGJQwrFOXmAtZ1yhtIg9Dl9gXf9nuSM0G+t8gAVf0xmoycIXRnUp1gTWG5KFp6nWFTfT98ju23u6B 5 PASSIVE_BC {'preferences': [], 'taste buddies': [], 'name': u'superpeer1', 'random peers': [{'ip': '49.156.106.23', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '189.212.8.35', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '84.160.240.160', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '247.78.194.220', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh', 'port': 7770, 'similarity': 0}, {'ip': '107.175.7.169', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '192.118.51.104', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '160.135.83.158', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '45.73.24.70', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '173.250.165.91', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'connectable': 1} 
+1188691208.8 CONN_ADD 61.235.144.190 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeg/a0WEB2kJbeGjeRfA2fquxFNFYsgOI0Xb+nmpAVIWltgFuSPnF2Q1XjHwBBAFEPvBHzuO6mvTEpZs 6 
+1188691209.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeg/a0WEB2kJbeGjeRfA2fquxFNFYsgOI0Xb+nmpAVIWltgFuSPnF2Q1XjHwBBAFEPvBHzuO6mvTEpZs 6 1 {'q': 'SIMPLE trini ', 'id': '3\xf5x\x99\x0b\xdd.\x97\x96\xb3w\xe9Cq1n\xb9\xad1\x8b'} 
+1188691209.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeg/a0WEB2kJbeGjeRfA2fquxFNFYsgOI0Xb+nmpAVIWltgFuSPnF2Q1XjHwBBAFEPvBHzuO6mvTEpZs 0 'd1:ade2:id20:3\xf5x\x99\x0b\xdd.\x97\x96\xb3w\xe9Cq1n\xb9\xad1\x8be' 
+1188691210.4 CONN_ADD 141.15.184.38 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJ0RVgW0YXc03m0M18bWDPCYh5aTGQmpQHX1Cc5EAZBqB2SGmLcj9xOSF+37jsZDab1UfBA6Xka7XgOd 2 
+1188691210.4 RECV_MSG 129.146.252.66 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJ0RVgW0YXc03m0M18bWDPCYh5aTGQmpQHX1Cc5EAZBqB2SGmLcj9xOSF+37jsZDab1UfBA6Xka7XgOd 2 PASSIVE_BC {'name': 'kyle', 'taste buddies': [{'ip': '56.252.20.196', 'age': 775, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOAkxv/0qc5YIsqWobOlEEmP7P8z9daSivw0fU+vAejyfY/O7rU3H/7GJnyebSqxafH1qhhIyhdx523w', 'port': 7765, 'preferences': ['0P5CSOSuqghG7LA16zVLWdLJYIw=', 'l6hcnQcHx2K0FiTkw4eXbpPMFOw=', '2aXvI6iCCSPx0j597XZjJ0uezaQ=', 'zFAQiO27KGtnsjgTSZ6OhEPK2pw=', 'Z65lufnTQUhfjqgCrmzFzUKGTbU=', 'V1N0jIRNBZzUPRQfWj1zwMOKNQs=']}, {'ip': '146.84.168.54', 'age': 467, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABvk3rSgcX0QL2F0IYQv0PsHy7Ux3L12ReGOAhSEAJZBw5sEo121y23Et6vKsAmsy9N/ttSZQQmzweFy', 'port': 6881, 'preferences': ['lZyR5VE6KZJoxSqZpinC5eiBNp0=', 'X1SEgmTlKNwR/gyED4yB6bY7PZE=', 'I41ysJAy6Bx/MrGiXYWcsbTKA+g=', '23rqeFBQOyodDMgUIoHXRnbS98o=', 'vgLN3svKfXsS8thscpOK2lPgc2U=', 'PRsG5WZjdi4P37V+9RSbVAG8OMM=', 'WVnsJ2cTmKcYVSGObPxDLWiy+wA=', 'q/PP8ajLOK9YSfQa+6YDeIxVhoU=', 'eYmjgpyx13zAxEJD2puBsxMLjtI=', 'ZHrlJPNwyzacDp9sfnZXwuoEA60=']}], 'random peers': [{'ip': '176.24.138.167', 'age': 3289, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOpYDWlnIBrUTTyK9PZE6XI4sR3tPL/CGfB7fRMaAYzmEyQAQE2QRbFvri21Twbdqfm3UOe3VCwqWkP7'}, {'ip': '135.111.224.72', 'age': 3289, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPrI1eJh6BWI5fibPITmu5xnAg28Nvg/0GzRIKHjAGBqCzv7TnipxxcyO0pKa6TW8V/06jZRyoM1Aow+'}, {'ip': '251.61.182.209', 'age': 1637, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZp65mQpME4E/hUNqonbu1Z75kI7TeUM/j0n7zRsABsEnYtCiUBmHjeLgwC49PHrsO93MQlKMQX1UMsd'}, {'ip': '243.137.56.44', 'age': 3288, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbSeC1vA8s2SRbOJea9DZfAAZFTEfg7hM9h8DUU/ARwXtZ+w7RulnWL0zWhcBU5qxS+d5lyJhmE6edQx'}, {'ip': 'superpeer6.das2.ewi.tudelft.nl', 'age': 812, 'port': 7006, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd'}, {'ip': '207.123.42.190', 'age': 3288, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWShsqdeNmjtdXB/nAo95SMaXfGQafPaUAzMAtHOAepUw/fZzKf8wyXVu/VfoUCSGZT8RmbjPu7qECwB'}, {'ip': '81.100.10.177', 'age': 2882, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAezGLwb65yVDwWePHMA7ubidUBS0xhYnZGkODUImAOvg+rfEKNyedhuujIqChX1lO8U0v58MHjO2HbO5'}, {'ip': '211.96.1.71', 'age': 105, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARJJIuIAFTr2WD5Sk1UFUtj3Koq8L1jaMzYoKlkoAY/suCczP705LFYs4v4GIrE6Oy5thDX6MGOBTch8'}, {'ip': '105.148.49.192', 'age': 3289, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACu4NmJkJWmNVWx1yJYP8Vp+HMeS2xapk4QjrlbuAODgM/Fw/5SFrs+thvDCnMKX4VbH38JoD9yoKJES'}, {'ip': '20.61.254.133', 'age': 3288, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZgFnjmoe+4sJcN3IsYcZHqbTToAfz5s1ucMvDhNARKixVFTZUurTHPNO+kyU4w0NGI+rcQwfPB8uqvZ'}], 'preferences': ['553UNCk7BEbuJbciE/xOzzE/A+E=', 'G0/kI/O8Uw2xi/JKzjeckMDqKAE=', '2aXvI6iCCSPx0j597XZjJ0uezaQ=']} 
+1188691211.2 SEND_MSG 55.245.114.169 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJ0RVgW0YXc03m0M18bWDPCYh5aTGQmpQHX1Cc5EAZBqB2SGmLcj9xOSF+37jsZDab1UfBA6Xka7XgOd 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '189.183.229.57', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '195.24.135.108', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '229.110.228.200', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj'}, {'ip': '196.56.170.67', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh'}, {'ip': '126.218.149.70', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '251.232.199.8', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '177.123.249.187', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '207.109.56.171', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '8.173.86.55', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691211.2 BUCA_STA 9502  2104 2104 100  1579 12082  61 61  0 9 10 
+1188691211.2 CONN_DEL 172.99.249.157 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/Lk9P06tZGGJ0LmvhQYaa/eFB1XohzqAsT3zUbAdBjkW6s91ww6UOA9htCge+ly2iHBKc/vq9tvB8o overlayswarm:connection lost 
+1188691211.2 CONN_DEL 187.118.149.33 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUJo8gDYK64nh42DjVqtRQGvAP+fsr7BGPjtA0vTAKLcyBoVsCnwkrmZF2dPBIkzUJ6JYXsX7PN/o1SU overlayswarm:connection lost 
+1188691211.2 CONN_DEL 129.245.181.128 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWgzjLCg+8vv3amzXGyzLFs3rU2ExmynLwO4JAdVAPTzsWCaWgFPH74nU0PyXBaOGJJvXpnnSSt6h5jF overlayswarm:connection lost 
+1188691211.2 CONN_DEL 198.199.242.206 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEvzMXbRIxwcph7Rf416vS6ixOv9xX3tZxaLfjssAD9szA10VgZ5JpXkTLGP/vDeoPUmus1wc2oj5OVS overlayswarm:connection lost 
+1188691211.2 CONN_DEL 178.81.155.126 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM+tzQZJXyh4TxQrsUkm2wI1IMOWv+i8fckENtKkAHJZJlH/EIo36ipA3n4ajYiQ4WbXTnrcCi0IK60O overlayswarm:connection lost 
+1188691211.7 CONN_ADD 91.73.241.73 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaT0ru2Je+OdrlLCBO0/naU+Nf7OgyoPKQ335ZwyADNQGJ0334T461ftmsU2YmGMhknjVn4n7Wctef63 2 
+1188691211.7 RECV_MSG 139.163.65.242 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaT0ru2Je+OdrlLCBO0/naU+Nf7OgyoPKQ335ZwyADNQGJ0334T461ftmsU2YmGMhknjVn4n7Wctef63 2 PASSIVE_BC {'name': 'home-f8de795960', 'taste buddies': [], 'random peers': [{'ip': 'superpeer6.das2.ewi.tudelft.nl', 'age': 149, 'port': 7006, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd'}], 'preferences': []} 
+1188691211.9 SEND_MSG 127.182.116.230 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaT0ru2Je+OdrlLCBO0/naU+Nf7OgyoPKQ335ZwyADNQGJ0334T461ftmsU2YmGMhknjVn4n7Wctef63 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '69.71.148.23', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '91.154.94.228', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '67.242.40.241', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj'}, {'ip': '85.172.82.170', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh'}, {'ip': '227.153.169.249', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '214.42.120.229', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '240.55.213.61', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '165.193.221.138', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '137.46.94.118', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691213.7 CONN_DEL 121.199.28.78 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKUdaRnXg2ldD05/3HjCl46xNMofgS6pNu1H9egoAXRZwtDEMEkpXVOBGHz1n6gqpaYHu53Bi0evsAXR overlayswarm:connection lost 
+1188691216.0 CONN_ADD 209.186.74.176 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMzFpCvHhfv5AVCJSVUJ2h9+pHObB8cxloPun4R0Ac1ahnsJfgz28ru3g6JNPPXPDSezm+fHXTwtIUrh 5 
+1188691216.5 RECV_MSG 34.214.114.121 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMzFpCvHhfv5AVCJSVUJ2h9+pHObB8cxloPun4R0Ac1ahnsJfgz28ru3g6JNPPXPDSezm+fHXTwtIUrh 5 PASSIVE_BC {'preferences': ['CN85/QYRr16g1h+4iydoiAbS41c=', 'ypo6Coa4FjEYhbPWxiAeke6aRQI=', 'MrE8U29fQfrZQRM81S8EJz8VWQA=', '+rfZH8tZwjuSwA1tOOml5DZ4B4s=', 'kAMZSR5kthqewR4F9HpncrkIAsw=', 'Pf+sgmXAVf+OtRk6kQq+0mMO9aE=', '/qJV8EjUfwaYxs3KtQ1HEcEErpY=', '2CAH6Msya8JL4+i+AJ9oGJp6uTA=', '4fUpFGkgtQ+oA8krmUEzdEjf0Hs=', '2iZHWw+OdHlGBw6S6poBPfhGVmw=', 'JRclM1UKEt4CNQgcaojkYVqU2po=', 'd+aAi0U1qkblxscI/7i1TMPRfuk='], 'connectable': 0, 'taste buddies': [{'ip': '60.238.165.149', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 12}, {'ip': '142.215.188.69', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYz1VZ7sb9HSC3MJyjf+eCzN9srdHPndMxuZ8m4KATQPEMXXCbLdNojv2OtR4C9S1/VC85qWboV6bvnq', 'port': 7765, 'similarity': 10}], 'random peers': [{'ip': '71.63.186.88', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 3}, {'ip': '50.40.79.27', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOaZSvufGsjINB7bPM8HyUmJqD5KYxhIfJeVeYw8ATAG47ftSY5genu4iynrNr5kD6Y4c9HzHkVW8meW', 'port': 7762, 'similarity': 1}], 'collected torrents': ['\xbaW\x9a+\x8a\x0c\xa6l\x01X\xf2\x11\xb5Y\xcb\x98\xef\x9c\xa0\xa7', '_\x9e=\x81w\x07|;<;0\xb0#H\x9bIS\xf4\xd5A', '\xd0\xd7\xf5\xa6\x843\x86NZG^\xf9\x12l\t\xa6\xcf_,y', '\xa57\xd5}^\x94\xc4hV<\xf7\xd5+\x94a\x18\xfb\xb6\xe2\xd1', '\xe3\x9a\x98\xf9\xe8\x90\x0eaV\x0c\xc8\x85\xbe\xae\x11\xa8\xc8_TM', '9\x17\x0b\xe1o\xbd\x7f\t\x85\xd0?\xdcr\xa3\xa9\xd3\x9a\x181\xf1', '\xc4>\x8aF\xa6\xfa\x14\xa3D\x9c\xc9\x98\xbd\x15P\x0e\xabU\\\xea', '\xb8?\xd2\xecI\x1eR\xd1\xc0*\x9dM\xc0\xeca\xf2j\xe2s\xdc', '\xfb\xd78i\x81(C\xcd\x11hO\xcb%a\x97\xda\xb3\xc2|\xde', '\x12lf\x00\xc7\x1e\xbb[\xb7\xbb6\x9f\xd8h\xc3\x10t\x96]"', 
'9\xf9\xdc\xc9\xe2 \xdd\xee\x05\x0f\x10\xcfs3\xde\xc9f\xb6k}', '\xc7\x82\x15j\x8e\xc5\xe2\xb3\xd13\xcf\xc8\x0f\xa8y\xe1\x9d\xe7\xec\x85', '\xd1:MaS\xde\x9bD\xa2\x98\xf5k \xa2\xb8"+\xe2\xfc\x97', '4\x80\x89]\x8fk\x00\x8f?\x04\x020&8^[\xcc\xaf2\x00', '$\xf8\x94eev{7o\xd7lX\xdb\xd4c=\xe5\xdas\x15', 'O\xadbP\x97v\x05]\xf6|r\xc4\xba\xe8\x9c\xf9\xbb\xe2t\xbb', '\x0c\x19^\x9b\x82\x91\xb6\x95\x11e[\xf7\xd5+M\xe0_k\x96\x8f', '\x99\x17\x87\xdcb\x97\x9e\x1f\x0f$L\\FJ\xc3.\x1c\xe2WW', '\x0f\xc7\xc5C\x12)k\x994\xb3\x87\x89\xc9!\xc0u\x8a^\x8fu', "c\xeavT\x86\xadZ'Q\xc1\xf7\xe9\x87\xf7\xf3\xae\xeac\x1ak", '\xbfn\x11\xeb7\xc9\xba\xb3&\x1c\xb46\x0f}"_\xfb\xe7]\xe4', '\x05yG\xd4\x91Z\xe0\x1dZ{:\xbc\xa6\x86\xb9aU\xecF\xdc', 'x\xe5\xeb<\xcf9\xc7\xefT9\nj\xbe-\x11#r\xc3\xf2\x12', '\x8e\x9e\xf6\x10\xe3rX\xe8\xd4S!\xcb\x14^\x97\xc8j\xf7\xca\xd3', '_)V\xaf\xc7\xa1q\xa7\x12\xd3 \x82\xb8j\xb0\t?\x03cm', '\xac\xb8\xae\x91\xfb\xa2\x00\x04\xa0\x9e\xcc\x0c\xb4`\xfdVxQ\x90X', 'DC\xa3\xea\xc0\xb9\xa6n\x87\xef\xc5c\x8b\xdcJ\x03\xce<\x92\xfc', 'eA\xe0V>\x1e\x9f,\xf9\xe2?Lgf\xb3\t\xecG\xd9C', 'v\xa9\xaf \xb8\x17>1\xd1EVT\x92\xcc{Z\x03\xaa\x10\xfa', 'i\x00\x11YyY\xe6G\x1do0D\\0\x01\xf2#\x8d\xb0\x83', '\xfd\xc4X\xe1nd4\xaa}D\xbe\x8c\x9b@1\x83,B\xe2\xe7', '<\x80\xe9\xb7D\xbc\xab\x07\xa7\xf6\t\x0f\x8f\x80\x8d\xc4JY`\x00', ',\x13M\x15\xdb\xb0\x05g\xcd\xdaA\xa4\xf0\x10\xd1\x8d\x05^\r\x8d', '\x10 \xde \x1a\xa2\xd9%\xdd\xdbR9\x1f@\rS_.\x19_', ']!g\xa0\xb0[\xf2\x99\xb96\xcf\x0c\xabq\x18_\x7f\x1a\xebg', ';0\x9c\xe2^\x0e\xff`\x19\x01\xa9\x00\xf9n\x0fv\x9e\x85\x80\xeb', 'N\xa4P\x1cN\x03\xaf\xb5\xbc9\xe0\xc7*\x19\xa9\xdei\x8f%\x90', '3QZ\xa9\xbc\xcfrB\xd2L\xbb}\xc0\x19A\xcfV0\x01:', '\xc1\x08L\xbet3\xde\x1b\x89U+\x14\xb4\xf4\x97?/\x96\x02u', '\x83VW\x88\xadX\xce\x85\x91x\x9e1\xc6\xa0\xa2 \xe5Sj\xc1', 'c\x9c\x0f\xa7d\xa4}\xff\x0b9\x17\xf7\xff\x15p<\xf5{%\xbd', '\xddD\x9avI \xc1\t\x03\x85\xebu\xb4\xde?\xe4\xfe\xa1+i', '\xcc\xdb>\xa4D\xbbJQ\xc6\xe7&b\x05\xbc\xde\x88\x8aZ\xe3&', 
'\xfc^\x08\x97g\x9f\xb6H\x88\xe1\x90nEUe\xea\x82\xeb\xbcm', 'w\xffn\xed\xd4\xcd\xa2\x0bD\xa0\x8a\xf9\x8306%1\x89\x83\xec', '\xfc@o\x14\xa5\xb9\xe46\xe2\xe3\xc5~\x08\x88\x93\x14\x1e(l\xf0', 'Kb\x83\x1b\x8b\xe6\xf6\xd2#c\x8b\x82\x02>\xaa7\x82\xa1R\xf9', '\x02\xe6\xc5S\x81S\xe8\xc1-\xb1\xd3P\x05\xf1\xbe\x19\x08\x90\x19\x7f', 'D\xc4\x1c\xfd5s\x00\r\xd7\xa6<RWK\x8c6\xe1\x14\xeaU', '\x8f&\xaf\x17`8\xf3]\x1fh\xdd\xe1\r2+\x8eMD\xa7\xac'], 'name': '01-0022'} 
+1188691217.2 SEND_MSG 112.101.216.214 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMzFpCvHhfv5AVCJSVUJ2h9+pHObB8cxloPun4R0Ac1ahnsJfgz28ru3g6JNPPXPDSezm+fHXTwtIUrh 5 PASSIVE_BC {'preferences': [], 'taste buddies': [], 'name': u'superpeer1', 'random peers': [{'ip': '132.126.2.207', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '38.15.29.69', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '88.80.205.246', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '248.184.11.106', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh', 'port': 7770, 'similarity': 0}, {'ip': '58.30.121.255', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '48.28.73.46', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '217.66.42.6', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '151.164.236.238', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '249.14.111.150', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'connectable': 1} 
+1188691218.0 CONN_ADD 122.5.162.35 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIia3G/I2kekwz9jZcRadrqg2nHY2rfdeIjbqHUIADkzcS1mew490b+csY0MdpzgIljrTZmrOVd9pIhX 6 
+1188691218.2 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIia3G/I2kekwz9jZcRadrqg2nHY2rfdeIjbqHUIADkzcS1mew490b+csY0MdpzgIljrTZmrOVd9pIhX 6 8 {'q': 'SIMPLE in living color fly girls ', 'id': '\x91\x15\x82\x91FVC\xd2Kx\xfb\xf5\xd7]\x12\x9c\x8a\xa8\x12\xc5'} 
+1188691218.2 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIia3G/I2kekwz9jZcRadrqg2nHY2rfdeIjbqHUIADkzcS1mew490b+csY0MdpzgIljrTZmrOVd9pIhX 0 'd1:ade2:id20:\x91\x15\x82\x91FVC\xd2Kx\xfb\xf5\xd7]\x12\x9c\x8a\xa8\x12\xc5e' 
+1188691223.9 CONN_DEL 44.179.109.136 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADR97F5EgsNo+y7N2jOVi/NBW9cZ0K5qcCiJGq34AKdNQKxDPwPh16rdwHBH8IUCUzZxGr5UWKdFvnBb overlayswarm:connection lost 
+1188691223.9 CONN_ADD 55.105.23.149 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMK3TeW1q0VQlz9E7Uf3VG1S8ZRbwIekjK6H4odsAS4Gn9rUCndYF4XemqwSfF11hn3xnVorBpJoWyMj 2 
+1188691226.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M 6 0 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': 'Ld\x9b^\x8a\x85\xcdh\tT\xbc/6$[o*\xc4\x9a{'} 
+1188691226.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M 0 'd1:ade2:id20:Ld\x9b^\x8a\x85\xcdh\tT\xbc/6$[o*\xc4\x9a{e' 
+1188691227.3 CONN_ADD 117.241.19.95 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 
+1188691227.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M 6 1 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '\xf2U\xcb\xea\x8a\xef\x1d\xb1\x8blQ"\x058\xdf\xdb{=V\x84'} 
+1188691227.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M 0 'd1:ade2:id20:\xf2U\xcb\xea\x8a\xef\x1d\xb1\x8blQ"\x058\xdf\xdb{=V\x84e' 
+1188691227.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 0 {'q': 'SIMPLE my boys ', 'id': '\x93`z!f_\x12\x8fW\xc1\x18T\xf1\x1c\x00\x05\x16A\xe1-'} 
+1188691227.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 0 'd1:ade2:id20:\x93`z!f_\x12\x8fW\xc1\x18T\xf1\x1c\x00\x05\x16A\xe1-e' 
+1188691232.0 CONN_DEL 221.27.4.231 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAe2j30ejMWrmXF9dI2BtM/MJx9Lr4DVG56pxaRuiAdzFQWNHH1leUG0vUjo6wL5awrQgUUhY6G4tW/DZ overlayswarm:connection lost 
+1188691236.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 1 {'q': 'SIMPLE music ', 'id': '(|\x99U\xc2a\x99O\xac\x07\xeb\xe2\xa9\x979\x9e\xe6\xc7\xda)'} 
+1188691236.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 0 'd1:ade2:id20:(|\x99U\xc2a\x99O\xac\x07\xeb\xe2\xa9\x979\x9e\xe6\xc7\xda)e' 
+1188691242.4 CONN_DEL 38.3.9.115 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAH6nXa+oUs3X9SgqxB/Py4i8gwG5NvJP5vL6NcE6AY13mXG0nLJSD2IGp19qVsQPOQxEjmImfHDBdEip overlayswarm:local close 
+1188691244.5 CONN_ADD 49.21.188.173 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQt1HJVhsHObMjRmnoaZnZpuP3y/HfANBJfgwUHUAYT5fbb4gwk50rHerfQ3K+NLKgKtfL3I+hyDd71D 6 
+1188691244.5 RECV_MSG 160.19.40.9 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQt1HJVhsHObMjRmnoaZnZpuP3y/HfANBJfgwUHUAYT5fbb4gwk50rHerfQ3K+NLKgKtfL3I+hyDd71D 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 56, 'random peers': [{'ip': '177.212.2.197', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'similarity': 0, 'ip': '3.179.46.21', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'oversion': 6, 'nfiles': 3634, 'port': 7764}, {'ip': '19.146.207.163', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'ip': '54.190.5.139', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMFEzOs9poNUO4QKobqkg0S1NddVXqoRaOglxn4bALe/2f1qrYH+VYrGBzlcAyPDZmxsDLN+tySY7KGP', 'port': 65535, 'similarity': 0}, {'ip': '218.83.106.221', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '142.72.154.29', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL7VN8m4HojmNP9vsEyUR+6jc+9V/RXtfUfVxGgVAPpfm5y8VRmVgbWu6I9Q69uTTLT5QworO50/cuiF', 'port': 51337, 'similarity': 0}, {'ip': '65.88.199.247', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFY3lSHHMzF8T7LFp+1G2+y9UNO56SSGZLfDOYZtAAkKJ85kAZrooaafKgSi3fKv8oTBgFAHU7Dw6s9Z', 'port': 7772, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'zed'} 
+1188691244.9 SEND_MSG 247.220.85.43 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQt1HJVhsHObMjRmnoaZnZpuP3y/HfANBJfgwUHUAYT5fbb4gwk50rHerfQ3K+NLKgKtfL3I+hyDd71D 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2109, 'random peers': [{'ip': '83.81.193.173', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '48.40.15.109', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '209.184.128.152', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '143.31.99.143', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh', 'port': 7770, 'similarity': 0}, {'ip': '208.100.219.87', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'port': 7771, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '188.109.203.190', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL/l2IyVa6lc3KAqQyEnR++rIzi+AamnbzXHCxOFAFy67COiBhrC79PLzzUiURbHDx21QA4p8w3UDHLA', 'ip': '53.163.218.118', 'similarity': 0, 'port': 7026, 'nfiles': 107049, 'oversion': 6}, {'ip': '107.40.224.24', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '126.109.136.36', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': 
'135.190.53.86', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691245.0 CONN_ADD 56.47.160.91 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/Lk9P06tZGGJ0LmvhQYaa/eFB1XohzqAsT3zUbAdBjkW6s91ww6UOA9htCge+ly2iHBKc/vq9tvB8o 6 
+1188691245.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/Lk9P06tZGGJ0LmvhQYaa/eFB1XohzqAsT3zUbAdBjkW6s91ww6UOA9htCge+ly2iHBKc/vq9tvB8o 6 8 {'q': 'SIMPLE van dyk ', 'id': '\x98\xcdEP=l\x9f\x94\x87\x87j\xd1\x12\xefM\x0e\xd72t_'} 
+1188691245.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/Lk9P06tZGGJ0LmvhQYaa/eFB1XohzqAsT3zUbAdBjkW6s91ww6UOA9htCge+ly2iHBKc/vq9tvB8o 0 'd1:ade2:id20:\x98\xcdEP=l\x9f\x94\x87\x87j\xd1\x12\xefM\x0e\xd72t_e' 
+1188691245.5 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc 6 1 {'q': 'SIMPLE brit ', 'id': '\x99\x83\x8bh\x18\xcc\x19\x8e\xab\x16!O/\xef\x08\xf1\xdb(w0'} 
+1188691245.5 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc 0 'd1:ade2:id20:\x99\x83\x8bh\x18\xcc\x19\x8e\xab\x16!O/\xef\x08\xf1\xdb(w0e' 
+1188691249.4 CONN_ADD 227.111.193.216 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 6 
+1188691249.4 RECV_MSG 207.199.21.9 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 28, 'random peers': [{'ip': '40.213.31.171', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'ip': '110.234.91.123', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'ip': '237.216.197.119', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '66.243.96.28', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYFXFYpL4PrXTXZQsFzltKDbxejCl6vIfiDVr1y2AHM/VEZFr0A9nkNFGk0nwmV9ItrmByIu1+ftQedb', 'port': 7763, 'similarity': 0}, {'ip': '106.138.173.24', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOCdNvgsa7J6sRgq//Rvh3w9WDKfIjVs5xuxfa4GAFO+2HUkDELPnUA+xCz3GHqNsONSINtVHbUJQjb9', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'leviathan'} 
+1188691249.9 SEND_MSG 75.222.198.18 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2110, 'random peers': [{'ip': '20.98.19.230', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '11.83.135.10', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '229.187.78.85', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '48.76.32.126', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '80.139.135.243', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'port': 7771, 'similarity': 0}, {'ip': '133.42.150.111', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '10.181.67.119', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh', 'port': 7770, 'similarity': 0}, {'ip': '131.108.214.39', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL/l2IyVa6lc3KAqQyEnR++rIzi+AamnbzXHCxOFAFy67COiBhrC79PLzzUiURbHDx21QA4p8w3UDHLA', 'ip': '49.207.18.117', 'similarity': 0, 'port': 7026, 'nfiles': 107049, 'oversion': 6}, {'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '239.132.27.225', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691257.9 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 6 0 {'q': 'SIMPLE girl toe suck ', 'id': 'V&\xaed\xb7\x0b\x84$\x1br\xc3\xe0\xc0;\xe5\xe6\x8f\xda\xe6b'} 
+1188691257.9 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 0 'd1:ade2:id20:V&\xaed\xb7\x0b\x84$\x1br\xc3\xe0\xc0;\xe5\xe6\x8f\xda\xe6be' 
+1188691258.3 CONN_DEL 34.158.110.102 9101 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR overlayswarm:local close 
+1188691260.8 CONN_ADD 194.185.136.58 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUobv4ZlYbbwSRrXgql5JrpPcodZamZqaqAOj268AcaBtgB6Pdbe2GWAek0XZo9VGVWcJ69RQpb16OGr 6 
+1188691261.3 CONN_ADD 251.249.112.10 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARNZfv2M6KWAxEqfFcdG0bY1cV7mObZt5Rsx52QyAbkS25gWunh3rG5DqcECFvXbT9tSH3LHH7z6y2nB 6 
+1188691267.0 CONN_ADD 27.242.163.157 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAE3u3Y0cFFXtRMXcQT+PHBjSqa+UvHzsxtH3xNQAI2zvOIUJf1vmaSjF7kIrrCZW9zSYox8XliqZzlI 2 
+1188691267.0 RECV_MSG 58.77.5.231 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAE3u3Y0cFFXtRMXcQT+PHBjSqa+UvHzsxtH3xNQAI2zvOIUJf1vmaSjF7kIrrCZW9zSYox8XliqZzlI 2 PASSIVE_BC {'name': 'denis', 'taste buddies': [{'ip': '22.84.253.118', 'age': 64883, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADzj9gH4aQrH6umyxEYmr0DhDnJ2xbt2x6mrxRJPAIh+OMRl+P+3PM0vrYjnhB9SwGRNmz+FVqvG+ewx', 'port': 6881, 'preferences': ['NUwABTHisET/bYFkpcyxUEOpxwQ=', 'jWkUcwAe/oWM7gZ6vZefmW1O/go=', 'nU+oU7/C+Q9VV0NM1Akrka8NxHA=', 'ZE4zuSmExvEv0zCma2p872vVviU=']}, {'ip': '246.200.229.147', 'age': 34585, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcSNqdHWT9EJi/xowWZIkRrKUxTvIYiquCh6ELQxAHgPuagR04qB05hJZmzcChXYMlmyZJLsrp+fsar1', 'port': 6881, 'preferences': ['+sGtXqGgfaxb1tAYDd/P6o0s5Sc=', 'HGKNxM0Ma7cu1+CMDLAXefE5GaI=', 'GeV5VbJoVZrU+CNUkY7GeGSUT58=', 'wp2Lql2OwdoYtr8NoCuF0BXpWK8=', 'GT03poazzASOMIpQCUO9W4gexY0=', 'yG0VG/AVY7Rz0vVj2lP46h9XLLc=', 'DX0ltnUVS1SGii1c08czR3ujWxk=']}, {'ip': '198.90.52.163', 'age': 33722, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdrCqAxH0O/Nv5qMNrvjsX1TvhLT+alkdxzboDJ3ABaQNxwleBGoug5T9/k4X/PuqIxEVvlhq3MG8qiW', 'port': 5335, 'preferences': ['6IdcVof0a2se0hvIHuTb+zEA95g=', 'kv5OK82XaeXknpI7ltzrHyaZyRI=', 'YvCse7EkaM+hhvcNa51OjH0r18c=', '4h7KaxMwVEjWMm03RlF+YOlz0LY=', 'YceE4/dk1uCFwIERwt/M3m/YWqU=', 'O/jBNFWhdtZgBYuSxturgsTT2f0=', 'wP9vdYBcMYs7oou2RXToZEG3vYw=', 'lDH2g4NuBXNRsDHsrVHE01sNXBg=', 'FE+paTjK7s/JbWRUyO4DLwb5btI=', 'THqQeGVZemAyLXviPqikdIi1hR0=']}, {'ip': '177.39.143.130', 'age': 16537, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMGY5vrFHR4u7Qx381sR0hEqsZQSuujeZeOWU+MyAX1/LBZqgvNYWxt1ANebw/NK2W6ofucLER5A1kA2', 'port': 6881, 'preferences': ['YS3VzyX/iIUoH3t79amXJOi7ulY=', '7hEz/KwmNbFOyWWBZD0K+8ZZH5A=', 'jI9sfu2qfRKWZFEnab1XYCCqvcM=', 'BaC5qwQk6ZFMyngDbOy/zMsUYZE=', 'z/TQGw9V2gkRNfxl5VE/b8qPI4U=', 'T2Td77+G3naKHqOUA7zWMstHhq4=', 'cL3DrhN0is8FnnktkWU3/foZ+54=', 'l6hcnQcHx2K0FiTkw4eXbpPMFOw=', '1GipsUQXn4vdDRO3la79pqHvodk=', '5GoB07X3ncrIf8YUsJ6XaIw23Eo=']}, {'ip': 
'229.212.42.151', 'age': 34243, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM+DGISvLflm5rVArI+0+NETpyPpshpTk0pRLzhdAAoZQNED7XOBgpHTMfHtuXse1Q1mf3yvAGFMlMYh', 'port': 6881, 'preferences': ['PY1hqvxc1SOFZpnMF3bpuXH3MCg=', 'maVSSFaj4S3mPQ9Aw8peyGNYBp4=', 'gSidW6PJCMZEjoL97V+OpqXOuKs=', 'OY5O9dqj0VSZfwEqKuwhDvAZ5UM=', 'PmZihChUbwMJQvAYQf9GF5z3HOs=', 'DkNGNSPMCBRj/PaLeQUI/SJReHw=', 'ts+DnwkBQmar8LyTtsSGXDtbmLA=', 'spJrRQABmT/ONkOcr6OfOelQQgE=', '0vNE4CUrnxhjzzSdy+jnYm1/LfI=', '2GQHAXf11Hx85pTHIAdBzbJ3Bso=']}], 'random peers': [{'ip': '24.169.187.245', 'age': 2037, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEsDzpms4OxwnHjVITwnzxew83Zp1EKI0vAc7MJSAG+XApCx9VvEtlGvTnxmwipiQzVLXLNcriP8wdjR'}, {'ip': '241.227.193.142', 'age': 14332, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKU10rVQdpmIMXNCXifqCpmX/7oTSEtzepl+feJQADyqGgUJa6Y5wExJGBgT7YUVgPNxIx2GIHLxQTsh'}, {'ip': '69.226.83.21', 'age': 948, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADgMa67lPUFAFxZoo2/naGJdY9MFkitQ8FILANrYAK23QvSXFjxgt1VSIE+XAgxqj9SfggMAjbC1Hm5t'}, {'ip': '95.170.187.218', 'age': 4085, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABmcc1B5PwiDx3xdlpo1D1Zv/JHO5xCLUW0BibcmAE2uYqYVxe7B+hjeZd4O6Rgxpx8h+KlktJNkMutB'}, {'ip': '76.60.224.61', 'age': 1094, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAX0Y9TtkdHPyQuj8W7ZdrPtz/crMQXnYDvBhwAZdAJo+OPSTxw8m7hr31aQMv3jH1HEEfhnv8JHcYZn6'}, {'ip': '104.50.247.105', 'age': 25, 'port': 7769, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUc7AZzwy0XxaVnLSzFx14ov8FLVAvkDHpTLSwr+AYFAQap+unWFsNb4JL4+Myvwp1CA9gbwBrThTBOW'}, {'ip': '36.202.228.186', 'age': 25, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPm1med7TvRRv2XEvpEsD/jazdrROIdK5GdT4QhcAbq9NQNBYCHcy7eaKr+F1ilsgI32VqjHEJ1tc2qf'}, {'ip': '120.63.151.171', 'age': 496, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJM4LYxsbTIV9FHtHrXmaSeoK4hpuv9apJ252v4aAfDmiEXZzKa1Ajrt68lPaLqEceW5eKyj3JykZ7JM'}, {'ip': '21.137.132.229', 'age': 17437, 'port': 
7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAN0Ce9MrR/Grideu5mR+aQylGrd2RPYYKhgawAD2LBc0tjWb9XT7xf768l721QNIUOPaBMe+reX3j'}, {'ip': '17.246.184.175', 'age': 2796, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUZwASVSS/Ov13i2zfroTJKqR6c055oCyBR1TL7CACdaD2sCS4+st24lhl64j/Nk88iT/7bmgAsE2YvI'}], 'preferences': ['tHt6verWXIm7+34Xln4tY3CBPsg=', '/4aSh2jU6VwTSHyWMcqQXN7Bzjs=', 'tO3PBxcYFBmHgwvpNeYkJmKGjLc=', 'wp2Lql2OwdoYtr8NoCuF0BXpWK8=', 'jWkUcwAe/oWM7gZ6vZefmW1O/go=', '5GoB07X3ncrIf8YUsJ6XaIw23Eo=', 'mpP7Exxa3wLSExsbr5l6gF0xBdo=', 'yY8LOow8efZP6y/k9dpQv9HDibw=', 'AJ0avol5u0oyQD062RRPtc1Vl3g=', 'NbmEJ4P8WynqXuKU2Glr17VorJA=', 'qs+RLPUSVQVO+kcetoVKMpWUogM=', 'uFdT9zcp0HHMig66mWd6lCgfnn8=', '5kywaIUaAmOs/dXEOzOpLOtJLRQ=', 'G0Mejh+QXH+00ieiVCh/68DJhkg=', '+bhv4wFLqWWsisDhNI9kkiH2s0s=', 'ExhKW/JArDZN9QK6wFUlo+yxjzc=', '/ndf1ZoTywlk20tYR6b2YEQwv1c=', 'MG6Cz86tYiT2KjEoxLroQ/v2L1k=', 'WzAZ+mS8RZo6yva5MYRcRzVK0SQ=', '1AcWoFxS8K1UL9LP4gDQOzNFHg0=', 'TyVV+c5FC0VXgIfK+D/up6jadtA=', 'PWmMheLUspkmtW1PtwM4DHKpMJs=', 'qSLIie96b6FFnGSZlyLFXRthWhI=', 'ZYtFpNy2CTSYMYjr5RNs5TTmCNk=', '0goDY4Z7SwljDJ245na3obXNyZw=', 'L49uTTs3jK6NHNI58MqqkHrUGvQ=', 'G3ZQmMHWR2gXTM4Aqxk6fI4/dkk=', 'WcCtRikKUQ/+5GWCJ/Lh0rJJzY8=', '2CGRA2z+HDn81au7gVStMYb4+LU=', '8np5ZfjXz9+KRZCgY8skFfHinKY=', 'bpC3tmz/oYOE9k5phzdCu67w5uA=', 'xSlkGRMxKJkRdLlL3xa6XoffNHM=', '1IFNHZ4VJ6zofh4BBluCvgrHIYg=', 'TgfWbXTEorvYgPDXH/yqW6PtsvY=', 'BThASu4SbYh/w+u4PzL3wrv4ddg=', '36lSWV+ScFSlT/v6RnpuwLVc3G0=', 'IZNSxX/16TNoGyathl+BHOInOQ0=', 'hf0RVSRJyZqJ/93BLYkastq82Ds=', 'zei8tjYjqysmP0RwCUXaO5FaGyI=']} 
+1188691267.0 SEND_MSG 236.216.206.238 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAE3u3Y0cFFXtRMXcQT+PHBjSqa+UvHzsxtH3xNQAI2zvOIUJf1vmaSjF7kIrrCZW9zSYox8XliqZzlI 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '87.76.246.165', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '229.128.220.236', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '26.152.216.16', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj'}, {'ip': '31.126.176.12', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh'}, {'ip': '32.9.24.62', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '56.42.23.133', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '17.158.118.44', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '188.32.94.145', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '156.25.242.200', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691267.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUobv4ZlYbbwSRrXgql5JrpPcodZamZqaqAOj268AcaBtgB6Pdbe2GWAek0XZo9VGVWcJ69RQpb16OGr 6 5 {'q': 'SIMPLE notre dame fight song ', 'id': '\x190\x9d\xe2\x99\xbb,\xf8\xfe}\x1f\x8d\xfe\xa4\xa9\x9d\xb1T\x86\xb9'} 
+1188691267.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUobv4ZlYbbwSRrXgql5JrpPcodZamZqaqAOj268AcaBtgB6Pdbe2GWAek0XZo9VGVWcJ69RQpb16OGr 0 'd1:ade2:id20:\x190\x9d\xe2\x99\xbb,\xf8\xfe}\x1f\x8d\xfe\xa4\xa9\x9d\xb1T\x86\xb9e' 
+1188691267.5 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUobv4ZlYbbwSRrXgql5JrpPcodZamZqaqAOj268AcaBtgB6Pdbe2GWAek0XZo9VGVWcJ69RQpb16OGr 6 6 {'q': 'SIMPLE notre dame ', 'id': '\xf5J/\n\x81\xc0\xb0\x91)\xdaH\xbf\x0f\xd4\xd3\xb4\x92D\x87g'} 
+1188691267.5 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUobv4ZlYbbwSRrXgql5JrpPcodZamZqaqAOj268AcaBtgB6Pdbe2GWAek0XZo9VGVWcJ69RQpb16OGr 0 'd1:ade2:id20:\xf5J/\n\x81\xc0\xb0\x91)\xdaH\xbf\x0f\xd4\xd3\xb4\x92D\x87ge' 
+1188691271.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 6 0 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '0U\xe3N\x8b\x8f\xee*\xe3{\xffpu\x1e\x18\x8d\xe8c\xbc\xb0'} 
+1188691271.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 0 'd1:ade2:id20:0U\xe3N\x8b\x8f\xee*\xe3{\xffpu\x1e\x18\x8d\xe8c\xbc\xb0e' 
+1188691271.2 BUCA_STA 9503  2118 2118 100  1577 12085  62 62  0 9 10 
+1188691271.5 CONN_DEL 210.50.118.11 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEbX0oc1/rRGm/YXgN9g3mH5sxAdop2zfQzsSGYPAeK+yCwhKEQrXep19NlQPpFeOYkn3NlvPCxt5a9c overlayswarm:connection lost 
+1188691271.5 CONN_DEL 151.67.21.155 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAW7KQbAfJF8zUiLVF9ivvOIq9gxrbOrYsGmNItGKAfr4Ufkoy4OpT5Y7WyApCHZWC3YietKJXxQQeA3z overlayswarm:connection lost 
+1188691271.5 CONN_DEL 157.14.186.135 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAE6NbIiGgn41fOj5+kSwTq1KUBlhBP8IaaV4duh/ARs4DDMAubs/KGMf7tZqCQwTnMqKJq4wlxkE1660 overlayswarm:connection lost 
+1188691272.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 6 1 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '\xf6\xf0\x94\x8e\xc7n@\xfeK\x1ao\x00\xffC\xabP\t\xd1\x12\x92'} 
+1188691272.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 0 'd1:ade2:id20:\xf6\xf0\x94\x8e\xc7n@\xfeK\x1ao\x00\xffC\xabP\t\xd1\x12\x92e' 
+1188691272.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 6 1 {'q': 'SIMPLE shemale ', 'id': '\xd2\xe7\xd0\x19\xcb\x95\x08FhL\x17\x13\xc4\x11.n\xbf\x19M\x96'} 
+1188691272.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 0 'd1:ade2:id20:\xd2\xe7\xd0\x19\xcb\x95\x08FhL\x17\x13\xc4\x11.n\xbf\x19M\x96e' 
+1188691273.4 CONN_ADD 141.71.157.106 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVlAG71pFCXmivk2xV8jdD7wd3uSwU0QTMFcROoPANm+W5r4cn0A2rGjgCVVT9/uxwSF9+OQDHVXwZah 2 
+1188691274.4 RECV_MSG 20.14.102.82 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVlAG71pFCXmivk2xV8jdD7wd3uSwU0QTMFcROoPANm+W5r4cn0A2rGjgCVVT9/uxwSF9+OQDHVXwZah 2 PASSIVE_BC {'name': 'KEITH', 'taste buddies': [{'ip': '211.62.74.158', 'age': 6, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHA5HA7HmXF5WR5fD5+t2hxeUdFByEeRBCcV5dWUAUNxS758b2bwVUg+1VeeSl1yTlgXcrvSJ8k0IRQ7', 'port': 6881, 'preferences': ['yySe+nnhnr6s+yXQxFj4pI1b+yE=', 'iPSnguPSpNV+Wcv2WtfCZ6gGM3E=', 'n6I5cc0iSt2Lqj2MLcE8Vx+qKIg=', 'o1V41/0GNzZ9YvJEa/BE6CIb5UQ=', 'Y8jNbtGkrOs9zrIG4GXfuGfjhuE=', '6haeKGt45DGK+4Y5UhWTv0IIra4=', 'RTfnMPwmN2mukWur5kNfprzQJ1c=', 'h0nQbTEXdADfdukWvEnaPZt6EZ4=', 'JnVXMvE8+ROu137YMHa3RP+MOq4=', 'pJeKSRQ76Oaf8ePGbggVPIyhHmA=']}, {'ip': '35.255.50.131', 'age': 17, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAruoQhe71RhFfNMgs/eLDnyaXmP7DK3I08+cbrVAKjUoLE++5I0s91vrHsDSNuZtOYZjcqhhj8/Zdky', 'port': 6881, 'preferences': ['SeVPM3qAigccKWkcD/uNGKM52jU=', 'sOTLJP9mRwCuYsVO6gxS10hZm1o=', 'mpTwHB/YZXSgLzR9wgZDNq5RAsc=', 's5mlNkxqmDBhBthBqiHKWCYbDMA=', 'qzPtThCDIf+h1KJGZwPSr3TsdFs=', '9M/OvGoP8X26KVrlG/dHTpUFzsM=', 'VbjFHqa/7eKG9UeHjqXUJdY90Xo=', 'aP6rYVFcWjIwMy/msX7bRAjM8mU=', 'PJBgmtN3ebw94zgpQkatt8reZdY=', '/VKqLU7brRPaH7fYqkvFzbZhnio=']}, {'ip': '61.186.252.12', 'age': 28, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ1/aTcOWqKzSJZTiHkJa2mbUJv3exi3z7zM+Fh/AIjhnTZ9arVRKwIexy0fevF+kCy2RoaadMKuClQt', 'port': 6881, 'preferences': ['PgLkQliXHHjDrQgoSTwOsUy4qro=', 'I6gFXYEGZzCxCVbfZPb6ZtODOl4=', 'QSUhrNbqKZF0P6UVOep5RnIjglc=', 'KaIXBwBukReE/R3MyLZnj7PpG0c=', '/UElM8CYR95RnZvZ8fxgj6im0uE=', 'OuFq0jufZ3boRehHz/RuBRJxBM4=', 'V4jCQ7NA9IKq9I096gjFTQSofjI=', '3WC2e3U91QlKa7eEiKoQbIAGwV4=', 'B1pjFdrJzfX+gYwE9IvgzHRkfMQ=', '4ErsZ3FP1XuURStvSQ6zANZwt+A=']}, {'ip': '73.214.192.128', 'age': 1946, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARqAykwXFekSzF5m5HNTkhKNjG6XNviU98+PzYiMACfJZm/Uh8NT2BVa6x1zb88FDUsTrotb6jmbdzK1', 'port': 6881, 'preferences': ['MWY2ewbyIUpbwQ9ZKj//cWf+3g8=', 'KPdvKZAwgZKG6tR+sHS5wm7x2+Y=', 
'CRJJhlFT4MsMUOhc4a7iQ9P96nA=', 'ZC43IVwsWXrb4VASRPBe9cvZ2qQ=', '+o+Udz3LAmof9hrhOZiHROTTBYk=', 'nt+CB2Jl6ZbjVAmL+AKKq2Pqf2I=', 'xROvyuHIm5DWLBVTt+/lwFXFfVE=', '8aqIzvU2aqpUrwf1RStORg4l59w=', 'FfnSCK6vDOBqbH/zhUxGEi4UA08=', 'y8BDNob7IsZT65oy5mCim6Vwjv0=']}], 'random peers': [{'ip': '114.235.8.253', 'age': 822, 'port': 7763, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYFXFYpL4PrXTXZQsFzltKDbxejCl6vIfiDVr1y2AHM/VEZFr0A9nkNFGk0nwmV9ItrmByIu1+ftQedb'}, {'ip': '137.206.123.105', 'age': 12865, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfW++JQeWXpjNLAtx+f0+BlQrB+kQ7hEcDQjyM6FAbI2CMK/1SlOEV4Xa1/BsqZltMOerKH9EW+WV9Vz'}, {'ip': '208.53.14.232', 'age': 12866, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQkXfuUvyUGmTZueA06Q6yoUFVI6LZWiJ7tvfTJjAEE59lMIXyHiyY0u3FQ9SjHN2ckhw+2GW0KLQCrS'}, {'ip': '116.44.132.179', 'age': 6201, 'port': 9881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdWFHay1bRRh5LVeRm9Ey4aasYUiuDd135y6caVTAXtV8vgkEF7J5RBriCXfuf5SYTSptnwo1MZG6KGD'}, {'ip': '197.75.228.85', 'age': 1678, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJ/CLk7/K6QIuN8aR0O+eN45qlHNHLn0mc0uB16fAZmQQSx00h/mp32mXnjusNT+bHbfsyx/0Tip5KRT'}, {'ip': '130.222.213.25', 'age': 12866, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKY2OVePuX7CX4FFsf95y2iAbJsEVUC1xbtxK1JpAHb4ZLaPfBQrfZsbum2DtIO3EAyYi8JQC+v62Ij2'}, {'ip': '153.224.116.223', 'age': 12866, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKyqA3VqT5oLaRe6JY796dS3kBfw9XKesMpRwAocAQn5lNKsQBNabThoU+8OCfHcATef8C7JrMFvud1j'}, {'ip': '78.125.115.200', 'age': 6655, 'port': 7048, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU51jXT/QzAMIE0CJZRi9Oz9ps7xxfa7ppSp0DMwAcKegxn6rGf9Xl/KczCL+qg1M2E771aIaKxz/2hU'}, {'ip': '55.90.183.12', 'age': 12865, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZolKMEIKA0inwhfFAEzCvujGAWv/zL0xyHmRtAmAcBJaNAt3lVC3h/0dxQ8xISQRZ6lQC/U5BExVmuD'}, {'ip': '65.23.194.252', 'age': 12866, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAA77f4TOr0dNCaa7Frgz6+2+j90vWUGlh1/acQTVAVv2w1u4F2AyF3TShi+GKvmqdkse3mc9oEE5xOb9'}], 'preferences': ['W4qpUnNdD/BzHVUQaMYobC6qoOM=', 'hsyXTV1JCQcUTaE8/9fHwj3CHxE=', 'EkcjmG1slwrmVfuL8a3djXAY9ZQ=', 'IuLoJCL1lbWMvKhv1ETic5JQqIE=', 'dfmH9rQhe2WSnQC9c1QuSbWb3BM=', 'RoCesXKpSdoDSl9vtc4C8E3a/oE=', 'T8KB8FFlghC0Hl53/cUXca90g0E=', 'ThHB5Nfw0hOIf7L2eCw9rx2v4hU=', 'OLWnY8NTt4+AzWrmt5IL0miHRnY=', 'Z4l0NcdG5dkSz9Nbh4/trywdjYw=', 'RvRe375kyBBrYnym1mSiDX1LTqI=', 'znpGpP3ddR1daijZmPE3ds9u9BA=', 'Ad/p5EI0H26oPj1jDug/GCWdorM=', 'f6lKk/1G6zcHi/o7dnLF1Z5pdrg=', 'SeVPM3qAigccKWkcD/uNGKM52jU=', 'HcqP0P1pugxcU4I58GqhcbT5Yr4=', 'ZrtzioJKFp1EN2ZuUP+PP+6mra8=', 'l2Z+e5JNfSccLeOHLXuh+ueWUJ8=', '+UaUdDmHz6POL+eBoJI0fvX2pYQ=', 'B6aUBqNq0Ck17fMjWfbfvWaPeow=', '/FTCGjEIV7p5YJf1lnItyl+H/Ds=']} 
+1188691274.6 SEND_MSG 8.253.154.31 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVlAG71pFCXmivk2xV8jdD7wd3uSwU0QTMFcROoPANm+W5r4cn0A2rGjgCVVT9/uxwSF9+OQDHVXwZah 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '221.61.186.192', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '171.60.22.210', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '209.104.45.248', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj'}, {'ip': '137.0.163.147', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh'}, {'ip': '12.80.161.200', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '168.147.177.235', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '67.225.80.72', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '88.203.31.216', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '112.113.106.116', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691276.7 CONN_DEL 117.17.67.230 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAShzgCs5h03ec7WM1BA0tH8nELzAUFG+wQAI5CTpAF5cSf/hITapQ3aF2mHOBFpPAoqPos4tb186N0gh overlayswarm:connection lost 
+1188691278.2 CONN_DEL 181.148.58.103 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGCf0zUzTjymv03B/r7s4Is7JJ3YIRngjwgPKh7QAfrjYy4rRLdb0CN5P84rmc7thJnzB0OMYLtDCJga overlayswarm:connection lost 
+1188691282.2 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeg/a0WEB2kJbeGjeRfA2fquxFNFYsgOI0Xb+nmpAVIWltgFuSPnF2Q1XjHwBBAFEPvBHzuO6mvTEpZs 6 2 {'q': 'SIMPLE tits ', 'id': ';k_\xec\xe9(-\xfcgJ\xd0\xb4\xf3L\x07\xb5\xa6\xca\xc7\xc6'} 
+1188691282.2 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeg/a0WEB2kJbeGjeRfA2fquxFNFYsgOI0Xb+nmpAVIWltgFuSPnF2Q1XjHwBBAFEPvBHzuO6mvTEpZs 0 'd1:ade2:id20:;k_\xec\xe9(-\xfcgJ\xd0\xb4\xf3L\x07\xb5\xa6\xca\xc7\xc6e' 
+1188691285.8 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGyvWGrcfZtlQ+bfsmDmC06nVQ1lEDrJRZNCeVthAcDv9TEQVjzyviPu+E1AO9Y0l/gMijPGrlUWN7xt 6 5 {'q': 'SIMPLE chuck & larry ', 'id': '\x86h\xde\x08p\xef\x81\xa6\x0f\xa8\xa3T\xad\xbb0\xf2\x00\xdc\xfa\x1c'} 
+1188691285.8 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGyvWGrcfZtlQ+bfsmDmC06nVQ1lEDrJRZNCeVthAcDv9TEQVjzyviPu+E1AO9Y0l/gMijPGrlUWN7xt 0 'd1:ade2:id20:\x86h\xde\x08p\xef\x81\xa6\x0f\xa8\xa3T\xad\xbb0\xf2\x00\xdc\xfa\x1ce' 
+1188691286.8 CONN_DEL 248.66.223.72 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g overlayswarm:connection lost 
+1188691289.9 CONN_DEL 162.129.217.174 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJyAhdMR5uLVIe+E8tfuH6aGWXzfOEBRB4bezr8ZAaIFE/cGgCg26Xr/2vrnHR9UV9jAQa7XvZyHykj1 overlayswarm:connection lost 
+1188691290.0 CONN_DEL 206.176.65.89 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALw82JH7svl40hjmsEBBtHrcqIP1iFljNgECVpFOAREJ0UA2M2YE8dUUbYc8ROZTBeDOjKWRgbDOoD6r overlayswarm:connection lost 
+1188691290.3 CONN_ADD 245.92.116.96 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY3/MbfvgHreul0QBqRJ+pdwX2jgrZNiCWviUVvCAY6x4oLiwLCoEwlCYGPIYtj6mm1qgFdWNKj4mXuL 6 
+1188691292.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 6 1 {'q': 'SIMPLE girl toe suck ', 'id': '\xa5\x82*\xc1N5n>\xd7\xb0$a\xa2\xfd\xc6\xdck\xa3\x06\xb9'} 
+1188691292.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 0 'd1:ade2:id20:\xa5\x82*\xc1N5n>\xd7\xb0$a\xa2\xfd\xc6\xdck\xa3\x06\xb9e' 
+1188691304.6 CONN_ADD 81.123.41.246 7774 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQPXhYUDgXymqNjrP8ArGRuSCbf6DpEc0Pupcdv7AbEwJ7nyst2e+ygo1dbe09sO6hRgBA+E7hlAHGD8 6 
+1188691304.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQPXhYUDgXymqNjrP8ArGRuSCbf6DpEc0Pupcdv7AbEwJ7nyst2e+ygo1dbe09sO6hRgBA+E7hlAHGD8 6 0 {'q': 'SIMPLE phil ochs ', 'id': '\xe0<\xf8\xaehy\xdc\xcej~E\x02K\xf6\xa2\x14!{z='} 
+1188691304.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQPXhYUDgXymqNjrP8ArGRuSCbf6DpEc0Pupcdv7AbEwJ7nyst2e+ygo1dbe09sO6hRgBA+E7hlAHGD8 0 'd1:ade2:id20:\xe0<\xf8\xaehy\xdc\xcej~E\x02K\xf6\xa2\x14!{z=e' 
+1188691305.0 CONN_ADD 26.100.116.37 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 6 
+1188691305.4 RECV_MSG 146.54.217.199 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 16, 'random peers': [{'ip': '26.160.139.242', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU', 'port': 7008, 'similarity': 0}, {'ip': '186.30.88.214', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAT8svDz56hztH53BxA4TBYf5eLHclSMsZveKgP4VAHK0376GQ8FfpsoTsm11gQlQhNaXFevTKjH31wez', 'port': 7764, 'similarity': 0}, {'ip': '142.226.6.183', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdKIkz837tiibf+MXLiRzwkmJdPnMoM9upyFQbRdAA0XFRQjVIdIibVn1irCeBs+zZOy4fjUIWlhIHMF', 'port': 7025, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'javier-gomezs-computer.local'} 
+1188691305.9 SEND_MSG 175.195.18.184 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2014, 'random peers': [{'ip': '230.49.118.31', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '245.60.241.179', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '21.94.111.56', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '243.76.54.150', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '102.220.197.223', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '51.189.129.19', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '101.14.199.53', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '176.196.92.29', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '218.37.94.107', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAT8svDz56hztH53BxA4TBYf5eLHclSMsZveKgP4VAHK0376GQ8FfpsoTsm11gQlQhNaXFevTKjH31wez', 'port': 7764, 'similarity': 0}, {'ip': '75.139.2.50', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUgVROCcROdQcTNkoo3f3pyGIxn7itdPw4RTCfDGASXNm+3rT5Qp3k5OaDipi3JJJARjHpzzsT7d1De9', 'port': 7771, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691308.8 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 6 2 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '6\xa3q\x12\x18N\xed_\x88\xf7\x03&\xc179\xac$\xb5\x01]'} 
+1188691308.8 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 0 'd1:ade2:id20:6\xa3q\x12\x18N\xed_\x88\xf7\x03&\xc179\xac$\xb5\x01]e' 
+1188691313.5 CONN_ADD 174.72.225.47 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 
+1188691314.3 CONN_DEL 233.168.27.72 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUQnkLDxEkifJN+4qEXfziuvlu9qgA4IDHPHD6uzAe8FRaMgea7FrlMJdiPO0Hn3eqG2PBxypjZ6dXmY overlayswarm:connection lost 
+1188691314.7 CONN_ADD 43.7.237.101 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcgn9eYmkjIExKMJH/N/XG5NO4rZSgmNgiFqXEUzAYS+rxdD/9giKZciG1aylLzQ8+lJe0bp1Gp+HF0z 2 
+1188691314.7 RECV_MSG 138.146.39.210 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcgn9eYmkjIExKMJH/N/XG5NO4rZSgmNgiFqXEUzAYS+rxdD/9giKZciG1aylLzQ8+lJe0bp1Gp+HF0z 2 PASSIVE_BC {'name': 'Li', 'taste buddies': [], 'random peers': [{'ip': 'superpeer8.das2.ewi.tudelft.nl', 'age': 107, 'port': 7008, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU'}, {'ip': '188.240.174.68', 'age': 2638, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWd1tX6VQQseDZvpbyDDwa7U09f2gt9dIsre3r6DAL7JUDk1FVm4pAIjZuN25GUzqvMkZ1SNDg1gvcwD'}, {'ip': '4.227.92.3', 'age': 853, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACRNSJuK3dvGtJTUxKl8qyr8dIXVYyW7UahX0odFACJ43vMDM46Yl3h2/JO4yty4tjIO/an37dhKkkTd'}, {'ip': '239.84.201.39', 'age': 519, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUXrUVy9MRSj5SR77jnyOxpN0wdleKLQ44465G64Af8AD7N0Nizs6RZPh4vvw3oP+jHSXlSLFu0rTafB'}, {'ip': '12.157.43.132', 'age': 1268, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaJlEpntmrwMQ3zhL+NmXf7qhHekQod0VdakZ/Y0AX8EbasJ0hxI3+2cOoiU0KrzY3MT0fZ+eJuJ9iXX'}, {'ip': '105.153.114.112', 'age': 3428, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVuZ+6Pv8W5zGvSPPrLWQJm6I/eHrLIdqW5IqwlVAJjhtd/eBrUarllsCvRpdn5a9mtgOB7WL7ylfGlt'}, {'ip': 'superpeer4.das2.ewi.tudelft.nl', 'age': 1905, 'port': 7004, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA'}, {'ip': '41.23.36.83', 'age': 599, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFTI4VImgg2T8SXnFqTppEbIx9qdnY9J1QbPhr3+AYROo7pkoFrTiEf4MpbYDIkVB+7Yy9gD93O9SqcN'}, {'ip': '170.182.228.69', 'age': 1846, 'port': 7774, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAIvpoqeLPjcHE14vb52IlsCsL/D/F2ONqvxiMhQAD5wcfYy6iuf1jHPiOY+yhFPn5LSbaX5DFoMlw4B'}, {'ip': '232.7.5.153', 'age': 1719, 'port': 7762, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGAl/ijlpDPTjUt+iWbMMrYczJYjylgaAh+DvAdEAbHWeT8Ns5YRWoFJliiKwzm6CVWkrKXPG1amrhER'}], 'preferences': ['naC5C+RDeLSLidvBz1mDt/QAKzI=', 'hVFK0hjqGPz3yEkvRfgWYzPtbBs=']} 
+1188691315.1 SEND_MSG 199.87.82.161 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcgn9eYmkjIExKMJH/N/XG5NO4rZSgmNgiFqXEUzAYS+rxdD/9giKZciG1aylLzQ8+lJe0bp1Gp+HF0z 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '165.228.186.247', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '20.215.72.110', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '95.95.120.253', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj'}, {'ip': '83.197.186.198', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '33.75.72.212', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '13.176.223.232', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '173.197.120.205', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '114.222.193.137', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691315.1 CONN_ADD 92.16.67.198 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc0PZJeUaj8RStpsLTyJ/0szs28v1FnzJS62A9+TATpPTpnolUtgC4nimVcoDylFcAex6FljTsNeB0fc 6 
+1188691315.1 RECV_MSG 228.50.14.39 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc0PZJeUaj8RStpsLTyJ/0szs28v1FnzJS62A9+TATpPTpnolUtgC4nimVcoDylFcAex6FljTsNeB0fc 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 304, 'random peers': [{'similarity': 0, 'ip': '253.250.79.140', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdKIkz837tiibf+MXLiRzwkmJdPnMoM9upyFQbRdAA0XFRQjVIdIibVn1irCeBs+zZOy4fjUIWlhIHMF', 'oversion': 6, 'nfiles': 23231, 'port': 7025}, {'similarity': 0, 'ip': '233.209.92.248', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXei2QVn8uYAQfC5NewLOjlcQugVbN7ybggJpXFhAY+/TwE+jRVSQot7fPgBcBU1fDkWaBr6sYTyWPQm', 'oversion': 6, 'nfiles': 1114, 'port': 7767}], 'collected torrents': [], 'nfiles': 26, 'name': 'jeffs-computer.local'} 
+1188691315.1 SEND_MSG 98.220.118.198 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc0PZJeUaj8RStpsLTyJ/0szs28v1FnzJS62A9+TATpPTpnolUtgC4nimVcoDylFcAex6FljTsNeB0fc 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2023, 'random peers': [{'ip': '192.74.89.60', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '118.253.60.156', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '171.1.118.80', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '33.177.170.104', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '44.141.125.82', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '134.251.109.159', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '120.85.213.242', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '255.189.81.181', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '164.197.1.119', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'port': 7764, 'similarity': 0}, {'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/O5b+hTW3Nrf2o1JAIaKomB7Z94R802h3MG2m9Ae7QXzTmoey3oodDZXjwaK39hu1wJcyH3sLWc5EK', 'ip': '215.218.219.77', 'similarity': 0, 'port': 7774, 'nfiles': 3891, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691315.9 CONN_DEL 25.44.227.108 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAH6AJWkkAHOk3M+O/5VKIybqEuzCAFqFEvN9LH1TAWiV9ALPwhq1kBur+xNX0QFA5mqjK52TPz9V+xtQ overlayswarm:connection lost 
+1188691317.6 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 6 3 {'q': 'SIMPLE all ', 'id': 'e\x12\xb1,\xbb\xe1\x1b\x1e\xa1\xf6\x19\xd5x\x9d\x0f7I\xfcF\x9c'} 
+1188691317.6 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 0 'd1:ade2:id20:e\x12\xb1,\xbb\xe1\x1b\x1e\xa1\xf6\x19\xd5x\x9d\x0f7I\xfcF\x9ce' 
+1188691318.1 CONN_ADD 247.225.60.174 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAV8MyqshTN8Wxy1v265LZ9kqGW6i7vSIxzHUXBKAURWEFlKZI1eyRzEPkp1Vz//gsHNf0e5kv0e6+nb 2 
+1188691318.3 RECV_MSG 43.49.161.113 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAV8MyqshTN8Wxy1v265LZ9kqGW6i7vSIxzHUXBKAURWEFlKZI1eyRzEPkp1Vz//gsHNf0e5kv0e6+nb 2 PASSIVE_BC {'name': 'mtw-665f2196474', 'taste buddies': [{'ip': '88.115.86.224', 'age': 151478, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZng0wRSBXiJVELlxwg31cVBvGKVWr8QN7G7D4biAOvDULjkdNCU7NlsUYKbW20R0vup589gBNp9QEtm', 'port': 7762, 'preferences': ['m+/1G/X2EWXzJ4ZrCqVmKp1jqvc=', 'A9Qk7HRqMlTvsLUy506qoPabb9Q=', 'hUImY+VDbeSScxaAUkL+xk2bgh4=', 'KiNfUtzNpRSX3GXtDSzuWx/hVUk=']}, {'ip': '10.104.70.198', 'age': 189890, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARxnxOD72EEdH8o/9U2b08S+gmIomMbhVK3MJzZdASp1cgFSK3t5+lS2JeEjz3k7+P0NC1nZkVzTQcu+', 'port': 6881, 'preferences': ['cyHxgOJTir8VFvYsiT/4wUS6hZo=', '0ajbF2jOjM3bvlalpvmLWxS8ohg=', 'bxnmC5MKwvzcAZHdNRyP8Hvenu4=', 'QJbNGwx1quC/vZHrQ0Wj/nizNrM=', 'FkMBKKd5EXmFoEEzJZb/489PAIs=', '9jBN8nbKkJa1bzmxc+jAxiqxUp0=', 'ba8T9D2mkiU87OLtq+H6JwfFz34=', 'TVcgfiQPti26RXXJHAtVT0k3uMM=']}, {'ip': '143.52.86.160', 'age': 146227, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXVpTBbpMAU21ULC2u7AaQbYdWXZ2lgaOdK+IKYIASEhmVSUXy3iTcQdjqA5VFCGtYyCeNQr7O6l4rlU', 'port': 6881, 'preferences': ['GOhln2xSeuIIZo3ms6TkgzkP+n8=', 'APMd9RcOJBe0YoNUY9W1g/pJwJ0=', 'V1GWQT5orezmY6J4xAvfrWKhj7M=', 'BdUvTIBA7kglPhsn4V0kGox7YBk=', 'mpJaFxdTTRUxOmmgafnOdoOpvYo=', 'P4OGCOjnD7fF3A90bz0G15OdLPw=', 'kTBm3lJHAVT/Rg/76Igmz9HKHw8=', 'oLdQjW4w+Fzo1U1dstZUcDeZ21w=']}, {'ip': '57.159.56.152', 'age': 190966, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAA+Rc4O6+N5y5pGWA9EJNTLm8BXfnRrRDrApfguNAFxA1DLlBnxcWxskiaKirjgKJiFps2MrafMB3xky', 'port': 6881, 'preferences': ['XK+etaXVjtJDU9Zb4ZUDAOhN5h0=', '6/nq/R3jE1Z6emesYnfXJAkqy7M=', 'xs6uAECCMIVsKuCguOQJTaW3IL0=', 'bX5nfPzYoTOTo6aPfURxdkJUxMA=', 'IJNfqaLhVj0mHL6V5GaDHC+kbPw=', 'oqEye3Up81IVVq1ybW6y7bZokOA=', 'XD7lBwwcclprdkbLhFLMF58Q52Q=', 't49MzQqmSJdxtNns60MWvX11R/A=', 'Bwzh6wXPc+JQ3mkptQUXglPZHgs=', '/tkrk3SHannJ71honJ7g2oXoRHs=']}], 'random peers': [{'ip': 
'183.187.0.217', 'age': 11881, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAd+j8G+sOKtli5yceyZ7k6hMfCyTPf3GywxckG95AWJTPe66G1tLCqMOQlTohvB36FClvT/0nE4n1xcO'}, {'ip': '117.251.211.195', 'age': 20519, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAS9pl16ZGJQZz5TpWxuFdt5YI3Bge6Xcqm1u/qLrAZ+sySMnfqlRPwB3XM+lUSZP87W+k5X6JHO1tQvg'}, {'ip': '28.22.93.81', 'age': 20010, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALne2lLjWiLrPnYEZT8BGIGKYPZAXrGfrlaH4TSAAL9RirQa0iEChySE+u6/hkF2rMLEfjOB5/F+qCtG'}, {'ip': '212.229.248.254', 'age': 15435, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUnLBbuHtMzB7IxmWjPuU/UHkfyaR+eAQ41e0cAhAP5mi+dasU4Y4eyht2IUqDsnaDQNVrTEhsWdHYw4'}, {'ip': '64.79.120.146', 'age': 3906, 'port': 7769, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUc7AZzwy0XxaVnLSzFx14ov8FLVAvkDHpTLSwr+AYFAQap+unWFsNb4JL4+Myvwp1CA9gbwBrThTBOW'}, {'ip': '158.137.191.19', 'age': 1284, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADMiQw21JSPLtUpCIcjcBGGPGmGqSs8I5g5U93n4AcTe3+cqcVaJbXUGOxR/tuRf4gKQHGDdWkViYKrS'}, {'ip': '118.176.236.37', 'age': 2249, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU+GOg2/HI9I37uHrLU2gKFAoIrlBegbKrCE9PBRAZqoK9kuBE10TZWTT0qZA7gafwx4q9br8eWzxDnw'}, {'ip': '49.49.25.160', 'age': 218, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYdVjn42OFc+3Qkf0eO5V8AcgOsXjVGnkcpnmDZ6AecU+RXWy8TsQu900kgU3ldIC4HQyppSqpxO/Ikw'}, {'ip': '228.132.103.147', 'age': 8084, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVXdEQr8+vLoVJW4uQij3UX9IVFB6f2Nx71RkW4PAHxDzqwG6PQYbW3+CQuE9cRQ1/pW7ZHZklckxF8B'}, {'ip': '139.242.217.39', 'age': 9747, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQUxxBWZYEtonH1SsHFZfKhX1WFDGtP5OmB0VgiUAL1F0JiZwMmTpdMsPLr/mD3WE/5Mo+hX8oShi/Sz'}], 'preferences': ['5HSdFPF9yITlunJE5WNkzwpEj6k=', 'LCX1VAKbKsv4hDofVdLhbFDgbp8=', 'U7DRcgkTp6w0u19sRIKXlyPOAcM=', '/QEyiaP2GLMtR3N2pTP3QPZRuF0=', 'P+NUuBPJ6fzAG5NABx5WRm14t80=', 'f47Pk0INoYOr4iVzyLBE2IF+e4E=', 
'qLwHyJE9XD1wv+oc7LlDKJ4Qw/Y=', 'WJLe2Liq36XBdlD9iapzqikZQRU=', 'ppmD1k9QOqtcJUvwdP5FFvYepl0=', 'uCtpB8ex6KU7UA+veAjuP2gH6gw=', 'Tb1aV/F8j/I1PM2v3G7UOaqeGfM=', 'FWbqH5vDeX76lSjEFecBNUlfS5Q=', 'gVjlYLpdYj0QBJxsMsmWGr4q6k8=', 'PfD2JoB7+GjjHn+uD1AnD3YKUf0=', 'xQTL6b1nW/myW4vWVyo4809evZA=', 'Ttd+2moLMbFVCb1b1NC+ibmpCGk=', 'FkMBKKd5EXmFoEEzJZb/489PAIs=', 'mMA1VRti1y26C4wdeN75Nm9tJW4=', 'Usw6fU4eofgwGIeQyrd0nd0AL4o=', 'JXu55M2hmIFx/zSGvOnby1WFxmU=', 'kTBm3lJHAVT/Rg/76Igmz9HKHw8=', 'O5MuRyqthtS8hcSIHQmLvqBEo6g=', 'hUImY+VDbeSScxaAUkL+xk2bgh4=', 'vklEjI1B0eFVbTVu7fCW0HLwrOY=']} 
+1188691318.8 SEND_MSG 214.182.96.203 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAV8MyqshTN8Wxy1v265LZ9kqGW6i7vSIxzHUXBKAURWEFlKZI1eyRzEPkp1Vz//gsHNf0e5kv0e6+nb 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '204.206.30.144', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '109.168.98.62', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '144.71.40.12', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj'}, {'ip': '32.80.69.234', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '98.170.153.203', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '194.69.133.110', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '140.94.189.136', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '48.171.228.46', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691320.7 CONN_ADD 52.199.205.113 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3MP63sdnQS5dhaeAuLWJ9fJqHGcjQ4jtXIT6y9AKrOZbIhpb5t0OzpanvSZ8LU51XDS/03PNSHXzls 6 
+1188691321.0 RECV_MSG 134.67.119.192 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3MP63sdnQS5dhaeAuLWJ9fJqHGcjQ4jtXIT6y9AKrOZbIhpb5t0OzpanvSZ8LU51XDS/03PNSHXzls 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 57, 'random peers': [{'ip': '176.34.49.130', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'ip': '245.174.15.88', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '20.199.33.166', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAH1Oiw/3ghpN4oQ9URcPJMqDKBAIttCPZfM4ITEDAJmYlwc+meh6EhVWobTRsu0fkNeeb5z/jrIMrWlA', 'port': 7774, 'similarity': 0}, {'similarity': 0, 'ip': '103.91.214.252', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9', 'oversion': 6, 'nfiles': 234, 'port': 7767}, {'ip': '251.227.33.44', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'similarity': 0, 'ip': '159.244.195.109', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'oversion': 6, 'nfiles': 265, 'port': 7766}, {'ip': '216.224.143.19', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '97.85.221.87', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR', 'port': 9101, 'similarity': 0}, {'similarity': 0, 'ip': '204.203.12.61', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9', 'oversion': 6, 'nfiles': 234, 'port': 
7767}], 'collected torrents': ['\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z'], 'nfiles': 1, 'name': 'NBL-TomRodgers.local'} 
+1188691321.3 SEND_MSG 183.201.1.20 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3MP63sdnQS5dhaeAuLWJ9fJqHGcjQ4jtXIT6y9AKrOZbIhpb5t0OzpanvSZ8LU51XDS/03PNSHXzls 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2038, 'random peers': [{'ip': '8.97.37.11', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '198.235.212.54', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '84.168.240.145', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '202.9.193.252', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '164.174.134.23', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '94.92.51.210', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '18.146.62.147', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '243.151.85.211', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '6.24.59.246', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUgVROCcROdQcTNkoo3f3pyGIxn7itdPw4RTCfDGASXNm+3rT5Qp3k5OaDipi3JJJARjHpzzsT7d1De9', 'port': 7771, 'similarity': 0}, {'ip': '153.59.172.244', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9', 'port': 7767, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691324.7 CONN_ADD 230.254.29.219 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARmTFzBvLQidmznT+pHspdnI3KNMeVa/hSKwver8AbZHnQAXyGU67GTXyXucorkkYmJ8/1kNLbopK8qE 2 
+1188691324.7 RECV_MSG 176.209.18.48 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARmTFzBvLQidmznT+pHspdnI3KNMeVa/hSKwver8AbZHnQAXyGU67GTXyXucorkkYmJ8/1kNLbopK8qE 2 PASSIVE_BC {'name': 'stef', 'taste buddies': [], 'random peers': [{'ip': '98.89.75.134', 'age': 12343, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVT0mEDqBzh86j24U3HXFadjMfxhxRW0LnChttv+AcHviSvSzKLqT450d/Tfpsg4yur+meB0x0yTeNzh'}, {'ip': '211.230.150.56', 'age': 17175, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbSeC1vA8s2SRbOJea9DZfAAZFTEfg7hM9h8DUU/ARwXtZ+w7RulnWL0zWhcBU5qxS+d5lyJhmE6edQx'}, {'ip': '77.48.60.94', 'age': 1732, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAJ1FB3Mzb/ZpdA1kmu1HUp9fW5SbH0k0+/9uT+7Aa0BjGlOd+EQchLxqQZD7EHBfQ239nzv3GvjKD7/'}, {'ip': '75.180.118.191', 'age': 2110, 'port': 22, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL/l2IyVa6lc3KAqQyEnR++rIzi+AamnbzXHCxOFAFy67COiBhrC79PLzzUiURbHDx21QA4p8w3UDHLA'}, {'ip': '39.15.103.102', 'age': 16030, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAuO2DAtnDK0EF5JFYMM4OHEMGABzKkpeUhvWiEqAFeha/4kS8KdRId6M+s1+JoaTMMQp6LkwkA69z2C'}, {'ip': '52.196.76.197', 'age': 2584, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALyWMtpH/C5PSaS810leB0MCxnuLXWY1UKdavmgZAam1nw6a98mQwYRpsS9PMY/8s1cG6UxevpdHDYye'}, {'ip': '196.140.102.181', 'age': 11467, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVRE+wEJwO9T/a0PqlhUamePH8MRwud6DYLBhUmZAK5KDxmoMeOl4w1R8lGDWaA2WyHZ8mzk5cvS0x58'}, {'ip': '167.109.176.17', 'age': 1539, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAjKyPDatdAlYUYGh67f9s773gOUNBqShhg/qXwIASRIAvrDgWQhNIKCJV1DwMiPr9LVRU4GCctIY/O6'}, {'ip': '61.10.212.146', 'age': 12291, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbtf77j4oPM5N78zhaHV6hkBcKy7tPpP7VbrsqFVAWyAxQfc5Rui9ESVjPTnEnPyoxtv06/mimi3nGNH'}, {'ip': '144.158.88.238', 'age': 8392, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKn9WmNHN3WH327KMFjDoJ0EswyknKe5cj27cCNgAKhJcNM621Ul6Up9ZOK+FdnzSB/TZdCmGPIzyAsO'}], 'preferences': ['QeQHa0G3jnC3N38fqEbzjno+GoI=', '+qrt2hNpPCLTGidxKCh70akz/bc=', 'BNBeixOkzP1bhj/J1HrazmKUsRs=', 'MvPgG/IJfsOS9hB6NfEtDC0eUXs=', 'Fwz9srdWexigM4oQJ4Iz7VkVQNk=', 'FMJkdS3y9XXIXnx5qsbU5WTjvaw=', 'ACZmZPZb7NVyLVrwSrscHJpIaig=', 'tU9+gCr8/IFwjFSbCj2nhAvkaRU=', 'i/K3gB9tMpoW0Sb1LSNz4J75BE8=', 'Yc+GZpmZJuh8LzyF/5Ka053DmBw=', 'C9YfDWfHzvEckv4v+Bbn4Q4nIcY=', 'ftSGkG6cC9FmWKgeodmvYWVJ38E=']} 
+1188691324.7 SEND_MSG 230.119.191.49 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARmTFzBvLQidmznT+pHspdnI3KNMeVa/hSKwver8AbZHnQAXyGU67GTXyXucorkkYmJ8/1kNLbopK8qE 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '253.235.150.15', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '47.33.188.62', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '109.179.72.67', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj'}, {'ip': '33.174.177.67', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '77.165.251.131', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '24.77.38.197', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '32.146.199.3', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '4.236.62.137', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691327.1 CONN_ADD 133.181.80.61 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 6 
+1188691327.1 RECV_MSG 138.120.23.167 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 6 PASSIVE_BC {'preferences': ['EUd/jh35cFbvYpsCmARrv3fSgwU='], 'ndls': 1, 'taste buddies': [{'similarity': 337, 'ip': '56.147.32.228', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYFXFYpL4PrXTXZQsFzltKDbxejCl6vIfiDVr1y2AHM/VEZFr0A9nkNFGk0nwmV9ItrmByIu1+ftQedb', 'oversion': 6, 'nfiles': 4860, 'port': 7763}, {'ip': '241.119.154.243', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfeoB4DB/uKLFY2AnGDZefiUGz7EZ+uHcWnYoLXTASKzTGnbOmuN2DAtgL+TXA04Wm1eM/qB2tVzvWNn', 'port': 6881, 'similarity': 150}, {'ip': '252.192.163.238', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcI6HGEIFmyi8BGz8cRIkCg7ChuIcHEFVIqPjqukAbVx3uNTzv334nVb0+WQaQfd9q5M43nbpFpDdY7q', 'port': 7762, 'similarity': 37}, {'ip': '27.187.90.0', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaUKsFKOjMnAPIvcy5IvKz5CPAWafoVQj5y+5wtNAf/ogHl+YltJm9NDnH750ZmeQcdSR2ZUh+RdUKEb', 'port': 7762, 'similarity': 37}, {'similarity': 37, 'ip': '20.159.14.167', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXt/FFv6i5QC50+T1PLK0zeDHeNUK05S+19uRgFQADf8sTHAU0Rjl4q/HBbE7Vg0anKBUfMhGu4kN+Dh', 'oversion': 6, 'nfiles': 489, 'port': 7773}], 'connectable': 1, 'npeers': 473, 'random peers': [{'ip': '15.232.184.210', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALtinCNB77hftcNL1nLgh8FoZU0yQNu8hmdNZ4WXAYFj/iep5b485SL6E8hXKWRWqZtl+PeifI4akELg', 'port': 7764, 'similarity': 0}, {'ip': '212.52.82.184', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'similarity': 37, 'ip': '151.176.4.84', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXt/FFv6i5QC50+T1PLK0zeDHeNUK05S+19uRgFQADf8sTHAU0Rjl4q/HBbE7Vg0anKBUfMhGu4kN+Dh', 'oversion': 6, 'nfiles': 489, 'port': 7773}, {'ip': '67.109.22.116', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAOpAz6zAcfwxexw9v12R9yfvzPrCYS1poX1qAK7ATC9+uUdd+8IPldRBhErlDxibSNqfNGHYAJdAzLS', 'port': 7764, 'similarity': 0}, {'ip': '41.153.224.235', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'similarity': 0, 'ip': '118.222.12.34', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPBCcTKTqzwxPjpQds/YgjvZNwbiDX8E038Xk3haAQ1NMLFN9oKUfQfGZVNfL6o2Bi0mild9EW3zl3wy', 'oversion': 6, 'nfiles': 104, 'port': 7764}, {'ip': '0.236.249.75', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '1.187.5.67', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc', 'port': 7007, 'similarity': 0}, {'ip': '89.223.251.124', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu', 'port': 7005, 'similarity': 0}], 'collected torrents': ['\x97\xa8\\\x9d\x07\x07\xc7b\xb4\x16$\xe4\xc3\x87\x97n\x93\xcc\x14\xec'], 'nfiles': 48, 'name': 'mobin'} 
+1188691327.4 SEND_MSG 152.61.153.90 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2051, 'random peers': [{'ip': '201.245.50.35', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '215.169.111.248', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXt/FFv6i5QC50+T1PLK0zeDHeNUK05S+19uRgFQADf8sTHAU0Rjl4q/HBbE7Vg0anKBUfMhGu4kN+Dh', 'port': 7773, 'similarity': 0}, {'ip': '114.245.109.174', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '66.4.192.72', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '206.82.19.101', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj', 'port': 7772, 'similarity': 0}, {'ip': '154.28.2.250', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '185.9.239.66', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '108.35.46.159', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '241.183.18.137', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 
'connectable': 1} 
+1188691330.3 CONN_DEL 84.151.77.60 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaT0ru2Je+OdrlLCBO0/naU+Nf7OgyoPKQ335ZwyADNQGJ0334T461ftmsU2YmGMhknjVn4n7Wctef63 overlayswarm:connection lost 
+1188691331.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 6 0 {'q': 'SIMPLE monica sweetheat ', 'id': '\xe4x\xd7L~sC\x8e\x8d~\x14\xcf\xed\xbdx\xcaS\x02\x9c\xc7'} 
+1188691331.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 0 'd1:ade2:id20:\xe4x\xd7L~sC\x8e\x8d~\x14\xcf\xed\xbdx\xcaS\x02\x9c\xc7e' 
+1188691331.5 BUCA_STA 9504  2051 2051 100  1579 12091  62 62  0 9 9 
+1188691331.5 CONN_DEL 10.36.7.43 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADFmBY5bwdEa6YiHzJijwi3mSnGYrdiI8zKcBOnrAeVujdv/+p4EG9MXnow2OEv5JvjnxSQeQxp7tmDp overlayswarm:connection lost 
+1188691331.5 CONN_DEL 84.76.177.114 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjUXU3Lg1jt8LvQBhrNOXPAJDpWKEVStXWXWmHNAPX8ta32pkPysRlaNMsCEZ3F5NFiOIrpq5qrk1aj overlayswarm:connection lost 
+1188691331.5 CONN_DEL 84.213.47.129 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM0Vuwniauo+WqVVzML2DRYuS30Sc9ytQLnUt4z7AB4a43XmiXhMPQFl4jJDlMoapvSyyG9bYtmVMLAW overlayswarm:connection lost 
+1188691331.5 CONN_DEL 246.85.214.16 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGdzvTcViM+jP5c2X17Kb6llkEqeMUTzxhPfUJAZANSGI8r/3gdkdg8mXGgcZ5CNhHIyzDHR8aO78a4z overlayswarm:connection lost 
+1188691331.5 CONN_DEL 114.113.37.174 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAevG/uOdqyTFrAIOv5Ep1J/JNWReIn8Zubw6d3cAAOqCsBfjEmIksQ+xOx4izLJYuzwOn9GOKZYEeodu overlayswarm:connection lost 
+1188691331.5 CONN_DEL 168.69.19.163 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAgzNGNl1y27nZEMSHcuPAierDRBbLdlYH8S2NZvASpLBso2JS85Nf0/XTxWvgYDSrhhdvKx8AlZ/kJR overlayswarm:connection lost 
+1188691332.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 6 0 {'q': 'SIMPLE usagi ', 'id': "ix\x9c\xf7\xad'\xcf\xbd\xfd\x08:\xec\x88\x84\xcf\x86\xf9H\xe3\xbe"} 
+1188691332.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 0 "d1:ade2:id20:ix\x9c\xf7\xad'\xcf\xbd\xfd\x08:\xec\x88\x84\xcf\x86\xf9H\xe3\xbee" 
+1188691333.1 CONN_ADD 19.61.175.65 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFHX07bWUyt7ZmuOZ/FtmkxQ+JqxstkI6hXUSR66AYqrEBe2ydAyKiKHPeZua5+DdLUkCXiip9DyQaZy 2 
+1188691335.1 CONN_ADD 14.22.247.72 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACmQbR3WQrGc3EF6kbVZa1gRKelPtT2eexDEUfVwAZgn2sXwZXxhoEYheYQMX3TimnaYLzmBfxpDaHAx 2 
+1188691335.1 RECV_MSG 116.125.184.219 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACmQbR3WQrGc3EF6kbVZa1gRKelPtT2eexDEUfVwAZgn2sXwZXxhoEYheYQMX3TimnaYLzmBfxpDaHAx 2 PASSIVE_BC {'name': 'karen', 'taste buddies': [], 'random peers': [{'ip': '121.104.55.177', 'age': 16777, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdvVgDtV00ADTCDjj++y0GXN2EFRfkMKv3EjUSAMARS/tX9lRd+C+HMjKIPG0oRYN8rnnNTSQk+FR67G'}, {'ip': '13.243.158.14', 'age': 3317, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQuLEqG6jaAywvjhE6pK5mFO9i9itsWv+SM4C/MpAHu0xMG/0dcDmfsdDOm6BHyy67mSwW75M0hdvgg6'}, {'ip': '166.243.199.255', 'age': 13279, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJShWTJ80hXWhl4tOBlSBAOjDhygUEuSFZp23k/+ARK6zBlWHjl0iCmmliUnZ6ZYrJNypMSVepqcq8/N'}, {'ip': '175.248.113.43', 'age': 7854, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAT3TB9z8OE4dqngcecb7ofVVeGTexOxiiN0fjL/QAJIKWa6EVpPh46TgLh1MWFeaWOX0+sVlc/IWoLli'}, {'ip': '141.112.49.12', 'age': 2170, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADY6NJhG3wWJ3Q67IvpBzxXndAGSLYub7+ErC6JqAWUA1z/3YnGgKzdgB2e2HW88i6keKUW0yqHzc5jU'}, {'ip': '247.136.237.14', 'age': 801, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY914JsAiPLGEeJDvoIFo4/bEAbP4IIfjLyraUghAPUlsLXzURCkTG+fnFr4evVwECuICUu6JTev0CNi'}, {'ip': '177.163.177.41', 'age': 10362, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbSeC1vA8s2SRbOJea9DZfAAZFTEfg7hM9h8DUU/ARwXtZ+w7RulnWL0zWhcBU5qxS+d5lyJhmE6edQx'}, {'ip': '58.236.34.35', 'age': 12729, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG844xlfmISp1w9tS5WzvDiWFUX97ML06lrQIS/CANqC6If8SzrSIVp7jR9iUB3qzakNAL1ZB4lEs6ut'}, {'ip': '91.41.134.107', 'age': 492, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAExoVJwbESvuqKgiaG0ruR+73nxsSRInIhaV70VNALfxrM5yC36ypjbzWlXAkCrtZbQrHabFco5dvmzV'}, {'ip': '214.107.43.44', 'age': 13327, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQxWJhHONF4FGYmU+zaCRNm7RGogezBHlT57FChXAKl7g1Z/+HF0A4m0PiM32+bGCxj/Qnbj4Bc4LWrs'}], 'preferences': ['AbvS09DFpAcpFstyD0prJT/jYTc=', 'b2yMhEdXZI/JOXpUTGCkbtjCkkg=']} 
+1188691335.1 SEND_MSG 174.143.98.222 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACmQbR3WQrGc3EF6kbVZa1gRKelPtT2eexDEUfVwAZgn2sXwZXxhoEYheYQMX3TimnaYLzmBfxpDaHAx 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '182.234.20.53', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '54.184.207.190', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '21.33.18.177', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '133.70.151.4', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7'}, {'ip': '138.60.1.10', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '93.234.87.237', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '218.115.70.43', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '68.157.49.167', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691339.6 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc0PZJeUaj8RStpsLTyJ/0szs28v1FnzJS62A9+TATpPTpnolUtgC4nimVcoDylFcAex6FljTsNeB0fc 6 0 {'q': 'SIMPLE pickle surprise ', 'id': '\xcf\xd8\x88S\x04\xf1\x91`S\xc4OW\xcb\x9d\xd3DB\xb0\xf8\xcf'} 
+1188691339.6 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc0PZJeUaj8RStpsLTyJ/0szs28v1FnzJS62A9+TATpPTpnolUtgC4nimVcoDylFcAex6FljTsNeB0fc 0 'd1:ade2:id20:\xcf\xd8\x88S\x04\xf1\x91`S\xc4OW\xcb\x9d\xd3DB\xb0\xf8\xcfe' 
+1188691339.9 CONN_DEL 163.128.46.150 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc overlayswarm:connection lost 
+1188691343.0 CONN_ADD 227.213.67.132 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAefabVLoM/fghtRi3VgTmXP7pFo/5NtunUVI/H+lADWKS5v3IEUyPeq7Erj+W7O2W/pd/3D7Dm/VuAZ9 6 
+1188691343.1 RECV_MSG 22.20.36.189 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAefabVLoM/fghtRi3VgTmXP7pFo/5NtunUVI/H+lADWKS5v3IEUyPeq7Erj+W7O2W/pd/3D7Dm/VuAZ9 6 PASSIVE_BC {'preferences': ['iC/LzNHCfQLr6yUdqfjfF2SwDn4='], 'ndls': 1, 'taste buddies': [{'ip': '9.216.69.152', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc2kgAglCiBl5jRJRbZhNRJlArOtDMU5+XGt0dT4AfelHvcR0H0gyPQnFOu8qfGt+gV1mvW9WUBfgp3S', 'port': 6881, 'similarity': 718}, {'ip': '54.226.45.114', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGhA7L6MXDjNCjdWpd4ghM+CNZ9NpIJy43G4rAiUASkXeHnSMkiWVibwNU0oi5jZSABC6K1mrb0xlUWQ', 'port': 51500, 'similarity': 680}, {'ip': '5.70.226.154', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWWyHj/ZBC1gA6D5PkLb3JQpO1ga4Ygqs4WtkoE7ACHaHcR9K6IZfBC6YyVnzmc7WZsNdBDrLD/oKYR9', 'port': 6881, 'similarity': 428}, {'ip': '159.141.90.192', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfTOXIIKGSuG7I7hUciVO0bjcdT4pehXjOGD2LjvAY/C8lJnnKZOKoPTNHuBALJWcumcTDa6ris/HsoL', 'port': 6881, 'similarity': 416}, {'ip': '243.220.59.160', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARVi6wD2BJ9WAjoIJeAb8TcLki+ke2rpKjBETZooAaG9c3+JCVam+E2FqQPGZi68FICqouwtoLjizebU', 'port': 6881, 'similarity': 198}, {'ip': '102.250.209.223', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPSgs5V9WvDQm6Me7mp6pc8gic86A5paujS405RVAI5n6DQPU9YyK557KQMdeox0va3TyxWGDTm40zun', 'port': 6881, 'similarity': 198}, {'ip': '63.137.95.56', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfBO5z5U7ECMie7g3AaYx+W6HiEBaATYS8Gpe0XfASNs8jjNKqkCv/AnJM3VWMPf6iLEcj6yr9pfuaTo', 'port': 7762, 'similarity': 164}, {'similarity': 151, 'ip': '168.221.69.44', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'oversion': 6, 'nfiles': 2228, 'port': 54545}, {'ip': '28.55.230.58', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATDs8UZlCBqn4knPjBxe0qIO8EyS7KbEmzO0CReCAQI+T23mc7XwwjxtrmxrGfhn7AV1xo0PD3u3ANsS', 'port': 7762, 'similarity': 82}, {'ip': '118.68.114.165', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAD9/hVcDG8V9AGRwu2yEsTfkWxG8sp3kisfV6yS8AEbld2+sKIzl6ZNqDPZ7y7Ahs1g6yzfen9TTZFpR', 'port': 6881, 'similarity': 82}], 'connectable': 0, 'npeers': 902, 'random peers': [{'ip': '239.72.150.190', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdAdxnRlYSo0sU5QQq5RxtkwzADJWrJjra0eCif+AW874y5yrzBChiGQOmZHqV15a36Xo1UAibMd3oUC', 'port': 7762, 'similarity': 0}, {'similarity': 25, 'ip': '150.187.130.118', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIia3G/I2kekwz9jZcRadrqg2nHY2rfdeIjbqHUIADkzcS1mew490b+csY0MdpzgIljrTZmrOVd9pIhX', 'oversion': 6, 'nfiles': 344, 'port': 7764}, {'ip': '86.188.100.226', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI5Alp9TzQyg5UX0zjtM+oMqgnQKnwH/THVriv2sAHXxKEECt4tOlv+qxiHZF1Y5yAu+92OFEmGR5K4u', 'port': 6881, 'similarity': 0}, {'ip': '229.3.146.252', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADEJAxP6+C2k83YF4gdXgi3AuRjtBHZCJWRhFdViAapaFEl3o3+Soc2Akyc/8aSMbX9gaXUQbBggUFdH', 'port': 7762, 'similarity': 23}, {'similarity': 70, 'ip': '165.70.232.24', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOAkxv/0qc5YIsqWobOlEEmP7P8z9daSivw0fU+vAejyfY/O7rU3H/7GJnyebSqxafH1qhhIyhdx523w', 'oversion': 6, 'nfiles': 729, 'port': 7765}, {'similarity': 0, 'ip': '97.112.229.244', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAW7KQbAfJF8zUiLVF9ivvOIq9gxrbOrYsGmNItGKAfr4Ufkoy4OpT5Y7WyApCHZWC3YietKJXxQQeA3z', 'oversion': 6, 'nfiles': 103, 'port': 7768}, {'similarity': 63, 'ip': '99.146.26.10', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIibpPXZodVcbWH4OaeQHMnA8WVjIHcnAM0APTZ2AcXj/wdZ6oWbzMsQte2Ntc5bZhodAfxsnU2r2dEf', 'oversion': 6, 'nfiles': 744, 'port': 7764}, {'ip': '160.217.81.209', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMFJEbGhUVnZ9Im/aJPS6tfpPgXZsOuKduo4TOQ5AeAI4iYjxvxJwjQJU6PqNH0M1HQ6gr2pf9BaHK4j', 'port': 6881, 'similarity': 0}, {'ip': '18.238.66.49', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJOaf2HGMkKe70PYmsX/zqNrylwJXSy/dK5Y7dYGAOzgzJvY3+WiadZWyUJEKmlfMwu+nVF+I7rkl3Ii', 'port': 7772, 'similarity': 0}], 'collected torrents': 
['\xa0\xc6R\xb7N\x08Z[fUc\x18\xfb\x9eU\xeb\xab\xf01\xce', 'qa%\xfbH\xd7\xca\xff\xc4\x8a\x89#\x08\x94\xfe\xa4R\x94\xbc\xa1', "\xf2l\xb8\x83\x1dg9'\xbf9\t\xeb\xa4S\xdf\xe4\x0by\x8a\xa5", '\xed\x19\xc1I\xff\xa2\x03s\xaf\xa0@;b\x01a\xf0\x14Y=.', '\xec]\xba]\x18\x0c\xea\xe0\x0c\xc8\x0e\x7f\xf2\xa4\xc9\x0bg\xd3y\xc7', '\xacQ\xa4O\x80\xd0lZ$\xd5V62+H\xf9!G\xaf\x86', '\xc2\xf4\x9a\x9at\xed\x16\x9d\xb7\xcaw\xf4\xa9L\xce\x97b4\x82%', '=D\x93\xe8\x99\xdb\x1a\x87,\\\xd5\xe5\x95\xd1\xf5\xabj\xaf\x03D', '\xc77\x8f\xb0\x04bM"\x00\xd5\x8fIG\x826\xf2\xdc/\x1fz', '\xa0P7\xb4\xa8\x88>\x99k\xb09\xaf\xea\x02Xn!\xfc\xb6\xfc', '\x88/\xcb\xcc\xd1\xc2}\x02\xeb\xeb%\x1d\xa9\xf8\xdf\x17d\xb0\x0e~', '\x14\xe3.\xc7tz\xdc\xce\xe8\x8f9\x03`O!9\x1e\x03E\x11', '\xac\xb8\xae\x91\xfb\xa2\x00\x04\xa0\x9e\xcc\x0c\xb4`\xfdVxQ\x90X', '\xac\xe1\xcfi`\x07bWY\x8c*\xd1\x04}\x9e\xaf\x91\x96\xd2\xc2', '3~S\x8e\x17\xb2\xef\xd4\x87v\xcb$\xab\x0c``n\xb5\x98\xb6', '\x7fA\xbe\xaeL\xff-2\xff\x1d\x95\x9c\x1bF\x82G\x03\xa1\xdd\xe1', '\xbb{>\xd5~\xf3\xcb\x92O`-Z7\xa4\xef\xa6\xbe\xcf\xfc\x08', "\x99\x00\tys\x91\xf9\xf2\xf0B\\\x8c\x7f\xb3\xf7\xf9'F4\xc0", '\r\r\x8c\xbeR\x14\xba\xcfORW\xb1\xd9\x11\xb8\xe7Ao\xc9\xa0', 'HQI\xe3!\xe2p\xea\xb2}\x07\x84\x17\x82\xe4\xf5Z\x94\x99h', 'T\xbb1\x99\xc0.\xaf_i\x90\xa0\x17Hx\xb7-i\x97\x07s', 'P\x02\x10\xdd\x84;\xb5\xc8A\xe8\x04\x18\xac\x8d!\x943\x97|\x97', '8T\xe6\x17L\xb4\xecB\x13O9"+\x13Zp\xa5\xbd\x99\xb5', '\xf0\xc7_\xf0\xee \x7f\xc3\xe6\xf2\xa5\xc8\xfb\xe61]t\x1fU\xae', ',0\x860\xdfE\x96?\xc6C\xee\nY\xdfC\x8b4\xbc\xa8G', 'p~\x07({\x0f^\xc8?\xf1lv\x86\xba;I(\x85\xec\xd7', ';0\x9c\xe2^\x0e\xff`\x19\x01\xa9\x00\xf9n\x0fv\x9e\x85\x80\xeb', '\xe7\xe7\x0c\xc3Uc\x1439\x8e\xa3\xea\x0b\xd7a\xc3\x8e\x13\xbb^', 'e\xdfSNxJ+\x90*\xe4q\x00\x87\xe9\xe2\xe3\xa2"\x9a\x7f', '\x14\x87\x9e\xc7L\x04\xf7\xd1%N]"h/\t\x1e@V{\x8d', '\xee\xcb\x06\xa8\xeb\xd3^\xf4\xeb8\xc8!~\xdc\xed\xe6\xc0I\x08\x7f', '\xab\x81\xc9\xea,tQ\xd4[\xc7P\x94\x85\xba\x1f">q\x025', 
'\x04\xbfl\xef\xcd5\xaf\x14\xddP\x9f\xa5\xf1\x03\x05\x8f\x13n\x1b\xe1', '\x05\xa0~5\xf6\x9c\xa6b\xd0\xc6\xa4\xeb\x83n\xe8\xfd\xb4\xef~2', '\xb5m\x81#^\x07\xc8\t\xf2k\xee\xdcx`\xc0]\x12\xaeJY', '\xd9D\x1bd!\xa1\xad7"k\xd3\x92#9\xd5\xb1\x99P\x8c\x0c', '\x99\xa6W\xc4\x9c\xc9\xbeV\xf1\xb2#XM\xf5}o\x15\x84\x05\xcf', '(L\xc5\xd5\xd7-\xee\x93nx\x06\xc6\xf4\x01\xc0J\x86\x7f\xd8\xd6', 'Kk\xf0\x93\xf4\xbb\x0c=\x005e7\xc2\x02{\x19\xc1\xb5\xdf\xeb', '\x97\xa8\\\x9d\x07\x07\xc7b\xb4\x16$\xe4\xc3\x87\x97n\x93\xcc\x14\xec', '\x80\xc9\xfdEy\xc7\x91Sw\x8d\xc1\xe3#C\x8dA\x0e\xf20\x01', '\xc3x\xa7 \x93\x13[\\\xc9e\xe4O\x03\xa7\x7fo\xe6R\xd4\xa9', 'lm\xb8|\x07M\x8b\xbd\xc6\x0e\xa3\xd9\x16\x95A\xea\x16\xd6\xcc!', 'K}\\D\xfb\xa1\x81\xd1C\x83\x1b\xf9\xa28W)\xeeo@U', '\xd7^C\x91D\x1a\x84\xf7\xeeq\\\xf9\x10\x8b23\x08gxe', '34\x81\x8b0\xcc;\xf3\xa9\x80\xa6o\x0c\xeb\x99\xe3w\x9c\x12S', '\r\x8db\xec\xd2h>\x9f\xa4\xf5\x80}\xda&\xb4\x84(\x02\x10\x85', "\xd9\xa5\xef#\xa8\x82\t#\xf1\xd2>}\xedvc'K\x9e\xcd\xa4", '@\xa4\x87\x16q*\x98h&R\xde>\xd2\x8d\xe9\xbb\xed\xbf\xa8m', '\xf9\xbc\xde\x0bt#\x84\xc4W\xc9\x95^\xd4\x11\x0b\x9flO2s'], 'nfiles': 108, 'name': 'nm-e0c51c19dc4b'} 
+1188691343.3 SEND_MSG 103.202.16.246 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAefabVLoM/fghtRi3VgTmXP7pFo/5NtunUVI/H+lADWKS5v3IEUyPeq7Erj+W7O2W/pd/3D7Dm/VuAZ9 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2070, 'random peers': [{'ip': '18.20.76.41', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '121.169.59.43', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '179.188.95.198', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '36.212.108.56', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7', 'port': 7771, 'similarity': 0}, {'ip': '124.97.93.130', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '249.89.31.190', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '193.3.131.80', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '44.254.98.242', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '215.169.135.112', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAW7KQbAfJF8zUiLVF9ivvOIq9gxrbOrYsGmNItGKAfr4Ufkoy4OpT5Y7WyApCHZWC3YietKJXxQQeA3z', 'port': 7768, 'similarity': 0}, {'ip': '1.96.131.36', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIibpPXZodVcbWH4OaeQHMnA8WVjIHcnAM0APTZ2AcXj/wdZ6oWbzMsQte2Ntc5bZhodAfxsnU2r2dEf', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691344.1 CONN_ADD 207.103.132.63 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASbsn+HFLsP/zArB48U7IwrYkEpl5RDyuaja0rXCAefyD+89vM2/0oYIdeWIIg22layjXn2qN76RVd6H 2 
+1188691344.1 CONN_DEL 51.33.91.101 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAe4Ea9CyS+a23UFipt87HYsNA/QJNcAQmzB1wo4GAacmoALOKAlGgeTcs4eBgXJx1q9iHpt+QDKgHYPT overlayswarm:connection lost 
+1188691344.3 CONN_ADD 213.127.241.191 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASzD/p7C55GNHw0wgD8MzdbYx8j6cw+Ye9EnC0FUAR0j5qJ1gCy6/8whrSCozCppGXTX4D55f38KURVB 5 
+1188691344.6 CONN_DEL 109.74.88.144 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAT089E605LYzLXUF5aTCWrgcGMQQ4jQPbvmBm9euAFTDlj6F6aLzdvRSHLNKUw4JeZsbyY12yF3MXjJt overlayswarm:connection lost 
+1188691344.9 CONN_DEL 11.254.243.177 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASzD/p7C55GNHw0wgD8MzdbYx8j6cw+Ye9EnC0FUAR0j5qJ1gCy6/8whrSCozCppGXTX4D55f38KURVB overlayswarm:local close 
+1188691347.4 CONN_ADD 247.180.17.188 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc 6 
+1188691351.3 CONN_ADD 77.209.231.170 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU509SdIMeqHqYGqlIn6rRasJnjAj2krGixVEM11ABR0xV88ZJIGUXW4ALfpwMCBu63O27xhsvm6AkkH 2 
+1188691354.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc 6 2 {'q': 'SIMPLE brit ', 'id': '\xba9Z\xd8\xbe\xe4\xe4\x7f\x13\x8f\x0c.w\xb2\x86YK2\x83\xd3'} 
+1188691354.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc 0 'd1:ade2:id20:\xba9Z\xd8\xbe\xe4\xe4\x7f\x13\x8f\x0c.w\xb2\x86YK2\x83\xd3e' 
+1188691357.1 CONN_ADD 23.218.28.57 7763 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 6 
+1188691357.8 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 6 6 {'q': 'SIMPLE dl.tv ', 'id': '\x887 .\x83e\xcf\xf9\xdf\xc4(\xce\xfe\xc5E\xde\n\xfa\xfd\x19'} 
+1188691357.8 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 0 'd1:ade2:id20:\x887 .\x83e\xcf\xf9\xdf\xc4(\xce\xfe\xc5E\xde\n\xfa\xfd\x19e' 
+1188691359.8 CONN_DEL 169.36.124.23 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUobv4ZlYbbwSRrXgql5JrpPcodZamZqaqAOj268AcaBtgB6Pdbe2GWAek0XZo9VGVWcJ69RQpb16OGr overlayswarm:connection lost 
+1188691360.2 CONN_ADD 175.77.14.128 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq 6 
+1188691360.4 RECV_MSG 165.42.236.92 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 1, 'npeers': 18, 'random peers': [{'ip': '106.231.250.97', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '27.87.237.25', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOCdNvgsa7J6sRgq//Rvh3w9WDKfIjVs5xuxfa4GAFO+2HUkDELPnUA+xCz3GHqNsONSINtVHbUJQjb9', 'port': 7764, 'similarity': 0}, {'ip': '101.229.211.182', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'Jon-Room'} 
+1188691360.6 SEND_MSG 12.175.137.96 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2077, 'random peers': [{'ip': '115.91.116.125', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7', 'port': 7771, 'similarity': 0}, {'ip': '0.23.142.150', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '252.61.50.232', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '227.254.12.81', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '58.45.16.250', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIibpPXZodVcbWH4OaeQHMnA8WVjIHcnAM0APTZ2AcXj/wdZ6oWbzMsQte2Ntc5bZhodAfxsnU2r2dEf', 'port': 7764, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'ip': '62.73.151.229', 'similarity': 0, 'port': 54545, 'nfiles': 949, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '13.109.217.119', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '150.252.227.124', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '52.179.252.201', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691360.6 CONN_DEL 158.253.227.172 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAMKQBSDLWW8z+CCu7ZeR6Zg+A4g3xZ7BGEd3Z+BATZvMAwSwYYB2ArUnj6awSkE8ULDLgYxZTm58HNn overlayswarm:connection lost 
+1188691363.4 CONN_ADD 167.204.142.4 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcTt6muAe498hg1yOrcaIvl3s5fMz6ucSWGFS9t0ADRxe1cAZsZ+FQRDefYPTMX3E43Wr2Tq8OZXsE82 2 
+1188691363.4 RECV_MSG 31.169.126.181 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcTt6muAe498hg1yOrcaIvl3s5fMz6ucSWGFS9t0ADRxe1cAZsZ+FQRDefYPTMX3E43Wr2Tq8OZXsE82 2 PASSIVE_BC {'name': 'j23', 'taste buddies': [{'ip': '58.192.41.169', 'age': 473, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf3KPGK31cKyUXKp6+TM4cD6lffGqQOUG12G6v8gAdbCximyhZcmKbf/fP1jxrZwxIfFnyGZ6Sosl8fw', 'port': 32034, 'preferences': ['U3IljVgaEFCgm8RWQ6V5i9OSIYw=', 'qfboyvkmMFh/5LW7hTNJLCuTFds=', 'DtuuT6Ll+j6dF4XccZZ6PAtyQiY=', '0OBpuBpNMJlo9SrNCO0yKsOx9Vs=', 'wahnsoyRDbmeUDPXUg9pfmyJp60=', 'IAYBzPSbgM0shS/WBEqY56wYnUI=', '/+6OWrnf9FJAeRBxkwcabnqd9k4=', 'J8tUXgj1qit2bvTiMwlix9rl1B8=', 'OhAYOT5bN0pjc5gMzWO9ST80pU4=', '/tCregeW4r7UYpHIC721nhWtkuk=']}, {'ip': '218.95.61.146', 'age': 236, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfBO5z5U7ECMie7g3AaYx+W6HiEBaATYS8Gpe0XfASNs8jjNKqkCv/AnJM3VWMPf6iLEcj6yr9pfuaTo', 'port': 7762, 'preferences': ['WTkRWTYx4HITyuF1BUqxdArxgws=', 'MjYu2F6bUcdboJhzmwJ4AKs9eKk=', '1zTMNW5gl9t+Oru1VhPcOhgVdmk=', '+f+QkpjXr16kyWFNjsP1dZl4YiY=', 'lOOLzM3vMyRW/Pl+p7rDr/oND14=', 'X2EF1N1B7S0YHbkryx0l1/8D8qk=', 'hh7+KtrY9yPNsDop2SWaNHS5mM0=', 'bFpmywsCtQJ3pvw5LQHRHftt8qw=', '147Z2VTsFONOSPvdlNTIAjlVXIA=', 'rzg9rFxFKYqo67fqwSCEsRmZxSc=']}, {'ip': '92.84.186.48', 'age': 924, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAef44jcCXsCpThG31NoNEa8FtbDd6PieIR2iU4CUAby8Abgy6FFDXXVI3rEM/pzG6GuWcALoEpnUpj/w', 'port': 6881, 'preferences': ['bbSh9Ge2N1ZL8iHnarPAGhlM2QY=', '6VZ9zvpyBOs3weeIAQGa01pO/uA=', 'FFWHmo+wbFG2EeAd9oWg7YujCbI=', 'gKlw0hZBq9mlBB0+ecHtrVUAQDI=', 'ZjglgDOZX1xT1ehXOxlcQ8CseY4=', '7AkJI55XPXSPFr3IdGqNC78G5K0=', 'jLVNNvbcLO4utoTCa1S1+CfxPt0=', 'XvszSzidf2+qAcMu3rQo+YTOoFU=', 'owAVTwfsJ3XMQEoU2SEvhRVMxeM=', 'TPMtXidUAThkF0eucPFQXhpdCJ0=']}, {'ip': '197.176.218.97', 'age': 131, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdY/PvsP+EtbJykQzr43mEPVInNytYNqJ4G5bXZWAQljeGd0DiIN6Cj7zDuM/BkYxtghOfh+9Nrw+Ndw', 'port': 6881, 'preferences': ['Hgi4KHzHHEKrZKHxRMpYGo4AMD4=', 
'9VHcUKtWKwAVPD7OIVUqt/y4ip4=', '6zDPYSJEo728TLQV/yCfdI3ZGm4=', 'BDRSmVZWF4Xv4wLePT11zFiNG+4=', 'KnsefoMCLa0iEmPriT6ILdWjl7U=', 'hI8O0ceEeuKz1BF+gzMzwcyBUzY=', 'XGIJwNu7BSpOu7hoKAfOWZXBSSA=', '+Rn+L5tDYhpCK4ioR90bXBbX3Fc=', 'pNBNIw74H8t8f4S8WFFUAfPQQTA=', 'W0NNC3s9J8JfNuYY8ivCjdPioqU=']}, {'ip': '232.236.195.235', 'age': 598, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdNySqeo6dUaAi0UAdJ4CGT1C0sLuTFrJ6h0ygHEAYaNL32H8vgyyWoB6QhpkItn3/GTunL6DwUsILBr', 'port': 6881, 'preferences': ['TZxJw/cGWf5Kf9URHWamTrH+Z28=', 'l6hcnQcHx2K0FiTkw4eXbpPMFOw=', '5mnLcSpwGf/LSN214YLt4K3myxM=', '+e5fmJqFlQBGVBeW2OXPy9tSfC8=', '+6sWPGNcaYe//1V6WXZet6qhDhQ=']}], 'random peers': [{'ip': '124.244.198.122', 'age': 819, 'port': 7774, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAIvpoqeLPjcHE14vb52IlsCsL/D/F2ONqvxiMhQAD5wcfYy6iuf1jHPiOY+yhFPn5LSbaX5DFoMlw4B'}, {'ip': '119.90.10.67', 'age': 236, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABcCCC2rpUwAOwufzwGuwf/p8GI1wmLcE06ksTGiAe33wEuokrXdfvwliewRKs6sveYGre+8onqJbkjh'}, {'ip': 'superpeer4.das2.ewi.tudelft.nl', 'age': 1758, 'port': 7004, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA'}, {'ip': '62.156.225.60', 'age': 131, 'port': 7774, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/O5b+hTW3Nrf2o1JAIaKomB7Z94R802h3MG2m9Ae7QXzTmoey3oodDZXjwaK39hu1wJcyH3sLWc5EK'}, {'ip': '233.196.183.17', 'age': 632, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMRF1P2P8eOcKUfjfem5JnWsEkLhaUOrrRRFCRa8ADXPjdUtWjeFQVrYJKCaqBKKzLkWJjnGuaQHlCk+'}, {'ip': '191.155.231.161', 'age': 367, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5rIATU3B+XkWATKLDx89JOEXPx8xooq0e+NZxcAUMrVP2iXbByH+pupIDb5UukzSdUSfodLByVAtJg'}, {'ip': '230.77.61.87', 'age': 632, 'port': 7774, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZVdqh+mneXwgChoId6J9U2/p03TimVO8UjPVRZUAA2GN75uqhJr67UACIjbXnTQ7utDx6ThLCogJfIo'}, {'ip': '221.181.29.150', 'age': 236, 'port': 7766, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAENbulKRByBLRV7lZBhGTt4dAWNU3dEf4/pUpaeSAZFc2nJhUL8Z1avCHFs7DoEAUo6N8y+4bPcj3Bdb'}, {'ip': '131.198.85.238', 'age': 318, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUdohh6JeqhkYV3PN/2nT2POo8GM1UqqGETP1KAKAAeA2Np38ICdlxwjhCmfZ4u9R8j9i31sdNm6dmn7'}, {'ip': '179.120.36.162', 'age': 159, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAOpAz6zAcfwxexw9v12R9yfvzPrCYS1poX1qAK7ATC9+uUdd+8IPldRBhErlDxibSNqfNGHYAJdAzLS'}], 'preferences': ['xFsTol9zd1bFp1mxmDpESpIqcVE=']} 
+1188691363.5 SEND_MSG 191.124.6.10 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcTt6muAe498hg1yOrcaIvl3s5fMz6ucSWGFS9t0ADRxe1cAZsZ+FQRDefYPTMX3E43Wr2Tq8OZXsE82 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '115.195.208.126', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '136.243.177.179', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '245.77.138.153', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '252.163.13.78', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '73.51.99.133', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7'}, {'ip': '207.123.209.184', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '135.222.34.93', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '219.43.64.76', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '225.148.244.156', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691364.7 CONN_ADD 14.155.41.48 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWxQG7SUCfv1mqXI6aENBNB9MIA0+2546q6TRnUlAL237nGhMdKSSqfHtmcI4XQbg6LNsCWy+DVO3HSe 2 
+1188691365.4 RECV_MSG 34.231.136.94 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWxQG7SUCfv1mqXI6aENBNB9MIA0+2546q6TRnUlAL237nGhMdKSSqfHtmcI4XQbg6LNsCWy+DVO3HSe 2 PASSIVE_BC {'name': 'orioger85', 'taste buddies': [], 'random peers': [{'ip': 'superpeer8.das2.ewi.tudelft.nl', 'age': 118, 'port': 7008, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU'}], 'preferences': []} 
+1188691365.5 SEND_MSG 167.32.48.85 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWxQG7SUCfv1mqXI6aENBNB9MIA0+2546q6TRnUlAL237nGhMdKSSqfHtmcI4XQbg6LNsCWy+DVO3HSe 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '243.200.185.48', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '99.244.120.132', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '177.214.208.236', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '103.80.4.202', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv'}, {'ip': '131.211.150.145', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7'}, {'ip': '146.17.118.245', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '121.91.67.221', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '127.252.71.126', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '94.156.104.222', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691366.9 CONN_ADD 198.79.166.130 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASodEn3niQvuVmg309Y/bk5aWNl/Sn6wqybSK2AwARRfPcZQGXl/KRhVo2oiOMG5r53yCSh0JcuyBztS 2 
+1188691367.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 6 7 {'q': 'SIMPLE dl.tv ', 'id': 'z[\x07\x0c\x0f\xb7\xc6<\xb8\x1b\x95\xf5\xd5\xba\xe9\x1d\x8f\x9a\x1eN'} 
+1188691367.4 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 0 'd1:ade2:id20:z[\x07\x0c\x0f\xb7\xc6<\xb8\x1b\x95\xf5\xd5\xba\xe9\x1d\x8f\x9a\x1eNe' 
+1188691367.4 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 2 {'q': 'SIMPLE cool ', 'id': '\x0em\x1e\x9fH\x90\xb0\xca\x90\x86\xa3\xc4\xca\x06>>\xd1\xf9}\x89'} 
+1188691367.4 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 0 'd1:ade2:id20:\x0em\x1e\x9fH\x90\xb0\xca\x90\x86\xa3\xc4\xca\x06>>\xd1\xf9}\x89e' 
+1188691368.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 6 8 {'q': 'SIMPLE dl.tv ', 'id': '~\x99@w\x01\x18J\x83\xd6\xb3YH\xcb+\xe0\rk<\x95\xb2'} 
+1188691368.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 0 'd1:ade2:id20:~\x99@w\x01\x18J\x83\xd6\xb3YH\xcb+\xe0\rk<\x95\xb2e' 
+1188691368.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 6 9 {'q': 'SIMPLE dl.tv ', 'id': '\xe5\xcf\xb4\xd3tP)\x83 ;\x8f\x12\xdb1@2\x0c\x13uJ'} 
+1188691368.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 0 'd1:ade2:id20:\xe5\xcf\xb4\xd3tP)\x83 ;\x8f\x12\xdb1@2\x0c\x13uJe' 
+1188691369.4 CONN_DEL 9.74.97.233 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeg/a0WEB2kJbeGjeRfA2fquxFNFYsgOI0Xb+nmpAVIWltgFuSPnF2Q1XjHwBBAFEPvBHzuO6mvTEpZs overlayswarm:connection lost 
+1188691370.3 CONN_ADD 85.147.217.80 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 
+1188691371.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 6 10 {'q': 'SIMPLE dl.tv ', 'id': '\xb7\x0b\x0fb5FW\x1e\xfa\x7f\xb3)`b\x80&\xd2"\x0f?'} 
+1188691371.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 0 'd1:ade2:id20:\xb7\x0b\x0fb5FW\x1e\xfa\x7f\xb3)`b\x80&\xd2"\x0f?e' 
+1188691371.9 RECV_MSG 217.224.183.52 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 18, 'random peers': [{'ip': '143.202.99.196', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'ip': '59.41.218.91', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk', 'port': 7002, 'similarity': 0}, {'ip': '135.42.15.52', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU', 'port': 7008, 'similarity': 0}, {'ip': '113.238.111.67', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'ip': '180.249.229.14', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '172.133.103.253', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/O5b+hTW3Nrf2o1JAIaKomB7Z94R802h3MG2m9Ae7QXzTmoey3oodDZXjwaK39hu1wJcyH3sLWc5EK', 'port': 7774, 'similarity': 0}, {'ip': '52.247.111.19', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'port': 7766, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'PG-DELL'} 
+1188691372.4 SEND_MSG 225.12.224.80 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2086, 'random peers': [{'ip': '36.36.142.177', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '110.39.44.122', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '183.86.58.210', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'ip': '27.223.125.66', 'similarity': 0, 'port': 54545, 'nfiles': 949, 'oversion': 6}, {'ip': '229.25.152.109', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7', 'port': 7771, 'similarity': 0}, {'ip': '165.21.61.240', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '177.232.108.13', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M', 'port': 50000, 'similarity': 0}, {'ip': '223.238.90.122', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '46.4.80.90', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOAkxv/0qc5YIsqWobOlEEmP7P8z9daSivw0fU+vAejyfY/O7rU3H/7GJnyebSqxafH1qhhIyhdx523w', 'ip': '49.224.48.231', 'similarity': 0, 'port': 7765, 'nfiles': 295, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691372.4 CONN_DEL 47.134.255.89 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc overlayswarm:connection lost 
+1188691372.6 CONN_DEL 79.163.110.228 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc0PZJeUaj8RStpsLTyJ/0szs28v1FnzJS62A9+TATpPTpnolUtgC4nimVcoDylFcAex6FljTsNeB0fc overlayswarm:connection lost 
+1188691375.5 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 6 1 {'q': 'SIMPLE seeed vs zdf ', 'id': '\x12\xdd\xe8\n\x8e\x86\xb3Z\xd9\x8a\x063\x9d\xcfHW\x15\x95Q)'} 
+1188691375.5 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 0 'd1:ade2:id20:\x12\xdd\xe8\n\x8e\x86\xb3Z\xd9\x8a\x063\x9d\xcfHW\x15\x95Q)e' 
+1188691379.6 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 6 2 {'q': 'SIMPLE seeed vs zdf ', 'id': '\xf0\x1f\xa9\xe4_\xd5$!7\xf4p\t\x07\xb5y\xfbc5\xb3O'} 
+1188691379.6 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 0 'd1:ade2:id20:\xf0\x1f\xa9\xe4_\xd5$!7\xf4p\t\x07\xb5y\xfbc5\xb3Oe' 
+1188691380.3 CONN_ADD 202.233.201.108 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAA3P4PF2JX4KF3qOaxh1DyHtAkrDT2f2XX0ZmF78ABhrShgGJCuipeHzq1XJ3WthdMnG26YVbFEK6N99 2 
+1188691381.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 6 3 {'q': 'SIMPLE seeed vs zdf ', 'id': 'm\xc2\xd7h\xcb=\xf7\x8f\xd1C\x8a\x87i\x16\xa9[\xba\xfd\xe2D'} 
+1188691381.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 0 'd1:ade2:id20:m\xc2\xd7h\xcb=\xf7\x8f\xd1C\x8a\x87i\x16\xa9[\xba\xfd\xe2De' 
+1188691382.4 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 6 2 {'q': 'SIMPLE shemale ', 'id': '\xd7\x9e\x80\x1cl\xac>\xf8\n\xafW\x8fU\xa1\xbd\xa7i\x90\xa6\xf6'} 
+1188691382.4 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 0 'd1:ade2:id20:\xd7\x9e\x80\x1cl\xac>\xf8\n\xafW\x8fU\xa1\xbd\xa7i\x90\xa6\xf6e' 
+1188691382.4 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 6 3 {'q': 'SIMPLE shemale ', 'id': 'F<X\xcc\x9f\x1dA\xb6O=\x1cl\x96\xa85\xc6$\xc5t\n'} 
+1188691382.4 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 0 'd1:ade2:id20:F<X\xcc\x9f\x1dA\xb6O=\x1cl\x96\xa85\xc6$\xc5t\ne' 
+1188691385.5 CONN_ADD 126.82.87.186 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAROXlOSXFHSCd4tozK5g3g44rsdGBnfTSc0fWplTAf7G2mMcNLDZLeFijgZNoxdlYxcnLEccgHYWy6JD 6 
+1188691385.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAROXlOSXFHSCd4tozK5g3g44rsdGBnfTSc0fWplTAf7G2mMcNLDZLeFijgZNoxdlYxcnLEccgHYWy6JD 6 1 {'q': 'SIMPLE death note 32 ', 'id': '~>,\xfb\xb1z\x0e\xb6\xce\x8d\xfc\xaf\x96\x94\x92S\x96\x9c\x94\x16'} 
+1188691385.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAROXlOSXFHSCd4tozK5g3g44rsdGBnfTSc0fWplTAf7G2mMcNLDZLeFijgZNoxdlYxcnLEccgHYWy6JD 0 'd1:ade2:id20:~>,\xfb\xb1z\x0e\xb6\xce\x8d\xfc\xaf\x96\x94\x92S\x96\x9c\x94\x16e' 
+1188691386.7 CONN_ADD 40.216.240.82 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAajtKN3+NL36YxykteuvSflnWv2g6eduwktkNncCARllVpoa9FshKL7kQf5mbSpzMoQrhHXu26GYB4fH 2 
+1188691389.6 CONN_DEL 5.125.223.184 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALtinCNB77hftcNL1nLgh8FoZU0yQNu8hmdNZ4WXAYFj/iep5b485SL6E8hXKWRWqZtl+PeifI4akELg overlayswarm:connection lost 
+1188691391.5 BUCA_STA 9505  2088 2088 100  1581 12096  62 62  0 9 10 
+1188691391.9 CONN_DEL 109.36.83.255 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKsL5SrNPjsautmji9QG1W/DixKpHGCBTz0MQJL3AetGXA3XCjkgp7rhCb5h7mmXp8tBl1N7w5SbCxef overlayswarm:connection lost 
+1188691391.9 CONN_DEL 3.58.19.142 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOaDjVYzkeGA77xBqQUd3G1zomCgEfUf0E79l9oJAeP/zEpBUxmB0yUW0yldlJ8PCkL9OBXilEQgAqhF overlayswarm:connection lost 
+1188691391.9 CONN_DEL 65.61.22.43 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALde/wc2KxBgoWUO+OmStEye1+z77O9mLedVe1bCAY4yOq0dMyNanX7wbwerQMiXZY4qmC/eBIrchFq2 overlayswarm:connection lost 
+1188691391.9 CONN_DEL 62.93.77.90 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv overlayswarm:connection lost 
+1188691391.9 CONN_DEL 106.198.147.72 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv keepalive:'Not connected to permid' 
+1188691396.0 CONN_ADD 3.89.123.102 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGTdaWOre+S7zONZ9C5FfgS4WkI9nk3vAFwUhM2fALs3dG06i/kjcVYKgvuJzOsOetZL0OlChP8vFM7k 2 
+1188691396.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq 6 0 {'q': 'SIMPLE vanilla fudge ', 'id': '\x8b\xcf$V\xa7HI\x98\xf5N\xca\x94x\x12\x1cV\xd7\x1ak\xfd'} 
+1188691396.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq 0 'd1:ade2:id20:\x8b\xcf$V\xa7HI\x98\xf5N\xca\x94x\x12\x1cV\xd7\x1ak\xfde' 
+1188691396.8 RECV_MSG 40.8.7.67 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGTdaWOre+S7zONZ9C5FfgS4WkI9nk3vAFwUhM2fALs3dG06i/kjcVYKgvuJzOsOetZL0OlChP8vFM7k 2 PASSIVE_BC {'name': 'spinner', 'taste buddies': [{'ip': '37.195.182.227', 'age': 64, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYFXFYpL4PrXTXZQsFzltKDbxejCl6vIfiDVr1y2AHM/VEZFr0A9nkNFGk0nwmV9ItrmByIu1+ftQedb', 'port': 7763, 'preferences': ['22n3l6neCq+av5gXp+5M6WNEpRQ=', 'yNFhqLD4FMHKJvkwSL1lXQPnbPc=', '0EkPdtoQhl6aStPkAyAMJuYY6Ec=', 'q8wIOzRWFSX6HX3tHb62Vvch80g=', '9roTJGsG6gvSGS/wC46itRVIaPI=', 'WtoqGLWMsSfYlv1sU3+HFKpooIU=', 'owPrYnhGLYk+wYdsgZ/F9icERCY=', '4Vx7WrI+Qd6P135c6xSVx6mJs4M=', 'nLiFr4J2epp1LPIFC1+UQ+GQwi0=']}, {'ip': '72.163.253.83', 'age': 1, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL', 'port': 7767, 'preferences': ['+KxsV6fJJYZcEha4D4UyS2a+Al0=', 'oLV8rJe+vv9ss10/6XeGp8tYB9U=', '/9BLYJcvDtf0xl+mWYHrTpKQgis=', 'pe6+bzyun1GZxoylnlAlYwY1gf0=', 'LOW7YQep53UjbohSveDTnUZVd1w=', 'VYDDioTGpUD6DMQyxMMTf8DVTO8=', 'mQAJeXOR+fLwQlyMf7P3+SdGNMA=']}], 'random peers': [{'ip': 'superpeer2.cs.vu.nl', 'age': 64, 'port': 7002, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk'}], 'preferences': []} 
+1188691397.0 SEND_MSG 39.229.174.22 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGTdaWOre+S7zONZ9C5FfgS4WkI9nk3vAFwUhM2fALs3dG06i/kjcVYKgvuJzOsOetZL0OlChP8vFM7k 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '186.200.226.33', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '40.121.89.239', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '7.250.28.250', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '152.45.226.20', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7'}, {'ip': '96.199.70.255', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '147.9.183.51', 'age': 0, 'port': 50000, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M'}, {'ip': '2.69.22.93', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '152.180.183.224', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691397.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 6 11 {'q': 'SIMPLE office ', 'id': '\xa4*\xbc\xde\x12\x1f\xeb\x84\xefH\xb2r/\x92\xfe\xc3hD:\x85'} 
+1188691397.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc 0 'd1:ade2:id20:\xa4*\xbc\xde\x12\x1f\xeb\x84\xefH\xb2r/\x92\xfe\xc3hD:\x85e' 
+1188691397.5 CONN_DEL 233.51.183.134 50000 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL2/aDQgkFtvSaBrI2FILqwP8CGACVlLNNkMsbj9AVpELrtcLXilyiRRizCVo2u5LmBLsCTna/2snp9M overlayswarm:connection lost 
+1188691402.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 6 4 {'q': 'SIMPLE fable ', 'id': '\xf6\x03\xd8\x85{\x1b\x92Q\xc0\xa1o\xbf\x9b8%\xae\xd0\xf0P\xf0'} 
+1188691402.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 0 'd1:ade2:id20:\xf6\x03\xd8\x85{\x1b\x92Q\xc0\xa1o\xbf\x9b8%\xae\xd0\xf0P\xf0e' 
+1188691403.0 CONN_ADD 67.232.41.103 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIVlP6RU+MGlzhmZA4/XvRQR6P+UCaRij18zmbEAQq3yQ5Maw4auxmSKhIMdJHCpMlxErBM/vGbNS0Z 2 
+1188691403.1 RECV_MSG 29.70.231.152 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIVlP6RU+MGlzhmZA4/XvRQR6P+UCaRij18zmbEAQq3yQ5Maw4auxmSKhIMdJHCpMlxErBM/vGbNS0Z 2 PASSIVE_BC {'name': 't-1000', 'taste buddies': [], 'random peers': [{'ip': '191.46.70.161', 'age': 90, 'port': 26017, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAC8TOXRp2+Y6JQDcws8YMQswwwolUYK/A6T4r6NoAMUFLV6ud7wGlM+BgpBfzZq/hq0PcuOPuT2ZJEAw'}, {'ip': 'superpeer6.das2.ewi.tudelft.nl', 'age': 240, 'port': 7006, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd'}, {'ip': '183.132.115.167', 'age': 59, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYeaL9ZgSj6HeLn0t7gDIdgF5GuMomfjjaZ4N7DHAIKSO1ZWbLuNPfK0MXIGod4LZ2Q3OWeC2K4gW/se'}, {'ip': 'superpeer5.das2.ewi.tudelft.nl', 'age': 180, 'port': 7005, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu'}], 'preferences': []} 
+1188691403.3 SEND_MSG 182.251.211.24 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIVlP6RU+MGlzhmZA4/XvRQR6P+UCaRij18zmbEAQq3yQ5Maw4auxmSKhIMdJHCpMlxErBM/vGbNS0Z 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '214.34.9.116', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '116.240.215.13', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '129.251.76.72', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '93.133.164.108', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7'}, {'ip': '27.154.163.55', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '106.190.15.28', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '4.175.129.141', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691404.4 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 6 2 {'q': 'SIMPLE ufc 74 ', 'id': 'm\x0e\x06?\x81+\n\xaaJ\x87\xf8\xb7\xf2\xaal1\xca\x91\x94\xc0'} 
+1188691404.4 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa 0 'd1:ade2:id20:m\x0e\x06?\x81+\n\xaaJ\x87\xf8\xb7\xf2\xaal1\xca\x91\x94\xc0e' 
+1188691405.9 CONN_ADD 109.5.171.35 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbSyl1+6YL8owBx6skLSnkH3IAUa2OrgXyoXo1ZgADW/JjLfyEgxQw9r6QgLxFIsSfQ9qu+I9E9K+Ti2 2 
+1188691407.0 CONN_ADD 237.34.223.138 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz 6 
+1188691407.2 RECV_MSG 213.79.243.5 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 1, 'npeers': 261, 'random peers': [{'similarity': 0, 'ip': '154.224.123.28', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdKIkz837tiibf+MXLiRzwkmJdPnMoM9upyFQbRdAA0XFRQjVIdIibVn1irCeBs+zZOy4fjUIWlhIHMF', 'oversion': 6, 'nfiles': 23416, 'port': 7025}, {'ip': '253.55.74.69', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfTOXIIKGSuG7I7hUciVO0bjcdT4pehXjOGD2LjvAY/C8lJnnKZOKoPTNHuBALJWcumcTDa6ris/HsoL', 'port': 6881, 'similarity': 0}, {'ip': '41.153.251.235', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYlxPwkN8rv8MYsHEtd5DN/qU0Dq0rylrwSMUtozAGNES3lDL6zgMra+GkgQRHSiIQjhXeVduhMS/4P/', 'port': 6881, 'similarity': 0}, {'ip': '136.176.233.83', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcW1L5lgJ5Cqv0977TNy7cgYOCSO0B6Q07gFXHVsAYa6/SlVPRWgUrgGjzW7oQjgA5EYvBJwKBvsaQsL', 'port': 6881, 'similarity': 0}, {'ip': '72.251.96.57', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHRZFQ+s/OwWeUBuc/r0MMmYm13sqvnSLSgoMJhIAYjVAX4BKVtxnBnxQZ3iLphj9HvkFthgmDcRNbew', 'port': 7774, 'similarity': 0}, {'ip': '242.231.236.44', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADEJAxP6+C2k83YF4gdXgi3AuRjtBHZCJWRhFdViAapaFEl3o3+Soc2Akyc/8aSMbX9gaXUQbBggUFdH', 'port': 7762, 'similarity': 0}, {'ip': '242.119.135.25', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg', 'port': 7768, 'similarity': 0}, {'ip': '88.117.181.81', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc', 'port': 7007, 'similarity': 0}, {'ip': '140.195.202.52', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAC9+dgTwgTYeOiQnAsjtO5drURgnOU+zS86O6ZhQAY8LdWFYK6XUCIg9fU/hMsQ0IHAEhp/2yZgSUzKc', 'port': 5737, 'similarity': 0}], 'collected torrents': 
['\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z', "\x16Kp \x8a)\xe0\xa1\xa1p\x13\xab\xac\xa8d\x83\x01'<\xd0", '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\x1bq\xba\xd2b\x8c\xab\xc6\xf3dlu\x13{\x95GzJ>\x7f', '3~S\x8e\x17\xb2\xef\xd4\x87v\xcb$\xab\x0c``n\xb5\x98\xb6', '\x90\x1c\x9cDa\x17)=\x9bg\x85\xcb\xb9\xf3\xb8\x9f\xb5\xa3\x05h', '\xd4\x07\x06\xb4.\x85/T\xc1\xe0\x19W\xfeq\xb3\x08\xca\x85\xe6\x89', '\xd8y\xbf1\xbc|R\x16\xb1\xdb^#\xad\xf2\xf0\xa0*cI\xd9', '\xdb\xc8$\x8b\xa7\xc0O\xf1\x88\x80\xa1\x8e\x02\xd5\x7f8Da\x94?', '\\\xc4#Y*\x08\xb1\xbe"\\\x15\x93\x0b\xee\xeeS|\xa4B\x98', '\x9c\xb8\x85\xaf\x82vz\x9au,\xf2\x05\x0b_\x94C\xe1\x90\xc2-', "o'\xfb\x88\x1f\x93]ILjK1Nt\xd0\xc6G\x99\x80`", 'dM\xdd\xce\xcd\x80\x06\xc6*6\xce\x03\xd4\xf4\x8d\xe1\x10__E', "v\x0b\x8bw\xbfM\x1f\xa9y\x92\x12Y\xa3\xc9l\x19\x7f\xce'\xf9", "=2]Y\xf9x\x8eVZ\xf1$m'6'\x91S\x00\xe7\x19", '\xe7(\x9e\xa9\xca\xa4\n\xa4\xe0\xc1c\x03\x873r$S*3\x94', '\x03\x08\x1f\xd1\xe9\x19\x1c\x05\xbe\x1f\xe6u\xacr\x84>\x7fA\xde\n', 'i6\r\x91<\xd4w."\x13\xef/u\xe9\x04J\xf8/\xfd\x10', '\xa8\xcf\xe1J8\xe0\x06|\xb62\xf0|\xf75\x96C\x9c\x86\xb3\x05', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2'], 'nfiles': 20, 'name': 'NB-Pina'} 
+1188691407.2 SEND_MSG 135.150.223.63 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2096, 'random peers': [{'ip': '13.73.181.136', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '104.157.193.47', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '153.219.186.167', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '77.137.32.158', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7', 'port': 7771, 'similarity': 0}, {'ip': '67.250.138.201', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '156.107.48.77', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '134.30.100.231', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYFXFYpL4PrXTXZQsFzltKDbxejCl6vIfiDVr1y2AHM/VEZFr0A9nkNFGk0nwmV9ItrmByIu1+ftQedb', 'ip': '9.236.243.136', 'similarity': 0, 'port': 7763, 'nfiles': 4965, 'oversion': 6}, {'ip': '180.141.255.135', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIibpPXZodVcbWH4OaeQHMnA8WVjIHcnAM0APTZ2AcXj/wdZ6oWbzMsQte2Ntc5bZhodAfxsnU2r2dEf', 'port': 7764, 'similarity': 0}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691408.7 CONN_DEL 233.32.185.161 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcf2KHVGDDT45nfTWYDzhI6ZlwsqFGIpqD/msz36AG2Stfal8gdq1TyEeT01Vqmmaw0h6rdnkAannIaQ overlayswarm:connection lost 
+1188691408.9 CONN_ADD 252.124.201.51 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc 6 
+1188691412.9 CONN_ADD 29.154.217.25 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN 6 
+1188691412.9 RECV_MSG 133.249.8.246 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 1, 'npeers': 319, 'random peers': [{'ip': '150.24.169.134', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaUKsFKOjMnAPIvcy5IvKz5CPAWafoVQj5y+5wtNAf/ogHl+YltJm9NDnH750ZmeQcdSR2ZUh+RdUKEb', 'port': 7762, 'similarity': 0}, {'ip': '249.13.148.109', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAD/et74U/crKXoHkjG1aBLdhIQYqVe0aDbwciuUxAIZDoVFBB+RCzewPYwsMDtC0QhgJXDLCvGk/emdz', 'port': 7766, 'similarity': 0}, {'ip': '164.54.48.135', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN30ORVwWeOE7XqQPM5ZLZSxISnLx4WGNwVHkVQsAdEhVxSZQT/pRwQI8cBfTDNAtHFD3w3kn/nI+1pF', 'port': 10050, 'similarity': 0}, {'ip': '101.101.78.227', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACV/7d+zSCQHdqmeTDJwaXGjXrlC3Ffen5R6D1kRAWHh+lpcr5kiG/1pkvHSTycXc+EYwZef27ogYHXU', 'port': 7766, 'similarity': 0}, {'ip': '84.6.175.92', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbduv2Rl5EHyWP2/LgAKn9kx2rLl7y87jy831qIDAAH47ysyElQYddQ51J3U7sbHz+9gRINYnk6MTth9', 'port': 6881, 'similarity': 0}, {'ip': '164.192.149.175', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfBO5z5U7ECMie7g3AaYx+W6HiEBaATYS8Gpe0XfASNs8jjNKqkCv/AnJM3VWMPf6iLEcj6yr9pfuaTo', 'port': 7762, 'similarity': 0}, {'similarity': 0, 'ip': '43.75.102.199', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'oversion': 6, 'nfiles': 111, 'port': 7773}, {'ip': '176.83.251.227', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWY3lrq7SwMxk2x1lITaFV6w9+MaDMCAfc8dhlSWAf4TJH9zfPYXNVuNq1O1udqBOs682Ap4PzkBs+eC', 'port': 7773, 'similarity': 0}, {'similarity': 0, 'ip': '167.235.150.199', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'oversion': 6, 'nfiles': 2253, 'port': 54545}], 'collected torrents': 
['\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2', '\xd8Y\xad\xaek\xa4a*\x01\xcdT\x06\x1f\xa3\x8b\x07\xabsmX', '\xdb\xb4\xbb\xf5\xbet\xef\xb6@P\x19\x9d\x94\xb8\xbe\xdc\x17*\xe1\xf8', '\x9c\xb8\x85\xaf\x82vz\x9au,\xf2\x05\x0b_\x94C\xe1\x90\xc2-', '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\xc0\xf5A\xde6Db<\x86\r\xd6R\xcfDxc\x15\xd51\x16', "v\x0b\x8bw\xbfM\x1f\xa9y\x92\x12Y\xa3\xc9l\x19\x7f\xce'\xf9", '^K\x04CHM\xebt\xac\x04\x8cH\\9\x1f\xe7\xdcc\xf4c', '\x16\xa63XI\xc1\x81\x07\xaf\x8cH-\xab&\xed\xd3*\xdb\x92\xa2', '\xa7\\\xc8-\xaa\xba\xe8\xfa\xa2\xf0t*qd\xbe\xe3\xcb\xe3L\x1b', '\x12}l\xa6\xac\x7f\xeb\xbf\xf9\xeb\xf1^\xe5\x92x\xe3n*\xf6\xfa', '\xc6\x1b\xa0\xc7\xd7\x1f~\x13\x91\x92pku\xdcn\xa5\xe4~<3', 'R\xf1\xd8\x99\xe7\xe9|\x102\xbd\xc3T\xcc\x00\xdcD\x99Z\x00\t', '\x8f&\xaf\x17`8\xf3]\x1fh\xdd\xe1\r2+\x8eMD\xa7\xac', '\x90\x1c\x9cDa\x17)=\x9bg\x85\xcb\xb9\xf3\xb8\x9f\xb5\xa3\x05h', "\x99\x00\tys\x91\xf9\xf2\xf0B\\\x8c\x7f\xb3\xf7\xf9'F4\xc0", 'N:(\xfb\xaa{]\x9f\xa3\xe1[\x8fr\xac\xab\xd1\rN\x16\x83', '\xac\xe1\xcfi`\x07bWY\x8c*\xd1\x04}\x9e\xaf\x91\x96\xd2\xc2', "=2]Y\xf9x\x8eVZ\xf1$m'6'\x91S\x00\xe7\x19", "\xf2l\xb8\x83\x1dg9'\xbf9\t\xeb\xa4S\xdf\xe4\x0by\x8a\xa5", 'S\xf3\x16&m\r\xf4\xabr\xf4t\x18\xaa%42\x8f\x87\xcd&', '\x11G\x7f\x8e\x1d\xf9pV\xefb\x9b\x02\x98\x04k\xbfw\xd2\x83\x05', '\xb0\xd5k\x955\xf2\x1d\xf4\xfcn\xdf\xaat\x00^]\xd4\x9e\xd6\xef', "o'\xfb\x88\x1f\x93]ILjK1Nt\xd0\xc6G\x99\x80`", 'u\xe5\x04\x87\xe5\n7\x10\x8a\xd5\xdd\xcc\xe6Pb6\xc50\xa2W', '\x03;\xb4\xc6\xbay\x11&\xef\x0f\x88\xb6\xf6\xd4\xd8V\x10k\x84\x10', '\xcb\xbe\x9dt\x1cS\xd9?\xe4\x00\xb0\xecL\x81nO\x8d.\xb4\xca', '\x04H\xe2\x8c\x16b\x1d\xee7R\x01\xa1\x16\x8fJ#i\x1b%\r', ';0\x9c\xe2^\x0e\xff`\x19\x01\xa9\x00\xf9n\x0fv\x9e\x85\x80\xeb'], 'nfiles': 30, 'name': 'PHARTT'} 
+1188691412.9 SEND_MSG 35.77.242.150 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2102, 'random peers': [{'ip': '224.246.97.215', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '9.114.149.154', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '73.19.122.18', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '221.52.231.14', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '24.109.115.227', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7', 'port': 7771, 'similarity': 0}, {'ip': '160.123.225.120', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '22.0.15.162', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '242.222.171.133', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'port': 7773, 'similarity': 0}, {'ip': '106.154.161.166', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 
'connectable': 1} 
+1188691415.6 CONN_ADD 10.52.227.35 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASae3XmgW6+KfHYh3qNfB920R/yA1DVUqvjXuS5OAHsPBqDblIrIdBpmTlSJ6b5jlxtOYk9eRho+h3At 2 
+1188691415.6 RECV_MSG 208.1.98.16 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASae3XmgW6+KfHYh3qNfB920R/yA1DVUqvjXuS5OAHsPBqDblIrIdBpmTlSJ6b5jlxtOYk9eRho+h3At 2 PASSIVE_BC {'name': 'robert', 'taste buddies': [{'ip': '51.48.69.148', 'age': 540, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAca7KKQeshKtGLb5u8N952KqnbzvSgV6Otgwrmn7ALpoEYB0qTE2QSB30dK6STmbD6oDtSMsxxQdobaW', 'port': 7764, 'preferences': ['Pd0uLocjCDjKbpKUoph+hfgXtB0=', 'qMNJd12K5ehoh4CSU9RO3wKyQSk=', 'fkShKl41WgOxCbKcVrPc1RFhP4E=', 'sDiM/wmG9rWNxKRPpO8jKJyRWdc=', '/6WlSGY4j1EF+/OaEO71wPugbCA=']}, {'ip': '199.223.182.221', 'age': 150, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATea76jbi8e5VtvR7YreeCOM+VRoKkWBd9VlGuMfAGL9Ya+mO30NEKASQ76JMPd+Zdw/HMLMQAvOHDWJ', 'port': 6881, 'preferences': ['4ioyLgPRZ1dOA3ENdt2YpOdGQfQ=', '5g+dZHHmtKXnKFvmvBjDE2ygxCk=', 'KqmgLfbQHkyFN5O1rGfYvBY+cNw=', 'alJwxel+g0CerF44yHgc+Vfh1Uw=', 'pBW1lPqidXb6TUtyXlc7LaYcAbc=', 'qMNJd12K5ehoh4CSU9RO3wKyQSk=', 'e1FPMuBTwrb42z9hbbodEhEj28I=', '3o0g9KCYnOUoZstuQBy1d7BjnUY=', 'bxnmC5MKwvzcAZHdNRyP8Hvenu4=', 'vPeQJpgWWMxCOuW8kuoyGx18jI0=']}, {'ip': '190.17.14.67', 'age': 1139, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf6gOSE2js+HPOG33c9tn/WP7+F/a0BJeEVWGmF1ABugcVPbltse6zxC7V/bttMZM8E1m4a7uJRSr9o1', 'port': 6881, 'preferences': ['Y8xLhPzHY835+SNnjGswaZaFODI=', 'qdpoREnQkI8gIZHd4RJAry6flmI=', 'z+elt3lc0zaLHxCjAlxrZFBQB8M=', 'Imbre9VEBWcA3mHIimBss3SKVD8=', 'kgvPs3BzGBGTFzhEEDJUqtNMuOg=', 'xdz9CCrcnGW8QjgRwN9Me6f96aI=', 'oc4/SdVOAG95J57IGpS7zVTOJHc=', 'eu/oUB1zotWxcygJOCgJUDqunrA=', 'amUauN8THCQtJFibmLOT/+6Cous=', 'UJmgX8qcVORHv5cnkMNyFBXNu9M=']}], 'random peers': [{'ip': '169.124.77.174', 'age': 1300, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPC+0lHr4BSLClZUwz5bX2vi4t+Pp50qrQL1j5ksAQ3mRJpFFEtZ0VZXsEPF6eDSgbggYhQ2TI1Dwe5Z'}, {'ip': '23.228.250.140', 'age': 1300, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU+CkZ0OS4p48mja9vkteCXL4KHKImFmZ/G5i2WEAJ/x++Qz5seKCFpIPaZlM5Fa5qqV0AMp0w+vlsa+'}, {'ip': '215.117.201.91', 'age': 1184, 
'port': 59736, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbE4oWI0rzijk25+BxyDXPmLtT8c1Zq5PPY2F3e+ACUVUlE6IdhCzz2gruieqV41Hzq+McpDGr6R0Wqi'}, {'ip': '152.48.19.38', 'age': 907, 'port': 50100, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ2agzll0pPiGDqje2MAXsv3Glk+HLUsKnPEbvsKAb4CtYKZP9X5nSHUpXeX/F3yX3MJVIq8rytAL5xF'}, {'ip': '250.109.159.105', 'age': 276, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ8jaDP8xrpd0rALjvrIW5BV32k+B1487NVLENB7AYENLAvRy+RRiJ1gnlmwPKpXj799IFxFXTQOUjs3'}, {'ip': '12.181.123.245', 'age': 273, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARwpiOkFl9pFIGiAR1Bp9CCgDVc+UlOiQk50S4muAIOSWaxoYmku2i84ETJheTpcfWUqnLqJ9uiqIPOB'}, {'ip': '94.168.167.199', 'age': 1300, 'port': 4662, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfFX6mYRrBwo0BZLUF+oOQXEbToo3c5VXKnfsavEAS/KquhPJq2ISbYxaehxhH0qfMBo2VcFB58tl2/w'}, {'ip': '171.162.54.160', 'age': 1146, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALeInlJCuZo3muUj4OsNSgLVBYxUq9Sn2BjfJEf1AG0xeQufS1KBq/sWs52rAIyUN0xE1LqBThrJRn7h'}, {'ip': '73.216.236.102', 'age': 88, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARv+okJ6kV5OO1Gx29hGN2OGlqLwTtqNul5nkP6qAXnFfuB/KSfFdagkIG2K5s/qaMNsN113shCl8KPD'}, {'ip': '168.184.193.167', 'age': 15304, 'port': 7763, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYNRhPGTJ7H0y5huq//p93XsyjZgj/yxXJBMZ8ijABbfyjcSrnalgTDkN5qEOANOJkwTJTjt+X8KG+nj'}], 'preferences': ['Vz7i3j5pub8nDv/N9tjY47UAvvI=', 'k0h5Jfvdnfl7XQYqxw3BWd4xpY0=', 'f+AYosCRM4hFM68IKqVX8FklPCU=', 'qMNJd12K5ehoh4CSU9RO3wKyQSk=', 'x6V6HMSUWQkxPYWr0NfbKoBXk5k=']} 
+1188691415.8 SEND_MSG 148.24.74.223 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASae3XmgW6+KfHYh3qNfB920R/yA1DVUqvjXuS5OAHsPBqDblIrIdBpmTlSJ6b5jlxtOYk9eRho+h3At 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '38.232.39.228', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '51.176.46.177', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '28.47.30.49', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '10.190.210.214', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7'}, {'ip': '64.40.85.117', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '58.82.32.197', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '122.125.253.231', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '46.149.173.254', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '234.12.62.121', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': []} 
+1188691416.2 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 6 5 {'q': 'SIMPLE hogtied ', 'id': '\xdd\xcb\xa5\xff\xf29,\x96\xc1\x17\xfb\rn\x14\x13G\xf2\xc2+S'} 
+1188691416.2 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 0 'd1:ade2:id20:\xdd\xcb\xa5\xff\xf29,\x96\xc1\x17\xfb\rn\x14\x13G\xf2\xc2+Se' 
+1188691422.2 CONN_ADD 136.213.11.240 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL 6 
+1188691422.2 RECV_MSG 212.190.186.176 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 1, 'npeers': 273, 'random peers': [{'ip': '35.135.222.96', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAb6ySSv6ffSQQglKdrUwEFP5W7OceIkPyoMyn/IuAOrI2T7cSKeF2xdzGHJEDYgeIuy4VshDewg5HvKl', 'port': 6881, 'similarity': 0}, {'ip': '67.7.187.183', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcCjvvY+6uB5fkOuat3r83M5UeB9OJoBPE91+OAmAaaUQybCxu8r3dHXd/geYARQKrb28AxihCG2iVk2', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '101.35.64.245', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJtb/ktbJo0Di7MKBrsyI81zfQ34Udnvrsal/SanAP9QQTV2O0D/KFd3cS5mtL0/r48v65KhJzu0pLdh', 'oversion': 6, 'nfiles': 433, 'port': 7770}, {'ip': '226.110.56.212', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa8tB3Vf4Boo68j/6KEIyJ5MZUXLcFHVLphVmDNNABmSHNE2nVR3NXYxYIvLdCSuJMp/AK9SNida+zf4', 'port': 6881, 'similarity': 0}, {'ip': '92.236.30.178', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL7fsNzTvwMapJzUzDLD352v9W+h7JOH7StbIgblAIiYIl5O0I9ZhNrUX5cfyTLtnhzz9wvwamv1MgWS', 'port': 7762, 'similarity': 0}, {'similarity': 0, 'ip': '34.163.212.168', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAT8svDz56hztH53BxA4TBYf5eLHclSMsZveKgP4VAHK0376GQ8FfpsoTsm11gQlQhNaXFevTKjH31wez', 'oversion': 6, 'nfiles': 212, 'port': 7764}, {'ip': '222.62.136.189', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAasSiWmjtEQZzoq2Gps6ksigoU+NBE7zgSKCDkgfARoGe3e41eIEoGLEE+knnqEPYCTSZRYEqtDKBO5i', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '160.78.236.39', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'oversion': 6, 'nfiles': 3647, 'port': 7764}, {'ip': '245.248.188.208', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJOaf2HGMkKe70PYmsX/zqNrylwJXSy/dK5Y7dYGAOzgzJvY3+WiadZWyUJEKmlfMwu+nVF+I7rkl3Ii', 'port': 7772, 
'similarity': 0}], 'collected torrents': ['\x8f&\xaf\x17`8\xf3]\x1fh\xdd\xe1\r2+\x8eMD\xa7\xac', 'I\xb1\x85P\xbeg\x0c\xe2G\xcf\xcd\x95\xad\x10\xb27\xb3\xfa\xb0?', '*{\x1e~\x83\x02-\xad"\x12c\xeb\x89>\x88-\xd5\xa3\x97\xb5', '\x10 \xde \x1a\xa2\xd9%\xdd\xdbR9\x1f@\rS_.\x19_', '\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z', '\xa8\xcf\xe1J8\xe0\x06|\xb62\xf0|\xf75\x96C\x9c\x86\xb3\x05', '\xa0\xc6R\xb7N\x08Z[fUc\x18\xfb\x9eU\xeb\xab\xf01\xce', '\x98\xf2*\x12\xb9\xbc1\xe5> \xd8\xb1\xe5<Kc\x1c\xbap\xb0', '\xe8\xcaz\x14\xe1\xb2\xb5+\x1f\xe5\x1c\xd6\xf0w\xc2\xa4\xd0Z\xf9\x8c', '\xcb^R\x1bRb>0\x93&\xf61\x01h\x0f\xe4D\xadF\xea', 'D\xc4\x1c\xfd5s\x00\r\xd7\xa6<RWK\x8c6\xe1\x14\xeaU', "v\x0b\x8bw\xbfM\x1f\xa9y\x92\x12Y\xa3\xc9l\x19\x7f\xce'\xf9", '\xe0\xcc\xadk\x0b\xd1\xe9\xebi\x10n\xe5\xc9\xa0?G\x15\xab\xdb\xeb', '\x11G\x7f\x8e\x1d\xf9pV\xefb\x9b\x02\x98\x04k\xbfw\xd2\x83\x05', '\xb0\xd5k\x955\xf2\x1d\xf4\xfcn\xdf\xaat\x00^]\xd4\x9e\xd6\xef', '\x17\xa4\xff> \x13;\x1dJ%Z\xc2\xa7\x1a\xff"\xd7c\x98\xd6', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2', '\xd8y\xbf1\xbc|R\x16\xb1\xdb^#\xad\xf2\xf0\xa0*cI\xd9', 's\xc4\x03m\xe75n\xeb\x93&\x0e\xfdM\xbc8\x0e\x0f\xa9\xd7l', '\x97\xa8\\\x9d\x07\x07\xc7b\xb4\x16$\xe4\xc3\x87\x97n\x93\xcc\x14\xec', '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\xac\xe1\xcfi`\x07bWY\x8c*\xd1\x04}\x9e\xaf\x91\x96\xd2\xc2'], 'nfiles': 22, 'name': 'KAMAJA'} 
+1188691422.2 SEND_MSG 131.50.124.126 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2112, 'random peers': [{'ip': '254.82.7.57', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7', 'port': 7771, 'similarity': 0}, {'ip': '122.114.119.6', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '252.84.194.137', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '73.44.114.209', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '233.224.91.191', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOAkxv/0qc5YIsqWobOlEEmP7P8z9daSivw0fU+vAejyfY/O7rU3H/7GJnyebSqxafH1qhhIyhdx523w', 'ip': '24.67.60.80', 'similarity': 0, 'port': 7765, 'nfiles': 295, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'ip': '11.169.137.174', 'similarity': 0, 'port': 54545, 'nfiles': 949, 'oversion': 6}, {'ip': '207.235.132.48', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '211.56.160.152', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}], 'collected torrents': 
[], 'nfiles': 0, 'connectable': 1} 
+1188691422.9 CONN_ADD 177.82.226.126 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAgGrWi/6zYhs0JQ9m2WzWU7nRxZm945X1koeD5sAT17ge2ZV2AMCGb9SafAQzpHsQQgDzy6vQyBDLnQ 6 
+1188691425.2 CONN_ADD 119.17.252.36 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaYsTw8q8kNc/CMVi+KauBk2dd94N2gzFnqU3WCCADo0qjLYIRzDJHoyUWUxlpmWg0JfkIvgTWxprdFW 2 
+1188691425.8 RECV_MSG 251.48.24.240 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaYsTw8q8kNc/CMVi+KauBk2dd94N2gzFnqU3WCCADo0qjLYIRzDJHoyUWUxlpmWg0JfkIvgTWxprdFW 2 PASSIVE_BC {'name': 'Sal-PC', 'taste buddies': [{'ip': '130.16.90.51', 'age': 2148, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaAA3DYMKUXcc8OZFuWhvhqXG65vRl4XMCQFZcwPAb92ocW6muV4lfwPvkY+VqdYAMD6I17RQcucAe/x', 'port': 6881, 'preferences': ['Wi3rCE8uEzktuJ1MHykQ/jPAYGI=', 'fbTMgsmvdnYJzBrx4fk6v65ctsA=', '7E+McPB9JqtlKcdoBkl7js5Vdg8=', 'M+nFaMtrvLZbN8txU8pgMc96sE4=', 'HxJffA2zodEYqlffjBC9o6YCksc=', 'piQEnfCNXEreBQO3Q5IW5sY0v9U=', 'BT85DSWwhR5l+sZQbwV7Lgzi7V4=', 'p3oTgSDM90lBB5dGdnP51fKAT2E=', 'lXu3MD2oqMyf6MbV6CjNF2aQUCY=', 'ZlLmld7dEivs0jl+IoW3QOqggd4=']}, {'ip': '146.254.201.223', 'age': 265258, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUAiNR2WmvdnDLvDaBvOvLqhW0wmAWV7f7bjr14bAI//IR0Dy72Woothb8NL7sCVuqe5uoiMb4Md90WG', 'port': 6881, 'preferences': ['ZlYeoFUubYd3/bavh2NKv5J/Gac=', 'yM9V2V57+F4twYzMa3SiGsouX0g=', 'OUbb7o/nTyI0ArGpAiMHLI0E/DM=', 'MlaxStFhnnXYZB7zD+s2Xj1kzaE=', 'TfRNgp6dfaMP17e0wkhTshhafaE=', '9nOgB16+55lP3hFGN8c5oEg5rd0=', 'Kw1g5oNnp9uAO6t49p/tA7aFU84=', 'bQj4wJ6xMPDwjWy+EAnfq2xvnPY=', 'XU1azJXcAB2pPJS4dR8ZDX8GO4o=', 'd4gd3L6Kc9d/kIm9t6utyv8wplU=']}], 'random peers': [{'ip': '66.239.129.136', 'age': 3385, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbOQN0UrRJ29yCsmp5OFqX5bzK7oz47VcQq52EoiAGsuyDgr2wy8FQQO63U/488zr9OU13b9USGP4IP+'}, {'ip': '4.149.110.91', 'age': 14426, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPKYnH2qxePPuWpHJ+nj4naJO5/5bLsN/ieKVxzUAQXYvzxE56hcRsJOTkiNjG+ZbyVUEUYPkTNceZKN'}, {'ip': '62.71.96.59', 'age': 12406, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK54N9RFz1oMg7ROyB1SUqZtKsNy9V9wbujgF2zVACOAQ7xFkD4EsgB5q9Ym7cMpf11alx4Yr7UFIF2l'}, {'ip': '101.233.163.137', 'age': 17256, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXFhjTTlNOeEl9g1Rhpp2tVlegUkA3g+BXBgKf7PASXHa7w5Hb09V6YMC5DCE2R1n2y3QRr1VAiL/T4k'}, {'ip': '147.222.54.22', 
'age': 1148, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFW58KtJe7G3nbLbl8D3sDE6WVtRHV8wTvHG1pSFAMFXMNps0bpnO06AYLCX74I8J7o7EyBFUAdQ3QGY'}, {'ip': '201.60.243.204', 'age': 5356, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALCihkayiAqUYMJHi1Ax4ptxN2Nzwyv1dl3AXW/RAT979DYZulFw+nMFwd04bR4Vn+dhWwFa2KP5hJuF'}, {'ip': '253.0.211.204', 'age': 399, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfbl87pz6gnUuGkT+WjpxQNAD0fDVO+bGZGpzq/sARu57n0SlosmAeKAWT/peT4IMb7+2iRu31zIOyyx'}, {'ip': '52.58.183.102', 'age': 12227, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFKSCuZWjY3o22xekT8V1DGcGTUpddXXCmv9o4HAAAtmVMkW0VJwt953weRHlO2FyWrGSuC7j0kwo0yA'}, {'ip': '58.46.17.189', 'age': 14251, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZP5sNaXqgwUVCIlet/XltW8u58NkNwTBxRl9qfuAcfTsLsEl5D5lnSAehHoaBKzJKFLXC1wdclIWfgi'}, {'ip': '24.66.185.49', 'age': 160, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeBYMcGakBzO8/uuVdfP4rRTyqerRBDthAaer6DJASmdeSBMo4rhRLqJGdrEp/y7NRbXl5MZthbUt4+k'}], 'preferences': ['DaRQM2OnCZmGYSYjr7Ztl5OBVNY=', 'P9TbEGYaBSXgoO5ELI5eYP9LrQ8=', 'Yw7hffxedbdaXDk0dqzNprfeJ8Q=', 'wugmMtcaL6pJr15JHKFTqhHL5hk=', 'UBUWHIMVKlEft57GMQEBOPtKwaI=', 'cAlXfs0L+GKbQP7t5j5ToRpqU2o=', 'KRXsJIGlSzCpeDgeAhAhm7If51g=', 'Eyy5LGPkFij9wQ2BLjkiyk8x9B0=', 'PAFogElFd0vtM49bopCHaAwNSQE=', '9pSB1bzWAr9am7WIcx2dIWZd/NU=', 'YpX2fSJvDGhw7LSQV/Wr0jrqC0k=', 'N61GRmQoorhnG7r3Uotiy52n+lw=', 'ngXWdO7hAw3QixAooM6hrfJI4sM=', 'bQj4wJ6xMPDwjWy+EAnfq2xvnPY=']} 
+1188691425.8 SEND_MSG 162.9.242.122 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaYsTw8q8kNc/CMVi+KauBk2dd94N2gzFnqU3WCCADo0qjLYIRzDJHoyUWUxlpmWg0JfkIvgTWxprdFW 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '226.208.153.126', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '65.65.82.27', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '206.240.188.16', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '170.204.133.192', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7'}, {'ip': '136.64.94.127', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '133.152.168.220', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '199.75.251.22', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '81.196.98.170', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}, {'ip': '231.243.125.132', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '49.148.133.20', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 
'preferences': []} 
+1188691425.8 CONN_DEL 122.210.208.116 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcYsBj4iE1yCi8FvFbF9xR8vBvYlG+z1VrrVHgVbASBSZ9JSyBSRFQ1IY7EGOX55oMe0XeYmOY8eSma7 overlayswarm:connection lost 
+1188691431.7 CONN_ADD 127.183.3.206 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFFB7MLhm7ZdbXdT2BIr4cR6x59yXqdzhz6Z0M3KAVZZShx3idsnoe4UOimRIKE61ed3F8UovTjnu5qr 2 
+1188691433.9 CONN_DEL 241.4.121.248 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK3vuT3wd0cXJ4etwQsZceMVlqxwcDEan/Z7Hf/ZAcMDz5ZD4NCQWjHNT/3w2IN47lqaZmRq4f1nHffi overlayswarm:connection lost 
+1188691435.1 CONN_ADD 127.14.215.111 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP 6 
+1188691435.3 RECV_MSG 13.170.229.176 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 1, 'npeers': 130, 'random peers': [{'ip': '233.37.53.37', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '85.175.79.96', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAezGLwb65yVDwWePHMA7ubidUBS0xhYnZGkODUImAOvg+rfEKNyedhuujIqChX1lO8U0v58MHjO2HbO5', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '65.89.140.78', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALtinCNB77hftcNL1nLgh8FoZU0yQNu8hmdNZ4WXAYFj/iep5b485SL6E8hXKWRWqZtl+PeifI4akELg', 'oversion': 6, 'nfiles': 503, 'port': 7764}, {'ip': '43.117.11.175', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdc/inM+FhZcdZb0ukkcqgzoV78fQV2WGD3ZRPGFAaKxr4cHLECDL/XzKHLchXZA8P60dkRRmTRuxnMD', 'port': 6881, 'similarity': 0}, {'ip': '18.28.13.73', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaIImcghy2ysM6WljJHm9pYSJWpBL65vducfeyY7AD8bWw/UBQH6MtgF6L0nSI4ZbKir7Pikhpv6EVth', 'port': 7764, 'similarity': 0}, {'ip': '111.101.208.255', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'port': 54545, 'similarity': 0}, {'ip': '137.213.203.5', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARNZfv2M6KWAxEqfFcdG0bY1cV7mObZt5Rsx52QyAbkS25gWunh3rG5DqcECFvXbT9tSH3LHH7z6y2nB', 'port': 7762, 'similarity': 0}, {'ip': '106.173.242.45', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeW65yZlJvbhwmkryJY3XQEZ9X3Vjhb+8o5DNCWWAXgoBb8SVkt5mNGfj1CD7D+Pzc26zIBDnoxKEn++', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '60.241.107.19', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdHw8VpFhHdn3HbqHoD1I9V1JwU/H80fxgm8fYaEAelQTzF3QgMADg7CfI/9eBOak4LL9fzoMrOZYegQ', 'oversion': 6, 'nfiles': 473, 'port': 7773}], 'collected torrents': 
['\x03;\xb4\xc6\xbay\x11&\xef\x0f\x88\xb6\xf6\xd4\xd8V\x10k\x84\x10', '\x11\xdb\xf4\xef^t\x0c\xf2i3\xa39\xd1Z<\x05\xb3\x03\x088', '\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2', ';0\x9c\xe2^\x0e\xff`\x19\x01\xa9\x00\xf9n\x0fv\x9e\x85\x80\xeb', '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\xa0O\x9b\xe2K\x90`\xa12)\xcf7\xd7,s\x10\xc2\xa7z^', '*{\x1e~\x83\x02-\xad"\x12c\xeb\x89>\x88-\xd5\xa3\x97\xb5', '\x8f&\xaf\x17`8\xf3]\x1fh\xdd\xe1\r2+\x8eMD\xa7\xac'], 'nfiles': 9, 'name': 'iBook.local'} 
+1188691435.4 SEND_MSG 20.163.88.93 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2124, 'random peers': [{'ip': '61.63.177.222', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '243.100.187.249', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '109.254.89.230', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '44.205.74.74', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'port': 7764, 'similarity': 0}, {'ip': '202.92.96.109', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAW7KQbAfJF8zUiLVF9ivvOIq9gxrbOrYsGmNItGKAfr4Ufkoy4OpT5Y7WyApCHZWC3YietKJXxQQeA3z', 'port': 7768, 'similarity': 0}, {'ip': '30.86.31.176', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '101.64.94.22', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '2.22.33.233', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '220.243.62.117', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 
1} 
+1188691435.4 CONN_DEL 198.67.111.228 7765 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHqgRfQs0OOBIukjPmrebMVtY4lBwZrmi4VJ4LbBAcnw50abmtBIsI7XUXWzTOiIaRHLVtlUBDYr13/6 overlayswarm:connection lost 
+1188691437.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz 6 0 {'q': 'SIMPLE burning man ', 'id': '\xdeL\x02v\x9b\xd0\xd6\xa7S\xb5\x8f\x9d\xd1\x7f\xc5\x04\xa2\xc1\x9bC'} 
+1188691437.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz 0 'd1:ade2:id20:\xdeL\x02v\x9b\xd0\xd6\xa7S\xb5\x8f\x9d\xd1\x7f\xc5\x04\xa2\xc1\x9bCe' 
+1188691439.4 CONN_ADD 84.104.56.22 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKgm00seD977usBtMzTjsn0jeAJexqFnAzYImOejAZ2WMvOygsRYvi8gnUmtiPN4utntmADnhkHf9r+k 6 
+1188691439.4 RECV_MSG 133.61.139.147 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKgm00seD977usBtMzTjsn0jeAJexqFnAzYImOejAZ2WMvOygsRYvi8gnUmtiPN4utntmADnhkHf9r+k 6 PASSIVE_BC {'preferences': ['PTJdWfl4jlZa8SRtJzYnkVMA5xk='], 'ndls': 1, 'taste buddies': [{'ip': '34.78.201.34', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf6gOSE2js+HPOG33c9tn/WP7+F/a0BJeEVWGmF1ABugcVPbltse6zxC7V/bttMZM8E1m4a7uJRSr9o1', 'port': 6881, 'similarity': 1316}, {'ip': '141.93.248.75', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEVA2Gin9bRq7Fr3cT92SXM6bGxoLmp5tvbM4FRqAP/juVDpz9nMl/RTPwim/iivmZ//CZB7tLKQLqNG', 'port': 6881, 'similarity': 1316}, {'ip': '18.231.157.60', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcPCAE7Z87ovfSCctirlJZjuvOkVL1HrHdG+j4A+ASYWT4WrlSYtI6DU1k0cYbvRRjml/+u8cH1PC5bW', 'port': 6881, 'similarity': 918}, {'similarity': 868, 'ip': '55.187.253.192', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK3vuT3wd0cXJ4etwQsZceMVlqxwcDEan/Z7Hf/ZAcMDz5ZD4NCQWjHNT/3w2IN47lqaZmRq4f1nHffi', 'oversion': 6, 'nfiles': 3889, 'port': 7764}, {'ip': '204.177.123.110', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAS151K/aBPEa2xXEPWJ+Au0CKbzKZBUkrMoymomKAR6nVWl2w/cQdFfgvbFobWh8N5Peu1CZQ7Fkyrbj', 'port': 6881, 'similarity': 816}, {'ip': '73.95.109.222', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbzzdx0e1im5pcEzCJV63ftYZSHDMYKT2M5ksUAlAENC3NC3cgWL8wl+RvTA5RVXc+0s4eZX8a5gOLLd', 'port': 6881, 'similarity': 661}, {'ip': '87.131.162.78', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFyhoYJafyYofe4sapbn4/tIwq0YQslvwHt+jjffAfu/WX2uN6IuX3UCZUWe8Gp5WTNqpbFF5ckrexid', 'port': 6881, 'similarity': 588}, {'ip': '197.243.141.25', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ8jaDP8xrpd0rALjvrIW5BV32k+B1487NVLENB7AYENLAvRy+RRiJ1gnlmwPKpXj799IFxFXTQOUjs3', 'port': 7762, 'similarity': 500}, {'ip': '129.175.93.13', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeDTpdak5cLjB/JxAGmpVRvNZ154GSZf2V3nhqIhAe189q6cYrcmC8O6LdeFjvhSFSPHpFOtNmAYUM5D', 'port': 6881, 'similarity': 421}, {'similarity': 316, 'ip': '29.91.222.56', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALuqKE+vUu61/OiWjjuF5TWPfwwcWJB1mniAyErHARjcJjfqCZJif54tEVhIny0LZS71CL/L/8PCVuN4', 'oversion': 6, 'nfiles': 2975, 'port': 7764}], 'connectable': 0, 'npeers': 513, 'random peers': [{'similarity': 37, 'ip': '102.86.158.155', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGvcdlssus9KuDm7HC/OO1UVvsRb982xHR9ynM46ATAMQjGt87cXiwkgsFxRvxA0WeL1ZCSK8mJJEk7V', 'oversion': 6, 'nfiles': 4930, 'port': 7763}, {'ip': '225.170.52.93', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAJ1FB3Mzb/ZpdA1kmu1HUp9fW5SbH0k0+/9uT+7Aa0BjGlOd+EQchLxqQZD7EHBfQ239nzv3GvjKD7/', 'port': 7762, 'similarity': 220}, {'similarity': 26, 'ip': '88.221.25.56', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'oversion': 6, 'nfiles': 230, 'port': 7766}, {'similarity': 316, 'ip': '142.123.54.67', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALuqKE+vUu61/OiWjjuF5TWPfwwcWJB1mniAyErHARjcJjfqCZJif54tEVhIny0LZS71CL/L/8PCVuN4', 'oversion': 6, 'nfiles': 2975, 'port': 7764}, {'ip': '144.233.228.94', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIS+294zvit3jA668lQ8IXpQlPOfX4yG9JdsV8MAJ381b4aARPIF+FN9GoteSCKKes5mNuYQWdObktp', 'port': 53800, 'similarity': 129}, {'ip': '119.174.200.230', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGhA7L6MXDjNCjdWpd4ghM+CNZ9NpIJy43G4rAiUASkXeHnSMkiWVibwNU0oi5jZSABC6K1mrb0xlUWQ', 'port': 51500, 'similarity': 0}, {'similarity': 18, 'ip': '63.199.62.152', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADMiQw21JSPLtUpCIcjcBGGPGmGqSs8I5g5U93n4AcTe3+cqcVaJbXUGOxR/tuRf4gKQHGDdWkViYKrS', 'oversion': 6, 'nfiles': 312, 'port': 7770}, {'similarity': 37, 'ip': '246.148.23.80', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAT8svDz56hztH53BxA4TBYf5eLHclSMsZveKgP4VAHK0376GQ8FfpsoTsm11gQlQhNaXFevTKjH31wez', 'oversion': 6, 'nfiles': 208, 'port': 7764}, {'ip': '141.94.53.181', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbSP6T1HaJ/gZULqv4umHF+vRgcXuQQina3H2a8CACKqCNWwrvK9acJyXt6KHzsfC6egCOBf8O+dy0Sf', 'port': 7764, 
'similarity': 0}], 'collected torrents': ['\x10 \xde \x1a\xa2\xd9%\xdd\xdbR9\x1f@\rS_.\x19_', ';0\x9c\xe2^\x0e\xff`\x19\x01\xa9\x00\xf9n\x0fv\x9e\x85\x80\xeb', '\\\xc4#Y*\x08\xb1\xbe"\\\x15\x93\x0b\xee\xeeS|\xa4B\x98', '\xc0\xf5A\xde6Db<\x86\r\xd6R\xcfDxc\x15\xd51\x16', '\x9c\xb8\x85\xaf\x82vz\x9au,\xf2\x05\x0b_\x94C\xe1\x90\xc2-', "\xa3\xbc}\x1a\xb6\xde0\x96\x1e\xa4X\xf3@\xfe\xa3\xc9\xe6'\xed\xba", '\xe4\x0f8\xcd]\x94\xd0c\xebx\\j\x91"h\xf6\x03\x0e\x17\xd3', '*{\x1e~\x83\x02-\xad"\x12c\xeb\x89>\x88-\xd5\xa3\x97\xb5', "'7o\xa2\x8b\xb0\x8b\xe7,4\xc8\xaf\x9aU\xbfX\xd5\xe4\x82\x9c", '\xe6\x81?\x16s\n\x06\xcb\xf6r\xe13\xd9<\x96\xdb^\xfc|7', 'A\xfc87\xef\r\n\x88?ew\xdd\xc4\x98\xe3Pj\x1e\x9f\xdf', "\xf2l\xb8\x83\x1dg9'\xbf9\t\xeb\xa4S\xdf\xe4\x0by\x8a\xa5", '\xc2\xe8&2\xd7\x1a/\xaaI\xaf^I\x1c\xa1S\xaa\x11\xcb\xe6\x19', '\xd4\xfeu\x89a\xd6\xb7\xefX{\xa6\xea\x8f\x9e\x92_s\xbcL\xe5', "=2]Y\xf9x\x8eVZ\xf1$m'6'\x91S\x00\xe7\x19", '\x91\xbeR\t\xad"\xcd\xd1\x90P\xfdF\x89\x8a!\x92\xab\xb5uu', '\x93Z\xf2\x9d7\x0e\x12o\xdb^M\xfe\xcf\xc0<\x9d\\\x92\xd0H', '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\xe0\xec)\xc7\xed\x06)}\xc1Rb\x80\x81\x18T\xf2\x17\x11\xc6h', 'D\xc4\x1c\xfd5s\x00\r\xd7\xa6<RWK\x8c6\xe1\x14\xeaU', '\xdbi\xf7\x97\xa9\xde\n\xaf\x9a\xbf\x98\x17\xa7\xeeL\xe9cD\xa5\x14', '\xac\xb8\xae\x91\xfb\xa2\x00\x04\xa0\x9e\xcc\x0c\xb4`\xfdVxQ\x90X', 'RP6\x812\t\x8d\xddO2MF\xe6ue\xed\xf1Oj\xc6', 'P\x02\x10\xdd\x84;\xb5\xc8A\xe8\x04\x18\xac\x8d!\x943\x97|\x97', '\xd8Y\xad\xaek\xa4a*\x01\xcdT\x06\x1f\xa3\x8b\x07\xabsmX', '\xddD\x9avI \xc1\t\x03\x85\xebu\xb4\xde?\xe4\xfe\xa1+i', '\xc4r\xbc\xbfCE\xc3*z\xc2Oz\xac,E\x98\x91\xff\x1d\xb2', 'I"\xe1=\x05Zv\xed\x9b:9\xa4\xcf\x12\xd4\xcc\x8b(\xf1\xe8', 'C\x9f\x87\xa2h\xf5\x12\x9bQ\xe1\x90\xbf\xa6\xf2\x11\xd9\x14I\x99\xcb', '\xc4\xe8{\x86*\xc9\xe2(\xaa\xd0j7\x85\xf4\xfb\xcc\xc0.@\x01', '\xd8y\xbf1\xbc|R\x16\xb1\xdb^#\xad\xf2\xf0\xa0*cI\xd9', '1M\xb0\x80\x188v\x9b\xfd\x103zp\x86T\x1c\xf2\xf4\xe4M', 
'P\xdc\xd5\xa3\n\xb6\x83X\xbd\x18!\xd8g1\xfe#\xc6v\x1b\x85', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2', '\xcc\r\x81\xdd\x82W\xde\xa4E\x1d)\xc1\x9e\x0c\xdf=U\xe4\x88+', '\r\x8db\xec\xd2h>\x9f\xa4\xf5\x80}\xda&\xb4\x84(\x02\x10\x85', 'lm\xb8|\x07M\x8b\xbd\xc6\x0e\xa3\xd9\x16\x95A\xea\x16\xd6\xcc!', '\xed\x19\xc1I\xff\xa2\x03s\xaf\xa0@;b\x01a\xf0\x14Y=.', '\x17\xa4\xff> \x13;\x1dJ%Z\xc2\xa7\x1a\xff"\xd7c\x98\xd6', '\xff\xbdhv\x7f^\xb1\x9b\xbc\x00<,\xf1C^l\x9bue\xb2', '\xe9\xb4\xdf\xdd\xadEu\xfb\x14\xd6\x10\xd3?x\x1fz\xf3eL\r', "v\x0b\x8bw\xbfM\x1f\xa9y\x92\x12Y\xa3\xc9l\x19\x7f\xce'\xf9", '\x88/\xcb\xcc\xd1\xc2}\x02\xeb\xeb%\x1d\xa9\xf8\xdf\x17d\xb0\x0e~', ')\xb1\x80/\xc3\x1c\xc36l|\x9c\xf2\x1a\x85\x11[\x85?\xf7\x82', '\x8e8\xd7D\xe5d\xe3\x1d\x85/\x91\xcc\x1b%\xd4\x11\x8c\x86L\x1e', '\xa0O\x9b\xe2K\x90`\xa12)\xcf7\xd7,s\x10\xc2\xa7z^', '\x7f-\xf2ff\x8ci\xb6\x82\x87\xecTY\xd6\xf5\xfe\x150\xbf\x1e', '\xd08+\x9aY\xdd\xfb\xf8\x99\x89\xb4_\x96\r\xf8\xaeH#\xfe\x85', '\x1b\xff\x02\xec7X\xc2\x82\xc0\x99\x06/\x0e<\x0bH\x8c\x12\x12\xb4', '\x14\x87\x9e\xc7L\x04\xf7\xd1%N]"h/\t\x1e@V{\x8d'], 'nfiles': 54, 'name': 'izzo'} 
+1188691439.4 SEND_MSG 83.36.133.167 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKgm00seD977usBtMzTjsn0jeAJexqFnAzYImOejAZ2WMvOygsRYvi8gnUmtiPN4utntmADnhkHf9r+k 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2130, 'random peers': [{'ip': '174.136.244.5', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '151.106.50.218', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '2.173.21.66', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '200.13.178.185', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALuqKE+vUu61/OiWjjuF5TWPfwwcWJB1mniAyErHARjcJjfqCZJif54tEVhIny0LZS71CL/L/8PCVuN4', 'ip': '29.28.127.20', 'similarity': 0, 'port': 7764, 'nfiles': 1740, 'oversion': 6}, {'ip': '22.148.189.17', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '27.248.206.224', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '16.44.201.255', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '198.231.160.77', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK3vuT3wd0cXJ4etwQsZceMVlqxwcDEan/Z7Hf/ZAcMDz5ZD4NCQWjHNT/3w2IN47lqaZmRq4f1nHffi', 'ip': '61.12.48.13', 'similarity': 0, 'port': 7764, 'nfiles': 3153, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691441.1 CONN_ADD 191.176.20.174 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAD8396BuV9IxYbYP1I6Ql8f+inuAv4prQXGJESFxARFMZ8yHrSUBAlngfbLeZXKlq3vWOhweFr1oU7Gz 6 
+1188691441.1 RECV_MSG 86.91.37.44 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAD8396BuV9IxYbYP1I6Ql8f+inuAv4prQXGJESFxARFMZ8yHrSUBAlngfbLeZXKlq3vWOhweFr1oU7Gz 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 8, 'random peers': [], 'collected torrents': [], 'nfiles': 0, 'name': 'Ezra-PC'} 
+1188691441.1 SEND_MSG 96.128.26.18 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAD8396BuV9IxYbYP1I6Ql8f+inuAv4prQXGJESFxARFMZ8yHrSUBAlngfbLeZXKlq3vWOhweFr1oU7Gz 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2131, 'random peers': [{'ip': '186.113.3.106', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '114.151.195.32', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '64.125.136.10', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALuqKE+vUu61/OiWjjuF5TWPfwwcWJB1mniAyErHARjcJjfqCZJif54tEVhIny0LZS71CL/L/8PCVuN4', 'ip': '32.12.176.208', 'similarity': 0, 'port': 7764, 'nfiles': 1740, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '144.200.169.51', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '169.89.56.19', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '185.59.50.71', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '145.212.103.235', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '157.13.23.125', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': 
'255.124.135.158', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691447.4 CONN_DEL 247.170.243.73 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIVlP6RU+MGlzhmZA4/XvRQR6P+UCaRij18zmbEAQq3yQ5Maw4auxmSKhIMdJHCpMlxErBM/vGbNS0Z overlayswarm:connection lost 
+1188691447.6 CONN_ADD 213.71.159.93 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAMFm4gJhJfXlvIcAg292476SY2e2ZYRl+3mUSxfAVvVhovIO8HkHDQDX4TjXqbYI4rhqeXUmO1tcCTr 6 
+1188691448.4 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 6 6 {'q': 'SIMPLE nine inch nails ', 'id': '9"3\n\xc46\xc6\xce\x0f\xf3\xdbiY\x1dD\x89h\xdf\x7f\x13'} 
+1188691448.4 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B 0 'd1:ade2:id20:9"3\n\xc46\xc6\xce\x0f\xf3\xdbiY\x1dD\x89h\xdf\x7f\x13e' 
+1188691450.6 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAMFm4gJhJfXlvIcAg292476SY2e2ZYRl+3mUSxfAVvVhovIO8HkHDQDX4TjXqbYI4rhqeXUmO1tcCTr 6 0 {'q': 'SIMPLE battle kruger ', 'id': '\xac\xc6SF=_@\x9e\xb6\x942\xe2\x87B\x9e\xee\x06\xd5A\xb0'} 
+1188691450.6 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAMFm4gJhJfXlvIcAg292476SY2e2ZYRl+3mUSxfAVvVhovIO8HkHDQDX4TjXqbYI4rhqeXUmO1tcCTr 0 'd1:ade2:id20:\xac\xc6SF=_@\x9e\xb6\x942\xe2\x87B\x9e\xee\x06\xd5A\xb0e' 
+1188691451.9 BUCA_STA 9506  2132 2132 100  1586 12104  67 67  0 10 10 
+1188691451.9 CONN_DEL 21.152.131.18 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHgzvbtGK8FRbACTsOmBCMuekUrzaogWUomiJnkgAXzpAJFYSpBDtxcK1qg+GzqAtjeVJotP6pbs+U0a overlayswarm:connection lost 
+1188691451.9 CONN_DEL 248.33.128.219 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAff+aqEjPDZ2QAJhnffr7ol6srcUV3vo91sq0lE4AFLihI1PV7sFAcs0E6U3jpnJ3eb8ovf2/Tdac17U overlayswarm:connection lost 
+1188691451.9 CONN_DEL 45.70.140.110 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXTtTEuwMlzUL/o23+6lbFsvSq/+oyBNIlZMobhVALgZYyQtaJ28QVOSbLj1+ieJ+utXGGWVppP2HkVH overlayswarm:connection lost 
+1188691451.9 CONN_DEL 206.35.229.180 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLxzuZsaYbHZXMNf2hR327Swd3grVjOLAFrBvnEACY6Z0WdNogYWIkCFjEYm6bc4NwpCFmC4fKejJzk overlayswarm:connection lost 
+1188691451.9 CONN_DEL 8.194.98.137 26017 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAC8TOXRp2+Y6JQDcws8YMQswwwolUYK/A6T4r6NoAMUFLV6ud7wGlM+BgpBfzZq/hq0PcuOPuT2ZJEAw overlayswarm:connection lost 
+1188691451.9 CONN_DEL 231.155.1.147 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG2ptg0VjgHE9s3QjYVPYALhGuaxpU/n7z68i5F7ABDAOjzujTw/kD5zfe73D/fw6GCfNFFgyUnWa3pN overlayswarm:connection lost 
+1188691451.9 CONN_DEL 170.91.253.200 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfJQhwh8ZfqJPkCpWFs4J64W8wjJBCqdPvarxBFWAF+ms1WxVN+BRtJoUr5Z5ggo+eThmw62cMpUXJQw overlayswarm:connection lost 
+1188691456.7 CONN_DEL 109.228.56.173 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGTdaWOre+S7zONZ9C5FfgS4WkI9nk3vAFwUhM2fALs3dG06i/kjcVYKgvuJzOsOetZL0OlChP8vFM7k overlayswarm:connection lost 
+1188691456.8 CONN_ADD 194.104.43.86 7763 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMBsIkOf4NQZefln7iBWXklgrJ68MOULijCqgYj/AdU4Ny3QGcCferCj6Upabgk2fcsrMna0aWEUyqUu 6 
+1188691457.0 RECV_MSG 140.240.241.108 7763 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMBsIkOf4NQZefln7iBWXklgrJ68MOULijCqgYj/AdU4Ny3QGcCferCj6Upabgk2fcsrMna0aWEUyqUu 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 18, 'random peers': [{'ip': '56.245.87.105', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'ip': '37.136.65.49', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '188.44.68.33', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWLCwIeXtSFCbXn5rwpNlwLUlJsfbhX04/7AM6J6AUnF4/RdfQ3Lmv8okHr9EkCjGdGC2ai8EfcHmIeR', 'port': 7764, 'similarity': 0}, {'ip': '181.105.127.96', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFY3lSHHMzF8T7LFp+1G2+y9UNO56SSGZLfDOYZtAAkKJ85kAZrooaafKgSi3fKv8oTBgFAHU7Dw6s9Z', 'port': 7772, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'smudge'} 
+1188691457.2 SEND_MSG 67.82.159.108 7763 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMBsIkOf4NQZefln7iBWXklgrJ68MOULijCqgYj/AdU4Ny3QGcCferCj6Upabgk2fcsrMna0aWEUyqUu 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2134, 'random peers': [{'ip': '211.132.142.30', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '172.26.57.162', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '138.180.222.203', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '153.137.159.152', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '114.204.255.94', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '127.125.64.195', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALuqKE+vUu61/OiWjjuF5TWPfwwcWJB1mniAyErHARjcJjfqCZJif54tEVhIny0LZS71CL/L/8PCVuN4', 'ip': '242.26.237.53', 'similarity': 0, 'port': 7764, 'nfiles': 1740, 'oversion': 6}, {'ip': '235.60.33.198', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '12.211.160.247', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': 
'239.200.209.149', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADMiQw21JSPLtUpCIcjcBGGPGmGqSs8I5g5U93n4AcTe3+cqcVaJbXUGOxR/tuRf4gKQHGDdWkViYKrS', 'port': 7770, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691458.1 CONN_DEL 18.215.8.132 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3MP63sdnQS5dhaeAuLWJ9fJqHGcjQ4jtXIT6y9AKrOZbIhpb5t0OzpanvSZ8LU51XDS/03PNSHXzls overlayswarm:connection lost 
+1188691458.7 CONN_DEL 114.162.158.195 7774 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACU+VZoqIN1q1EK8zo6qtmIJidddAJwr85benj5IAHYAy39++m8Y5EFEWOqAVYDC+7QBKKanFJ9SDPni overlayswarm:connection lost 
+1188691461.6 CONN_ADD 244.17.185.93 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHjTifzjoY5XlNZyaMJQS+xAICdHUQ8xytBqUDOJAXWg1Uvj+Kz045dE2r100hBDqhedaPqYTKV7Sara 6 
+1188691461.6 RECV_MSG 63.195.242.182 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHjTifzjoY5XlNZyaMJQS+xAICdHUQ8xytBqUDOJAXWg1Uvj+Kz045dE2r100hBDqhedaPqYTKV7Sara 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 67, 'random peers': [{'ip': '20.66.152.180', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '172.133.61.37', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '182.42.214.43', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeF1ZfkGhb92OXWEgtK7+1wOISLu35Ww4AGs9nS6ANpeIFC77mmIwnPQG2gNbl2zvzDWUwMVIcSIG5s1', 'port': 6878, 'similarity': 0}, {'ip': '192.171.238.13', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ75ii63ba1zGvHd8Am1hLujksc5yc575R4T6I1RAbNKjt162TB/24WyzXTjqVyCHglFbBlKX6ToEuNl', 'port': 7763, 'similarity': 0}, {'ip': '115.224.111.2', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '149.14.42.208', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu', 'port': 7005, 'similarity': 0}, {'ip': '225.248.193.94', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk', 'port': 7002, 'similarity': 0}, {'ip': '107.204.134.123', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaJlEpntmrwMQ3zhL+NmXf7qhHekQod0VdakZ/Y0AX8EbasJ0hxI3+2cOoiU0KrzY3MT0fZ+eJuJ9iXX', 'port': 7762, 'similarity': 0}, {'similarity': 0, 'ip': '181.209.255.107', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZa2rtfRshNUkCalS+BQqyC2TwS+JLOLI45JrJBgAEmj9qaZ41vom2nWVHFxyUgw97oKUW7jV5h8bakc', 'oversion': 6, 'nfiles': 352, 'port': 7769}], 'collected torrents': 
['\x8f&\xaf\x17`8\xf3]\x1fh\xdd\xe1\r2+\x8eMD\xa7\xac'], 'nfiles': 1, 'name': 'MotherShip'} 
+1188691462.0 SEND_MSG 165.99.173.144 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHjTifzjoY5XlNZyaMJQS+xAICdHUQ8xytBqUDOJAXWg1Uvj+Kz045dE2r100hBDqhedaPqYTKV7Sara 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2137, 'random peers': [{'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '108.75.252.114', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': '37.121.219.60', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '166.218.62.165', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '97.128.230.53', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '77.85.221.22', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '130.70.132.34', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'port': 7766, 'similarity': 0}, {'ip': '196.92.237.152', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '5.227.232.189', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '169.83.55.17', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '0.200.159.90', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691464.4 CONN_DEL 90.144.119.72 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFalOMskSKfwo8yY9KzfaM3YRh26ZE3mfUJB+irnAF2zvFQKsTsB8pfq5E7kxdAQ0YIs8rCWV9rm2Wqe overlayswarm:connection lost 
+1188691464.7 CONN_ADD 236.4.221.173 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaoAeWfOp7GOtb+Bp6ye4g5u4Cvi0QJ3Bd+qh4xAYrsiW2q94YV9OdPW1NdDq2eL037FruPLsiVVCIb 6 
+1188691465.0 RECV_MSG 7.59.66.62 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaoAeWfOp7GOtb+Bp6ye4g5u4Cvi0QJ3Bd+qh4xAYrsiW2q94YV9OdPW1NdDq2eL037FruPLsiVVCIb 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 68, 'random peers': [{'ip': '14.74.75.48', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ75ii63ba1zGvHd8Am1hLujksc5yc575R4T6I1RAbNKjt162TB/24WyzXTjqVyCHglFbBlKX6ToEuNl', 'port': 7763, 'similarity': 0}, {'similarity': 0, 'ip': '58.60.127.151', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAW7KQbAfJF8zUiLVF9ivvOIq9gxrbOrYsGmNItGKAfr4Ufkoy4OpT5Y7WyApCHZWC3YietKJXxQQeA3z', 'oversion': 6, 'nfiles': 119, 'port': 7768}, {'ip': '72.110.6.155', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARNZfv2M6KWAxEqfFcdG0bY1cV7mObZt5Rsx52QyAbkS25gWunh3rG5DqcECFvXbT9tSH3LHH7z6y2nB', 'port': 7762, 'similarity': 0}, {'ip': '26.162.62.60', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYeaL9ZgSj6HeLn0t7gDIdgF5GuMomfjjaZ4N7DHAIKSO1ZWbLuNPfK0MXIGod4LZ2Q3OWeC2K4gW/se', 'port': 7765, 'similarity': 0}, {'ip': '172.59.215.236', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'ip': '68.233.165.186', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfFX6mYRrBwo0BZLUF+oOQXEbToo3c5VXKnfsavEAS/KquhPJq2ISbYxaehxhH0qfMBo2VcFB58tl2/w', 'port': 4662, 'similarity': 0}, {'ip': '96.140.128.39', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '156.122.58.44', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '41.49.226.126', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAC9+dgTwgTYeOiQnAsjtO5drURgnOU+zS86O6ZhQAY8LdWFYK6XUCIg9fU/hMsQ0IHAEhp/2yZgSUzKc', 'port': 5737, 'similarity': 0}], 'collected torrents': 
['\r\x8db\xec\xd2h>\x9f\xa4\xf5\x80}\xda&\xb4\x84(\x02\x10\x85', 'PK\xf3\xd2\xa6\x12\x84\x00-\xd9\xe8\x8f\xff[\xd0\xa4\\ \x0c\x8e', '\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z'], 'nfiles': 3, 'name': 'richardgsmiths-powerbook-g4-15.local'} 
+1188691465.2 SEND_MSG 93.224.119.14 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaoAeWfOp7GOtb+Bp6ye4g5u4Cvi0QJ3Bd+qh4xAYrsiW2q94YV9OdPW1NdDq2eL037FruPLsiVVCIb 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2138, 'random peers': [{'ip': '59.211.247.52', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdHw8VpFhHdn3HbqHoD1I9V1JwU/H80fxgm8fYaEAelQTzF3QgMADg7CfI/9eBOak4LL9fzoMrOZYegQ', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGvcdlssus9KuDm7HC/OO1UVvsRb982xHR9ynM46ATAMQjGt87cXiwkgsFxRvxA0WeL1ZCSK8mJJEk7V', 'ip': '97.209.104.136', 'similarity': 0, 'port': 7763, 'nfiles': 1898, 'oversion': 6}, {'ip': '212.186.11.140', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '180.79.130.85', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '18.198.255.162', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '219.217.120.105', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '42.72.184.243', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '46.38.10.136', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf', 'ip': '219.60.107.213', 'similarity': 0, 'port': 7764, 'nfiles': 4916, 'oversion': 6}, {'ip': 
'245.2.94.140', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691465.3 CONN_ADD 151.19.14.237 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXGP2UgzBPPwVHFQN7yk9Zi6Ugzhvh3VMKcOhzuIAOd8ji28YnAE6jJ/9jBa4pzSkQZEbcv5BBNGq9Pv 2 
+1188691465.3 RECV_MSG 176.223.109.62 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXGP2UgzBPPwVHFQN7yk9Zi6Ugzhvh3VMKcOhzuIAOd8ji28YnAE6jJ/9jBa4pzSkQZEbcv5BBNGq9Pv 2 PASSIVE_BC {'name': 'R2D2', 'taste buddies': [{'ip': '117.167.3.105', 'age': 24118, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKcpqvIangtRLk0ppeO0ODkyX4le4sSDLMrYWP8+AbYqnIWyJU8fSOXbLAqn1vfSn6AitkEWCzCronHb', 'port': 6881, 'preferences': ['3hj1VRh8P0apyug7HAgJRvbieMA=', 'l6hcnQcHx2K0FiTkw4eXbpPMFOw=', 'gG6g0wgGwp/ONHaqIiDnRI4Joqo=', 'Ul/MSgjdC2HJJn0vxojXErDXPS0=', 'm8frohqpgmBJ98jlBirXCmS7rac=', 'Y3ACHBYRBlzSgD1BMVOvP/TbulU=', 'uqYx/AS2cxx1z9WHmKDXDUaz97E=', 'usDYN0v1CAVTpnEjAX3BDqV64fo=', 'ZWJNaGHUhaeEqO/Xl5/S1nfJYQs=', 'UwLjYmUVUu0yFn4sj5V6siG7OZg=']}, {'ip': '200.189.78.49', 'age': 575, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf/Qgr2M4qWSCeCpUmD7q1Ys6lsBvxO5ryLk4ubJACPUNZO9vYh1+ZyCI8H90TjqaHHifP/kLElUNdeA', 'port': 7762, 'preferences': ['gBowk7XQWArdJlVDBY0O5donbMk=', 'cKJzGSiyMNSHAIgCSRdvFwv4TtI=', 'donrd5hyUerhsW9tUeiwUsdtvEY=', 'alzigVomQ53XTT1FvtE+cyMqOfc=', 'XKkZE+X+7O8BPCr7QmXGN34lQuc=', 'EiYHML9xqSDhve0sXnxKHbO2X2I=', 'dPQo3cdekpj91PNxVJG4quw6XOc=', 'sDiM/wmG9rWNxKRPpO8jKJyRWdc=', 'ZZXFmBrHcFG/OUG91p5l7M+al6g=', 'tpxXyg46B6RtX95NtOgiC6ib3n4=']}], 'random peers': [{'ip': '3.149.0.58', 'age': 3451, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABfto2xw3Fk9VoUIKNbV5mCY/UOuzsS9LJlJZ1/JAaXV4zzZS7WKf2aZPBWBbj9Ibed0XX5Z8bhBRihO'}, {'ip': '123.147.29.243', 'age': 823, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZSqRs1GD9PV5eGixEdhWjSherWUCUGT5OQPOqvfAZBn624AxqRxPlCBe77NHWqy4oiealTdLrVK8DcR'}, {'ip': '231.209.144.175', 'age': 1700, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZVnTBdqEA78479T/6VdyCIR1xjHPhBQZ7TeaIl/AV6EB746jiyQTZOeKMNLZiEujcPqTdEhS5T471Af'}, {'ip': '101.239.112.63', 'age': 1552, 'port': 7774, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYK3qrpWTumcxbd1oikmiDHqA+/DewC8D5aZunndAN+jDAr6/NCgX2WXyIlVaFz+oYx9MOVqTUoiLPjt'}, {'ip': '52.187.4.89', 'age': 2170, 
'port': 7769, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUc7AZzwy0XxaVnLSzFx14ov8FLVAvkDHpTLSwr+AYFAQap+unWFsNb4JL4+Myvwp1CA9gbwBrThTBOW'}, {'ip': '241.199.21.18', 'age': 1697, 'port': 54545, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl'}, {'ip': '193.8.37.225', 'age': 24550, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACR0tsnrfNCFKQSwK/b31zFW0UzeIgQm7yaCr6IkAEVLT0P+M4DN5dJEk5nyJBleeJS2Zv8s/MG/dUvk'}, {'ip': '24.246.16.1', 'age': 11594, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyoXUE3vMFY7hLlvilbjrmXvPhypwmCeQnIeZiQADLizv7r8IlquXV8OoPXDaO6Z0gBjLTgWgFEpKTn'}, {'ip': '76.136.151.146', 'age': 2865, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACl/E3N3V22pRDcSjE9B1iIYTuhPbN8/peuIhaTuAJTw6STYhNbk8rs+KqEswbiVTr+BsMzLZPhuGCAE'}, {'ip': '70.235.22.162', 'age': 5203, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI5Alp9TzQyg5UX0zjtM+oMqgnQKnwH/THVriv2sAHXxKEECt4tOlv+qxiHZF1Y5yAu+92OFEmGR5K4u'}], 'preferences': ['wPTcuOHgCrM1WCMJHeYPI743n2Y=', '4m5H4hCwMESkfGNFfEgAsAr3u3A=', '7y/9eV+itp1ppAKFlxD3wUsnFiw=', '5LCUnmGIyaNyZvqKEQfq/Mi6ifw=', 'CRrAgI6T2mxY8RzM6zLPawrmtCI=']} 
+1188691465.4 SEND_MSG 22.121.31.95 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXGP2UgzBPPwVHFQN7yk9Zi6Ugzhvh3VMKcOhzuIAOd8ji28YnAE6jJ/9jBa4pzSkQZEbcv5BBNGq9Pv 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '141.90.210.85', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}, {'ip': '139.13.74.89', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '88.120.33.69', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '214.214.216.104', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '189.87.162.66', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '206.210.240.88', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '246.96.223.101', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '137.174.12.27', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}, {'ip': '40.18.163.122', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '237.226.63.231', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}], 'preferences': 
[]} 
+1188691474.7 CONN_ADD 161.204.21.161 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi 6 
+1188691475.2 RECV_MSG 9.161.204.76 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 1, 'npeers': 457, 'random peers': [{'ip': '118.125.238.150', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbduv2Rl5EHyWP2/LgAKn9kx2rLl7y87jy831qIDAAH47ysyElQYddQ51J3U7sbHz+9gRINYnk6MTth9', 'port': 6881, 'similarity': 0}, {'ip': '51.68.207.187', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbsQqQICAP3Rxm9T1Du2A5oDgOmzgJF5iT5cKK/SAUhUl8UujX/3SS7rsXGNACq7IEJ8LnZqsZQz/Lp+', 'port': 6881, 'similarity': 0}, {'ip': '205.114.106.214', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf6gOSE2js+HPOG33c9tn/WP7+F/a0BJeEVWGmF1ABugcVPbltse6zxC7V/bttMZM8E1m4a7uJRSr9o1', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '111.236.47.226', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'oversion': 6, 'nfiles': 2257, 'port': 54545}, {'ip': '233.79.189.170', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKGhsvEib2dliWyOFHH2NAafSWUkCLdIxblFNKnyAALwJSeAtjsJWQTytDv8O08yRlXETa5IANoUq9ge', 'port': 7764, 'similarity': 0}, {'similarity': 0, 'ip': '99.250.126.251', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/O5b+hTW3Nrf2o1JAIaKomB7Z94R802h3MG2m9Ae7QXzTmoey3oodDZXjwaK39hu1wJcyH3sLWc5EK', 'oversion': 6, 'nfiles': 4094, 'port': 7774}, {'ip': '155.151.28.182', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf214D+kZDAHVW0Ftof2QedA2+DRLF5R1n4dc5zyAM9AkzvKH2ykmHu+9Zbpks82dFEoAVucrsXU1t1Q', 'port': 6881, 'similarity': 0}, {'ip': '191.71.239.65', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3Ssv9ZolvKrWfU2ronUNaevYIZe94hNuMhMD6QAA+b6kryXUMqjOxaw9pMnRdWCO6g+yrtPQNrDaO1', 'port': 7762, 'similarity': 0}, {'ip': '223.49.72.232', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeDTpdak5cLjB/JxAGmpVRvNZ154GSZf2V3nhqIhAe189q6cYrcmC8O6LdeFjvhSFSPHpFOtNmAYUM5D', 'port': 6881, 'similarity': 0}], 'collected 
torrents': ["\x07.\xd7`\xe4s\x7f \x83\xa8\x86\xb1zXg\x81u'![", "\x99\x00\tys\x91\xf9\xf2\xf0B\\\x8c\x7f\xb3\xf7\xf9'F4\xc0", '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\x10 \xde \x1a\xa2\xd9%\xdd\xdbR9\x1f@\rS_.\x19_', 'A\xfc87\xef\r\n\x88?ew\xdd\xc4\x98\xe3Pj\x1e\x9f\xdf', '\x1aY\x97\xa8\xa1%\xdf\xca\xf1\xd6\xfa_\xe9\xd5\xae\xe2\xed\xda\xd7\xbc', '*{\x1e~\x83\x02-\xad"\x12c\xeb\x89>\x88-\xd5\xa3\x97\xb5', '\xfd\r\xbb)\xe9\xec\xfb\xa2\xb0B\xf8\xe7\x05p\xf8Zspp!', '\xc6\x1b\xa0\xc7\xd7\x1f~\x13\x91\x92pku\xdcn\xa5\xe4~<3', '"\xe5.0\xd9\xb8{z6sn-\x06\x86\xcf-\xed\xb2\xba\x04', '\x8a\x0f\x12\xc5<\x86O\x98\xe7a\xc6\xef\xe3\x93#\xd9C\x14\xe4\x8a', '\xd8Y\xad\xaek\xa4a*\x01\xcdT\x06\x1f\xa3\x8b\x07\xabsmX', "=2]Y\xf9x\x8eVZ\xf1$m'6'\x91S\x00\xe7\x19", '\xde\xf1\xe1mx\r\x19.(C\xe7\x00\x17\xb4\xe7Rz\x15\xa0\x18', '\xc77\x8f\xb0\x04bM"\x00\xd5\x8fIG\x826\xf2\xdc/\x1fz', '\xafd\x9fQ\xad\xa3L\xc0aw\xa1\xee\xbbm\xe5R\x1d\xf3\x1b&', "\x16Kp \x8a)\xe0\xa1\xa1p\x13\xab\xac\xa8d\x83\x01'<\xd0", 'I"\xe1=\x05Zv\xed\x9b:9\xa4\xcf\x12\xd4\xcc\x8b(\xf1\xe8', '\x10\xbd\x8a\xfa\xf5\xf3F\x81\xdaRnm\x86\xb8\xad\x84],\x97q', '\xe0\xcc\xadk\x0b\xd1\xe9\xebi\x10n\xe5\xc9\xa0?G\x15\xab\xdb\xeb', '\x1c\xb6\x8d\xa2\xb8f\x9f\x14\xf6\xc3!\x83\x12n;\x03\xee\xe0,u', '\x90\x1c\x9cDa\x17)=\x9bg\x85\xcb\xb9\xf3\xb8\x9f\xb5\xa3\x05h', '\x90\xe4\x89\xf8|\x08n\x18m7{(\xb9i\x92\xcb\x03\x9aY\xa2', '\x92\r\xda(e_\x1b\xc9\x91\x05\x04#\x97\xd8\x088%\xd6\xe8\x9a', '\xac\xe1\xcfi`\x07bWY\x8c*\xd1\x04}\x9e\xaf\x91\x96\xd2\xc2', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2', '\xbd\x1e\xba9\x11\x11\xf8\xa2\x05\xb8\xa2)\xf6\x96<\xe6t\x05\x88\xf7', '\x7f-\xf2ff\x8ci\xb6\x82\x87\xecTY\xd6\xf5\xfe\x150\xbf\x1e', "\xf2l\xb8\x83\x1dg9'\xbf9\t\xeb\xa4S\xdf\xe4\x0by\x8a\xa5", '\xb0\xd5k\x955\xf2\x1d\xf4\xfcn\xdf\xaat\x00^]\xd4\x9e\xd6\xef', '\xbet\xb8\x9e\x1c\x19\x96\xca\xef\x95|\xe5\xdcV\xc9\xf9\x04\xe0 t', 'D\xc4\x1c\xfd5s\x00\r\xd7\xa6<RWK\x8c6\xe1\x14\xeaU', 
'x\xe5\xeb<\xcf9\xc7\xefT9\nj\xbe-\x11#r\xc3\xf2\x12', '34\x81\x8b0\xcc;\xf3\xa9\x80\xa6o\x0c\xeb\x99\xe3w\x9c\x12S', '3~S\x8e\x17\xb2\xef\xd4\x87v\xcb$\xab\x0c``n\xb5\x98\xb6', '{\xac\x98\xec\x02\x87\xb2\x95\xd7k\xcc\xcd\x14\x8f\xads\xfc\xdd&\xda', '\xa3x\xa4q\xd9\xd6\xda\xfa\x86\x1e\xff\xf4\xb59\x80v\xde\x89\x93\t', 'zT\xd2w3\x96\xf3X\xa3\xff\xa2z\xf2\xd4/8\x9e7\x82\xbf', 'T\xbb1\x99\xc0.\xaf_i\x90\xa0\x17Hx\xb7-i\x97\x07s', '\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z', '\xcc\r\x81\xdd\x82W\xde\xa4E\x1d)\xc1\x9e\x0c\xdf=U\xe4\x88+', '\xdbi\xf7\x97\xa9\xde\n\xaf\x9a\xbf\x98\x17\xa7\xeeL\xe9cD\xa5\x14', 'G\xeep\x93\\\\\x97\xf2\x96\xc1\t\xf4\x82oV\x1bR\x1e\xb5\xbe', 'Z\x8a8~\xbe\xb0\xe7\xc5\xa7V\x9a\xd7;\xac,w\xc2\xfaH\xbf', '\xc8\xe2y\x0f\xad\xfaO\xd4\x89\xd5\xe7\xe9\xde\xac\xda\x07\xb9\xf1J\x88'], 'nfiles': 45, 'name': 'Maxs-MBP'} 
+1188691475.7 SEND_MSG 240.76.133.190 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2148, 'random peers': [{'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALuqKE+vUu61/OiWjjuF5TWPfwwcWJB1mniAyErHARjcJjfqCZJif54tEVhIny0LZS71CL/L/8PCVuN4', 'ip': '28.238.92.116', 'similarity': 0, 'port': 7764, 'nfiles': 1740, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK3vuT3wd0cXJ4etwQsZceMVlqxwcDEan/Z7Hf/ZAcMDz5ZD4NCQWjHNT/3w2IN47lqaZmRq4f1nHffi', 'ip': '33.251.179.137', 'similarity': 0, 'port': 7764, 'nfiles': 3153, 'oversion': 6}, {'ip': '138.159.88.243', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '226.20.74.149', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu', 'port': 7762, 'similarity': 0}, {'ip': '202.134.123.2', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '234.102.250.197', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '229.145.211.12', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '149.61.212.56', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '73.23.67.145', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691477.5 CONN_DEL 139.159.104.56 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN+A8INUJwWLfFbMMExV83B9vERhLryvoBARWFuiAVl0Aa5nL+He7Bo+4nkH+4i7JEE2Vn7i+AicDM1v overlayswarm:local close 
+1188691479.2 CONN_DEL 30.70.241.201 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM2rDxgPAwaS5efTVljvKcfGpLF1BmLIl2O9sae9AV9pplNqhWo42QNGL1DMzYQfU2+e62p9lRZJ9wrc overlayswarm:connection lost 
+1188691479.4 CONN_DEL 190.76.82.135 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABxvTxfxjzX/lW5ae8ebU6Bfe6YmZVd9AHgZ1OXmAC8a8eN54JjCHgwKjqKa7JOmzSPd9Kl6NU6HqNHc overlayswarm:connection lost 
+1188691483.9 CONN_ADD 50.249.225.117 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALw82JH7svl40hjmsEBBtHrcqIP1iFljNgECVpFOAREJ0UA2M2YE8dUUbYc8ROZTBeDOjKWRgbDOoD6r 6 
+1188691484.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALw82JH7svl40hjmsEBBtHrcqIP1iFljNgECVpFOAREJ0UA2M2YE8dUUbYc8ROZTBeDOjKWRgbDOoD6r 6 3 {'q': 'SIMPLE counter strike ', 'id': '2\xc91t\xd1\xc1zk\x04\xde\xbd.\xf06u\x03\xd2\x86W\xbe'} 
+1188691484.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALw82JH7svl40hjmsEBBtHrcqIP1iFljNgECVpFOAREJ0UA2M2YE8dUUbYc8ROZTBeDOjKWRgbDOoD6r 0 'd1:ade2:id20:2\xc91t\xd1\xc1zk\x04\xde\xbd.\xf06u\x03\xd2\x86W\xbee' 
+1188691485.5 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 3 {'q': 'SIMPLE sterell ', 'id': 'm\xfbqWn\xb4\x0cZY\x02 \xdc\xcc\xb5\x86\xbf\xf1\x9fE\x13'} 
+1188691485.5 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 0 'd1:ade2:id20:m\xfbqWn\xb4\x0cZY\x02 \xdc\xcc\xb5\x86\xbf\xf1\x9fE\x13e' 
+1188691486.6 CONN_DEL 16.239.137.180 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOLwCRcmKOJ1J81GUqMlx5ZYMEwqGsqntWJFONDfAbW9OwXUFsgP0MKSEh3PkLMoOy+E/elBwIXMfCKC overlayswarm:connection lost 
+1188691486.8 CONN_DEL 174.175.163.221 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZVxwuKvHfs9VPU4l6ZmnpBA5IrrcbZP3gc66+AfAQa/L21CrBZLErtWOtZgbdIllO7+D8antHohVH54 overlayswarm:connection lost 
+1188691491.7 CONN_ADD 211.126.85.145 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAR1XefmCtOtQjb4WIwRQ35aW291/aiAwcYwJJmpLAbfNeYOxyUVaD5Mx7Cj111APjT32HOo27a8rjNUj 2 
+1188691491.8 RECV_MSG 131.98.8.228 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAR1XefmCtOtQjb4WIwRQ35aW291/aiAwcYwJJmpLAbfNeYOxyUVaD5Mx7Cj111APjT32HOo27a8rjNUj 2 PASSIVE_BC {'name': 'PC-003', 'taste buddies': [], 'random peers': [], 'preferences': []} 
+1188691491.8 SEND_MSG 139.154.100.228 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAR1XefmCtOtQjb4WIwRQ35aW291/aiAwcYwJJmpLAbfNeYOxyUVaD5Mx7Cj111APjT32HOo27a8rjNUj 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '246.123.61.193', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '218.68.179.148', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '136.218.169.205', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}, {'ip': '119.8.165.81', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '250.16.60.172', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '91.46.161.40', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '251.31.35.217', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '173.241.168.54', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '234.37.238.66', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}, {'ip': '168.215.173.109', 'age': 0, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu'}], 
'preferences': []} 
+1188691494.2 CONN_ADD 148.85.252.42 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6 6 
+1188691498.7 RECV_MSG 133.156.101.117 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6 6 PASSIVE_BC {'preferences': ['e6yY7AKHspXXa8zNFI+tc/zdJto=', 'rFGkT4DQbFok1VY2MitI+SFHr4Y=', 'io0VAO1WCMRczORcYrJlydqM4bY=', 'BEjijBZiHe43UgGhFo9KI2kbJQ0=', '6rxW+j6/bs5nCUPLuKRGHw9vfkA=', 'li2ED6j1H1SdX1J7NfJU7v0kZAU=', 'T+FiCG0Z55NxeTj7wf+12lT+QLQ=', '3YElcWR5vxX4FPDtRgkoraj+WiE='], 'ndls': 8, 'taste buddies': [{'ip': '137.38.51.63', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFW58KtJe7G3nbLbl8D3sDE6WVtRHV8wTvHG1pSFAMFXMNps0bpnO06AYLCX74I8J7o7EyBFUAdQ3QGY', 'port': 6881, 'similarity': 6620}, {'ip': '35.46.95.154', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZrdB13M2eqIfzsD69+kOXA+2aiE6EVjrNI3RAVAd7GFYWb8wHD0pffOHeHhzW1BcgClut/zSPSkEtw', 'port': 7762, 'similarity': 4211}, {'similarity': 3215, 'ip': '131.141.85.89', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZS4WTCseUZWoi88VnW62oBsKXOIhi0JKnim/RvSAGsMR9mrL25D6dDHpUAcTGcNIG91MfL26IPy8/2y', 'oversion': 6, 'nfiles': 757, 'port': 7764}, {'similarity': 3134, 'ip': '125.118.188.198', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUc7AZzwy0XxaVnLSzFx14ov8FLVAvkDHpTLSwr+AYFAQap+unWFsNb4JL4+Myvwp1CA9gbwBrThTBOW', 'oversion': 6, 'nfiles': 514, 'port': 7769}, {'similarity': 3134, 'ip': '104.207.254.75', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEx3yw1wNC06DyD4tgp6bnzNFPrYZrWgMIoZG5joAfl6rA8OeNewQZXqpMDCc+B9k/wOSTcUAask7N+q', 'oversion': 6, 'nfiles': 1643, 'port': 50128}, {'ip': '190.253.192.50', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMRF1P2P8eOcKUfjfem5JnWsEkLhaUOrrRRFCRa8ADXPjdUtWjeFQVrYJKCaqBKKzLkWJjnGuaQHlCk+', 'port': 7762, 'similarity': 939}, {'ip': '119.206.177.24', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMFEzOs9poNUO4QKobqkg0S1NddVXqoRaOglxn4bALe/2f1qrYH+VYrGBzlcAyPDZmxsDLN+tySY7KGP', 'port': 65535, 'similarity': 356}, {'ip': '183.65.30.75', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU0IFtwQnlUsk1uqgo+89GHMJBhqKLgU/dFXi2OOAYJyh+gOYOBJi/TALntk0/RFyzofxTyFmCo5d+VR', 'port': 6881, 'similarity': 270}, {'ip': '220.41.143.68', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAC9+dgTwgTYeOiQnAsjtO5drURgnOU+zS86O6ZhQAY8LdWFYK6XUCIg9fU/hMsQ0IHAEhp/2yZgSUzKc', 'port': 5737, 'similarity': 251}, {'ip': '83.75.92.127', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALK++nqHnWocGKZTuYQpa//PZG++E3b65RnfUzUbAf5rUcbdhrSlXhQHQ0MuTQyv++hn/gkNyg+nWd0S', 'port': 6881, 'similarity': 106}], 'connectable': 1, 'npeers': 1687, 'random peers': [{'similarity': 14, 'ip': '30.125.32.251', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASXq28ZXiH3YtMG4d2ShG4E3FuNaJ+taG23j4jNVAPiSjBEfG+Kt0aqqzqYuaai7wSeSSU1Y3JYm4lWo', 'oversion': 6, 'nfiles': 853, 'port': 7764}, {'similarity': 0, 'ip': '12.238.245.100', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'oversion': 6, 'nfiles': 146, 'port': 7771}, {'similarity': 9, 'ip': '111.155.94.202', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdHw8VpFhHdn3HbqHoD1I9V1JwU/H80fxgm8fYaEAelQTzF3QgMADg7CfI/9eBOak4LL9fzoMrOZYegQ', 'oversion': 6, 'nfiles': 468, 'port': 7773}, {'similarity': 15, 'ip': '133.237.252.160', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHRZFQ+s/OwWeUBuc/r0MMmYm13sqvnSLSgoMJhIAYjVAX4BKVtxnBnxQZ3iLphj9HvkFthgmDcRNbew', 'oversion': 6, 'nfiles': 247, 'port': 7774}, {'ip': '34.197.60.118', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALtinCNB77hftcNL1nLgh8FoZU0yQNu8hmdNZ4WXAYFj/iep5b485SL6E8hXKWRWqZtl+PeifI4akELg', 'port': 7764, 'similarity': 9}, {'ip': '148.132.156.36', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa', 'port': 7768, 'similarity': 0}, {'ip': '75.149.73.241', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABcCCC2rpUwAOwufzwGuwf/p8GI1wmLcE06ksTGiAe33wEuokrXdfvwliewRKs6sveYGre+8onqJbkjh', 'port': 6881, 'similarity': 43}, {'ip': '214.46.144.120', 
'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYMLY2x1wGtW1SAnET4YcVInuhsMO86DUIzZkeryAHXlZFsGNvw4D3Auch8VIusLHD/Pyo4TXSq3KAiY', 'port': 6881, 'similarity': 8}, {'similarity': 99, 'ip': '249.222.233.214', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYz1VZ7sb9HSC3MJyjf+eCzN9srdHPndMxuZ8m4KATQPEMXXCbLdNojv2OtR4C9S1/VC85qWboV6bvnq', 'oversion': 6, 'nfiles': 4933, 'port': 7765}, {'ip': '158.195.182.222', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZdE1aDHOfQ85Fwht9I6r2dxwix9sbxUvAzsU82eAPF5Y9SmSx1ga1z5MMuE1gAnUkx9zlv9wtHP+igW', 'port': 6881, 'similarity': 76}], 'collected torrents': ['\xdea\x08\x98\xa8N\x03_@\x7f\xce_%W`\xe3/K\xd5g', '\xb1\xc7.\xb6k\x96A@\n\x0c\xd6\x97\x0c\x0cD\x97\xec\x13a\xe5', 'x\xe0\x07\xff\x8a+\xf0\xe8\\\xfcj\\\x86|=\x81\xdb\x17\xd8x', '\x8a!\x1cL\xf7C\x0b\x17+\xe8a\x0eHF\xaa\xea\xa5\x84(\xf9', '\xa7\x90"\x8b#@\x96}\xb2\xe0\xc98\x1a(X1Z\xf8\r\xb8', 'Q\xe9\x8b8"\xd9b\xb4\xb1\xba\xe8k.(\xeb\x92\\\x9ce\xe5', '1\xcc\xc9LE\xba\xaf\xa3\x025V\xef\x97\xad4\xc2\xf2\x18\\\t', '\xf8\xaclW\xa7\xc9%\x86\\\x12\x16\xb8\x0f\x852Kf\xbe\x02]', '\xec^\xac\xc8G\xe8=\xe1I\xfa\xa5\x1b\xda\x17,\xb0s\xa5\x06\x9b', "\t\xacX+\xe7\xab\xf8\xa7\x05\t{'\xd3N\x8f\xd2U\xf7\xfc-", ',%\xe3\xb8\xa5\xf0\xc3hA\xf1\xf8K\xa0\xb6\x0b\xfe>Wl\x0b', '\xe9&f{ 2|\xb7"\xe5\xde>\xa0\x8b\x1c\xd7]\x8c\xfa\xd8', 'W=^\xa9\x1c\x93\xc2\xc1 [_\xeaQ\xf5\xaf0\xf5\xd7A\xe8', '\xab=\xaf\xd32/C\xef\xafCf\xc0\x81%\xf2U>\x8c\x02\x90', '\x9a@\xe5\x18\x13v/~\xee\xb8\x1e\x9c\xea\xe1\x07\x99$\xc1\xf1\xdf', '\xcc\xc9\x90{\xfbs\xb8\x17<,Ywg\xf97\x1f\n\xf5\xcc\xc7', '\xb9\x19\xa3\xd6O\xa6WS\xdc\xa5\xca\xbe]G\x8a\xaa,![\xef', '\x03\xba7\xb2b*\x04y\x9b\xc3\xa0\xf9\x05\x1d\xca\xa6\xc1\xd4\xde[', 'Kb\x83\x1b\x8b\xe6\xf6\xd2#c\x8b\x82\x02>\xaa7\x82\xa1R\xf9', '\x87\xb7\xf3\xeexi\xac.\x06\xb0U%\x9b\xe6\x8a\x1a\x89pH\x7f', '\x03\xe97\x9e\xb4\x1c)\x9a\xd8z&$\xb4(\xb5\x89\x0by\xedV', '\xaa1\xee\xb0/$\x89\x87\xad\x8e\x8bP\xbd\xa1\xabtO\xd8\x9a\xc7', '\x14\x19,\x98\x08g\xd67\x85\x9b\xc4?\xc3Uz\x14\x91d\n\xc1', 
'\xbd]\xefE\x13\x92\xd6\x88a\xa6\x04\xe9\x1e\x1f3\xe5\xba{ G', 'KW\x1f\xb3\xc0\x84\xc2x\xd3\xb1hC\x19\x88s\xbf`\xe4N\x94', '\x87U\xbf\xb8\x1b\xc0>N\xec\x9fw\xe7NlN7\xf8.f]', "\xd9\xa5\xef#\xa8\x82\t#\xf1\xd2>}\xedvc'K\x9e\xcd\xa4", 'T\xd2\xb8\xcd:\x8c\x85\xe0\x19\x1b2M\t\xc8\xf7\xa1[\xa4\xdct', '8T\xe6\x17L\xb4\xecB\x13O9"+\x13Zp\xa5\xbd\x99\xb5', '\xc8\x1c-\xf1ry\x07p\xb3q\xb5\xc7h9\xd1\\\x8dYX\xf4', '2\xe3\xf5\x0f;(}\x9a#\xae\xb3wy\xe3\xed\xf3\x14l\xc3\xc2', '\x18\x87uA\xf7\xefnO,o\x12\xab\x8aW\xed\x81\x97zN]', '\xf9\xb4\x8f\x94P\x04C25\x8c5\xca\xd2\xb5E\xb6w\x96/@', 'jG\x94\x9e\xbd\x04\xbf\x197f\xa9~\xf8\xb8\xa3\x97\x0b\xc1\xc4\xeb', '\xa2\x97\xc0\x1aU\xbf\x9f\xaf\x8e\xb4,\x97\x0f69n\xba?_\x82', 'GL\xb4\xaf\xc0\xac\xf6`\x1f\xb8\xacD`Cy%)e\xf8\x0b', '\xdd\xef\x93*j\n\xfdX~\xea<\xed\xb8\xdeQ\xe3\x91AC\xf6', '\xbb\xbc%\xc2\x9c\xe2ci{\xfd\xdc\xf8\xd7*\xc9G\xa0\x91\xfa\xab', 's\x97I=\r\x1cRi\xa1aS|Y1\x82}\xbc\x8c\x8aX', '\x91\xbeR\t\xad"\xcd\xd1\x90P\xfdF\x89\x8a!\x92\xab\xb5uu', '\xd4\xfeu\x89a\xd6\xb7\xefX{\xa6\xea\x8f\x9e\x92_s\xbcL\xe5', '\xe0B\xf2\x06\x93\xc2\x18\xe9Y\x93B\xf8\xd4\xef\xb2\xb3M\xc0\xae\xab', "\xfa\x8d\x99D\x17\x02\xd7\xaf`\x8f8\xa6\xc3\xdco\xae6'\x90\x83", "\x97[+o\xf7q\x85Z\x0eD'0\x1b\xab\x84Ly\x12\nN", '7\x0b\xc6\xe0Zce\xcd\xab\x86\xad+-\xf6\xfc#/Qp\x06', '\xc3\xbf\x83\x91=\x93n\xf1\x0b\x81\xc8\x91#(\xe5\xcf\xf7RW\xf3', '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\xd74\xcc5n`\x97\xdb~:\xbb\xb5V\x13\xdc:\x18\x15vi', '\xad\x95#|\xccs\xe9\xaco\x829p\xd8,\x8bz\x8b\xcay\xc4', 'L\xe1jw\x84!\xd2\xc0\xec\x90Z\xa5>\xcf\x06\xa2:\x16\xb4%'], 'nfiles': 289, 'name': 'DG-PC'} 
+1188691498.8 SEND_MSG 212.1.193.38 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2156, 'random peers': [{'ip': '217.75.210.226', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYz1VZ7sb9HSC3MJyjf+eCzN9srdHPndMxuZ8m4KATQPEMXXCbLdNojv2OtR4C9S1/VC85qWboV6bvnq', 'ip': '125.40.6.248', 'similarity': 0, 'port': 7765, 'nfiles': 1927, 'oversion': 6}, {'ip': '250.10.82.66', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '164.195.153.171', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '211.1.46.26', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f', 'port': 7770, 'similarity': 0}, {'ip': '6.154.251.201', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '0.57.62.102', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X', 'port': 7768, 'similarity': 0}, {'ip': '56.204.240.87', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl', 'port': 7765, 'similarity': 0}, {'ip': '223.252.54.121', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUc7AZzwy0XxaVnLSzFx14ov8FLVAvkDHpTLSwr+AYFAQap+unWFsNb4JL4+Myvwp1CA9gbwBrThTBOW', 'port': 7769, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691498.9 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 6 1 {'q': 'SIMPLE balls of fury ', 'id': '\xb4m\xa0$\x89\x80\xb2E1&,\xf3\x07O\xe5a\x80 \x93\xcf'} 
+1188691498.9 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 0 'd1:ade2:id20:\xb4m\xa0$\x89\x80\xb2E1&,\xf3\x07O\xe5a\x80 \x93\xcfe' 
+1188691501.1 CONN_ADD 79.224.46.83 8123 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQmUCdCxO43ndiA8ZgfZEoSQVGPaZDXo5weyHhFyAKqE8Atd05rLo0HuXxOQ9/6lPuw8cjYdYJVCVG3h 6 
+1188691504.2 CONN_DEL 203.153.149.88 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV6vx9IVQMs0uv2Z/4k34IWq90wMeSYjQne3KMVXATXG1zLwE1eRSaDM6g9YzQhw/cCYCei6yZv6bWsa overlayswarm:connection lost 
+1188691504.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGyvWGrcfZtlQ+bfsmDmC06nVQ1lEDrJRZNCeVthAcDv9TEQVjzyviPu+E1AO9Y0l/gMijPGrlUWN7xt 6 6 {'q': 'SIMPLE elizabethtown ', 'id': '\xc2\x088\x7f\xd7\xdeN\x81=\xeaY\x98\xe9\xc6\x8f\x04 \xf4Yj'} 
+1188691504.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGyvWGrcfZtlQ+bfsmDmC06nVQ1lEDrJRZNCeVthAcDv9TEQVjzyviPu+E1AO9Y0l/gMijPGrlUWN7xt 0 'd1:ade2:id20:\xc2\x088\x7f\xd7\xdeN\x81=\xeaY\x98\xe9\xc6\x8f\x04 \xf4Yje' 
+1188691509.7 CONN_ADD 156.36.97.103 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKCPs99ohZOqkrGFaQl3OO9wus+QWa3EsFQvY30VACOufAewrc8zo1804JV13SpeXJqBMviVJY75fhxP 2 
+1188691509.9 RECV_MSG 224.239.136.75 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKCPs99ohZOqkrGFaQl3OO9wus+QWa3EsFQvY30VACOufAewrc8zo1804JV13SpeXJqBMviVJY75fhxP 2 PASSIVE_BC {'name': 'Dell9100', 'taste buddies': [{'ip': '152.62.39.21', 'age': 9398, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFpUci+z0V1WEAaQVg3Y0jjkipXWW2JN/2ZAkBDEAbYBCOyN1eRc/IBHgmODv3wTvKfdjZ0QkaLPLNpo', 'port': 7763, 'preferences': ['8my4gx1nOSe/OQnrpFPf5At5iqU=', 'kb5SCa0izdGQUP1GiYohkqu1dXU=', 'gpmqIaI/+2pDKYR8D7z3l1QkW2Y=', 'FIeex0wE99ElTl0iaC8JHkBWe40=', 'SOZakjanRZIUqQxmOqIPlgI8RK4=', 'Z7l6yeJoHMrrfPMSJu6HvLc118M=', 'HWNAJ0S/EnyMOj1c++IZtHtI8mE=', '/9BLYJcvDtf0xl+mWYHrTpKQgis=', 'az6ijpm25pHZzLY3onacLlXQsmc=', 'tNtPCFdNRqAnVu2IlNIFItans/Y=']}, {'ip': '26.254.72.156', 'age': 21852, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbb7V+EDYq+KpDflLPnZ03r5500gr5gQgorXEQw0AG0nNmQptishgm+ZYHuuww/1ornJuIZF6AyxrHLP', 'port': 22222, 'preferences': ['AoqseI0zbQtIDBju7VZ49b4twTQ=', 'ehvO+kiyV3yQmRypESdi3r1vqw0=', 'lM7NEGWMl1ZSzUNAU/nNiCMvizo=', 'KqSCF2grhp+usNp1lqThJFzG7K0=', 'VuC23AryGs7ExXPgMT7/tyudml4=', 'm54w+Lu2+bHsBRhPm5Q1ksSwLUo=', '4twcoKXiqJ4dXmAs/JSXyfT1k0g=', 'Oq2vkiU3k0PcUiGl+Jg+y4gxd5U=']}, {'ip': '228.51.129.34', 'age': 22213, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGXd0c9CQhj4wmTpIp9HoIQoEwIM4VMrclcp1nmeAUhGPRnJv3u9WYbAbKtA5ywkQq/A8VSRFPLFqo9G', 'port': 6881, 'preferences': ['QaDlDvmD+6M+zBGqHU88h5s++5A=', '45/XKWRfv4R5ymCYlcM5HFnnFss=', 'vudu0fvwgMSx92crnCknczy+6w0=', 'hBGGQMP5/lrcz+aL4okOonCL9SA=', 'W3dMvhhN0l2vu3davVaLaX3fFNU=', 'hRgT9PVQiEvoYDNm0SWwQOgPTWg=', 'tcbnoLc+TQtSr0Bi0M2aEDCwIyw=', '7IGHmELEaKt11FkkNtidZ49kdsw=', 'QQg8GxDxqB33aY+wJ8vLkvT27gE=']}, {'ip': '82.204.184.165', 'age': 21315, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPyFMVgJ0OOOKLahblslUhTUsuCbCr1QfqMIe9bDAZJuOlHOESm/RPWqFJWEWikIC+MgjzwKnferMEso', 'port': 6881, 'preferences': ['vcho9geATQtE3mcJ+eg2958G2vQ=', 'uKvS6sps9TLLrRMfRJQpvJIuehM=', 'fnQ9klzxXdZR4uYa1rnxianIBRU=', '1ZI7RjW1U1yLqoFIQsBxihYDj1o=', 
'cx+2BT9e1frXeUum2S4s4h68z/E=', 'puO+FvqNmtVNHnSDxNiRWK/JC+E=', 'J7NGH7R979826usWESHCJ1rVd4A=', 'O6IzLt54bNYRqlIUDWm4NJiRZ1k=', '/nu0hUjzJ0GlQLYem7W7AV+cofQ=', '56H/xxlXjbYlIHWIfApSPJ0qRsQ=']}, {'ip': '145.48.250.81', 'age': 21350, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXbWARpx3zeyHAAcH4cmiiBiUDvKKK/pqipowtGqAJBLG7fMfHy0Ywj3x9l3AjbKExgp1JLtIBJBjJuk', 'port': 6881, 'preferences': ['0TKfTL40m4qwzJb/bW1rDQ4zxeU=', 'LlE/I+UFSmJj7mEIz8l+KfETc/s=', '5Gw2RQ+HGQu0DC1shqZSJGvGcP4=', '2uvwk0pUjMLfKQefSjbG8+nL9H0=', 'Jpoo9O9Qt952wi4K4Snz4/UuyoI=', 'U0ZE32i9hyflyXptJAQjdTT2dkU=', 'S1cdECcKA8NGzgboPxhV44MXMPI=', 'z+5Z+14whuBB6FABlKq7syELYlM=', '8tJ3e7FhQOJW1BazA4mYBi8Acw4=', 'sTvttvDoO3qQh2Z59aM9uo9Qdoo=']}, {'ip': '93.96.41.68', 'age': 505, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASi9qUvdNmrH+GifzxuB/TtBqxnlPozCJkXYgowWADITDUS+LpJS/eYiuWR2U9dkTxXdVsmXz6xwjP1t', 'port': 6881, 'preferences': ['YFPQiZl4G+ezKql8VjC985xgpHI=', 'CqHwjO6WVP8gLhC68zbJ8fvaBb8=', 'wAYH++SzBqDmtJ6wq5MQW2fke9Q=', 'TyRPlAgiR0fop3E4HjlysEnOX2A=', '9e0EjQYA3aPIPJojzNLoj5biXHM=', '2+d4RwPe0HVinsK75BWTAO5DBDE=', 'fDpgqNFAlg9Cv2epKOgJRWZhT60=', 'LRgikZwpV5yv+XcD5InFbow+9Yo=', 'JlJW8h4+52/u2KnFYVGbaQG3dcE=', 'PLlKNOeGuIoOL08RzTbN8WD2Qls=']}], 'random peers': [{'ip': '109.64.185.177', 'age': 22213, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYbPxpjfU5bKMzngdWeqC+RuCIVlDWBvvgh8FEFmAAB4BZ2DMiMqdV/sJtyekGDH9J3JR0RBSUJEN9Xy'}, {'ip': '62.66.33.97', 'age': 8835, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVj22Kw4gjSskCb1TYqlaBh4Hsqm4Bg8jYkutFk6AZh9fOsGrdA1WAnEAygdyUEe03KjcRW+gW82BFsG'}, {'ip': '197.8.162.70', 'age': 14677, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAN0Ce9MrR/Grideu5mR+aQylGrd2RPYYKhgawAD2LBc0tjWb9XT7xf768l721QNIUOPaBMe+reX3j'}, {'ip': '173.40.153.146', 'age': 22213, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFZm5n9Qp5D0mrxOip6jL+oQT5nsmfMsxttF9QVPAKfWWefsyx8Ry+Pf5dFCHRs6zuXtlmTIKLpk/uHA'}, {'ip': '33.56.135.87', 'age': 22213, 'port': 
6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfMKf5aBmVWg0SpUzPjkOLfYSGYtCAYKPR1csL/GAI3a0zwo8CrNHqK9/rPsGSr/N8QNsN2NQ54qoL8H'}, {'ip': '31.249.141.123', 'age': 8841, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5QgqwR2LLDKoOqoIv12u1F2sK/oe5fXXmJ5eynALDhBJLVMmNpMJoxsaCxydr0+JCnV+33Fmngcf0X'}, {'ip': '215.195.77.235', 'age': 12681, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATSm8sUDFywi/EsyJdIXKE9mhBDORM0YCnDzwVxGAI+Vr4hJPAWSOREQT/aJVT/26F+0AR/jUJgXD3VR'}, {'ip': '157.74.92.212', 'age': 22213, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbcSLL0oDAhxreN7dn+n7+nPdp310+tsW7DesAhPAVJxvWJNOt8oY0vdpBbXUxOkssdbkqSk/QWjFiQI'}, {'ip': '116.49.31.248', 'age': 22213, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAckfq7BE/0iEFtlPq8wxTzrY5xe4rmScGLoHs/zNAQdooBsUqEsUSh239BlkvhFB7in0ob2UK7IXY4cz'}, {'ip': '182.195.241.20', 'age': 22213, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAORsPaRy6b9rFG99tZp3AeU55f4hyY0jbOxj5Yt+AGJSq2zN1dl6guo7yK4FTKr4AunsAvdznwaU1oPv'}], 'preferences': ['Mb9RNAufnhk1+/9yNGTUMbQGOGo=', 'I9m1vPSiYfP2yuentL4Wk4H8UiM=', 'ZX2oZf0NZPWnpHHNlxgLBpfs018=', 'EUjnLGNBuCs+R0laKs8BtUpyklI=', '4M5ZQU4RtYgyX5v+2Up1DRk1Mec=', 'MTCfzyzuJXGpH3KvvrXweob1Tac=', '1cYMyHXVd1bEGTmMt73b5cEQKjI=', 'F/7Ksf3BCeZKZFQjB9Pp3ZYRzfY=', 'fe+Q6Y8ivNkY1+hECBCpwt76mRY=', 'I9UvhA3Zcj62yuJX3O+Oqq/sA7w=', 'CEZIcjvmGHTR/9a2KEXn1/+PXeM=', 'S/pyErgvhQ9zNneFH7/g4KXVxkw=', 'y+5Lv42qMvbNtPlALkmvhQYtvkQ=', 'RY8uk1kWRBpoEhZJYs8vTJZIggc=', '5L8dKC79SU4eLwfN+KIJSVCSSKU=', '45/XKWRfv4R5ymCYlcM5HFnnFss=', 'EIG/s5Sb/s5Na6ImyHebYD0T/vo=', '4twcoKXiqJ4dXmAs/JSXyfT1k0g=', '/cDHFyNV48Nsz21Jgm8UorWuVzA=', 'yMvyQWrrpYSpGQVkT0y1TZTVicM=', 'nVfY1qu6BEQqUEg/GWs71gZZpBs=', 'wXYcV/sQ9IaVZklnKMTScq/RHmQ=', '8RGDH3TB8bTQOzLQ/bt4HgjPaNc=']} 
+1188691510.1 SEND_MSG 232.88.126.11 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKCPs99ohZOqkrGFaQl3OO9wus+QWa3EsFQvY30VACOufAewrc8zo1804JV13SpeXJqBMviVJY75fhxP 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '185.132.103.148', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '105.250.157.42', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '201.167.175.20', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '84.61.161.185', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}, {'ip': '147.10.222.153', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '102.126.154.164', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '104.117.170.93', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '94.189.219.92', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f'}, {'ip': '190.156.255.177', 'age': 0, 'port': 7765, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl'}, {'ip': '191.92.12.30', 'age': 0, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X'}], 
'preferences': []} 
+1188691512.0 BUCA_STA 9507  2168 2168 100  1588 12109  60 60  0 10 10 
+1188691512.3 CONN_DEL 185.59.69.186 7765 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl overlayswarm:connection lost 
+1188691512.3 CONN_DEL 77.63.203.41 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f overlayswarm:connection lost 
+1188691512.3 CONN_DEL 31.45.199.52 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcvEGJQwrFOXmAtZ1yhtIg9Dl9gXf9nuSM0G+t8gAVf0xmoycIXRnUp1gTWG5KFp6nWFTfT98ju23u6B overlayswarm:connection lost 
+1188691512.3 CONN_DEL 184.75.225.76 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJ0RVgW0YXc03m0M18bWDPCYh5aTGQmpQHX1Cc5EAZBqB2SGmLcj9xOSF+37jsZDab1UfBA6Xka7XgOd overlayswarm:connection lost 
+1188691512.3 CONN_DEL 185.186.116.255 7765 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOl7h+NFsgetqRHndJriziXO4W+od3JO26bHy0igAJMRvHMeS9gZ6gVhAlA0ULHu/4+MKUoqjbom4WZl keepalive:'Not connected to permid' 
+1188691512.3 CONN_DEL 26.230.179.95 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGGPVCgybUdS0asukEX8WHCxxz4un2pdPwM/29dRAc0xMR5JypzMmBOq9qqlF3bFHnEwKiNveTR4Gx+f keepalive:'Not connected to permid' 
+1188691512.9 CONN_DEL 191.192.118.44 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X overlayswarm:connection lost 
+1188691515.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 6 2 {'q': 'SIMPLE usagi ', 'id': '"d)\x07\x08/\x18l\x06\x18\x9e\xc3\x8a\xe8u\t\xb6\xd9&\xdc'} 
+1188691515.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg 0 'd1:ade2:id20:"d)\x07\x08/\x18l\x06\x18\x9e\xc3\x8a\xe8u\t\xb6\xd9&\xdce' 
+1188691518.6 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHjTifzjoY5XlNZyaMJQS+xAICdHUQ8xytBqUDOJAXWg1Uvj+Kz045dE2r100hBDqhedaPqYTKV7Sara 6 0 {'q': 'SIMPLE bleach 138 ', 'id': 'G\xb5/\x1b\x187\xa0%\x100\xd4>\xe3\xee/\xcd\n\xe1W\x85'} 
+1188691518.6 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHjTifzjoY5XlNZyaMJQS+xAICdHUQ8xytBqUDOJAXWg1Uvj+Kz045dE2r100hBDqhedaPqYTKV7Sara 0 'd1:ade2:id20:G\xb5/\x1b\x187\xa0%\x100\xd4>\xe3\xee/\xcd\n\xe1W\x85e' 
+1188691520.3 CONN_ADD 82.253.182.40 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAmcXDqxbFxL973vaSfH0C58G+SOFehIWFsKsCbRAJzzvG2KBmFC78AfoWr6ZkysDIJTO1ztYgHDRZda 6 
+1188691520.5 RECV_MSG 106.13.36.91 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAmcXDqxbFxL973vaSfH0C58G+SOFehIWFsKsCbRAJzzvG2KBmFC78AfoWr6ZkysDIJTO1ztYgHDRZda 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 134, 'random peers': [{'ip': '127.36.31.80', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfeoB4DB/uKLFY2AnGDZefiUGz7EZ+uHcWnYoLXTASKzTGnbOmuN2DAtgL+TXA04Wm1eM/qB2tVzvWNn', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '40.59.208.108', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'oversion': 6, 'nfiles': 2268, 'port': 54545}, {'ip': '250.217.3.59', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdUY77cjWwiZc2zrAhtmlZmD9vQzfz6YB4LiFK9MAH+vobOmQvqdQ1UY2rImpxhioVSZy4TzvZ7bGUXp', 'port': 7768, 'similarity': 0}, {'ip': '30.121.154.6', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeIk0vBmwA4U11aLfnX4C6pSjK1jZwvzyBWV/eKOAShWDJrkMzZT637mf/ofbegfb7Q8xQOduXqPUkJb', 'port': 6881, 'similarity': 0}, {'ip': '216.7.140.158', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfbl87pz6gnUuGkT+WjpxQNAD0fDVO+bGZGpzq/sARu57n0SlosmAeKAWT/peT4IMb7+2iRu31zIOyyx', 'port': 6881, 'similarity': 0}, {'ip': '82.194.220.30', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbBEBNZasHvUEbAFsLKTl6+5qRiEYsprNX7JVdgxAeZAIjXjy29rO6qsjIpq8mdbA3lal+cHpgI4vmqs', 'port': 7771, 'similarity': 0}, {'ip': '225.142.211.20', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU', 'port': 7008, 'similarity': 0}, {'ip': '65.185.149.116', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaJlEpntmrwMQ3zhL+NmXf7qhHekQod0VdakZ/Y0AX8EbasJ0hxI3+2cOoiU0KrzY3MT0fZ+eJuJ9iXX', 'port': 7762, 'similarity': 0}, {'ip': '95.118.116.79', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYeaL9ZgSj6HeLn0t7gDIdgF5GuMomfjjaZ4N7DHAIKSO1ZWbLuNPfK0MXIGod4LZ2Q3OWeC2K4gW/se', 'port': 7765, 'similarity': 0}], 'collected torrents': 
['\xb0\xd5k\x955\xf2\x1d\xf4\xfcn\xdf\xaat\x00^]\xd4\x9e\xd6\xef', '\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z', 'o\x8dB]\x86$&\xfe?o8e\xbf\x1b\x8a\xbb\x13Z\xdd*', '\xe0\xcc\xadk\x0b\xd1\xe9\xebi\x10n\xe5\xc9\xa0?G\x15\xab\xdb\xeb', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2'], 'nfiles': 5, 'name': 'keithkelly'} 
+1188691520.7 SEND_MSG 103.92.189.169 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAmcXDqxbFxL973vaSfH0C58G+SOFehIWFsKsCbRAJzzvG2KBmFC78AfoWr6ZkysDIJTO1ztYgHDRZda 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2007, 'random peers': [{'ip': '253.174.147.53', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '132.89.252.160', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '14.44.58.166', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '117.225.88.70', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '116.130.130.196', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '208.164.27.41', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '130.167.155.122', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '36.134.120.8', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEx3yw1wNC06DyD4tgp6bnzNFPrYZrWgMIoZG5joAfl6rA8OeNewQZXqpMDCc+B9k/wOSTcUAask7N+q', 'port': 50128, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYz1VZ7sb9HSC3MJyjf+eCzN9srdHPndMxuZ8m4KATQPEMXXCbLdNojv2OtR4C9S1/VC85qWboV6bvnq', 'ip': '200.121.198.245', 'similarity': 0, 'port': 7765, 'nfiles': 1927, 'oversion': 6}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691521.6 CONN_ADD 142.118.97.146 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcw4yX4cVqZm++8z/fUFfPbYmHM5XbfybHMNg//vAermlReI136TGUd3A4J0yyUG6BzoUiY4jZxM0xH3 2 
+1188691521.7 RECV_MSG 7.119.232.69 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcw4yX4cVqZm++8z/fUFfPbYmHM5XbfybHMNg//vAermlReI136TGUd3A4J0yyUG6BzoUiY4jZxM0xH3 2 PASSIVE_BC {'name': 'GATEWAY', 'taste buddies': [], 'random peers': [], 'preferences': ['5Ifll/ciDMDZEfZU1/Yq7mmhnIg=']} 
+1188691521.7 SEND_MSG 246.31.88.105 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcw4yX4cVqZm++8z/fUFfPbYmHM5XbfybHMNg//vAermlReI136TGUd3A4J0yyUG6BzoUiY4jZxM0xH3 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '82.241.125.187', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '216.210.69.244', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq'}, {'ip': '156.129.113.235', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '124.11.212.24', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '4.12.80.63', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '57.17.31.194', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '6.175.237.100', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691524.4 CONN_DEL 169.149.27.192 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMK3TeW1q0VQlz9E7Uf3VG1S8ZRbwIekjK6H4odsAS4Gn9rUCndYF4XemqwSfF11hn3xnVorBpJoWyMj overlayswarm:local close 
+1188691527.6 CONN_DEL 169.112.235.65 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g overlayswarm:connection lost 
+1188691534.5 CONN_ADD 23.192.45.207 9101 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR 6 
+1188691534.6 CONN_DEL 149.160.51.255 8123 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQmUCdCxO43ndiA8ZgfZEoSQVGPaZDXo5weyHhFyAKqE8Atd05rLo0HuXxOQ9/6lPuw8cjYdYJVCVG3h overlayswarm:connection lost 
+1188691535.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR 6 4 {'q': 'SIMPLE mythbustersmythbusters driving ', 'id': '>\xd3\xd2~\xc6\xf8\xd2\x8d\xc1\x8b\x05\xedt\\H\x1a\xef\x12?\xeb'} 
+1188691535.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR 0 'd1:ade2:id20:>\xd3\xd2~\xc6\xf8\xd2\x8d\xc1\x8b\x05\xedt\\H\x1a\xef\x12?\xebe' 
+1188691539.2 CONN_ADD 55.142.46.11 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX 6 
+1188691539.8 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL 6 0 {'q': 'SIMPLE superbad ', 'id': '\xf5G\xa9=\xa8\x84\r\xca\xd8i\xa2+?\x16\xfd\xd0h\xfdo~'} 
+1188691539.8 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL 0 'd1:ade2:id20:\xf5G\xa9=\xa8\x84\r\xca\xd8i\xa2+?\x16\xfd\xd0h\xfdo~e' 
+1188691543.8 RECV_MSG 30.34.60.217 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX 6 PASSIVE_BC {'preferences': ['Tjoo+6p7XZ+j4VuPcqyr0Q1OFoM=', 'jBkwLwG+LwkqDT0EvxPf/AMm7nA=', 'bG24fAdNi73GDqPZFpVB6hbWzCE=', 'ZcuVh4+iUEGV0PgPyKzxXSkUhVU=', 'Uk1b9/G+WEfMVejX5p2r9UU0D5w=', 'JUV14MWsgkn8dgG1ZlBbRxTcMRQ='], 'ndls': 6, 'taste buddies': [{'similarity': 13931, 'ip': '148.232.186.1', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIia3G/I2kekwz9jZcRadrqg2nHY2rfdeIjbqHUIADkzcS1mew490b+csY0MdpzgIljrTZmrOVd9pIhX', 'oversion': 6, 'nfiles': 304, 'port': 7764}, {'ip': '125.190.165.241', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ8jaDP8xrpd0rALjvrIW5BV32k+B1487NVLENB7AYENLAvRy+RRiJ1gnlmwPKpXj799IFxFXTQOUjs3', 'port': 7762, 'similarity': 710}, {'ip': '205.226.135.248', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAV7fnFNe8bRts/5S9gmnfFHokNKIbDui313z9vuVAPi/bAd0li0HRwkWPa3NHP0PbLam1VIyDrHzpViS', 'port': 7762, 'similarity': 251}, {'ip': '62.42.192.255', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYX3i+qA/VCpbtVsJTXDQqamppf9HxjQzLv4eJxjAev9fmZTywhxSGHQoKxmrHWkgL91FGvsLuWf1Diz', 'port': 6881, 'similarity': 209}, {'ip': '45.30.247.146', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa4exUBtrnscYvoks17a73uObkbIQD40NjL0KMeHAF3rAOTtuRSJRmHcFu7U2ee8nD4b14w3cBJAPgyM', 'port': 6881, 'similarity': 180}, {'ip': '13.18.147.99', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABxDIIL2jH9b3fdR1xutyifZ1QTCSYMcnY+7GC31AaSfS/FXRSwvOnxWz36lfrxy+osO6eZmd1Eibg1s', 'port': 6881, 'similarity': 180}, {'similarity': 172, 'ip': '196.234.127.160', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL', 'oversion': 6, 'nfiles': 4835, 'port': 7767}], 'connectable': 1, 'npeers': 1125, 'random peers': [{'ip': '131.197.200.242', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAJ1FB3Mzb/ZpdA1kmu1HUp9fW5SbH0k0+/9uT+7Aa0BjGlOd+EQchLxqQZD7EHBfQ239nzv3GvjKD7/', 'port': 7762, 'similarity': 104}, 
{'similarity': 84, 'ip': '212.175.125.137', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVbBNHkXEu9ME9UGbB/iqOidA4lVTQIzxyL6VmR5ALyckey4zgt2Vo8wGuaudnrGfrQ1AvUoALn/kpAe', 'oversion': 6, 'nfiles': 156, 'port': 7767}, {'similarity': 72, 'ip': '214.19.94.4', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAO2xz/n3IgFjoO7nMeriYE8JQBJnqWUxylXhA05DANhjHR30xpH7iMhECwVSI/5vBA5HNbI4rxd3RI8v', 'oversion': 6, 'nfiles': 4842, 'port': 7764}, {'ip': '117.250.234.182', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOFNqGfTaz08QygR8jcSnyi1qzf5r2SSpVtqC0HcAOR5TEOdM0v3WWIxZuEhUCdbTcwOJY+sSWxyOT7f', 'port': 34343, 'similarity': 63}, {'similarity': 0, 'ip': '12.18.137.190', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARAsShF3rb0jAQj9HnVv496JtUWgpEEIu5Vo59hGAVXCWIDEpdt0/3RplM/NZqpv1xC2+lZPbN7+RsAA', 'oversion': 6, 'nfiles': 197, 'port': 7766}, {'similarity': 172, 'ip': '255.147.0.96', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL', 'oversion': 6, 'nfiles': 4835, 'port': 7767}, {'similarity': 104, 'ip': '244.20.164.173', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJqCVPdshUWm1hkJb0STHKp6+P9VAtCJgWWh9fUgAFrNoxotYkyMw9mKlnWcH8tIDOYLNa++auhFIPUE', 'oversion': 6, 'nfiles': 810, 'port': 7771}], 'collected torrents': ['s!O2\xb8\x81%\r\xc9\xe2\x97\xd0o\xd7\x8aiE\xd8\xf5\xed', '\xddu\x92(+\x99|\xb35\x9644=\xb3=\xec\x18\n;\x10', '\xd8\xb9\xb1\x89\x8d\x01\x0ea!\xac%\xc3\xfa\xc6A\x00z>\xd1\xf5', 'z<\x97fo\xec\xa15_\xb2\xde\x90(2t\xc2\x8c~\xae@', '\xfab\xd6\xe8\x05\x8fWCd|6)C\xd1\xb1(\x89:\xb9\xfe', '\x87^6`\xa7Q\xd0\x9d_\xbb\xf6\x80\xc9\n\xda\x1aC\x1a\x10n', '\x8f\xa4\xde\x88\xdc\x10\xe3\x80\xda\x99\r\x91\x8b\x9f]\x1fqE\xf4\x1a', '\xa0~Wb\x03\xbc\x9a\x93A\xe0\x13\t\xaf\xda\xc4\x1b\xcf\xd4\xd5\xa4', 'KW\nd\xbc\x19\xd7\xacJ$\xa9\xe6\x0f_\x80L\x11\rM\x18', "x\x1b\xa3\x9e'I\xd1\xbf\xc4\xed!_\xe5\x17<\xcf\x89\xcao\x1a", 'RM[\xf7\xf1\xbeXG\xccU\xe8\xd7\xe6\x9d\xab\xf5E4\x0f\x9c', 
'\xfb\xb6\xb8odt\xad\xa4I\xeb\xe0\x163\xa3\xa5\x16V\x99\x03\x80', '2\xe3\xf5\x0f;(}\x9a#\xae\xb3wy\xe3\xed\xf3\x14l\xc3\xc2', '\xa0\xc6R\xb7N\x08Z[fUc\x18\xfb\x9eU\xeb\xab\xf01\xce', 'W\x91\xe4\xbe\xf9\xec\xfb\xd3\xe9\xb2R\x15\x95\xbcU\x90\xca\xd0\xb3L', '\x1b\xef1\xd3\x04\xda\x86\xac\xe7\xcb\xe2<\xd9\x93\x99-C\x15\xf9\x89', '\x83b\xa1\xe9\xc6\xfa7`\xce\x99\xcf\x9d0\xaf6\xa5\xfc\x0bKJ', 'r\xca\x0c\xf5^\xc5\xbf\xa2\xad\x89@\xf8p\xbeI\xfe\x92\xd59\xdb', '\xee\xa7\xd9\xab\x03\x98\x98\x943\x0b\r\x02R\xf8\x0f\xd5\xd0\xbb\xf9\xc5', "\xe5!\x16\xf3\x0e\x8d#\xe9\xe1\x13\xc0[\x16Qh\xadO\xa8'K", 'e\xdfSNxJ+\x90*\xe4q\x00\x87\xe9\xe2\xe3\xa2"\x9a\x7f', '`s\\U\x14.bcik.\xeb\xdf44\xb1\xc9\xd8W\x97', '%Eu\xe0\xc5\xac\x82I\xfcv\x01\xb5fP[G\x14\xdc1\x14', '\xe7\xd3PArC\xb2\xfb\xfe\x07\xe4\x90C\x9d\xebrE\xbd\x8a\xea', '\x13\x89\x967\xc6\x1b\x97\xbaW\x9b\x92\xbdw\xce\xd6\xfd\x99\xf0\x16X', 'Y\xf2\x06\xb59\x8d\x16\x19L\xd1![.\x9c\xb9\xee\xe3(\xe3T', '8T\xe6\x17L\xb4\xecB\x13O9"+\x13Zp\xa5\xbd\x99\xb5', '\xd1\xbc\xb2\xcb$C\xe3\xbbKJ\x80r\n[\x19\x16q\x8c\xdf\xcb', '\xa9\x12\xf0\xceb\xd3\xb3y\x89\xb7\x16\x82\x06\x02\xfc \xf1//\xa1', '\xcd\xc4\x8e\x9f~4\x0f\xe5\x06H4\xbf\x05\xed\xeb\xa0\xae\x81\xef\xf9', '\x04t=`\x8eQ]\xfb)\r|\xe1\x1cI\x8f\xb8\xed\xe23\x98', '\x87\xb7\xf3\xeexi\xac.\x06\xb0U%\x9b\xe6\x8a\x1a\x89pH\x7f', '\xde\xd0?!C\x1b\xc8W\xe7-\x91\x02\xe8\x8b6\xdd\xd9\xcf\xf5:', '\xd4\x07\x06\xb4.\x85/T\xc1\xe0\x19W\xfeq\xb3\x08\xca\x85\xe6\x89', '\xebm7\xbf\x8c\x0bq\r\xb9\x13\x17\x9fE\x9a}/\\\xe19\x1d', '\x87x\x00\xe5V$\x9e~`Tt\xa2\x1fFo\x96U\xa9-\xe5', '\xc6\x1b\xa0\xc7\xd7\x1f~\x13\x91\x92pku\xdcn\xa5\xe4~<3', '\xf6\x12xL\xfb+\x15#\xc6\xff\xaf)\x1c\x01V\xb5\xdd\xb0\x7f\x99', 'T[?\x0ex\xe9\x02o\x13\x19\xb1\x80\xbbhF\xc2saK2', 'U\x02@\xa8\xe8\\\x99X\xb8k\xa4\x8e\xca#\xa8\xa9\xcf;\x8c\xf9', 'rh\xf1uW\x80\x9f4.\x80\x1f\x1f\xd19\x9b\xbcg\xf0\xd5\xef', '"\xe5.0\xd9\xb8{z6sn-\x06\x86\xcf-\xed\xb2\xba\x04', '\xde\xed\x12\xffZ\xb1`\xf4He\x0c\x8eJ4\xc6d\x02\x1d\x9cP', 
'j%\xe7\xb6u\x88\xf0\xa81\xbc\xa2\xea\xacu\xdd|}\x88\x0e\x0b', '3\x9c\xa8:\xf9\x87\xbfy,\xae\xf2\x9a\x03\xbeA\xa5\x93\xd1P}', '\xcb\xe8B\xe7\xeb\xe5(;\x1f9;\xc1XD\xbbz\xdb\x1bTm', "m\xafS+`\x81\xdf\xac\xe3\x1d\xd8'\x95\xf6\xcd*\xc2\xda\xa9?", '\x8a!\x1cL\xf7C\x0b\x17+\xe8a\x0eHF\xaa\xea\xa5\x84(\xf9', 'y\xdb\xf5S*M|!Q\xdb%\x81\x95\xb0~a\x905\x9d\x8d', '\xda\x07s\x90\xdd\xd0P~i\xe6\xe2\x18`\xf5B\xc2Tj\x05\xcb'], 'nfiles': 195, 'name': 'gamer'} 
+1188691543.8 SEND_MSG 219.168.216.183 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2017, 'random peers': [{'ip': '123.39.222.238', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '194.208.68.237', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq', 'port': 7772, 'similarity': 0}, {'ip': '96.234.166.127', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '175.158.229.43', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '94.1.80.161', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'ip': '154.92.5.12', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '144.70.212.246', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '131.166.141.242', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIia3G/I2kekwz9jZcRadrqg2nHY2rfdeIjbqHUIADkzcS1mew490b+csY0MdpzgIljrTZmrOVd9pIhX', 'port': 7764, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'ip': '33.58.142.231', 'similarity': 0, 'port': 54545, 'nfiles': 949, 'oversion': 6}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691545.4 CONN_DEL 68.126.33.178 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHe+1fZgl1jLoMJIEY179JwEAhkgkOIYzVLbt3g5AY2WQ7uPo7RgDLDjZOsf26r1GDehMYhvyP/pA1mq overlayswarm:connection lost 
+1188691545.5 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR 6 5 {'q': 'SIMPLE mythbusters driving ', 'id': '\x8a2\x06A\x8b\x04H\xceW\x15\xcar\x0f\nLg\xad\xc6y\x17'} 
+1188691545.5 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR 0 'd1:ade2:id20:\x8a2\x06A\x8b\x04H\xceW\x15\xcar\x0f\nLg\xad\xc6y\x17e' 
+1188691547.2 CONN_ADD 148.236.115.117 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 6 
+1188691547.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 6 4 {'q': 'SIMPLE brasileirinhas ', 'id': '"-\xb9\x97\x82\xfd@\xb4\x0f8c\xb1.\xf2\xd8\x13\xdbh\xd3:'} 
+1188691547.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 0 'd1:ade2:id20:"-\xb9\x97\x82\xfd@\xb4\x0f8c\xb1.\xf2\xd8\x13\xdbh\xd3:e' 
+1188691553.2 CONN_ADD 231.46.235.30 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAA3lZAIaufb2HQa62l3xn7k2sYHvluTWX4zcTDnqAIs/wnLp6InIWrVEXs2ZF543mz+9kDxLY9fa6hW2 2 
+1188691562.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 0 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '\xe0\x1e\xfd\xac\xd4VF\x97\x17\xa3\x15#\x8a\xd8\xd9\xd50\xaes"'} 
+1188691562.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 0 'd1:ade2:id20:\xe0\x1e\xfd\xac\xd4VF\x97\x17\xa3\x15#\x8a\xd8\xd9\xd50\xaes"e' 
+1188691562.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 6 5 {'q': 'SIMPLE brasileirinhas ', 'id': 'R?:v\x9b&\xadn\x19\xa0\xf9\x0bN\x15\xa3\xc2\xbe\xdc\xd9A'} 
+1188691562.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X 0 'd1:ade2:id20:R?:v\x9b&\xadn\x19\xa0\xf9\x0bN\x15\xa3\xc2\xbe\xdc\xd9Ae' 
+1188691571.4 CONN_ADD 253.128.173.167 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 
+1188691572.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi 6 0 {'q': 'SIMPLE metallica ', 'id': '\xd6\x91!\xa2`\xd8(\xe6\x01qA\xc2\xfdv\x8a\x1e\xe5h\xe0\x9e'} 
+1188691572.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi 0 'd1:ade2:id20:\xd6\x91!\xa2`\xd8(\xe6\x01qA\xc2\xfdv\x8a\x1e\xe5h\xe0\x9ee' 
+1188691572.3 BUCA_STA 9508  2018 2018 100  1588 12112  58 58  0 7 10 
+1188691572.3 CONN_DEL 178.39.239.9 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIia3G/I2kekwz9jZcRadrqg2nHY2rfdeIjbqHUIADkzcS1mew490b+csY0MdpzgIljrTZmrOVd9pIhX overlayswarm:connection lost 
+1188691572.3 CONN_DEL 132.69.145.179 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQt1HJVhsHObMjRmnoaZnZpuP3y/HfANBJfgwUHUAYT5fbb4gwk50rHerfQ3K+NLKgKtfL3I+hyDd71D overlayswarm:connection lost 
+1188691572.3 CONN_DEL 173.198.247.182 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF/Lk9P06tZGGJ0LmvhQYaa/eFB1XohzqAsT3zUbAdBjkW6s91ww6UOA9htCge+ly2iHBKc/vq9tvB8o overlayswarm:connection lost 
+1188691572.3 CONN_DEL 206.79.174.102 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAE3u3Y0cFFXtRMXcQT+PHBjSqa+UvHzsxtH3xNQAI2zvOIUJf1vmaSjF7kIrrCZW9zSYox8XliqZzlI overlayswarm:connection lost 
+1188691572.8 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 1 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': 'E\x964eS|\xfb{\xdf\xc8\x1e\xc6\xc8\xc4\x0c\xa7\xdabO\x0f'} 
+1188691572.8 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 0 'd1:ade2:id20:E\x964eS|\xfb{\xdf\xc8\x1e\xc6\xc8\xc4\x0c\xa7\xdabO\x0fe' 
+1188691575.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 2 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': 'f\x95!\x97\xb4*\x1d\x90\x18\xed2\x81\xea\xa9\xae\xe5`\x8c\xbff'} 
+1188691575.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 0 'd1:ade2:id20:f\x95!\x97\xb4*\x1d\x90\x18\xed2\x81\xea\xa9\xae\xe5`\x8c\xbffe' 
+1188691575.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 3 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': 'D9\xe0\xa3\xb9\xb4`\xb6wr\x0c:\x06X2\x9b\x137\xcd\xd8'} 
+1188691575.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 0 'd1:ade2:id20:D9\xe0\xa3\xb9\xb4`\xb6wr\x0c:\x06X2\x9b\x137\xcd\xd8e' 
+1188691575.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 4 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '\xa7^\x11\x80\x83\xaf\x00]\x89r\xb1\xda}C\xcc\xfa\xb92\xad\x9c'} 
+1188691575.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 0 'd1:ade2:id20:\xa7^\x11\x80\x83\xaf\x00]\x89r\xb1\xda}C\xcc\xfa\xb92\xad\x9ce' 
+1188691575.7 CONN_ADD 18.33.116.27 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWf7jl3enENjILEBY38r1/Pbn1bqRQDFDuyrtzxlAVpJdSIuIhNeE3KhAwoA43zdD1wMFdbTQsA4C/Qk 2 
+1188691590.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi 6 1 {'q': 'SIMPLE def leppard ', 'id': '\xc2h\xbc\xc7\x8ae\xa0\xf9\x96!_\x83\xd9\n?S\xdc&\xc3\xcd'} 
+1188691590.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi 0 'd1:ade2:id20:\xc2h\xbc\xc7\x8ae\xa0\xf9\x96!_\x83\xd9\n?S\xdc&\xc3\xcde' 
+1188691597.1 CONN_ADD 76.108.175.189 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKJbHdjkM3CCU1toIzd2tth498ZYEhifXt7JH6+AAJmF0cZeuwxW6M0ZjzziEeRR2SQ9nCOyuPKzBh4x 2 
+1188691597.1 RECV_MSG 87.35.29.208 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKJbHdjkM3CCU1toIzd2tth498ZYEhifXt7JH6+AAJmF0cZeuwxW6M0ZjzziEeRR2SQ9nCOyuPKzBh4x 2 PASSIVE_BC {'name': 'robert-44smxtw9', 'taste buddies': [{'ip': '242.196.196.19', 'age': 1920, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAIvpoqeLPjcHE14vb52IlsCsL/D/F2ONqvxiMhQAD5wcfYy6iuf1jHPiOY+yhFPn5LSbaX5DFoMlw4B', 'port': 7774, 'preferences': ['2Hm/Mbx8Uhax214jrfLwoCpjSdk=', 'qM/hSjjgBny2MvB89zWWQ5yGswU=']}, {'ip': '47.5.161.237', 'age': 893, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ8jaDP8xrpd0rALjvrIW5BV32k+B1487NVLENB7AYENLAvRy+RRiJ1gnlmwPKpXj799IFxFXTQOUjs3', 'port': 7762, 'preferences': ['cZdtVoZIRqz5NdktoAANY65cVxM=', 'GWo8kLQn7FPE0ykEOG9Mol6u2zI=', '22n3l6neCq+av5gXp+5M6WNEpRQ=', 'Y4Y0O63tVkeSXgrCqgxpCgJez48=', 'f+SJraXWBBYpBc+5BFUkHthyQFM=', '5KtG+gKYFVQC2cqnSL4ybKga3Xs=', 'BoVSgoFy5vMZ3gJGaXdiRyZzUYQ=', 'HYMY/zP9ucOdDsUjfGnJH+HG2t8=', 'REBmBLYGDOo9sRnG+pfo2PDKb3s=', 'vV3vRROS1ohhpgTpHh8z5bp7IEc=']}, {'ip': '4.155.68.3', 'age': 8962, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcPCAE7Z87ovfSCctirlJZjuvOkVL1HrHdG+j4A+ASYWT4WrlSYtI6DU1k0cYbvRRjml/+u8cH1PC5bW', 'port': 6881, 'preferences': ['/ib4/jNysQ6bor70khih6MF9nqA=', 'rHFjsJ+I2Jj9gcmc9PZenTrTpBs=', 'p4mxMVHlXygbWuOVz0FFhPUjOmQ=', '7EYjdqh+iuDIIzXSsxhFelzEqj4=', 'h1Ya3fGZY3lrOvYhuc1wv5aLLz4=', '+mLW6AWPV0NkfDYpQ9GxKIk6uf4=', 'Fc7iH5wIYQ+z1y8+U4VBNjoaUo0=', 'B0PRQGoHvSzMyvh5FmhxXRaB1rk=', 'l6hcnQcHx2K0FiTkw4eXbpPMFOw=', 'XA9hLoZ0XtoQvDYDGc/1KT5RY3E=']}], 'random peers': [{'ip': '255.103.126.28', 'age': 6445, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASXq28ZXiH3YtMG4d2ShG4E3FuNaJ+taG23j4jNVAPiSjBEfG+Kt0aqqzqYuaai7wSeSSU1Y3JYm4lWo'}, {'ip': '45.49.198.78', 'age': 2380, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAC2OtO8rDcCNaWYqNrkkZVdsXGW/Lw9lqf3NT5G8ASeg492CwnGZ0g6aHna0HQGEc/KkWeIUGtAs8zgZ'}, {'ip': '87.41.133.225', 'age': 2813, 'port': 7762, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL7fsNzTvwMapJzUzDLD352v9W+h7JOH7StbIgblAIiYIl5O0I9ZhNrUX5cfyTLtnhzz9wvwamv1MgWS'}, {'ip': '71.184.45.228', 'age': 2919, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc8NsEJBBieCTExn6H9eFmpkvpAX1/DzYWsZzjqyAFM7Q+nmAe0mm1xgubkEEA7YHBXsjFk/Jp4Trye2'}, {'ip': '56.194.221.126', 'age': 3310, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYAcShmnhisiIQVP5Q9NtQojMvEWzV7CMaHA41HIAJ47AuXNh898D3SrxaNIF/Z7xlFdS6+zYz+vG6lN'}, {'ip': '239.121.243.142', 'age': 7131, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYYdz8zM0KduhNu7NmC2hxRDZohhGaDHtSZtyinnAKyyTg6jT0+qIt1iTnORuk1hhGjIbClWWb21XFeW'}, {'ip': '192.91.99.117', 'age': 3236, 'port': 7768, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdUY77cjWwiZc2zrAhtmlZmD9vQzfz6YB4LiFK9MAH+vobOmQvqdQ1UY2rImpxhioVSZy4TzvZ7bGUXp'}, {'ip': '96.144.97.101', 'age': 5249, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGH3Mf51KmNVd5Gy9oXcmt4zMnUyO4ACF8WfQnCDAGX2A1KAEkpvveaSEZckWxrS7p/DdHJVX9vhHhP9'}, {'ip': '236.116.105.79', 'age': 3236, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYnDZIr9lV87N2fFwQ3xLCpRBhaeBSmex2yUcYPGAHWk2H7SVV5ojzQgWV4mTikDe7PKYYeet2+5qoFR'}, {'ip': '83.7.228.133', 'age': 3259, 'port': 56216, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEANVpA4H8psMiZjjSqOKhHQT21a5fUeb8FZn6dqEKABZxZWgRWkUlbkwvLZ4buW0cZ0L9oGEbKrx2x163'}], 'preferences': ['+mLW6AWPV0NkfDYpQ9GxKIk6uf4=', 'qM/hSjjgBny2MvB89zWWQ5yGswU=', 'SWCa7CpRZGSg/3sBR3o2HkpOFWo=', 'zgbYYIIwNDBZvuqu/h+5s/WFiSQ=']} 
+1188691597.2 SEND_MSG 90.49.170.179 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKJbHdjkM3CCU1toIzd2tth498ZYEhifXt7JH6+AAJmF0cZeuwxW6M0ZjzziEeRR2SQ9nCOyuPKzBh4x 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '190.49.127.161', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '178.116.32.142', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '107.191.191.61', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '102.113.39.187', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '165.72.140.90', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '155.149.219.182', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '119.112.139.231', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691597.5 CONN_ADD 30.183.137.182 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPzqSaku0/2vKJ2KUfBZCg8x+r5qQq67m2Hc4NrBAOLKrNigxiiu1sKLREvs1VZJ5njYgL5qtSp1UPDL 6 
+1188691597.5 RECV_MSG 197.70.193.97 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPzqSaku0/2vKJ2KUfBZCg8x+r5qQq67m2Hc4NrBAOLKrNigxiiu1sKLREvs1VZJ5njYgL5qtSp1UPDL 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 187, 'random peers': [{'ip': '212.19.229.41', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbI153geQizRT93XSgIxQih9CmUPUpTx7OuPHMTbAUxIAP8ABgE+k7Zs18UgMivbGWBdsMBdGuEWkd7R', 'port': 6881, 'similarity': 0}, {'ip': '186.25.91.46', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU', 'port': 7008, 'similarity': 0}, {'ip': '218.28.230.199', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAe4Ea9CyS+a23UFipt87HYsNA/QJNcAQmzB1wo4GAacmoALOKAlGgeTcs4eBgXJx1q9iHpt+QDKgHYPT', 'port': 7764, 'similarity': 0}, {'similarity': 0, 'ip': '204.85.15.21', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZa2rtfRshNUkCalS+BQqyC2TwS+JLOLI45JrJBgAEmj9qaZ41vom2nWVHFxyUgw97oKUW7jV5h8bakc', 'oversion': 6, 'nfiles': 369, 'port': 7769}, {'similarity': 0, 'ip': '105.122.173.116', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdHw8VpFhHdn3HbqHoD1I9V1JwU/H80fxgm8fYaEAelQTzF3QgMADg7CfI/9eBOak4LL9fzoMrOZYegQ', 'oversion': 6, 'nfiles': 498, 'port': 7773}, {'ip': '76.253.34.162', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAb+YqNm9ZvVXaPBKwrHtoYUnOVePXxILEaWZEPL5AdNUUkAcVOsmkW8iYhFeGVPHbvACjCoY7AL+c7Bd', 'port': 422, 'similarity': 0}, {'similarity': 0, 'ip': '159.207.66.230', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'oversion': 6, 'nfiles': 166, 'port': 7771}, {'ip': '110.203.123.36', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '184.6.77.194', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'port': 7772, 'similarity': 0}], 
'collected torrents': ['$\x86\xb2x\xda\x0c\\\xfcg,\x05\x13N\x97\xb7\xe1\xcb\xe2\x83\xe6', '\x12}l\xa6\xac\x7f\xeb\xbf\xf9\xeb\xf1^\xe5\x92x\xe3n*\xf6\xfa', '\xe88\xa6\xa3\xc8\xbcO\\\xd9\xe8u\xb2\xaf\xb3\xdf^U}\x87\xc3', '\xaa\xd7@\xe9\xfa\x8a\xcd\x03\xe9=\xda\xc5\x0e1Nd\n\xef\x8f!', '\xac\xe1\xcfi`\x07bWY\x8c*\xd1\x04}\x9e\xaf\x91\x96\xd2\xc2', "\xf2l\xb8\x83\x1dg9'\xbf9\t\xeb\xa4S\xdf\xe4\x0by\x8a\xa5", '\r\x8db\xec\xd2h>\x9f\xa4\xf5\x80}\xda&\xb4\x84(\x02\x10\x85', 'o\xf5\x9c\xff\x92\x0e\x83\xcb^\xbb\x00Ep\x836.:\xb4\xe64', '*{\x1e~\x83\x02-\xad"\x12c\xeb\x89>\x88-\xd5\xa3\x97\xb5', 'D\xc4\x1c\xfd5s\x00\r\xd7\xa6<RWK\x8c6\xe1\x14\xeaU', '\x04t=`\x8eQ]\xfb)\r|\xe1\x1cI\x8f\xb8\xed\xe23\x98', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2'], 'nfiles': 12, 'name': 'ZENI'} 
+1188691597.8 SEND_MSG 119.194.116.217 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPzqSaku0/2vKJ2KUfBZCg8x+r5qQq67m2Hc4NrBAOLKrNigxiiu1sKLREvs1VZJ5njYgL5qtSp1UPDL 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2032, 'random peers': [{'ip': '142.23.240.36', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '35.167.165.151', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '110.62.33.139', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '154.203.24.74', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '4.27.90.192', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '244.171.241.88', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '108.59.36.2', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '215.78.120.213', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL', 'port': 7767, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '126.251.156.168', 'similarity': 0, 'port': 7771, 'nfiles': 195, 
'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691598.4 CONN_DEL 143.106.97.160 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY3/MbfvgHreul0QBqRJ+pdwX2jgrZNiCWviUVvCAY6x4oLiwLCoEwlCYGPIYtj6mm1qgFdWNKj4mXuL overlayswarm:connection lost 
+1188691600.2 CONN_ADD 248.165.207.122 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY8giS2wU/ZTjB4S9Y3+ChVGqgoBq/ttNOf3lFkwAdSKEjtqjZy+JMhTUAck1T6qeuLb8VkW0xXaZK1I 6 
+1188691600.5 CONN_DEL 65.40.77.143 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY8giS2wU/ZTjB4S9Y3+ChVGqgoBq/ttNOf3lFkwAdSKEjtqjZy+JMhTUAck1T6qeuLb8VkW0xXaZK1I overlayswarm:local close 
+1188691604.2 CONN_ADD 93.188.173.68 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGcymk4ZhHkgvjoE+lQJvTvjIDD1UUxp50Lvh7q8ACwOHUtBsCC9f9Pohlw7SyLJU0kIaYZL7bgLtQiG 2 
+1188691605.4 CONN_ADD 18.128.105.169 7774 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAX/1T2dVBW4GPes6jUjOcPGeC9f276h4hf8B1umPAdY1I3aE4Qcqlm9A6ZIRfIfLAIiw8/3tLEB/XPRw 6 
+1188691605.6 RECV_MSG 39.148.227.138 7774 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAX/1T2dVBW4GPes6jUjOcPGeC9f276h4hf8B1umPAdY1I3aE4Qcqlm9A6ZIRfIfLAIiw8/3tLEB/XPRw 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 40, 'random peers': [{'ip': '219.234.176.127', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'ip': '120.103.246.221', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '208.87.168.5', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc', 'port': 7007, 'similarity': 0}, {'similarity': 0, 'ip': '210.111.52.152', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9', 'oversion': 6, 'nfiles': 250, 'port': 7767}, {'ip': '10.148.77.14', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '245.66.81.114', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaJlEpntmrwMQ3zhL+NmXf7qhHekQod0VdakZ/Y0AX8EbasJ0hxI3+2cOoiU0KrzY3MT0fZ+eJuJ9iXX', 'port': 7762, 'similarity': 0}, {'ip': '121.223.7.58', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZVdqh+mneXwgChoId6J9U2/p03TimVO8UjPVRZUAA2GN75uqhJr67UACIjbXnTQ7utDx6ThLCogJfIo', 'port': 7774, 'similarity': 0}], 'collected torrents': ['\xfb\xbfP\n\x81\xa9\x90\xbd\x0f\xb4uP\x80a\xa8\x1e\xa6\xcan.'], 'nfiles': 1, 'name': 'ryosmbp.local'} 
+1188691605.8 SEND_MSG 13.153.141.16 7774 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAX/1T2dVBW4GPes6jUjOcPGeC9f276h4hf8B1umPAdY1I3aE4Qcqlm9A6ZIRfIfLAIiw8/3tLEB/XPRw 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2038, 'random peers': [{'ip': '117.2.204.145', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '41.113.205.208', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '136.129.180.238', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '96.149.215.199', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '230.106.103.63', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '248.1.15.123', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '167.78.158.160', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '153.13.6.13', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL', 'port': 7767, 'similarity': 0}, {'ip': '156.251.85.217', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL', 'port': 7767, 'similarity': 0}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691611.1 CONN_DEL 237.2.142.83 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMzFpCvHhfv5AVCJSVUJ2h9+pHObB8cxloPun4R0Ac1ahnsJfgz28ru3g6JNPPXPDSezm+fHXTwtIUrh overlayswarm:connection lost 
+1188691619.1 CONN_ADD 203.77.71.216 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY83o2aNctjx0rLPfHNaIqQy2YK+0whkhVmUBjljAEanSBmYOZuwPsTx9VKWFhJkYC7buBsJZaEi50O/ 2 
+1188691619.1 RECV_MSG 84.236.79.10 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY83o2aNctjx0rLPfHNaIqQy2YK+0whkhVmUBjljAEanSBmYOZuwPsTx9VKWFhJkYC7buBsJZaEi50O/ 2 PASSIVE_BC {'name': 'zumik', 'taste buddies': [{'ip': '71.196.224.173', 'age': 856, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf3KPGK31cKyUXKp6+TM4cD6lffGqQOUG12G6v8gAdbCximyhZcmKbf/fP1jxrZwxIfFnyGZ6Sosl8fw', 'port': 32034, 'preferences': ['F0U6k3dXtJ7o8wll3DS3N1uxZOg=', 'J8tUXgj1qit2bvTiMwlix9rl1B8=', 'qfboyvkmMFh/5LW7hTNJLCuTFds=', 'OhAYOT5bN0pjc5gMzWO9ST80pU4=', 'sSjgKhTD6WCr/Paupeafxv3j970=', 'IAYBzPSbgM0shS/WBEqY56wYnUI=', 'UkAkYhz4IFJ7ndSssB5d7a6Yqtg=', '8YVlEuW+EQ7g5+ol8a6f3QOE85o=', 'FYXbYtmFUwR4Wl/6d13nUK4t2iw=', 'Nx59RdFxnRajAAVGPYPWfQLzL/o=']}, {'ip': '98.96.151.46', 'age': 1261, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf214D+kZDAHVW0Ftof2QedA2+DRLF5R1n4dc5zyAM9AkzvKH2ykmHu+9Zbpks82dFEoAVucrsXU1t1Q', 'port': 6881, 'preferences': ['bxnmC5MKwvzcAZHdNRyP8Hvenu4=', 'vPeQJpgWWMxCOuW8kuoyGx18jI0=', 'ZBGltRBiUf1O0kESNLsTLNNe9wQ=', '4yNmja7vS5KGB2tpzFcOBlAX4Lo=', 'pBW1lPqidXb6TUtyXlc7LaYcAbc=', '75FR8ubLnD1+JO8BYL136SUFlTQ=', 'WTkRWTYx4HITyuF1BUqxdArxgws=', '3e+TKmoK/Vh+6jztuN5R45FBQ/Y=', 'F8YfxgzqKHALFdO3osTd3/cfu/E=', '44zzzuSkuBR8IFyVqBfWB/VdEoI=']}], 'random peers': [{'ip': '24.30.31.167', 'age': 7166, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWY3lrq7SwMxk2x1lITaFV6w9+MaDMCAfc8dhlSWAf4TJH9zfPYXNVuNq1O1udqBOs682Ap4PzkBs+eC'}, {'ip': '218.91.125.149', 'age': 1871, 'port': 7767, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL'}, {'ip': '81.132.241.31', 'age': 337, 'port': 22, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL/l2IyVa6lc3KAqQyEnR++rIzi+AamnbzXHCxOFAFy67COiBhrC79PLzzUiURbHDx21QA4p8w3UDHLA'}, {'ip': '27.32.111.176', 'age': 4121, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATPL0vMy0vppj6iZY5aMcnpn/CROiuoh4Bws13lHAJ70adi+A3GSrz7imqThN9q2ej7DdS7tMeTl9O8S'}, {'ip': '18.36.161.160', 'age': 589, 
'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATCmy3l37oUFseHQ6V2Fk8G1bP/CYzxK4+GEN4hHAbAVmXrFwMlauq5LSLmCV9bCUYZcZvbTz2HOX3Mp'}, {'ip': '173.22.171.128', 'age': 104, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZYtevzxy8p2vDU5MbV+YSJEQQft3fE6TGyS+X9KAbLwV+pvcDABYwzLeNiRGVFhQ7wD4x/MOAZY2x18'}, {'ip': '177.92.137.116', 'age': 1089, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADP6QrhIwSw7UgkmaBHT86vLBLInpy4OKF+/mdgaATkYdRNKCv7j2gV7kY7pM0V34NvlZdOs9H6vhK/Z'}, {'ip': '131.68.179.20', 'age': 1022, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAO2xz/n3IgFjoO7nMeriYE8JQBJnqWUxylXhA05DANhjHR30xpH7iMhECwVSI/5vBA5HNbI4rxd3RI8v'}, {'ip': '226.252.3.73', 'age': 88, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJ8DZSqbr0WDFR323ylqG/9s7Rv8id0mW8AxA3oYAJS/NPQAABNexm/qAbq4KiH/F9ZGvD8zbuqwM3U+'}, {'ip': '200.91.55.61', 'age': 10582, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACl/E3N3V22pRDcSjE9B1iIYTuhPbN8/peuIhaTuAJTw6STYhNbk8rs+KqEswbiVTr+BsMzLZPhuGCAE'}], 'preferences': ['9igTRZpk6b0fcAyLKl9w5b4MQZc=', 'lfLfqx3KFrtcPr51VZ+ZBPB9uUU=']} 
+1188691619.3 SEND_MSG 14.239.168.64 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY83o2aNctjx0rLPfHNaIqQy2YK+0whkhVmUBjljAEanSBmYOZuwPsTx9VKWFhJkYC7buBsJZaEi50O/ 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '210.101.132.42', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '253.105.138.134', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '153.164.105.142', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '92.210.216.92', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '32.49.80.201', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '72.92.49.247', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '205.193.16.123', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691621.0 CONN_DEL 225.112.246.60 7763 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUafIzie9qskP0RVbsINjhBaLWO6JrvPSsZn2M9OAWOeGTukYavJnbXZvLpi0TScXc4LBthuOwKPQzMc overlayswarm:connection lost 
+1188691622.6 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz 6 1 {'q': 'SIMPLE nudism camp ', 'id': '\x10R\x98\xba\x87\x005\xe8\teX\xa6<J\xe6\x0cX\xea\x8bc'} 
+1188691622.6 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz 0 'd1:ade2:id20:\x10R\x98\xba\x87\x005\xe8\teX\xa6<J\xe6\x0cX\xea\x8bce' 
+1188691623.8 CONN_DEL 195.67.23.11 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVlAG71pFCXmivk2xV8jdD7wd3uSwU0QTMFcROoPANm+W5r4cn0A2rGjgCVVT9/uxwSF9+OQDHVXwZah overlayswarm:connection lost 
+1188691625.4 CONN_DEL 46.225.88.88 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGyvWGrcfZtlQ+bfsmDmC06nVQ1lEDrJRZNCeVthAcDv9TEQVjzyviPu+E1AO9Y0l/gMijPGrlUWN7xt overlayswarm:connection lost 
+1188691625.9 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR 6 6 {'q': 'SIMPLE mythbusters drunk ', 'id': '\xe0r}\x11\xdb\x1a&\nQ?\xcc\xa9\xc7\\1U\x9e`\x95\xcb'} 
+1188691625.9 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATpAJUaOgrnPqlqBsN/DFyNFqw5JsSx2sXzDqh4iAAeVEs6iEHtWEg9AgxONvwVSkziPNl+muUuy7toR 0 'd1:ade2:id20:\xe0r}\x11\xdb\x1a&\nQ?\xcc\xa9\xc7\\1U\x9e`\x95\xcbe' 
+1188691625.9 CONN_DEL 183.170.86.126 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARNZfv2M6KWAxEqfFcdG0bY1cV7mObZt5Rsx52QyAbkS25gWunh3rG5DqcECFvXbT9tSH3LHH7z6y2nB overlayswarm:connection lost 
+1188691626.6 CONN_DEL 156.116.45.142 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcgn9eYmkjIExKMJH/N/XG5NO4rZSgmNgiFqXEUzAYS+rxdD/9giKZciG1aylLzQ8+lJe0bp1Gp+HF0z overlayswarm:connection lost 
+1188691632.3 BUCA_STA 9509  2043 2043 100  1586 12115  53 53  0 7 10 
+1188691632.6 CONN_DEL 62.174.177.50 7774 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQPXhYUDgXymqNjrP8ArGRuSCbf6DpEc0Pupcdv7AbEwJ7nyst2e+ygo1dbe09sO6hRgBA+E7hlAHGD8 overlayswarm:connection lost 
+1188691632.6 CONN_DEL 24.204.207.97 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAV8MyqshTN8Wxy1v265LZ9kqGW6i7vSIxzHUXBKAURWEFlKZI1eyRzEPkp1Vz//gsHNf0e5kv0e6+nb overlayswarm:connection lost 
+1188691632.6 CONN_DEL 186.124.149.229 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARmTFzBvLQidmznT+pHspdnI3KNMeVa/hSKwver8AbZHnQAXyGU67GTXyXucorkkYmJ8/1kNLbopK8qE overlayswarm:connection lost 
+1188691632.8 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAX/1T2dVBW4GPes6jUjOcPGeC9f276h4hf8B1umPAdY1I3aE4Qcqlm9A6ZIRfIfLAIiw8/3tLEB/XPRw 6 0 {'q': 'SIMPLE guns roses ', 'id': '\xda\xc9\xeb\xada\x8e\xc1\x85\xe7\xfe\x03\xc2\x1a<}<\xecE\xd2\xeb'} 
+1188691632.8 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAX/1T2dVBW4GPes6jUjOcPGeC9f276h4hf8B1umPAdY1I3aE4Qcqlm9A6ZIRfIfLAIiw8/3tLEB/XPRw 0 'd1:ade2:id20:\xda\xc9\xeb\xada\x8e\xc1\x85\xe7\xfe\x03\xc2\x1a<}<\xecE\xd2\xebe' 
+1188691633.1 CONN_DEL 129.14.244.109 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQU7TpKlXrk33moLRetYsEFY1eXUS7C5X1nAoniWAfPATM6trYS6tkiiaKzXwKfNc/DxZVqjVax7pJ8X overlayswarm:connection lost 
+1188691641.4 CONN_ADD 241.180.33.115 25456 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIVlP6RU+MGlzhmZA4/XvRQR6P+UCaRij18zmbEAQq3yQ5Maw4auxmSKhIMdJHCpMlxErBM/vGbNS0Z 2 
+1188691645.7 RECV_MSG 96.240.226.114 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbSyl1+6YL8owBx6skLSnkH3IAUa2OrgXyoXo1ZgADW/JjLfyEgxQw9r6QgLxFIsSfQ9qu+I9E9K+Ti2 2 PASSIVE_BC {'name': 'tay', 'taste buddies': [{'ip': '154.17.199.101', 'age': 81349, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbUhgVAKnkPtoWVqvv/uW6cedY6qPjktROjHCnEEAHzomba4JivwY/z9GRu2nJrflfhow0jKM/nMrHYp', 'port': 7762, 'preferences': ['ojLPjIksAiA74q0Hx8jKtieccWs=', 'Gz71Urr+AsGkpeg8mx/H61UnNlE=', 'qfIoGQN55QmPyXJnHwrqT4Ww120=']}, {'ip': '175.159.113.240', 'age': 81349, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAez6hic08tdrV8I264KwSP7IktF3QO9URxMAs2lPAMsyir5ItZfj+bHxnLcJU4Y4FZOv7b85RXdrPIPI', 'port': 6881, 'preferences': ['FOyXek/csbKbkKRFXW5/hBDCMWY=', '8BUWcGnB3aKXvh6XM/ms8k2egYs=', 'UXS6Ykwsxd6g3FvmiKvaYfziTlc=', 'ara+K7RcY3RhtU/VdFv3f+iJMv4=', 'CPzBmAshCqYldDMuD/pLuEN1k5E=']}, {'ip': '152.157.168.94', 'age': 81349, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAap3f1eJEm0yBzAjBTF3/tshIbSM9gOOuA1QLz5AAKnV+l38yaMEgm9IfvOic28NFJAYjnN8g4fDQzl9', 'port': 7762, 'preferences': ['/lg5dpbJlDKFLaFmlJmXN1wDjyA=', 'n68qQco2KluW8YTNRrtV+D/jpbQ=', 'L6UCpT4nBy5Eq2M5/Hl4lIzsYLA=', '8vtbWf+xjiQYqzCF46yVjr9JEDY=', 'fTa0KJKmRNlvxJzZAT5E1MKhuts=']}], 'random peers': [{'ip': '38.54.180.86', 'age': 9246, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACKlZDfKcsXqCSXVYxPWAxUwh2ojqEqhGJZHQWjuAGncRbIjJAlrf23YN0wXTWBjOrO2kIU28r1sU1YL'}, {'ip': '146.174.4.5', 'age': 81350, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACQkfejZ0TwBBPJ6ACzFHFyZz94x/9Uj4uD/fuF5ATMYlYmmHtnltjf0UAQbb4s6QcxvyHo/CaNS5C99'}, {'ip': '113.10.11.116', 'age': 604, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdyjuz7nHIxhV2o5pJSAlvrgK//881C6CC1cmAyeAOJ9J3ehcYz93hYUsWuKlFRA85ei5Q08TU25ohGW'}, {'ip': '209.128.104.237', 'age': 5264, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAe5pP+TgoFn06YMLXb6c+DRR3A+WxPkzrX0zlDSvAYSH+0GwU0Heuu36lSrulk/GMRDAUpW9K66LwLrE'}, {'ip': '127.23.94.120', 'age': 9366, 'port': 6881, 
'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf3D540LL2qZPiT1QOT83uqPlTy05xiZX4g4XFPMAbMmY9FIXXEP5hIH1VCDuE7dqiQ4MpC8SOgkFGp/'}, {'ip': '69.154.219.170', 'age': 10847, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY76LW/1wExITRLnZiBSe5QqSikJVPGC2GO1/Vf4ALXguAPMa/ZKeaO2PRoY2H3sH25tHhz6p25KYl85'}, {'ip': '173.201.167.29', 'age': 164, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVSMe8vxwF9Rhi2l4cd1TA3fm6BviDz8GspgeRP8AAEig9fyh37RriYDMOoxhVvGfKDTqLsrvlnTOmBc'}, {'ip': '206.141.93.2', 'age': 7233, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAO2xz/n3IgFjoO7nMeriYE8JQBJnqWUxylXhA05DANhjHR30xpH7iMhECwVSI/5vBA5HNbI4rxd3RI8v'}, {'ip': '224.249.213.102', 'age': 1871, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHUUh32e/WOPiRXnDXq2dPXqDPOIkyJ0WoYmF4ncAQlQu0wyz8jRuaG35Pm9bxEG+qxPDslbbt36Kaqa'}, {'ip': '161.168.161.193', 'age': 7067, 'port': 49532, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEDTwGVhPz2UvfwHfGsd+zvOE/QGn3+gNia7YWehAQs9cfDgwMQZ9udUpW54e1vyn9F8yARLROPs+d5u'}], 'preferences': ['Bpv5qrTyBF3Ffa3f1bn2XqMIGE8=', 'RtExNaLeVrlu00rGfRAt6JKpE9s=', 'ciPCqDG/O3YVqF+mCTRgo4lQuig=', 'uYWsJWp7Y88PoMtaQ2sl2Qn0h4Q=', 'Qrrh0YLYtRIB9rJ2SCajSzoiJeo=', 'UXS6Ykwsxd6g3FvmiKvaYfziTlc=', 'BgLgxWfQ0Xl3U1XkWQGqswQCYs0=', 'UEQC/aX+4bf0wJ9wMEBWYJcd+wI=', '10k1ZpzYX6CmXHQn1zmx/csAUKs=', 'n68qQco2KluW8YTNRrtV+D/jpbQ=', 'k+rZd3YOniXmwtgLbUmrqahhkiY=', 'j1SltHohro1OILITdRyHF7A673o=', 'fdXJ5gTgVcQm7hIi0SCo5lYQoog=', 'xk0hqaWLdB/0tXll1GroO7NI1zk=', 'CbOrdP0fywhrJRYAhgK0+MCOYyI=', 'XMGN/D/74LLC21l2+oixZAhsm1Q=', 'eeW59gXLR8CrrsA2vk5T7rs8Iws=', 'jT1LarAS22fF3uGyux36mahsmRQ=', 'QwktbGG4JM56J3Jql9Vt4r81fM4=', 'qfIoGQN55QmPyXJnHwrqT4Ww120=', 'aR/N4MgwvXPv+ZUFfM443k7PP34=', 'Yk7tT2otVkXVa8pqHbOS7kssDu8=', '6NE+7ofcCs+dv34YCtJBWT5Rs0E=']} 
+1188691645.7 SEND_MSG 243.161.45.170 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbSyl1+6YL8owBx6skLSnkH3IAUa2OrgXyoXo1ZgADW/JjLfyEgxQw9r6QgLxFIsSfQ9qu+I9E9K+Ti2 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '132.248.226.86', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '48.123.177.25', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '237.157.8.165', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '149.81.162.51', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '217.6.119.55', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '10.3.82.94', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '237.49.95.25', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691646.3 CONN_DEL 58.46.111.226 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAefabVLoM/fghtRi3VgTmXP7pFo/5NtunUVI/H+lADWKS5v3IEUyPeq7Erj+W7O2W/pd/3D7Dm/VuAZ9 overlayswarm:connection lost 
+1188691648.1 CONN_ADD 135.134.167.52 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYMxQHYbUhfyiO37gmv8GfiZM2dnSALMTcPLsDOTANBd5AuKyQUa+Phz5BVdu41rXb+lDgH1UPpXsi1S 2 
+1188691648.1 RECV_MSG 24.6.71.74 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYMxQHYbUhfyiO37gmv8GfiZM2dnSALMTcPLsDOTANBd5AuKyQUa+Phz5BVdu41rXb+lDgH1UPpXsi1S 2 PASSIVE_BC {'name': '394a376ad21248c', 'taste buddies': [{'ip': '232.40.190.105', 'age': 1031, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf6gOSE2js+HPOG33c9tn/WP7+F/a0BJeEVWGmF1ABugcVPbltse6zxC7V/bttMZM8E1m4a7uJRSr9o1', 'port': 6881, 'preferences': ['I9ZKIsiyHQWC/gvZuqcEfn5eDWM=', 'zA2B3YJX3qRFHSnBngzfPVXkiCs=', '5HfN+xTfHCX4L+XxiYLH91HvRaM=', 'vOMm0qwkhH9EyejGk/G7i6XiF1w=', 'goh4jLdr5VH9Lh+xvveLuh2iDyo=', 'iRcreDKum4sSzHbImchtOW4iKPA=', 'Imbre9VEBWcA3mHIimBss3SKVD8=', '/M/Oj+K8x3aBHw25zNb+Yf2zA4k=', 'SOZakjanRZIUqQxmOqIPlgI8RK4=', '9eEcLFi7TtSYHXEIrOFRBe1dCIQ=']}, {'ip': '65.136.68.113', 'age': 489, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf214D+kZDAHVW0Ftof2QedA2+DRLF5R1n4dc5zyAM9AkzvKH2ykmHu+9Zbpks82dFEoAVucrsXU1t1Q', 'port': 6881, 'preferences': ['R+5wk1xcl/KWwQn0gm9WG1Ietb4=', 'kYl3S71swRf7Qf2emdU3wzkV1UA=', '6JC8dmcio/QPWWYVmSKEfbI0kMw=', 'F8YfxgzqKHALFdO3osTd3/cfu/E=', '3e+TKmoK/Vh+6jztuN5R45FBQ/Y=', 'wj/+o8oDGstoC+l9X0iNBx/rCGA=', 'iLNyoDMQCgqLA5Obe95pCYgh2n8=', 'Lz4H68pfCtI0kABPunVaLZ5oRS8=', 'cZdtVoZIRqz5NdktoAANY65cVxM=', 'LMUirDQ0zmLmZQwKZ+icotx8jWw=']}, {'ip': '210.109.35.163', 'age': 5808, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyoXUE3vMFY7hLlvilbjrmXvPhypwmCeQnIeZiQADLizv7r8IlquXV8OoPXDaO6Z0gBjLTgWgFEpKTn', 'port': 6881, 'preferences': ['liQh0gMNL7kqV3++aEFIjU9Zjgc=']}], 'random peers': [{'ip': '60.80.240.233', 'age': 16926, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYpbHPXxX8q6x5S75pU+o2ngECKLQWCQve+GRWJhAe7IZE1+9CtvJ7coOZvWT6GCrB9fXbuZAwdXsDR/'}, {'ip': '254.252.131.105', 'age': 2015, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdVXYqcfHeTctwsEphRP7vyp9l4JAO69bd3ssoA0AKwaqpAqG9okkCKlhW3GnAkY09tIvSU/yW/7ltud'}, {'ip': '76.150.189.139', 'age': 621, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPyFMVgJ0OOOKLahblslUhTUsuCbCr1QfqMIe9bDAZJuOlHOESm/RPWqFJWEWikIC+MgjzwKnferMEso'}, {'ip': '162.189.244.192', 'age': 2206, 'port': 54545, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl'}, {'ip': '82.91.94.251', 'age': 1028, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAS3vtpdZIcQ4ctZ+lI6ggWBSc8fdYXWMVQfjUEMTAduW5+WKar2u5o4i8vTRP1l/w43Nl1U8UNMD7bGj'}, {'ip': '104.244.190.224', 'age': 6520, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKQMsYVk3ngJq2eCLHOFrkaFyyV9ofamwhRS9fOcABW4yq5AOxuvOOVLYhA0BCQ1yMsisEH3UoUONKJ9'}, {'ip': '182.28.133.179', 'age': 1717, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABvk3rSgcX0QL2F0IYQv0PsHy7Ux3L12ReGOAhSEAJZBw5sEo121y23Et6vKsAmsy9N/ttSZQQmzweFy'}, {'ip': '75.117.247.172', 'age': 13667, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADY6NJhG3wWJ3Q67IvpBzxXndAGSLYub7+ErC6JqAWUA1z/3YnGgKzdgB2e2HW88i6keKUW0yqHzc5jU'}, {'ip': '85.147.251.190', 'age': 13458, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbnyikPsiB7d99RA+gCwdZdzx+nzKlUwOQHTyM3bAbCu23yucTKoEct2NDMsGJlDNjQzIa3AboWLcqPm'}, {'ip': '255.4.56.52', 'age': 1216, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYTY2kk0UBQtoOAH+gkSfafcjVnfI/Q+Co0qzamvAPJS+g9nAb5PaK7ZOMTdbEqzbVR0OEvUqsX9H2ot'}], 'preferences': ['KYB+B1LJ7H+f5wJ+/GsHMmQv9+A=']} 
+1188691648.2 SEND_MSG 163.238.85.4 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYMxQHYbUhfyiO37gmv8GfiZM2dnSALMTcPLsDOTANBd5AuKyQUa+Phz5BVdu41rXb+lDgH1UPpXsi1S 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '248.43.15.168', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '135.108.31.150', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '61.60.223.254', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '197.222.212.3', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '94.108.62.88', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '179.55.185.130', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '231.178.129.112', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691648.6 CONN_DEL 100.210.231.121 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASbsn+HFLsP/zArB48U7IwrYkEpl5RDyuaja0rXCAefyD+89vM2/0oYIdeWIIg22layjXn2qN76RVd6H overlayswarm:connection lost 
+1188691649.2 CONN_DEL 40.17.3.208 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFHX07bWUyt7ZmuOZ/FtmkxQ+JqxstkI6hXUSR66AYqrEBe2ydAyKiKHPeZua5+DdLUkCXiip9DyQaZy overlayswarm:connection lost 
+1188691651.3 CONN_DEL 166.62.104.83 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU509SdIMeqHqYGqlIn6rRasJnjAj2krGixVEM11ABR0xV88ZJIGUXW4ALfpwMCBu63O27xhsvm6AkkH overlayswarm:local close 
+1188691651.9 CONN_DEL 251.85.40.29 25456 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIVlP6RU+MGlzhmZA4/XvRQR6P+UCaRij18zmbEAQq3yQ5Maw4auxmSKhIMdJHCpMlxErBM/vGbNS0Z overlayswarm:connection lost 
+1188691653.7 CONN_DEL 2.133.142.49 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACmQbR3WQrGc3EF6kbVZa1gRKelPtT2eexDEUfVwAZgn2sXwZXxhoEYheYQMX3TimnaYLzmBfxpDaHAx overlayswarm:connection lost 
+1188691663.2 CONN_ADD 5.137.200.131 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXNTe1C0wQ65HrmCNN6loRK2neW2VJyRg/Lr4qzLAXbudGqc5L+fJU5S3+rinBdRoscSZsgCRuD92TeI 6 
+1188691663.8 RECV_MSG 172.25.92.84 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXNTe1C0wQ65HrmCNN6loRK2neW2VJyRg/Lr4qzLAXbudGqc5L+fJU5S3+rinBdRoscSZsgCRuD92TeI 6 PASSIVE_BC {'preferences': ['+7a4b2R0raRJ6+AWM6OlFlaZA4A=', '2AoqrKv0ViKBW47J7GvXblXRap4='], 'ndls': 2, 'taste buddies': [{'ip': '185.154.175.40', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABMGq8tufLavvUvtviS5ZjpYUc6aXkbLFLBl2b1eABiFyMW/elxNXBOOJWRCw2SUZwi2OyIInxBG9jVN', 'port': 6881, 'similarity': 431}, {'ip': '171.86.37.230', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAasSiWmjtEQZzoq2Gps6ksigoU+NBE7zgSKCDkgfARoGe3e41eIEoGLEE+knnqEPYCTSZRYEqtDKBO5i', 'port': 6881, 'similarity': 216}, {'ip': '147.88.193.116', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdk9/MxBv3/VLk6Yac5oKmqQmHpha1fXfZBitY7MAA+nNLyx9ET8B6IvYZmqiaBJ4o+codVvIht6MHH5', 'port': 6881, 'similarity': 92}, {'ip': '165.179.220.123', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXQ2RSuNJmxtKty38HbL7FvNRRYHt9QZYp9BMEujATy2BmlmLLix1dqgJHv50cRHKAv42l9t2dop3QoJ', 'port': 6881, 'similarity': 79}, {'similarity': 23, 'ip': '6.34.193.132', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM8NoNDi9KBQrHx7CcC++KMGgcH98yVf6EIbPlvGAWIPvkgvcxadhrBILahZrptxCZAX70eRbCfbTn/w', 'oversion': 6, 'nfiles': 183, 'port': 7764}, {'similarity': 15, 'ip': '173.166.1.128', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFQpmOfnHdzyjwxeD2Ttl6MMWgw2UdFezyRA710MAHp0kp83ov7y9omqPYC+SN25z5NcnqbzogNSTBjI', 'oversion': 6, 'nfiles': 295, 'port': 7764}], 'connectable': 0, 'npeers': 1750, 'random peers': [{'similarity': 8, 'ip': '140.62.211.178', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ75ii63ba1zGvHd8Am1hLujksc5yc575R4T6I1RAbNKjt162TB/24WyzXTjqVyCHglFbBlKX6ToEuNl', 'oversion': 6, 'nfiles': 266, 'port': 7763}, {'similarity': 8, 'ip': '56.125.235.17', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARXE5pzUbygCGSKSWw/CwU+YVk/DtdghMV2mN/KgAVepoPLdlEeJMsmyaMAQ8bDDSSN3S5I5H2Gvy9HD', 'oversion': 6, 'nfiles': 243, 'port': 7763}, {'ip': '192.118.106.114', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOCdNvgsa7J6sRgq//Rvh3w9WDKfIjVs5xuxfa4GAFO+2HUkDELPnUA+xCz3GHqNsONSINtVHbUJQjb9', 'port': 7764, 'similarity': 0}, {'ip': '241.135.3.62', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'port': 7773, 'similarity': 0}, {'similarity': 0, 'ip': '143.254.203.120', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZnYqLnt6+xlmselW2zW0ArOjQBauEaIbOdTpilbAXosRb4GKDWEDkPfr1R5/iDYOtsdB1YCZJOMhRsz', 'oversion': 6, 'nfiles': 418, 'port': 7764}, {'ip': '77.152.44.154', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATOEUEVq32E+s2GC1uUd93oKUWM6W5zGwvArUBOuAO4gPMsKy1wUAOkl9TNDH2YFaQEPBnLydiAsb/2d', 'port': 7770, 'similarity': 0}, {'ip': '4.133.144.29', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACU+VZoqIN1q1EK8zo6qtmIJidddAJwr85benj5IAHYAy39++m8Y5EFEWOqAVYDC+7QBKKanFJ9SDPni', 'port': 7774, 'similarity': 0}], 'collected torrents': ['g\x89\xc9r\xac\xd6\x96\x1f\xc9U\xf0y\xc1\xb1`\x9c\xe5\xb7~]', '\xacV\xdb\xd3\x95\x1f\xe7\x14\x92)i\x99\x1f\xea\xab\x0b\x8b:w\xfe', 'A\xfc87\xef\r\n\x88?ew\xdd\xc4\x98\xe3Pj\x1e\x9f\xdf', '\x7f-\xf2ff\x8ci\xb6\x82\x87\xecTY\xd6\xf5\xfe\x150\xbf\x1e', '\x86\xcah\x83;>s\xcbQ\xf2cxH\x14!_\x1d4c$', ';\x9f\xafe\xcc\xce\xe5\xdf\xcd\xcdE\xad+\x0b\x9c:\x19\xf2\xbc\xa4', '\x03\x08\x1f\xd1\xe9\x19\x1c\x05\xbe\x1f\xe6u\xacr\x84>\x7fA\xde\n', '\xc1\x01\x80\x10\x07\x1f\xdem\x17p\t\xfc\xd7\x85\x8en\x91?V\x83', '\x90\x1c\x9cDa\x17)=\x9bg\x85\xcb\xb9\xf3\xb8\x9f\xb5\xa3\x05h', '\xe7"p\xa3\xb6k\xc0&\xe0w\x15 Fx\x9b>\x95\x99\x1b\xd5', '\xe6\x08\x8a\x85\x18\xe1\x1c[\xca\xf5\x7f\x16\xbd\xc4\xd6\xf4\x83\xad~t', 'P\x02\x10\xdd\x84;\xb5\xc8A\xe8\x04\x18\xac\x8d!\x943\x97|\x97', '\x1bq\xba\xd2b\x8c\xab\xc6\xf3dlu\x13{\x95GzJ>\x7f', '\x10 \xde \x1a\xa2\xd9%\xdd\xdbR9\x1f@\rS_.\x19_', '\x97\x8c\xb9C\xa8\xebu>\xc5Cg\xd2:\x17\xcdq\x94\xeb!\x84', '\x9f\xaf*A\xca6*[\x96\xf1\x84\xcdF\xbbU\xf8?\xe3\xa5\xb4', 'x\xe5\xeb<\xcf9\xc7\xefT9\nj\xbe-\x11#r\xc3\xf2\x12', 
'\xeeX\xacN\x1c\xc0\x1esq\x9b1\xfa\x8d/\x05\xbb\xab\x05\xa5\xf9', '\x9a@\xe5\x18\x13v/~\xee\xb8\x1e\x9c\xea\xe1\x07\x99$\xc1\xf1\xdf', 'wQ\xe6Z\x0f\x05\xf9d\xe3#\x0eBZ\xd7~\xa3\x91^\x91\xe5', '\xb0\xd5k\x955\xf2\x1d\xf4\xfcn\xdf\xaat\x00^]\xd4\x9e\xd6\xef', '\xfd\x92\xe4\x0b\xc8\xf6A\xf3n\xfe\xc5D\xbb\xb1\xd1\n\x9d\xd7\x1c\xbc', '\x92H\xe1\x1eT*<6\xdb\x8c\x1e\xd2\xbd\xb1<U\xa4R\xda\x11', '\xd8\xb9\xb1\x89\x8d\x01\x0ea!\xac%\xc3\xfa\xc6A\x00z>\xd1\xf5', '\xc0\xf5A\xde6Db<\x86\r\xd6R\xcfDxc\x15\xd51\x16', '\xd9\xb3\x03\xb9\xdf>js\xcd\x17\xd4<s\xd8@\r\x9f{\xac\t', '^\x85\x850\x8d-|\xc9\xed\xa9\xff\nc \xd6\xba\x80Q\xe3e', 'N:(\xfb\xaa{]\x9f\xa3\xe1[\x8fr\xac\xab\xd1\rN\x16\x83', '\x9e\xfd\xc5l\xd5P\xf7R\x16;\xbc\xd3vO=Wl??s', "e\xcc\xd2\x8f'\xb6M\xfb\xf0\xa8\x03fX\xce\xf9%\xd8\xefN\xfc", '\xee\x1c\xbfM\x1a\x17pz\xe5\x14\x02\x83\xcf\x11)\x18J\xeb\xc7\xa7', 'a\xb7\xd6/\xc7\x07\xcc\xc3Zi(3m\x85G\xc7bw \x89', '\xf4\t`\x873\x81\xd79\xbe\xd1v\x7f\x85\x83{\x93\xd7X\xf9\xe1', '\xd0\x82\x1a\x9aQv&u\x02\xd4\xe7\xdcC\xc3,G\rY\x8c\x1c', 'I"\xe1=\x05Zv\xed\x9b:9\xa4\xcf\x12\xd4\xcc\x8b(\xf1\xe8', '\x96\xca\xd1d\x84\xfc\xf7\xeej\xd5\x157|g\x1e\xbfB\xc7\xcfD', '\x81\xd5x75\x93H\xba\x87o\xc2S\x89j\xb3+L\xc7I\xce', '8\xda\xcb\x99\r,\x8f\xfd\x1a\xf3\xf5m\xc0F\xb2\x1a4\xb0i6', '\xf0\xc7_\xf0\xee \x7f\xc3\xe6\xf2\xa5\xc8\xfb\xe61]t\x1fU\xae', '\xa0O\x9b\xe2K\x90`\xa12)\xcf7\xd7,s\x10\xc2\xa7z^', '\xec\xf9\x9a\xe7`V\xd1\xcbX\xeb4A\xa5\x19\xaf<y\x8f$2', '\x17\xe2Y`\xb2\xfa\xcf\xee\x87i\xe1\xfd\x00\x97\xa0\xf9\x98\x84\xe8\x10', '\x8c\x190/\x01\xbe/\t*\r=\x04\xbf\x13\xdf\xfc\x03&\xeep', '0\xf7\xad\xe7\x95\xe9\t\xeb\xdb\xc1\x11\x1e9\x19@ipUb\x17', '\x8a\xe3\xb2\xe2\xdc7\x95\x01\x9ep$\xef:\t\xbc\xc0U]\xa7\x14', '\xc77\x8f\xb0\x04bM"\x00\xd5\x8fIG\x826\xf2\xdc/\x1fz', 'RM[\xf7\xf1\xbeXG\xccU\xe8\xd7\xe6\x9d\xab\xf5E4\x0f\x9c', '\x98\xf2*\x12\xb9\xbc1\xe5> \xd8\xb1\xe5<Kc\x1c\xbap\xb0', 'q\x97mV\x86HF\xac\xf95\xd9-\xa0\x00\rc\xae\\W\x13', 
'\xa8\xcf\xe1J8\xe0\x06|\xb62\xf0|\xf75\x96C\x9c\x86\xb3\x05'], 'nfiles': 220, 'name': 'Richards-Macbook-Pro.local'} 
+1188691663.8 SEND_MSG 200.105.249.183 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXNTe1C0wQ65HrmCNN6loRK2neW2VJyRg/Lr4qzLAXbudGqc5L+fJU5S3+rinBdRoscSZsgCRuD92TeI 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2074, 'random peers': [{'ip': '58.122.119.14', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '19.71.252.162', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '38.176.178.46', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '209.252.4.52', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '94.228.65.1', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '104.2.50.42', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '100.19.239.98', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '59.133.105.24', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZa2rtfRshNUkCalS+BQqyC2TwS+JLOLI45JrJBgAEmj9qaZ41vom2nWVHFxyUgw97oKUW7jV5h8bakc', 'port': 7769, 'similarity': 0}, {'ip': '73.34.203.222', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARXE5pzUbygCGSKSWw/CwU+YVk/DtdghMV2mN/KgAVepoPLdlEeJMsmyaMAQ8bDDSSN3S5I5H2Gvy9HD', 'port': 7763, 'similarity': 0}], 'collected torrents': [], 
'nfiles': 0, 'connectable': 1} 
+1188691665.4 CONN_ADD 155.192.131.109 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeoiKe9jBhBpyCm3kI0zTyoJvASR3VbRTyaOEgtkAOWlcVYazntSLSIvNtIN7JdyMt77LuZA0CfkaJyA 2 
+1188691666.7 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKgm00seD977usBtMzTjsn0jeAJexqFnAzYImOejAZ2WMvOygsRYvi8gnUmtiPN4utntmADnhkHf9r+k 6 0 {'q': 'SIMPLE scrubs ', 'id': '\xba\xb7\x17\x1b\xd8\x00\xc6\xc1\xa9J<\xc63\x0e:\xbb,\x88\x9f\xc1'} 
+1188691666.7 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKgm00seD977usBtMzTjsn0jeAJexqFnAzYImOejAZ2WMvOygsRYvi8gnUmtiPN4utntmADnhkHf9r+k 0 'd1:ade2:id20:\xba\xb7\x17\x1b\xd8\x00\xc6\xc1\xa9J<\xc63\x0e:\xbb,\x88\x9f\xc1e' 
+1188691671.0 CONN_DEL 135.96.89.223 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHjTifzjoY5XlNZyaMJQS+xAICdHUQ8xytBqUDOJAXWg1Uvj+Kz045dE2r100hBDqhedaPqYTKV7Sara overlayswarm:connection lost 
+1188691677.0 CONN_ADD 148.245.100.133 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXf+XGxwWHODklf3TPi800j7k/YXDLLC+qaU9CuIAV2E4jhbbsrcMhGR4a49bQJwv10L1QQz+8Ulc4DF 2 
+1188691677.0 RECV_MSG 23.90.23.197 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXf+XGxwWHODklf3TPi800j7k/YXDLLC+qaU9CuIAV2E4jhbbsrcMhGR4a49bQJwv10L1QQz+8Ulc4DF 2 PASSIVE_BC {'name': 'CABYRDIV', 'taste buddies': [{'ip': '242.142.22.158', 'age': 5373, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQjlRZC9tbyIgSekP81Ko/AthJ/L/RK9m39PclUYAPk785HpWoKcKJYO/LsqdjbT5GMRxixRGN2K6nAG', 'port': 7773, 'preferences': ['l6hcnQcHx2K0FiTkw4eXbpPMFOw=']}], 'random peers': [{'ip': '108.172.76.228', 'age': 16156, 'port': 13023, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFwQm6Kz3pd0LkUPM04HWtw7ZxlyErFOwNsfL97cAS4A2GJ4wJjLsNJkHswEBwEz9WfnBZLus6+11SlG'}, {'ip': '204.159.32.52', 'age': 7561, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOMVrwNyA8GZ8K5DzGxuqbedeuAU0HjxxoODtYbRAWhHuVPU5QDSnwTQumTkHm1AesW4Uyos8tOvSrEe'}, {'ip': '85.200.48.181', 'age': 452, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXfbopVjlrJ/h97LKKI7vx3TPNtGFZeeAVq03TxjAde1Wd8lQgYrkvP8oEOgvhXzYfCDE8B+/NF6nxOX'}, {'ip': '57.144.15.155', 'age': 4262, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdBKMDxvRAx7KWkjpCYz15zGpHDV7wFVvb7yMMUYALwGT80kd1Nu639WfYg0peaqsXZtrTqiqp/SZTx4'}, {'ip': '81.135.179.177', 'age': 319, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeBYMcGakBzO8/uuVdfP4rRTyqerRBDthAaer6DJASmdeSBMo4rhRLqJGdrEp/y7NRbXl5MZthbUt4+k'}, {'ip': '153.48.136.136', 'age': 356, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAS5ejIhiZLAIbeXPLP5TfHct+6aAwSBKfYq+qvkjAeyhHWAc2ijOZjLv+0ksFS1RVdxI081P8rKLBESV'}, {'ip': '156.85.212.0', 'age': 11559, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFTI4VImgg2T8SXnFqTppEbIx9qdnY9J1QbPhr3+AYROo7pkoFrTiEf4MpbYDIkVB+7Yy9gD93O9SqcN'}, {'ip': '131.141.185.16', 'age': 9586, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXFhjTTlNOeEl9g1Rhpp2tVlegUkA3g+BXBgKf7PASXHa7w5Hb09V6YMC5DCE2R1n2y3QRr1VAiL/T4k'}, {'ip': '211.157.227.252', 'age': 3312, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZjAdc5/Kug3iaBMTpvUeyc4apDYUUcHRdpMRXrbACcxxGP+BdkyTWIiSgM6vD0cMKEDgJCrzUP3uhxy'}, {'ip': '88.246.62.159', 'age': 2124, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABBo9KJClmZTMxPbfMb7V+pdwI0y8hVfcthgM+3kAWinPHffSweHrxNAhNeoCFa1qUeDOgcuBFpOH4uV'}], 'preferences': ['Y6fbU39H0tBm2+TdWSTCcX0b4KY=', 'oJbcnYKotAL21xha1JPxPHNreuA=', 'nZzf1k3c0TOfVktDZzhj6V9Rkik=', 'Ca0k+3UXh0WcMfasTZa92RR+LjA=', 'x3LAtNo1hG+ngHXWs3mOV6ts2DY=', 'JqnC5SkLj5gWZ5yUQCosJ/ia6qQ=', 'MqQ3DmpuyigPGxwQpayUpPtjqo0=', 'l6hcnQcHx2K0FiTkw4eXbpPMFOw=', 'CtzhhmjHV6GJmsvLwpyUSHSyhJU=', 'cR9uGbfDx33lyT8n3QCIU1rRZuY=', 'ZmgvIR04t5TB/7Cr7/GHEd7+kCU=', 'ffhLZ7mKVUFTWJmexbmcLSszIeI=', 'A3+WmcmCNzDkXUsnURby0QhY3QY=', 'hOQvCFV6LpHBipzelb6VCVRsV/c=', '14ueX+crSHyBQku8psBreUI2LYA=', 'S0fyGuZcYz2VtVESeUZ/5F6beOs=', 'uAlNvodhgfaJK5q1fVfNatXmzUo=', 'VePyzDn7TiZXqGrRjiYgNnxiF10=', 'NuuoFUpS/ZMxuV7/8XV9Rgcahck=', 'lmzzp5NNhYn542arS+bRi9LBl+s=', 'CiTpe7Ca340jZoDdtz6bp13AKuw=', 'UX3kXR/1hY+qrvFhPucjEv035G0=', '1Fn4fsQiO8gLNrDfdqoEDwnQSXU=', 'sknW1KydTZi/kORlsO4IdlX2Y+w=', 'll0hyAKeB5wlYnPdB3AhDBEbyeU=', 'm3RKuKLu/hnFP5sdT/u3V4Nw+Zc=', '0MJh1sO8GK6CxriWCz1kf2r8tKQ=', 'tQdovFjPxcZC4aZThxwSLi9ZodE=', 'gAOFIotuyPFaL7UNeB6XPvBADso=', 'owVocSHghWzLdbgKTsM8xFB7Csw=', 'njLF+H4nryQpQ6MtFq3JDlGRWM4=', 'XAFwIDeX8ykPGaQwSu+qfmkQhM4=', 'MLnnFKqgNQ00/+GtGgLQIHtF8qI=', 'ZZJeoJYpoUcWvfNXT5a01+UWyxc=', 'tSp6FcFtseq2uNzeoNTICazcGyU=']} 
+1188691677.0 SEND_MSG 40.4.109.184 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXf+XGxwWHODklf3TPi800j7k/YXDLLC+qaU9CuIAV2E4jhbbsrcMhGR4a49bQJwv10L1QQz+8Ulc4DF 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '68.41.196.219', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '32.66.7.190', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '90.110.250.98', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '213.21.100.114', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '202.137.102.90', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '134.47.93.221', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '215.212.24.51', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691684.3 CONN_ADD 166.238.146.123 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUEaAkyJjQkigTeU4yutiqWyWxcAl074V47xMc34AT0kPd4eyBitCOlLaK/HqTfcKF9yPDYiOhpxeapu 2 
+1188691684.3 RECV_MSG 163.16.154.117 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUEaAkyJjQkigTeU4yutiqWyWxcAl074V47xMc34AT0kPd4eyBitCOlLaK/HqTfcKF9yPDYiOhpxeapu 2 PASSIVE_BC {'name': 'RED', 'taste buddies': [{'ip': '158.132.19.222', 'age': 249, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN4brEaxqCsNMJjU0KNfxhz7SnCkWJ/sVlPk1VUdAZHy7M/5Fx2RBf/u50IJ5JV6kVuBF5Itfl31ErAJ', 'port': 6881, 'preferences': ['WEUdg/y1zH29Nj4wzRAqf4/zbKY=', 'KQgPeZuoaMVW1/tY2F07WTv95pk=', '7nzgU3er+THxwWs2f4GQoBXdlHk=', '/DIEjMAhcHBgKk3kH+8Wh5bf6tc=', 'iiEcTPdDCxcr6GEOSEaq6qWEKPk=', 'zweVcS/pXR1LfTakqnTmrKkD4XI=', 'K1s3GUQ2W+YWsvbTMnPr9uWMIco=', 'SFjzsXbhoFqcS37KVP+tB0JVFpI=', 'Gz71Urr+AsGkpeg8mx/H61UnNlE=', '0dM1j3SyfbPqURTQkOeFF8xuavM=']}, {'ip': '142.204.194.78', 'age': 4516, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYGBBbr0q4PMofNaXdVuORWeE+60mnVFQEF4ldGDAOvt5ULuQHFTJdZmuHK2kxmdx53VwJ156pMbCl6O', 'port': 8921, 'preferences': ['Gz71Urr+AsGkpeg8mx/H61UnNlE=', 'pBW1lPqidXb6TUtyXlc7LaYcAbc=', 'zhZWWV0Vwfp/rgSTudPJRjkJcnQ=']}, {'ip': '199.3.76.240', 'age': 22186, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM7ifOEil8LgBmwHpojGD7imXbaRR/gB2CcO7CVaAJ+ubd4wP+OdLZ9ABt3Qq9BB3df2sYGGWrFu8riD', 'port': 6881, 'preferences': ['AnvUtfiZYryy4x5ZiMNhb0V2CVE=', 'Fgy/Gg/MyzCPh4kXlRoyeVJlStY=', '7fnYMwdEW+xMbf+AfeSLQ9yZPkQ=']}, {'ip': '112.5.86.193', 'age': 190, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaO5yOZui8qO2bq279/oonTl0BI9Unf5B7sHuAC/AIS5LwKJx4wGowKRZmss30QcEr9TRVHtwAmDHDeM', 'port': 6881, 'preferences': ['wO0qcXnbSe14B2Tvm2GBensZ2/I=', 'K1s3GUQ2W+YWsvbTMnPr9uWMIco=', 'cZdtVoZIRqz5NdktoAANY65cVxM=', 'Hac1PXhVg5w4f3B5nf4zJl1pX68=', 'EaB+ro9U+1wTXNDCeAKqWKyi1rM=', 'ZBGltRBiUf1O0kESNLsTLNNe9wQ=', 'uRt8q1KhdZIdc0Vg5BV3i3reqyg=', 'BanBBUOKAiptQeiNWEbZb7LG7A8=', 'gPgEXINUecfDHaHyBZYJscNv7+8=', '45DV1H0/FzxRqAo53CnV8HyXbic=']}, {'ip': '119.221.64.5', 'age': 22240, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAp1ft+W2IAUKYYRlufThQTi3OpwG20QWXokvrChAeUCE+1gL6kZWyXZzX78v8lWxQ3sN1bZXG0E9Jne', 'port': 6881, 
'preferences': ['cZdtVoZIRqz5NdktoAANY65cVxM=', '13nMyp/uOIRmdm3YgFXPS9sL5Bk=', '7Dctqt09CUdqJ/bHTMnJ2IVVByg=', '8Ca7O0k1E/ndWpewEJMqZdko5NM=', '1KfdqRmPDz8EwK8aUa3/jS1FMro=', 'yd3RcU5psMz9LaTGVSi1CXUDQ48=', '7nzgU3er+THxwWs2f4GQoBXdlHk=', '/LGBtHsBNA+tMUHSdqqkhGq0mzs=']}, {'ip': '189.109.101.83', 'age': 22073, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATmJRqTT8GV+hDzMeSOXAaQDzG7TbcO6Y6ywFqwuASjxgg+uaO1C5qyz1RZMgqPzDZWJaQpsWZbUgaXe', 'port': 6881, 'preferences': ['K1s3GUQ2W+YWsvbTMnPr9uWMIco=', 'pBW1lPqidXb6TUtyXlc7LaYcAbc=', 'FuhPGRswUeA0z9xOlw8vgGgul+0=', 'WEUdg/y1zH29Nj4wzRAqf4/zbKY=', 'xxSAIVKzYgvqCTh0OoZL7bVpqZ4=', 'iSUvMLygxxXjaOGhYZLymvb+Lgs=', '7HkuR1MOisd7VNsjCbuVG8XYZd0=', 'cdJM+aHVq99GQ6zTLhp64ej+5xo=', '4536I1xIqRdAKbRh4YnWUY4ZXVw=', 'NTip+fERUn6DYeeNhqa1qIw4zU0=']}], 'random peers': [{'ip': '14.132.168.61', 'age': 22240, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKnbt7SrnKWR5oXvrgAtHKeXDFt7ZaGnJrrMfCitAEiVfi4hkao8XOhdzgEr5dWDDH+r1BsOIFV11dHz'}, {'ip': '134.67.182.78', 'age': 22239, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZIOiDJc1PBOYpuy5RhmuJPdjfL77te9lxtma1u5AA8G+igg0y2tTQAsHZHOyqSVhw1ZKFIYwrmyVc+x'}, {'ip': '185.245.92.230', 'age': 7251, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeY2KjmxicEi7zdP+038/UdrGbQjHmcNHLBppRNsAKxnY8+kukOKTgodnQODBlLZN1YphhkQDd5wGzS+'}, {'ip': '236.67.120.89', 'age': 3310, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf'}, {'ip': '58.99.217.60', 'age': 22238, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfccZg9xBM+EJjJ0YsaSCnP372Hc+ALCx2jFKLGaAKqA0uOGDl9GffkBrisBZruFTT5+EqpvGDboEYu8'}, {'ip': '71.210.143.245', 'age': 22240, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIGwUwjzW68GdxVKo2KAPCVLu/6RdYKKvk489xmWAJokgIcXzoFCHlHEiZ4QZt2BrzATf87P5JxsatEo'}, {'ip': '209.12.168.227', 'age': 22240, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF0UNn2KVawnBo05YGWnqAq0R4in2jlaTqOSVFWDAEWwwrlxqropLObSfu6b9GXnYzygpSB+KcwrWzRm'}, {'ip': '32.5.3.220', 'age': 11708, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKBZb8Ka49PTCPXj1e0IF2SJHxDbmIza7jjljPrtABd7kt0vcwJ6vxKd1t620AXmf1dl21/nndw5s8jX'}, {'ip': '220.140.227.128', 'age': 22238, 'port': 31312, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcgY0iH6O0NCO8Pdzl33llWq8UYzRVg6+MbAkPh9AVPq9obNvxOni0sFuJQa2BJIckf2C+8scmZTL82p'}, {'ip': '78.252.185.45', 'age': 22240, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABsPtJT52Yga/CX3nJAiypPD1kFQXFc7H8AU0WmqAVvDZxGcUdkGkAJkPWm2+XsMr3Ou7+Eioy6ElB8l'}], 'preferences': ['uoxEkp8YXzXI4D/JSihER7qf1mI=', 'cjL2GYpZiLGrqMPZMfE5nrRey+E=', '/XmOYQ3EBsZ45XmLr+hNifwvBIw=', 'JuK/FxhudADOXP8LdA3Ly0xrRMA=', '44zzzuSkuBR8IFyVqBfWB/VdEoI=', 'XIYB7MUObH5Vw2NP5JrLH/d6xcc=', 'c04WuneZNsjtWgfSFStPhgMgUqA=', 'hzXVsSFingdIwuj8y0pku2fv8JM=', '+l3V3bCoYtOBRZD4e78VUS/WoaA=', 'oXJqrwyXAPEi0XPuLVWht72qKXg=', 'fyFyjafT5x9RuNSdUCBPCj9FUCM=', '2+mU7xBM6VNsdGa0wCoXVpcXvjU=', 'cZdtVoZIRqz5NdktoAANY65cVxM=', 'pBW1lPqidXb6TUtyXlc7LaYcAbc=', 'wO0qcXnbSe14B2Tvm2GBensZ2/I=', 'Gz71Urr+AsGkpeg8mx/H61UnNlE=', '7nzgU3er+THxwWs2f4GQoBXdlHk=', '7Dctqt09CUdqJ/bHTMnJ2IVVByg=', 'qA9dndqtE7IOY2Jech4HDyBqTlM=', '9pvG/RqiqEkhP3jyQ+bgzdlaPBw=', 'EhwISdop1zU+13mtVzUeKH2VD4U=', 'wvkbUGy1NPVpAOFh3ZnbVMOa87c=', 'LOr7TEaxHkNXmHsEOvOaCx2fgx4=', 't2d7hfk1LvIVgWq53r9heXFONi4=', 'm4fDMu4snyVeirqXP/bhzNoYyM4=', 'rxgQ4PDf+LxfjDlXmGJX4wssmpM=', 'aNV1X7wLnKsWsfy5mUQVWVdHJm8=', '7fnYMwdEW+xMbf+AfeSLQ9yZPkQ=', 'XOTJp41v/MPyylUNIq+AnR1vElE=', 'xR4TvvPs38SbTwqOCenhYACy9rE=', 'r9g2EhXrJJbPpgvV30CBXIarPmg=', '1RxFqiAZqHKlmpvwEb3tp/nL5rg=', 'Fgy/Gg/MyzCPh4kXlRoyeVJlStY=', 'j8Gg5a6wxt+yuwiI1JseiTBu4FE=', 'neA7pRlTQNxkxh5W7ZxCsEhbT9c=', 'JDHXwihspod8IZnezxi0MHSNw1s=', 'B0blTxIE9bMBsfjUdcXU90HS2fc=', 'b6HRFsUcX5cyLpoZk+GIJUNvcoE=', 'yP5eZvPa7/RlsTqI2hKYsJ+TlEY=', 'cum0zZRfHUEBeuCsm0nnVzT+zrE=', 'yADlpTB8RfmhnnWD2/4yzBd0dOU=', 
'0O9OgtKBkZb6HA0edKh6DqNhMa0=', 'qHhQi9YnmW72l5ugu7+0S8GZCqw=', 'PC8wGDbh6oiMuVluQnyQEPNRRaU=', 'ANypZ8uWIc3okm5acCDXb8Ptf24=', 'IVzZad6tyxzs9Da7h4UjdlFB+jU=', 'lW0b2urQv5/loq3KoFWwFUi6QQ0=', 'tztSGmZvyVYpMaJ9LwvB4fDyqZ0=', 'SQsqKg5a2a49HTEoDGwl8Baojg0=', 'ivXk74wlwgiYgcGcLMbHTylgg9Y=']} 
+1188691684.4 SEND_MSG 250.124.210.203 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUEaAkyJjQkigTeU4yutiqWyWxcAl074V47xMc34AT0kPd4eyBitCOlLaK/HqTfcKF9yPDYiOhpxeapu 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '111.58.38.44', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '102.191.133.140', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '175.126.100.151', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '214.159.217.228', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '149.185.107.47', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '223.192.168.133', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '136.195.247.166', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691687.8 CONN_DEL 52.149.67.155 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcTt6muAe498hg1yOrcaIvl3s5fMz6ucSWGFS9t0ADRxe1cAZsZ+FQRDefYPTMX3E43Wr2Tq8OZXsE82 overlayswarm:connection lost 
+1188691692.6 BUCA_STA 9510  2091 2091 100  1580 12116  47 47  0 7 10 
+1188691692.6 CONN_DEL 85.120.200.4 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAA3P4PF2JX4KF3qOaxh1DyHtAkrDT2f2XX0ZmF78ABhrShgGJCuipeHzq1XJ3WthdMnG26YVbFEK6N99 overlayswarm:connection lost 
+1188691692.6 CONN_DEL 47.68.176.179 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAajtKN3+NL36YxykteuvSflnWv2g6eduwktkNncCARllVpoa9FshKL7kQf5mbSpzMoQrhHXu26GYB4fH overlayswarm:connection lost 
+1188691692.6 CONN_DEL 52.45.163.145 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWxQG7SUCfv1mqXI6aENBNB9MIA0+2546q6TRnUlAL237nGhMdKSSqfHtmcI4XQbg6LNsCWy+DVO3HSe overlayswarm:connection lost 
+1188691692.6 CONN_DEL 59.5.233.22 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASodEn3niQvuVmg309Y/bk5aWNl/Sn6wqybSK2AwARRfPcZQGXl/KRhVo2oiOMG5r53yCSh0JcuyBztS overlayswarm:connection lost 
+1188691699.1 CONN_DEL 22.92.50.100 7763 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMBsIkOf4NQZefln7iBWXklgrJ68MOULijCqgYj/AdU4Ny3QGcCferCj6Upabgk2fcsrMna0aWEUyqUu overlayswarm:connection lost 
+1188691700.6 CONN_ADD 193.167.222.163 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAb3n0QxdrhjQEH3NkWNDLN6xGd+5Vwb3IcJwH0iGAQ7ynW3z+mV7trV3Emad95kG21ru+11Vn7qLAzFF 6 
+1188691700.8 RECV_MSG 201.1.114.111 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAb3n0QxdrhjQEH3NkWNDLN6xGd+5Vwb3IcJwH0iGAQ7ynW3z+mV7trV3Emad95kG21ru+11Vn7qLAzFF 6 PASSIVE_BC {'preferences': ['e6yY7AKHspXXa8zNFI+tc/zdJto='], 'ndls': 1, 'taste buddies': [], 'connectable': 0, 'npeers': 476, 'random peers': [{'ip': '215.204.16.228', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu', 'port': 7005, 'similarity': 0}, {'ip': '90.49.252.32', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh', 'port': 7003, 'similarity': 0}, {'ip': '3.40.113.71', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'similarity': 0, 'ip': '27.161.39.137', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf78JHEtMZJO4zHsu3DNG6azd+jgg25xY/uqn1aCASC7irRhj6KRObIi2HZsBjT2JcdiZiKrnE5Ym7wU', 'oversion': 6, 'nfiles': 105, 'port': 7764}, {'ip': '118.228.25.179', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc', 'port': 7007, 'similarity': 0}, {'ip': '230.177.47.157', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk', 'port': 7002, 'similarity': 0}, {'similarity': 0, 'ip': '186.151.8.16', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdUY77cjWwiZc2zrAhtmlZmD9vQzfz6YB4LiFK9MAH+vobOmQvqdQ1UY2rImpxhioVSZy4TzvZ7bGUXp', 'oversion': 6, 'nfiles': 291, 'port': 7768}, {'ip': '125.12.98.93', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARi99EIsZRGE+EjUhvN/nRAZ8HFmd4/KBeWZsoQaAZOHgwkv7hnke2vMRtjuL9h1y+V9R4UN9FsVKTXl', 'port': 54545, 'similarity': 0}, {'ip': '191.89.151.149', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}], 
'collected torrents': [], 'nfiles': 46, 'name': 'TexasMd'} 
+1188691702.3 SEND_MSG 185.233.241.25 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAb3n0QxdrhjQEH3NkWNDLN6xGd+5Vwb3IcJwH0iGAQ7ynW3z+mV7trV3Emad95kG21ru+11Vn7qLAzFF 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2093, 'random peers': [{'ip': '230.78.127.253', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '138.120.195.135', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '162.196.172.7', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '210.175.238.83', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '148.203.231.181', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '157.248.162.147', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '178.27.253.73', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXNTe1C0wQ65HrmCNN6loRK2neW2VJyRg/Lr4qzLAXbudGqc5L+fJU5S3+rinBdRoscSZsgCRuD92TeI', 'ip': '172.165.169.226', 'similarity': 0, 'port': 7764, 'nfiles': 220, 'oversion': 6}, {'ip': '194.17.182.11', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ75ii63ba1zGvHd8Am1hLujksc5yc575R4T6I1RAbNKjt162TB/24WyzXTjqVyCHglFbBlKX6ToEuNl', 'port': 7763, 
'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691708.1 CONN_DEL 222.155.13.130 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAANxpyT+mFV/OUBr8IJ30YCSyYtyMEefJziuPU6oAELxt5z7N4w4qD2m1reRcmmNfYeh3zXJhE586ncu overlayswarm:connection lost 
+1188691708.5 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 6 4 {'q': 'SIMPLE the ', 'id': '\xbb\xbc\x0eD\xd5O\x10b\xb8\x9e\xa1g\xe2\xce\x99GH\x87n4'} 
+1188691708.5 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAScbu5ueVlJhhgfvaI/DN07hHTgYLEVylo8RLYfsAFNy8H7OOZEdAF7ZqHrY2vKq0xNDNi76qkz41u7g 0 'd1:ade2:id20:\xbb\xbc\x0eD\xd5O\x10b\xb8\x9e\xa1g\xe2\xce\x99GH\x87n4e' 
+1188691713.1 CONN_ADD 44.132.74.198 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpaAjEvXj0BiPd7NWYb7DCD4osEe5/yw/SNKsZnAE0ks+NCPAmoWMuQ9+SKqUtDL1jAmziAf58o9Z22 6 
+1188691713.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpaAjEvXj0BiPd7NWYb7DCD4osEe5/yw/SNKsZnAE0ks+NCPAmoWMuQ9+SKqUtDL1jAmziAf58o9Z22 6 0 {'q': 'SIMPLE buffett warren ', 'id': '\xb0q:\x02X\xb5w\x05\n\x8c\xc0\xaa\x8a\xf2\xc42\xe4w\x08p'} 
+1188691713.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpaAjEvXj0BiPd7NWYb7DCD4osEe5/yw/SNKsZnAE0ks+NCPAmoWMuQ9+SKqUtDL1jAmziAf58o9Z22 0 'd1:ade2:id20:\xb0q:\x02X\xb5w\x05\n\x8c\xc0\xaa\x8a\xf2\xc42\xe4w\x08pe' 
+1188691713.2 CONN_ADD 141.208.52.65 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY8giS2wU/ZTjB4S9Y3+ChVGqgoBq/ttNOf3lFkwAdSKEjtqjZy+JMhTUAck1T6qeuLb8VkW0xXaZK1I 6 
+1188691713.2 CONN_DEL 159.165.152.255 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY8giS2wU/ZTjB4S9Y3+ChVGqgoBq/ttNOf3lFkwAdSKEjtqjZy+JMhTUAck1T6qeuLb8VkW0xXaZK1I overlayswarm:local close 
+1188691717.9 CONN_ADD 55.112.219.159 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASzD/p7C55GNHw0wgD8MzdbYx8j6cw+Ye9EnC0FUAR0j5qJ1gCy6/8whrSCozCppGXTX4D55f38KURVB 5 
+1188691717.9 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 5 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '&\x98\xeb\xa2\x1abJ\xb5\xa2\xfd\xd6\xc2\xee<\x14\x15\x0bWy\xba'} 
+1188691717.9 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 0 'd1:ade2:id20:&\x98\xeb\xa2\x1abJ\xb5\xa2\xfd\xd6\xc2\xee<\x14\x15\x0bWy\xbae' 
+1188691718.9 CONN_DEL 253.13.167.175 7762 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASzD/p7C55GNHw0wgD8MzdbYx8j6cw+Ye9EnC0FUAR0j5qJ1gCy6/8whrSCozCppGXTX4D55f38KURVB overlayswarm:local close 
+1188691719.3 CONN_DEL 151.147.18.191 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAROXlOSXFHSCd4tozK5g3g44rsdGBnfTSc0fWplTAf7G2mMcNLDZLeFijgZNoxdlYxcnLEccgHYWy6JD overlayswarm:connection lost 
+1188691726.2 CONN_ADD 81.209.136.88 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAbO9dJ2pxluv+pit2HXXWCMVWqEEMiAQTTJYmVzASEzNFD+vpOaCqow/RY8/Ah9Ez2foTdqQbhXKhqr 2 
+1188691726.2 RECV_MSG 199.127.13.198 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAbO9dJ2pxluv+pit2HXXWCMVWqEEMiAQTTJYmVzASEzNFD+vpOaCqow/RY8/Ah9Ez2foTdqQbhXKhqr 2 PASSIVE_BC {'name': 'kobi', 'taste buddies': [{'ip': '216.67.253.104', 'age': 557, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf71gntCpx2LMTulL9fykMcXp1dXcSUAguc4h5JsAG7eawqrzDphpDHTkSjOo5qLq5nQmg2T8eAw56ZE', 'port': 6881, 'preferences': ['AWZW8pfTJE5IXxBGfZHPtlcNOOM=', 'futmHX6GoSOqLt8Eo76D2fwPk4o=', 'B0UAlbav+Mh8a/zPQ0Et+mzn69Q=', 'plNcaPlCPor+20lwQujbSesQ+N4=', 'hzXVsSFingdIwuj8y0pku2fv8JM=', 'pdvrW+EVI6CRU7ScvFACR+MC2YY=', 'fk6tley2rd1GoKGXL+xRbLHRHG0=', '84MKV+jE5HvVcOpke3zFtzG8eeo=', 'rclRMuhNcj2S3PjuCtHWNUh/7PE=', 'RnhicLGpdRSOSRtJN5t7dxIxfeY=']}], 'random peers': [{'ip': '69.146.80.236', 'age': 195, 'port': 65535, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMFEzOs9poNUO4QKobqkg0S1NddVXqoRaOglxn4bALe/2f1qrYH+VYrGBzlcAyPDZmxsDLN+tySY7KGP'}, {'ip': '116.138.101.99', 'age': 557, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEQLsnfUPXb46YWVFmX4RYDxZlBe+ejjKBUU4jNjAfEZJMHmG2FyhvvdKNlZ3TzWgIAF51nhAh6/vBgp'}, {'ip': '62.128.203.140', 'age': 557, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdY/PvsP+EtbJykQzr43mEPVInNytYNqJ4G5bXZWAQljeGd0DiIN6Cj7zDuM/BkYxtghOfh+9Nrw+Ndw'}, {'ip': '119.212.167.211', 'age': 557, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcI6HGEIFmyi8BGz8cRIkCg7ChuIcHEFVIqPjqukAbVx3uNTzv334nVb0+WQaQfd9q5M43nbpFpDdY7q'}, {'ip': '108.176.70.158', 'age': 557, 'port': 12544, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFX0z3LydlU1QYwhMhKaKdKOckfGJSDQoUZY3JQ3APfkQ9eUm07y6drTixug+qEOmpsPxoKdD/cl0Acf'}, {'ip': '42.8.232.39', 'age': 557, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF94NDBRjgFfpdeBJFUbBAvi6P8cMyPx93l1Fkc7AArvIvFre29PP0AwA1ZRiM2jkJ6QfXVHcsmBQ8Jy'}, {'ip': '13.58.123.208', 'age': 557, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJTP1dqwfEZgaYG9Yqhaj55sqC+4HCTALsVKl7s1AbUh7fSxAqZj5KakNyaXFZKJ8wtquIieIxiq3kjw'}, {'ip': 
'49.145.243.171', 'age': 557, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI2MDz76NqaJQGQJmseJJmy5TJpnVqD+elLA+ni+ACdOGXuf+pnbcoyVqpdQRXKduyRxyiurlmh5jUNx'}, {'ip': '150.66.106.44', 'age': 557, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY6FgedeRxv+/zPgDpNqHNa4dNn6G5bFhCvuhYXIADVIE7DO4xTOk+D27Y07rVT+F7Ciq2FOxDM2q/Jq'}, {'ip': '209.113.91.166', 'age': 557, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADtRtGwkGoF36FDEZHiZJ61RcO0Yvobqtl+v43qwACO+D96SUNS8tKHcY1SN3VFzS+wQRtk3rZf6Sj10'}], 'preferences': ['6f4VQfJCv5bX9DZcq8rvTRC193M=', '0uk1EeiKBb9NtQBo4ZOnZiQUmWg=', 'lREokQ5RfqwYvcTxaiuGED/v1bo=', 'Ii9dunoxxfSoNfYoRuNbFGReRtw=', 'gSAQz1jqYzcNG9zk+KaaKZxGrdo=', 'Yyjn/9NxNGCat801uy5YSAwzORk=', 'zQVRBf4LWosTgoAxb1LU9GegW7k=']} 
+1188691726.2 SEND_MSG 211.138.194.66 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAbO9dJ2pxluv+pit2HXXWCMVWqEEMiAQTTJYmVzASEzNFD+vpOaCqow/RY8/Ah9Ez2foTdqQbhXKhqr 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '62.27.41.75', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '178.162.120.235', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '85.29.85.37', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '63.87.221.101', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '33.136.77.182', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '58.23.29.114', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '22.212.107.17', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691728.9 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 6 6 {'q': 'SIMPLE search files, youtube and liveleak ', 'id': '\x0e\xb3E\x19O,\x12\x06\xc8\x13\xf8h\xb8\x192`\xd3x\x91$'} 
+1188691728.9 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQDV06LyjyMUlfG8JShnZN9hLntgt0Xm4w/6tslrAUAUCpgIaoWQy3zl5xmBcUC2mefN7jTCNEgddE+u 0 'd1:ade2:id20:\x0e\xb3E\x19O,\x12\x06\xc8\x13\xf8h\xb8\x192`\xd3x\x91$e' 
+1188691729.2 CONN_DEL 171.190.129.182 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACDkXoeEjyRAMNo0EN7ihjuLxGEzxht/E8DUEwMHAS2MSZ+88b8Y88BdwFBA6Cb1sea/uYCsndtxShBf overlayswarm:connection lost 
+1188691730.3 CONN_ADD 17.236.108.159 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUsnBQbs9/tWfFNQOJVrqFmHZMHUSf3QUSKVc754AG23+Th4uUWNTJmY1QagjIRcbfXZ4jMuBVofP8hL 2 
+1188691730.3 RECV_MSG 178.84.180.99 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUsnBQbs9/tWfFNQOJVrqFmHZMHUSf3QUSKVc754AG23+Th4uUWNTJmY1QagjIRcbfXZ4jMuBVofP8hL 2 PASSIVE_BC {'name': 'HS-DEVEZAI3', 'taste buddies': [{'ip': '196.44.166.202', 'age': 3803, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf//bQeGl6O7I/6niZvh3cP+QOJqgXdIvGko+vUdAJW1cXNwXWNJD7g8QKEzWXN1pyWc86yd4CuDnfdS', 'port': 6881, 'preferences': ['nxm+IQ3I4dEJt0vvA+ne4MAVCF4=', 'mfwguhuUNphXR3KZgPrPm+soJzk=', 'S8plImpWksW4K5zqqdqZHQdwcaU=', 'pHBXVMEUD18KYm9DM/P8W4q8nBc=', 'MRq0gRfXi/0ApCpsH0vSk98NYMQ=', 'QhQbL8K2hoWU95WTFRpvF5d97Ik=', 'L/QFTuZkOu8PMbztkhDwWdQvvO8=', 'RbkY94Yu0ILG4PKNXrvDnMp1HTI=', 'nyhvxdR/0FtI1U7dR3g7muhOEds=', 'xXkZ8pwGTlkZXFWAhGIxDZHWyDM=']}], 'random peers': [{'ip': '61.24.207.250', 'age': 15815, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWqJTluvxqbfxzzSUUzs7LXm3KEzi78DD809m2KAAaTUsG7YoDgTTYXUdFpA28qVUHnp1gWrfdhozrAi'}, {'ip': '151.118.185.5', 'age': 739, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAE3O+lplYuOwTUkFsp1325ayxHrbeXtIzCOChaagAEo27/yL2TmPz3MVzP3MNH1oHYjX0iPtWXtiWVu+'}, {'ip': '91.225.58.243', 'age': 118240, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbjnnX5+Bz2KT6t7ak8RL5yoFvj+7tQDIhqlNC9vACHRIqqQaOYde5iYQ6/48C+RMTWZjjDXc/r6m5LU'}, {'ip': '19.57.174.253', 'age': 3366, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYMxQHYbUhfyiO37gmv8GfiZM2dnSALMTcPLsDOTANBd5AuKyQUa+Phz5BVdu41rXb+lDgH1UPpXsi1S'}, {'ip': '190.74.50.120', 'age': 1788, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZUQoyzBFWorCBRWLHL8dJhCn5icxx0aQbMUdz7SAVPlMxzym+mJeyxTQ2B4mFnlFZdg0QHYOGUWco7G'}, {'ip': '188.104.228.162', 'age': 384, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf6gOSE2js+HPOG33c9tn/WP7+F/a0BJeEVWGmF1ABugcVPbltse6zxC7V/bttMZM8E1m4a7uJRSr9o1'}, {'ip': '157.247.236.172', 'age': 7935, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATw/1Hj+D0EfypUMSu6fsqcwK/SM92M3SkPjM+0tAV6nuXwTsf1zD4c/lgfnSGkUmI0a8FMqs6EsxDGR'}, 
{'ip': '255.151.222.189', 'age': 2358, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABSy1pPL1ax5R3JEtVCkOIerxUMz0YnVGdGiP35yAVVfbxstN0dlnCEPI8/rYMq6q/2wkATXSsoyO6ZR'}, {'ip': '42.71.166.133', 'age': 1391, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALAeNlIJkZ87DJMGA8lVW+WDhm01FixeiR7A5bDJAFcxV/uOSW+XyC+NLOWr3Rw2xDIvaPzLKDqMfmcR'}, {'ip': '220.127.121.93', 'age': 1765, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ1/aTcOWqKzSJZTiHkJa2mbUJv3exi3z7zM+Fh/AIjhnTZ9arVRKwIexy0fevF+kCy2RoaadMKuClQt'}], 'preferences': ['HcrEuWlasRqngZ7XFiGEZyOqQwM=', 'ODJk7HMaYyk6aLHbjmJ43B+VGF4=', 'rqAg+ik477p4jYSTkZ4lja2J0Rc=', 'w7JWhMLsZPG7hzwdauuFmD2kHaA=', 'Vaxs8DQbHxZoB51gMc7xLrwYBpc=']} 
+1188691730.4 SEND_MSG 133.204.85.103 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUsnBQbs9/tWfFNQOJVrqFmHZMHUSf3QUSKVc754AG23+Th4uUWNTJmY1QagjIRcbfXZ4jMuBVofP8hL 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '73.42.222.152', 'age': 0, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP'}, {'ip': '248.138.181.200', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '71.84.183.70', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '42.8.210.35', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '114.247.100.220', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '99.243.45.67', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN'}, {'ip': '116.55.55.131', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691731.0 CONN_DEL 25.168.235.47 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcw4yX4cVqZm++8z/fUFfPbYmHM5XbfybHMNg//vAermlReI136TGUd3A4J0yyUG6BzoUiY4jZxM0xH3 overlayswarm:connection lost 
+1188691735.3 CONN_DEL 196.175.7.64 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaYsTw8q8kNc/CMVi+KauBk2dd94N2gzFnqU3WCCADo0qjLYIRzDJHoyUWUxlpmWg0JfkIvgTWxprdFW overlayswarm:connection lost 
+1188691737.5 CONN_ADD 159.99.106.238 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHcECvCmxUQLRzeAyg6pHa0AdkLVJ83pESaZrtpoAfuSiaEzCGjK0IOSu+i7oq29Hgg1/vpA4qLpcnAW 6 
+1188691738.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHcECvCmxUQLRzeAyg6pHa0AdkLVJ83pESaZrtpoAfuSiaEzCGjK0IOSu+i7oq29Hgg1/vpA4qLpcnAW 6 0 {'q': 'SIMPLE resident ', 'id': '\xf8L\na9m\xa5\x11\xcc\x8e%/i\x04\x9f\rEs\x80@'} 
+1188691738.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHcECvCmxUQLRzeAyg6pHa0AdkLVJ83pESaZrtpoAfuSiaEzCGjK0IOSu+i7oq29Hgg1/vpA4qLpcnAW 0 'd1:ade2:id20:\xf8L\na9m\xa5\x11\xcc\x8e%/i\x04\x9f\rEs\x80@e' 
+1188691738.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHcECvCmxUQLRzeAyg6pHa0AdkLVJ83pESaZrtpoAfuSiaEzCGjK0IOSu+i7oq29Hgg1/vpA4qLpcnAW 6 1 {'q': 'SIMPLE evil ', 'id': '\xecKw\xc2Z\xf3\xd5\\\xe62\xcbV\x8b\xf8\xcef\xc6i\xbbf'} 
+1188691738.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHcECvCmxUQLRzeAyg6pHa0AdkLVJ83pESaZrtpoAfuSiaEzCGjK0IOSu+i7oq29Hgg1/vpA4qLpcnAW 0 'd1:ade2:id20:\xecKw\xc2Z\xf3\xd5\\\xe62\xcbV\x8b\xf8\xcef\xc6i\xbbfe' 
+1188691738.5 CONN_DEL 21.197.224.206 7774 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAX/1T2dVBW4GPes6jUjOcPGeC9f276h4hf8B1umPAdY1I3aE4Qcqlm9A6ZIRfIfLAIiw8/3tLEB/XPRw overlayswarm:connection lost 
+1188691740.4 CONN_DEL 91.249.205.40 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFFB7MLhm7ZdbXdT2BIr4cR6x59yXqdzhz6Z0M3KAVZZShx3idsnoe4UOimRIKE61ed3F8UovTjnu5qr overlayswarm:connection lost 
+1188691744.6 CONN_ADD 23.23.24.66 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAW7KQbAfJF8zUiLVF9ivvOIq9gxrbOrYsGmNItGKAfr4Ufkoy4OpT5Y7WyApCHZWC3YietKJXxQQeA3z 6 
+1188691748.3 CONN_ADD 217.51.82.81 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG3x28Nmv3xXbHFkklCYYDRpWjbkaC+sqJrqw0jAAA9ITdag/q/NU7aYE9mU8qlZkGWXz4PQjqVDVlQ3 6 
+1188691748.6 RECV_MSG 8.154.205.10 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG3x28Nmv3xXbHFkklCYYDRpWjbkaC+sqJrqw0jAAA9ITdag/q/NU7aYE9mU8qlZkGWXz4PQjqVDVlQ3 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 8, 'random peers': [], 'collected torrents': [], 'nfiles': 0, 'name': 'sturoscy.local'} 
+1188691748.6 SEND_MSG 112.244.53.81 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG3x28Nmv3xXbHFkklCYYDRpWjbkaC+sqJrqw0jAAA9ITdag/q/NU7aYE9mU8qlZkGWXz4PQjqVDVlQ3 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2104, 'random peers': [{'ip': '56.220.35.63', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 0}, {'ip': '122.193.10.153', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '255.220.242.202', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '71.205.181.52', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '124.172.111.62', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '248.73.194.59', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN', 'port': 7771, 'similarity': 0}, {'ip': '32.207.206.85', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '206.148.169.104', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARXE5pzUbygCGSKSWw/CwU+YVk/DtdghMV2mN/KgAVepoPLdlEeJMsmyaMAQ8bDDSSN3S5I5H2Gvy9HD', 'port': 7763, 'similarity': 0}, {'ip': '179.169.74.151', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZnYqLnt6+xlmselW2zW0ArOjQBauEaIbOdTpilbAXosRb4GKDWEDkPfr1R5/iDYOtsdB1YCZJOMhRsz', 'port': 7764, 'similarity': 0}], 'collected 
torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691751.6 CONN_DEL 226.10.236.105 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASae3XmgW6+KfHYh3qNfB920R/yA1DVUqvjXuS5OAHsPBqDblIrIdBpmTlSJ6b5jlxtOYk9eRho+h3At overlayswarm:connection lost 
+1188691752.6 BUCA_STA 9511  2104 2104 100  1578 12119  41 41  0 7 10 
+1188691753.0 CONN_DEL 10.137.114.45 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP overlayswarm:connection lost 
+1188691753.0 CONN_DEL 36.38.152.22 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAI7DexPb7lB4tt9PpW2MRsJaJKb8YW9GfukwJi1tAQxw3eqw8t9HXnrXCTbLUyLD50Q7uujiCcRLoe+B overlayswarm:connection lost 
+1188691753.0 CONN_DEL 71.98.64.131 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN overlayswarm:connection lost 
+1188691753.0 CONN_DEL 66.171.235.158 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAD8396BuV9IxYbYP1I6Ql8f+inuAv4prQXGJESFxARFMZ8yHrSUBAlngfbLeZXKlq3vWOhweFr1oU7Gz overlayswarm:connection lost 
+1188691753.0 CONN_DEL 35.222.25.29 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAMFm4gJhJfXlvIcAg292476SY2e2ZYRl+3mUSxfAVvVhovIO8HkHDQDX4TjXqbYI4rhqeXUmO1tcCTr overlayswarm:connection lost 
+1188691753.0 CONN_DEL 130.82.116.234 7771 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfhMyBbhP2dQJHm9oBEQ9uz/zYa9AoCxJxExpsmcAST0K3vXYmPndQn4eh2vsYkRcu5ZrMs8oPyu6iZN keepalive:'Not connected to permid' 
+1188691753.0 CONN_DEL 254.0.146.225 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP keepalive:'Not connected to permid' 
+1188691756.1 CONN_ADD 8.58.106.182 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVUS3GBNNBluH3x4G7Id8mMxCh/ghNGicJWlhXzoAF6SPV2cFJSyCRJCNcd7uEl/ocDdvXHA2XOwMpFk 2 
+1188691756.1 RECV_MSG 6.162.34.92 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVUS3GBNNBluH3x4G7Id8mMxCh/ghNGicJWlhXzoAF6SPV2cFJSyCRJCNcd7uEl/ocDdvXHA2XOwMpFk 2 PASSIVE_BC {'name': 'pasha-pc', 'taste buddies': [], 'random peers': [{'ip': '7.184.250.14', 'age': 4337, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEIlPdCgAUDFomiMIBXE3Xxzjev+lohcjQoUokDwARqM5y137/pQUBrp8ZuZkOPIuJA4/t1Tn/qB0Ema'}, {'ip': '41.1.65.151', 'age': 1419, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWY3lrq7SwMxk2x1lITaFV6w9+MaDMCAfc8dhlSWAf4TJH9zfPYXNVuNq1O1udqBOs682Ap4PzkBs+eC'}, {'ip': '188.93.209.101', 'age': 2043, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMsqzAoMT76N0cqqDyz6snlLiEBd0UgU1uNhGBW1AJgqXshfxHEs8nQ5gKWgQHAhet1gW5NbOtSp3sD+'}, {'ip': '233.152.100.3', 'age': 7918, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfx4Jd+//T9nmA/zuatg0ejHhzuHeDPx5iFdJM14Abhm1FAImN9KThEVWddcvULBzpmMG/gpYM5LLf+e'}, {'ip': '181.13.179.94', 'age': 12420, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeBh65Z6VF5PIL0CIq/zr6Rr8kbCbu34MpEbhM1VAaRrdA6TZwZhzJj9COxnJwJ6dA+wtH7GXgYaiu96'}, {'ip': '142.203.249.93', 'age': 12254, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARzZStrGsxtMcQMVofl2ml3I7x//0jcbmyjCY4FWAYmvf7zOM2BwTNlUMCqBBvPNrjei9vhtLnQ6Fm2E'}, {'ip': '232.7.153.153', 'age': 441, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZdq4CstMYAegoCyMKN6coqForzAWUfVLbRiPzmZASFFiYA41sDLqvhsyUWwP1IQFCjmxXOwToss6NbL'}, {'ip': '29.130.217.138', 'age': 14816, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHcDuj/QiMO0bZhCLvMyO8Mq0n7Hb2U4I2U3nZc7AXGGGmLwVtPDvL/Ibsiu4MSO7NlgIqoKQoUNBocw'}, {'ip': '45.93.76.118', 'age': 5120, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY6FgedeRxv+/zPgDpNqHNa4dNn6G5bFhCvuhYXIADVIE7DO4xTOk+D27Y07rVT+F7Ciq2FOxDM2q/Jq'}, {'ip': '92.136.89.156', 'age': 4289, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUqbwFuKkAWob/xZk/CbFTCqEkH5lxkHz1L0+G8NAJIKhlZq5BX9qUMNPwTee4hS1oT/OQXgbvcGfRpY'}], 'preferences': ['Ro5mRHF+eB9gkKr3A23n189VcBw=', '2dJBS0W6/NpcmrhFJD1bY2vwoEg=', 'mh+pB6ar5+C5BPifBHmOb4LHqWE=']} 
+1188691756.1 SEND_MSG 11.48.114.49 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVUS3GBNNBluH3x4G7Id8mMxCh/ghNGicJWlhXzoAF6SPV2cFJSyCRJCNcd7uEl/ocDdvXHA2XOwMpFk 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '2.150.144.188', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '161.252.243.27', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '84.215.108.254', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '70.236.9.18', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '234.59.83.58', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691759.0 CONN_ADD 179.234.227.228 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAhT87bPZ2m4sBRxkDCPt51Cvfzd3ERs1nml/NniAHaEFnWy96CAEZ0NTypDOJyX95Cv0LIlyClP/3Yy 6 
+1188691759.2 RECV_MSG 224.236.113.231 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAhT87bPZ2m4sBRxkDCPt51Cvfzd3ERs1nml/NniAHaEFnWy96CAEZ0NTypDOJyX95Cv0LIlyClP/3Yy 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 8, 'random peers': [], 'collected torrents': [], 'nfiles': 0, 'name': 'marissa'} 
+1188691759.2 SEND_MSG 91.226.94.7 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAhT87bPZ2m4sBRxkDCPt51Cvfzd3ERs1nml/NniAHaEFnWy96CAEZ0NTypDOJyX95Cv0LIlyClP/3Yy 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2008, 'random peers': [{'ip': '153.216.93.40', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '156.81.54.46', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '135.193.125.234', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '65.31.161.195', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '163.95.70.12', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'ip': '199.114.25.204', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf78JHEtMZJO4zHsu3DNG6azd+jgg25xY/uqn1aCASC7irRhj6KRObIi2HZsBjT2JcdiZiKrnE5Ym7wU', 'port': 7764, 'similarity': 0}, {'ip': '251.202.98.79', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9', 'port': 7767, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691759.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL 6 1 {'q': 'SIMPLE brandi belle ', 'id': '\x12\x03#Y\xf3\xf7`\xc0\x9d\xb9\xb2`A\x07\x7f\xa2\xfc\xfb\x157'} 
+1188691759.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL 0 'd1:ade2:id20:\x12\x03#Y\xf3\xf7`\xc0\x9d\xb9\xb2`A\x07\x7f\xa2\xfc\xfb\x157e' 
+1188691759.3 CONN_ADD 225.68.220.64 27015 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAP+GT7+XE/dLkop0W/I1xXrBhmc+fqu6TffLwnKXAVhSu5uoQ+qswEYvJvlsoWhY+jm0FOjAmkFiWKlk 2 
+1188691759.4 RECV_MSG 87.230.12.51 27015 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAP+GT7+XE/dLkop0W/I1xXrBhmc+fqu6TffLwnKXAVhSu5uoQ+qswEYvJvlsoWhY+jm0FOjAmkFiWKlk 2 PASSIVE_BC {'name': 'pasfield', 'taste buddies': [{'ip': '140.152.230.136', 'age': 9707, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJzkcOXHnzoY4ebyV+xru4fPEEG8uzpQdSJ5yb5PAbVCbN4Y3UL6R9X0vsU6+dRhP3AYC3MfXU+W1M7v', 'port': 27015, 'preferences': ['IeLCWId1zhYalBlK5SjivLtgLtI=', 'GWorEtJ6ZqXlhQab2ii63k54XYk=', 'syu9Fx3ANPlBsZvwCgXViAAqZVE=', 'WwKu7NzLQcTvdkNHoJ9SPQTO4zI=', 'TrYTjkl8/A1tvkabsrEowdullhY=']}, {'ip': '13.104.219.196', 'age': 8923, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf/93O8x3gEK2z/qnj+j5Ub/PuGoKUkMDg0P2vX6AGqC4+oN8lsN2BgdeLupFP2nmhqcZeozq3M70lEv', 'port': 6881, 'preferences': ['6r/Dl/8FMptGRB+cx+bBtWWKiIg=', 'm9DKu020F3JX7EFGanx0MEU+7Cg=', 'E/6OdWbvfg44hxSJLPr+2izz37A=', '7gzVPIaj/zSy+ItrCCX416bN2Ic=', 'jnKyzSeuiz5W0vpe9Wg+8vC0hSM=', 'OisOwqhVUgL3WB33DajSgW44ovY=', 'VyBkgAh+EIrzcTer/++LvqvwgFs=', '8TxJsY7PgfZd2eKsj0GwzWjpxEs=', 'cFd2k2h5jOwfsQO/aOsLtWxX+L0=', 'Z0bZUSgQlHQjvXujHajSzSP9RK8=']}, {'ip': '81.65.142.114', 'age': 9835, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf/6fvRFGazRC6FnUC2tyw3A5fxW//wdHfZ6TcGFAOzvaBcRXO8qKakhltxZIOtbsYdLCaGPc8MF4r/f', 'port': 6881, 'preferences': ['OwnAcRZSgcT/bAmB0/F6IT6P+JQ=']}], 'random peers': [{'ip': '254.132.95.51', 'age': 9835, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWPAXtVgV09OWAnchf3ol/n6poelFyatEyZtSFSwAYRG0jKqobrkY2HUKIrWmbgsHmeQ5V+FQkqy+v8L'}, {'ip': '29.68.114.90', 'age': 9835, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACKlZDfKcsXqCSXVYxPWAxUwh2ojqEqhGJZHQWjuAGncRbIjJAlrf23YN0wXTWBjOrO2kIU28r1sU1YL'}, {'ip': '99.109.89.183', 'age': 9835, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKL1mpSFGGI/eLpjkdJSsDLDN/6AkZ2qihtHiob9AaDTTe3fa8SdhYnjnALcHD6P32pYWa0E8hV4aAXC'}, {'ip': '252.72.33.180', 'age': 9835, 'port': 6881, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM1OytZa55qU5atAV+2N8yv6Od8INT2pqUZdvUJ/AeifAcLXIhLHoTD5XBP1BkEdXKH9itYY6YyE1e8I'}, {'ip': '60.227.101.0', 'age': 9835, 'port': 6879, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdUCNgKKZWN8a6yQPnlZ2Mkq7X0BX7kTrUCjdYlNAAWYBIHZSgyFx0Yrbq9Qd8WNo6PQYIQDwmf5pYxC'}, {'ip': '105.180.1.68', 'age': 9835, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAe1ws215ZBXoD9nTy/Ez5AsoaUeJOLlwOQukFYQbAaiE8yByPCuY1udvir/4YX717QzQHHBf2PACEL7L'}, {'ip': '73.253.158.143', 'age': 2056, 'port': 7767, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9'}, {'ip': '192.54.168.76', 'age': 9835, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADdA/E65Z5HIZdifFuPw+Uv9aiowKzBAAiX5+AaBAVDfEkumhkMy2LqlTlhLLjqpqfLNwtxB62KAlQUC'}, {'ip': '21.253.214.99', 'age': 9835, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU9vEV/6GeUKQGXPmKQGWutZxSHMES1IJFbH/lpwAVt2HcxCVzzDAfPSAH0cOEDd70C6dHJSWBgqqy5V'}, {'ip': '20.58.185.114', 'age': 9835, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGcUsjBx9THUHB/+Zb20sr6rYt13b7zc2QMg+o8AATwWq//LLxAVCld85Vs7BTAbXCS3BOU9zC8aGFGN'}], 'preferences': ['LP6af8ecZ1fnHlJHDwgoxtcrumA=', '1B6XpQQtYu1PoY2RIbmeTs6toSQ=', 'kOlNu40ieAHoji8WH4+UjcibgDo=', 'OG+DWyUpuzPANJ2UgJsytADlD0c=', '+qohovnhrGfEv9DdViMcrUelvvM=', 'gK1g3cUI798v7qUaQS72kGu54fA=', 'uJlquOPJAsFov2qw4ZQ5bf/HNWY=', 'pgIu2vPz4SPTNFdp18b8k5uTHdI=', '6mRQNcMS37jO/Wj319rxlwubXQk=', 'IeLCWId1zhYalBlK5SjivLtgLtI=', 'GWorEtJ6ZqXlhQab2ii63k54XYk=', 'TrYTjkl8/A1tvkabsrEowdullhY=', 'WwKu7NzLQcTvdkNHoJ9SPQTO4zI=', 'QbqNhUi2RdqfFgCZX+m4z41P9to=', 'IDdQHjurLgzgODG5zlM6bp1Z39Q=', '4m+Rr8fXZTTISbwDFzWGokVEF+o=', 'vF0Tnxzr/vFUqUMmMNnigQnaIGU=', '6mUJ/9LABVNUduvopiOA85EOLMY=']} 
+1188691759.5 SEND_MSG 61.106.172.157 27015 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAP+GT7+XE/dLkop0W/I1xXrBhmc+fqu6TffLwnKXAVhSu5uoQ+qswEYvJvlsoWhY+jm0FOjAmkFiWKlk 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '186.137.61.228', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '225.68.59.247', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '60.111.83.85', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '58.56.2.96', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '204.155.2.39', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691760.5 CONN_ADD 45.82.144.133 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 6 
+1188691760.6 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 6 6 {'q': 'SIMPLE xxx pics ', 'id': '\x96\x95\xaa\xb0r\x99#\x8e\xed+\xe8N\xcb\x12#\x00\xd8\xce\xac\x11'} 
+1188691760.6 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 0 'd1:ade2:id20:\x96\x95\xaa\xb0r\x99#\x8e\xed+\xe8N\xcb\x12#\x00\xd8\xce\xac\x11e' 
+1188691762.6 CONN_DEL 231.63.33.78 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAgGrWi/6zYhs0JQ9m2WzWU7nRxZm945X1koeD5sAT17ge2ZV2AMCGb9SafAQzpHsQQgDzy6vQyBDLnQ overlayswarm:connection lost 
+1188691764.9 CONN_ADD 164.12.108.27 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa13mk9o7KHVuB22N1D2kZVdoWYBCshMFODUlGJQAVB3PdPpR52Ta0S2gMm61kBG23yJEX1yg9lL17Lp 2 
+1188691764.9 RECV_MSG 180.251.109.147 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa13mk9o7KHVuB22N1D2kZVdoWYBCshMFODUlGJQAVB3PdPpR52Ta0S2gMm61kBG23yJEX1yg9lL17Lp 2 PASSIVE_BC {'name': 'tramamurthiT60', 'taste buddies': [{'ip': '121.55.127.13', 'age': 15019, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf/Qgr2M4qWSCeCpUmD7q1Ys6lsBvxO5ryLk4ubJACPUNZO9vYh1+ZyCI8H90TjqaHHifP/kLElUNdeA', 'port': 7762, 'preferences': ['MB39kMlya3sH4YOhs/BVZe/Qjts=', 'XKkZE+X+7O8BPCr7QmXGN34lQuc=', 'ei4wVaQ0CADqmpVCWgxRUa0jJIA=', 'tpxXyg46B6RtX95NtOgiC6ib3n4=', 'AtMu39Z44y+0sTbegvjk8Wz9/Y8=', 'cKJzGSiyMNSHAIgCSRdvFwv4TtI=', '1xj/pZMc3yu1Se0GBYDFA3SfiY4=', 'deUf7eJ2/Gz8SOCm2/fc80RMgQw=', 'dPQo3cdekpj91PNxVJG4quw6XOc=', '7n1TzDbAhreJQJEGVDEYgvK8a4Y=']}, {'ip': '150.11.82.37', 'age': 471, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf6gOSE2js+HPOG33c9tn/WP7+F/a0BJeEVWGmF1ABugcVPbltse6zxC7V/bttMZM8E1m4a7uJRSr9o1', 'port': 6881, 'preferences': ['nuLOT1wrI7VFDOx/uGFvFk1X8jQ=', 'iGQwPM3GNyUuGirWAh+AkdRfCWM=', 'vawqx6EfXXMYwHLnPZXUlSQuEzU=', '45B0+fQJ8dt0wSLCaJCVZJJkX/I=', '8Gmk640GxgLcAR3+HI8CKmYJs54=', 'k87oBwWHGPw+bDhnxbqe+FYYs9c=', 'N2YeeoTyIkx4ZVw9SfZnin2VVMQ=', 'qdpoREnQkI8gIZHd4RJAry6flmI=', 'xdz9CCrcnGW8QjgRwN9Me6f96aI=', 'YFbPCtAWV5j8OGjHgBZ0I+3+Wl8=']}], 'random peers': [{'ip': '231.75.106.166', 'age': 1099, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAdARdTaTVx70shayfXP2NFsusMIYQ4TMGK1oC9smAKq9FEnai/K/G7LLe9Q1qy+EkkRw+SkgQ8kJI8bS'}, {'ip': '95.27.14.137', 'age': 734, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAafzePWnI47N1g6ROkpmApsAblUXIffEpaEjDtnKALM/ZRDuZXIs1dmQ7OwroiCVQWghoh+JIz2z+S9O'}, {'ip': '190.12.90.78', 'age': 10737, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGw70vVq+GjUaUV3ctKb/yN+9dXmWV3KvLPxwiMyAVLUDhItnlFSaNU2o4uN8LzblqwAxM5S7M6T5Tf8'}, {'ip': '19.64.131.156', 'age': 435, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc2kgAglCiBl5jRJRbZhNRJlArOtDMU5+XGt0dT4AfelHvcR0H0gyPQnFOu8qfGt+gV1mvW9WUBfgp3S'}, {'ip': '1.243.251.107', 
'age': 244, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASXq28ZXiH3YtMG4d2ShG4E3FuNaJ+taG23j4jNVAPiSjBEfG+Kt0aqqzqYuaai7wSeSSU1Y3JYm4lWo'}, {'ip': '212.15.88.231', 'age': 3754, 'port': 6900, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAR7REPX1/iVF7UI5ihNdWL63hnXcGoDU94Ar5jcyAcbz0KgVHGQ5W4RGa1q/UPUSp9F8xhDe/EEsY29r'}, {'ip': '2.143.147.80', 'age': 768, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAEc0shixjEQ172uCu/Yn6ljHgRtAFD6v57TqrpvyAY25yDsXOgcheUS1OHGecEEbitqwvSDJJJ87V4Gb'}, {'ip': '201.223.211.199', 'age': 1208, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMUp99cHufeSV2g45E2HQAr00KRt1w5HIOC4FdTMAUImHzUDyhVzZ3zYfPVuHP1WyfqZtOlWSYsht90H'}, {'ip': '203.206.121.188', 'age': 3291, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAKRY1IWarlzROyWxsHJsv86JYOJwiz7OmSg72QVAWwPVLjoavTvj3aE8mz9H0e6yjPvKzyFMRXx2Fmu'}, {'ip': '134.42.107.250', 'age': 2905, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYs6SuY10JkHDDExd3wzuwVXSpEuslWyWbZKImRWAL6MbXLpLm199/9Z62m1tUdv+g7sU0szV1d62+T9'}], 'preferences': ['EYqbyO6Iil/KIBfuSJtXguoCnF4=', 'hzc08b19a46qRIKFe6l2/ttttQs=', 'lZgjM6IYDiNlNtGR6wmEPdv+umA=', 'r/O9ycQ3sUXwPcyOZJqeFis27qc=', 'MoM3+B/Oj2pJJWeTtJzXqWZoNc8=', 'KBtzfVX5x7Kgp4YLRPUmXNpDouM=']} 
+1188691764.9 SEND_MSG 166.177.181.180 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa13mk9o7KHVuB22N1D2kZVdoWYBCshMFODUlGJQAVB3PdPpR52Ta0S2gMm61kBG23yJEX1yg9lL17Lp 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '227.78.109.65', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '191.157.183.146', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '209.253.154.89', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '93.232.40.200', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '174.126.105.139', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691766.5 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 6 7 {'q': 'SIMPLE xxx pics ', 'id': '\x84\xc1\xa6d\xb0\x83\xa6\x81\x87\x07s!^H #E\x96\x1b\xb1'} 
+1188691766.5 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 0 'd1:ade2:id20:\x84\xc1\xa6d\xb0\x83\xa6\x81\x87\x07s!^H #E\x96\x1b\xb1e' 
+1188691767.0 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 6 8 {'q': 'SIMPLE xxx pics ', 'id': '\xado\xde46\x86\xea\x14\x8d\xa0\x0cE\xfd\xc8\xc5c\x98*b\xba'} 
+1188691767.0 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 0 'd1:ade2:id20:\xado\xde46\x86\xea\x14\x8d\xa0\x0cE\xfd\xc8\xc5c\x98*b\xbae' 
+1188691767.3 CONN_ADD 129.191.116.90 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa3XDltuAD36plQtqTl3BREqKq10aI/pNmRu63ccAKrJqLlp6qSXnyCLtLe1s6I43m5Fsh8veQM72U4N 2 
+1188691767.3 RECV_MSG 130.37.139.64 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa3XDltuAD36plQtqTl3BREqKq10aI/pNmRu63ccAKrJqLlp6qSXnyCLtLe1s6I43m5Fsh8veQM72U4N 2 PASSIVE_BC {'name': 'YOUR-26A3A119BA', 'taste buddies': [], 'random peers': [{'ip': '225.38.26.132', 'age': 1736, 'port': 7763, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYFXFYpL4PrXTXZQsFzltKDbxejCl6vIfiDVr1y2AHM/VEZFr0A9nkNFGk0nwmV9ItrmByIu1+ftQedb'}, {'ip': '208.29.176.178', 'age': 4728, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbtf77j4oPM5N78zhaHV6hkBcKy7tPpP7VbrsqFVAWyAxQfc5Rui9ESVjPTnEnPyoxtv06/mimi3nGNH'}, {'ip': '218.149.239.217', 'age': 18151, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAU5eRWgNG2/fbbOpvQKYtnylbBRxxShk1oRIfSmnAAYfQ6o0P8Utb+NFWzsPQFyPrBq5UYlFnj3QXKgV'}, {'ip': '154.211.212.209', 'age': 4658, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbE4oWI0rzijk25+BxyDXPmLtT8c1Zq5PPY2F3e+ACUVUlE6IdhCzz2gruieqV41Hzq+McpDGr6R0Wqi'}, {'ip': '41.162.5.161', 'age': 5649, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARP9sXdWSg1HkU4lmBdVZjcxlxflrFYuk9oJ1YUqAHswbu53xBy4I5EGvo16scGVsVZ6k7dseFEND8hS'}, {'ip': '153.107.253.20', 'age': 1351, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUTy1JNhEeDK2DPAED1PO0TSVmuyqzMCiVfKJOAaAb6bB5PU18g27uE94NRSFPvKxXn+nP3VwR+vDjT6'}, {'ip': '43.73.182.62', 'age': 3328, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEALu016jxKxmqmT9YzebvpeiFNVOf1jBmRKskNbl8AQ9TAsGpLvd0bzEFZl3d91S1UFrbP5sq0RZbyxxR'}, {'ip': '217.87.164.16', 'age': 6875, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeeGlhaf4xDqAMaka1t+Ut46wjsB4hXG+05HerJzAKil7Ob1wwONX2UtP4Q6qiFGDUGBrRDqoOBvMTLQ'}, {'ip': '164.60.158.210', 'age': 13607, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQBujhg/W/4jQc9AMf76XQOE28N/A3QumH4N+RJFAaK8P2KKXXJkGUEGA/AIp+IWfKQUr0CAWv1wa0Eo'}, {'ip': '181.251.114.115', 'age': 8835, 'port': 7762, 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbnyikPsiB7d99RA+gCwdZdzx+nzKlUwOQHTyM3bAbCu23yucTKoEct2NDMsGJlDNjQzIa3AboWLcqPm'}], 'preferences': ['x19eV0f1yOKJz0Hy8U/McUVIn4U=', '/k6E7Bnz22HAcaoEhQvNVRslynY=', 'Zd+UxlyAKOwk+1jCVDrfWXwjY/Q=', '7RgbKeESUidJil28NmmiqRL9opQ=', 'uX6NJI3rLetFoGLdp1yISrOeiTw=', 'qM86infbjaMi/N+P6szZr/PJZbw=', '7hxAn9tBC2ayUtQvMylzSGuyDzs=', 'ISGiJ/zvuWVVJJuFQp7702oNer8=', 'JNRHViRykMzGKkSNTqRtmyBJvdU=', 'UZLdxJH8jEy8OsMFtu4WXGADGCo=', 'e3TWHqqZAZmH6D1tfDVKrZu4QxM=', 'wwzxfcfZ5oj8OpUlSyiE7/W4bdk=', '6Pet1QbgnfZivG9MoMEqKYOb++M=', 'Ml7zthtHkCAQDxBWLpINUesAALc=', 'qAUVNjDjsUCAl2iNQHjlzK21/iM=', 'nW3zUXTSsMcqip7L3DulxRxXdnk=', 'RtNxtc8/7LA2DtbGxnKbzlYm3O8=', 'yNY2rJpMyDSWZwO7P/ag7HFWMlk=', 'K4bA614Ig0laQ29Xk8lhsx8QdN0=', 'c8jl4Wm3lFPJgKEpSywegRWQY/o=', 'UKxoEpKs48Yp9/NeH7lrRXRN22s=', '4F1TjVjXe9N9OtE/0VDXthRmY7I=', '4x5wU88R84LCp1BU+kY6gHONb5g=', 'zwXP67/KO0W2cN+2cqFJo8NKPiM=', 'jSC/XiUi1QFH0lPOvfVLJPisXgs=', 'De/D4bm2Hie7GYKX7ztKixR+Cao=', 'vmmMe/S3VDVttUAYs6eBoYDOrIQ=', 'M+W9oHAncxUEyNmDM7J+ZjG5kPY=', 'bqQM5DMURd42c92dCbdF9D0wArI=', 'YSwtK+yoCOKdHu0KATP/UtCypYw=', 'K1s3GUQ2W+YWsvbTMnPr9uWMIco=']} 
+1188691767.4 SEND_MSG 81.215.240.221 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAa3XDltuAD36plQtqTl3BREqKq10aI/pNmRu63ccAKrJqLlp6qSXnyCLtLe1s6I43m5Fsh8veQM72U4N 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '125.212.13.37', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '243.35.240.249', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '32.255.117.231', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '216.94.12.63', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}, {'ip': '114.130.45.47', 'age': 0, 'port': 7770, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL'}], 'preferences': []} 
+1188691768.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 6 9 {'q': 'SIMPLE xxx pics ', 'id': 'A"\x8d\xe8\xdd\xef\x94Z\xa8\x87\xee,vr\xb7\xeb\x11Xo\xe9'} 
+1188691768.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 0 'd1:ade2:id20:A"\x8d\xe8\xdd\xef\x94Z\xa8\x87\xee,vr\xb7\xeb\x11Xo\xe9e' 
+1188691770.5 CONN_DEL 196.139.148.21 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQaoAeWfOp7GOtb+Bp6ye4g5u4Cvi0QJ3Bd+qh4xAYrsiW2q94YV9OdPW1NdDq2eL037FruPLsiVVCIb overlayswarm:connection lost 
+1188691771.1 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 6 10 {'q': 'SIMPLE xxx pictures ', 'id': '\xe0`\xc8><\xba\xbe\x8bVVkA\xaa\x82\xb2\x84\xd1E>\xad'} 
+1188691771.1 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcHSJCEhJ73ukBZrLfFDEntB5ffgLmGMp7cgb3wWAJDwq7IJ6RdHG5RFUWeSttNAuE18W3mJD3+YI92p 0 'd1:ade2:id20:\xe0`\xc8><\xba\xbe\x8bVVkA\xaa\x82\xb2\x84\xd1E>\xade' 
+1188691773.4 CONN_DEL 70.48.134.3 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG3x28Nmv3xXbHFkklCYYDRpWjbkaC+sqJrqw0jAAA9ITdag/q/NU7aYE9mU8qlZkGWXz4PQjqVDVlQ3 overlayswarm:connection lost 
+1188691774.1 CONN_ADD 212.165.62.98 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/ 6 
+1188691774.2 RECV_MSG 46.91.31.128 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/ 6 PASSIVE_BC {'preferences': ['sNVrlTXyHfT8bt+qdABeXdSe1u8=', 'BEjijBZiHe43UgGhFo9KI2kbJQ0='], 'ndls': 2, 'taste buddies': [{'ip': '70.60.118.66', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfBO5z5U7ECMie7g3AaYx+W6HiEBaATYS8Gpe0XfASNs8jjNKqkCv/AnJM3VWMPf6iLEcj6yr9pfuaTo', 'port': 7762, 'similarity': 487}, {'ip': '219.76.65.86', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAC9+dgTwgTYeOiQnAsjtO5drURgnOU+zS86O6ZhQAY8LdWFYK6XUCIg9fU/hMsQ0IHAEhp/2yZgSUzKc', 'port': 5737, 'similarity': 423}, {'ip': '245.38.42.35', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAasSiWmjtEQZzoq2Gps6ksigoU+NBE7zgSKCDkgfARoGe3e41eIEoGLEE+knnqEPYCTSZRYEqtDKBO5i', 'port': 6881, 'similarity': 394}, {'ip': '181.87.94.13', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3Ssv9ZolvKrWfU2ronUNaevYIZe94hNuMhMD6QAA+b6kryXUMqjOxaw9pMnRdWCO6g+yrtPQNrDaO1', 'port': 7762, 'similarity': 344}, {'ip': '157.53.38.62', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAVXdEQr8+vLoVJW4uQij3UX9IVFB6f2Nx71RkW4PAHxDzqwG6PQYbW3+CQuE9cRQ1/pW7ZHZklckxF8B', 'port': 7762, 'similarity': 196}, {'ip': '231.215.127.38', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAaAA3DYMKUXcc8OZFuWhvhqXG65vRl4XMCQFZcwPAb92ocW6muV4lfwPvkY+VqdYAMD6I17RQcucAe/x', 'port': 6881, 'similarity': 172}, {'ip': '63.61.6.57', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYTdhiBFbQqPOwFSK0KwoMg+WMALvV1VjZsORAQgAEMdRQnvuh2SGo2S0TV7XaNzeLG7bNcduSWRLpWC', 'port': 6881, 'similarity': 172}, {'similarity': 145, 'ip': '254.68.107.224', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOCdNvgsa7J6sRgq//Rvh3w9WDKfIjVs5xuxfa4GAFO+2HUkDELPnUA+xCz3GHqNsONSINtVHbUJQjb9', 'oversion': 6, 'nfiles': 599, 'port': 7764}, {'ip': '145.166.111.90', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG844xlfmISp1w9tS5WzvDiWFUX97ML06lrQIS/CANqC6If8SzrSIVp7jR9iUB3qzakNAL1ZB4lEs6ut', 'port': 6881, 'similarity': 103}, {'similarity': 86, 'ip': 
'147.57.157.129', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXNTe1C0wQ65HrmCNN6loRK2neW2VJyRg/Lr4qzLAXbudGqc5L+fJU5S3+rinBdRoscSZsgCRuD92TeI', 'oversion': 6, 'nfiles': 204, 'port': 7764}], 'connectable': 1, 'npeers': 993, 'random peers': [{'ip': '129.100.1.34', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQO08RrE/kBx9VPCjx8I30vMvi85H1UbAbURu9T+AT9uJSS6lQG05GDYqkUqepL8HJCQZPxjX0nDzhgv', 'port': 7771, 'similarity': 0}, {'ip': '45.166.94.170', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYeaL9ZgSj6HeLn0t7gDIdgF5GuMomfjjaZ4N7DHAIKSO1ZWbLuNPfK0MXIGod4LZ2Q3OWeC2K4gW/se', 'port': 7765, 'similarity': 0}, {'similarity': 42, 'ip': '252.173.226.175', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAePcm0yk3Nn3S0ir0XRtlrAXyBkpmb2hx/qP3GioAN6MeSh75pgR+oDfwHdMPXUUG8Wy6QeuZqeH+/cx', 'oversion': 6, 'nfiles': 367, 'port': 7773}, {'similarity': 12, 'ip': '251.104.142.193', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFY3lSHHMzF8T7LFp+1G2+y9UNO56SSGZLfDOYZtAAkKJ85kAZrooaafKgSi3fKv8oTBgFAHU7Dw6s9Z', 'oversion': 6, 'nfiles': 128, 'port': 7772}, {'ip': '156.119.94.155', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAJ1FB3Mzb/ZpdA1kmu1HUp9fW5SbH0k0+/9uT+7Aa0BjGlOd+EQchLxqQZD7EHBfQ239nzv3GvjKD7/', 'port': 7762, 'similarity': 70}, {'ip': '196.70.61.163', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP', 'port': 7766, 'similarity': 17}, {'ip': '13.205.78.21', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'similarity': 0, 'ip': '4.221.236.39', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKo8Fhg6hCUVOCU5W0Y24YKJNixk400VmTFV8LDAAHl0tfe8piy2xRzuN83VlYiMz+wqhV7CMqtCGDZR', 'oversion': 6, 'nfiles': 576, 'port': 7766}, {'similarity': 0, 'ip': '5.47.1.22', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARNZfv2M6KWAxEqfFcdG0bY1cV7mObZt5Rsx52QyAbkS25gWunh3rG5DqcECFvXbT9tSH3LHH7z6y2nB', 'oversion': 6, 'nfiles': 216, 'port': 
7762}], 'collected torrents': ['\xd3F^\x81\x12y\x00\x86\x12\xee\xa6\xa3H\xcd\x06\xce\xc2\xc6k\\', '{\xac\x98\xec\x02\x87\xb2\x95\xd7k\xcc\xcd\x14\x8f\xads\xfc\xdd&\xda', '\x9c\xb8\x85\xaf\x82vz\x9au,\xf2\x05\x0b_\x94C\xe1\x90\xc2-', '\xb8\xec\x0b4\x9f\xf9\xbb*!<\x94\x81\xf96\x9dE\xc7$\xe1z', '\xdd\x81%qdy\xbf\x15\xf8\x14\xf0\xedF\t(\xad\xa8\xfeZ!', '\x87x\x00\xe5V$\x9e~`Tt\xa2\x1fFo\x96U\xa9-\xe5', 'S\xd7mH\xef\x07Y\xe29\xe2k\xd4\xbd\xf3\xa4\x05*\x9aI5', '\xfb\xbfP\n\x81\xa9\x90\xbd\x0f\xb4uP\x80a\xa8\x1e\xa6\xcan.', '0\xfa\x8dg3\xb0\xa0\xf2\xb4\x19\xd1\x82\x9c\x94\xf50\xca)M:', 'W\x91\xe4\xbe\xf9\xec\xfb\xd3\xe9\xb2R\x15\x95\xbcU\x90\xca\xd0\xb3L', 'w[E<\x1cr\xf2P/\xa6\x10Ov\xff~\xef\xb8\x99\xa1\xf7', '\x08X39\x0eL\xc6wv`\xcc\xbc\xfe\xc1\x14\xd7X\xd0}\x8a', '}V\xa9\x90$\x80\x98\xba\x9fb\xd6c\x91\x0c\xf1\x9f\xdbd]A', '3r8\xe8\xb1\x8a\xc3\xd4\xf2H"\xd8\x10\xfe\x19Ar\x8d2^', "\x07.\xd7`\xe4s\x7f \x83\xa8\x86\xb1zXg\x81u'![", '"\xe5.0\xd9\xb8{z6sn-\x06\x86\xcf-\xed\xb2\xba\x04', '\xf5\x11{\xfc\xf5\xe1\x04\xc6\xf1t\xc1R<\xe1\xb6\xec\x7f\x9d4&', '\x1d\x1c\x84~\xc4\x9b""3\x96\x95O\x80\xe8\xd34p&\xef\xa6', 'W\xe2d\xaf\xf7\x8f\xd9j\xac\xd6\xfc\x9a\xc8\xbc\x88\x9f\xbf\x8bv\xd9', '\x03;\xb4\xc6\xbay\x11&\xef\x0f\x88\xb6\xf6\xd4\xd8V\x10k\x84\x10', '\xbb\xb926n\xf8\xca\xc7\xc2\x8e\x14\xedk\x1d\xdb\xfc\x96@%\xef', '\xe6dM\x03\x8bs\\-q8e\xef\xff\xa7Z\xa1F\xa2\xac~', 'o\x8dB]\x86$&\xfe?o8e\xbf\x1b\x8a\xbb\x13Z\xdd*', '[S\x8do\xc4\xf7}:\xa6\x0b\xac&?\x10\x9e\x07\x1f&8@', '\xcfg\x1a\n/\x1b\xea\t\x07\xe1\xe3\xf5\xb2\x88\xc1\xeas\xb6\x9d\xb7', 'u\x17\xc5\x0c\x12\xe9\x9e\x13\x95B\xbc\xa6;\x7f\xc2\xcb\xdf\x85\xc1N', '\r\xa4P3c\xa7\t\x99\x86a&#\xaf\xb6m\x97\x93\x81T\xd6', 'LA\xa1+F\t8_\x84}!\xaf"E\x9c\x9d\xe3QJ\x99', "v\x0b\x8bw\xbfM\x1f\xa9y\x92\x12Y\xa3\xc9l\x19\x7f\xce'\xf9", '\x96\xca\xd1d\x84\xfc\xf7\xeej\xd5\x157|g\x1e\xbfB\xc7\xcfD', '\x04\xbfl\xef\xcd5\xaf\x14\xddP\x9f\xa5\xf1\x03\x05\x8f\x13n\x1b\xe1', '\xc61\xb0\x9d\x9b\xc5\x1a\xb6\xe8\xbbm\xd67/$-\xcb\x9a\x15\xed', 
'I\xc8\x00\xab\xcb\x17\x07t\x00\xa9I\x06\xf1Y\x86\xce8\x86r\x96', '\x16\xa8\xa6I\x12\x11\x9c\x03\x15\xea\xa7\xfeM\x12\xbd\xcf\x8ce\x9d0', '\xb6re\xf1Yi\xf4xW\x7f\xd1,\xaeO\x90\xff\xb3BN\xc6', '\x82\x99\xceh[\xc2X\x17\xf3\xc7\xb6\xab0\xb8b\xd3A>Ve', '\xe1|I\xc0I\xb0\x1d\x08\xbb\xef7\x8f*\xbe\xf9G\x91\xbb\xce\xb9', '\xf6\xd3s~n\xf8Z\xbf6\xa0^\x92\x92\xea\x84\x7f\x12\xdd\xd2\xba', '\x8c\x190/\x01\xbe/\t*\r=\x04\xbf\x13\xdf\xfc\x03&\xeep', 'F\xd3q\xb5\xcf?\xec\xb06\x0e\xd6\xc6\xc6r\x9b\xceV&\xdc\xef', '\x18\xd9\xf5\x84\xb1\xfc\x0cW\t\xba\xd6\x9cs\x18N\x14\xbay\xcdv', '\x03f&\xa9\xf9]\x02>\x87\x11\xb6Eb\xbe\x8b\x02\xbe\x89\xb7n', '@\xfen\xdc\x8a~h;\xb5\xef\xb9E\x1c\xbb\xab\xdb\x81\x88\x9b\x01', '\x1b\xef1\xd3\x04\xda\x86\xac\xe7\xcb\xe2<\xd9\x93\x99-C\x15\xf9\x89', '\xb9\x1f\xc1k\xe2V\r\xc6\xc0\xb7ca\xc5~\x01\xbd\x04\xf8\xd86', '\xb3/\x96\x9d_\xec#\x9e\x01`P+\x15Lh\x176\xb3\xc1\xed', '\xddW\xbd\xa2\x1fS\xfc\xff\x9fJ\xc1N{\xfb;Tx\r\t\xcb', '\xf6\x94!:R\xba\x9d\\x\xd9:\xe0\xd9B\x0c\x8b\xa6\t\xed-', 'Y\x88f\xe4\x81\xa8\xd8\x1a F\xd0N\x8f\xb6\xbc\xde\x96\x8f\xa1c', '\xe7\x9d\xd44);\x04F\xee%\xb7"\x13\xfcN\xcf1?\x03\xe1'], 'nfiles': 158, 'name': 'd142-058-122-093.wireless.sfu.ca'} 
+1188691774.2 SEND_MSG 17.126.166.194 7773 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/ 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2044, 'random peers': [{'ip': '218.67.10.247', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '254.117.207.204', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '192.130.118.62', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '158.232.80.175', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '127.57.48.107', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL', 'port': 7770, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXNTe1C0wQ65HrmCNN6loRK2neW2VJyRg/Lr4qzLAXbudGqc5L+fJU5S3+rinBdRoscSZsgCRuD92TeI', 'ip': '84.1.189.77', 'similarity': 0, 'port': 7764, 'nfiles': 220, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAePcm0yk3Nn3S0ir0XRtlrAXyBkpmb2hx/qP3GioAN6MeSh75pgR+oDfwHdMPXUUG8Wy6QeuZqeH+/cx', 'ip': '107.113.116.221', 'similarity': 0, 'port': 7773, 'nfiles': 250, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691780.3 CONN_DEL 190.158.30.8 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAR1XefmCtOtQjb4WIwRQ35aW291/aiAwcYwJJmpLAbfNeYOxyUVaD5Mx7Cj111APjT32HOo27a8rjNUj overlayswarm:connection lost 
+1188691781.3 CONN_ADD 22.122.181.190 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP 6 
+1188691781.3 RECV_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP 6 0 {'q': 'SIMPLE rush hour 3 ', 'id': '\xc6LA0\xcb\xb3\xa0\xe2`60\xb0\t)\x9e\xd3\x04J\xf6y'} 
+1188691781.3 RPLY_QRY MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP 0 'd1:ade2:id20:\xc6LA0\xcb\xb3\xa0\xe2`60\xb0\t)\x9e\xd3\x04J\xf6ye' 
+1188691781.3 CONN_DEL 63.235.196.227 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWsUI7tim8Ewnr0mOTHrcXun35ZDjXs9g5RJwbe5AEzFLWXXD0qtsCuH6bRhegUyO9DuCCib/SFBS8FL overlayswarm:connection lost 
+1188691781.7 CONN_ADD 224.248.227.28 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM44GNZk/duKmUjSFuTYLo6AgwrlalBiXC63EApIAVvzEyUqbFxwP2ABhjjZdsaEScKjagGj4eW0k0Lb 2 
+1188691789.4 CONN_ADD 28.47.46.255 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp 6 
+1188691789.4 RECV_MSG 248.185.127.67 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp 6 PASSIVE_BC {'preferences': ['PTJdWfl4jlZa8SRtJzYnkVMA5xk='], 'ndls': 1, 'taste buddies': [], 'connectable': 1, 'npeers': 314, 'random peers': [{'similarity': 0, 'ip': '167.90.34.239', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'oversion': 6, 'nfiles': 324, 'port': 7766}, {'ip': '0.31.92.117', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabmO60ycDR0WHabIJzv0UsmfcfB+qhPC1pgXozoAUIxCPLgkp1RG92DlC7usSWtbSoDf1U23PbkvcZg', 'port': 7768, 'similarity': 0}, {'ip': '150.248.119.105', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbduv2Rl5EHyWP2/LgAKn9kx2rLl7y87jy831qIDAAH47ysyElQYddQ51J3U7sbHz+9gRINYnk6MTth9', 'port': 6881, 'similarity': 0}, {'ip': '240.142.202.45', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAtvv4q1ltSVnLdzDyiqH3hRBuA6kVcZ1gG1k2cjAKnzprHaQvcW8Mp+KkjuKO+8VFUE8I6QauBPwGl6', 'port': 7762, 'similarity': 0}, {'ip': '111.146.46.250', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcPTCdalIDnRvNQWxVB+0130gFAWUbeX3J2KJEy7AKWv3dA+yIYuHbrqUCpq5acsQBpPBV/rb0c9Gxir', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '197.20.81.29', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'oversion': 6, 'nfiles': 159, 'port': 7773}, {'similarity': 0, 'ip': '170.165.103.94', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'oversion': 6, 'nfiles': 324, 'port': 7766}, {'ip': '202.67.239.192', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASg7ygXK/gxWAdise/kVKfnyVK6hg10RYEcnU2KlAStvAdLfZg5zpSxBdcbsf8/83YuON50ihVmDYdcF', 'port': 6881, 'similarity': 0}, {'similarity': 0, 'ip': '99.141.238.249', 'permid': 
'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYhIM6iNZt/MVARJ8CzkewxHqldUlCnV7B8WfgAeAPbOfvac3eRO/6E9XiSofeb4R/1VVnRZXUBwI4s9', 'oversion': 6, 'nfiles': 103, 'port': 7773}, {'ip': '223.159.3.91', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3Ssv9ZolvKrWfU2ronUNaevYIZe94hNuMhMD6QAA+b6kryXUMqjOxaw9pMnRdWCO6g+yrtPQNrDaO1', 'port': 7762, 'similarity': 0}], 'collected torrents': ['KW\nd\xbc\x19\xd7\xacJ$\xa9\xe6\x0f_\x80L\x11\rM\x18', '\x8a?\xc7\x01\x1a\xea_\xde\xc4\x9f\xe3k\x92T\xe8\xb2\x01}q]', "=2]Y\xf9x\x8eVZ\xf1$m'6'\x91S\x00\xe7\x19", '\\\xc4#Y*\x08\xb1\xbe"\\\x15\x93\x0b\xee\xeeS|\xa4B\x98', '\xe8\xcaz\x14\xe1\xb2\xb5+\x1f\xe5\x1c\xd6\xf0w\xc2\xa4\xd0Z\xf9\x8c', 'D\xc4\x1c\xfd5s\x00\r\xd7\xa6<RWK\x8c6\xe1\x14\xeaU', "gG\x1b\x80\tY\xb4\xe4U~\xf7\xd6\x07\x14\xaa\x9a\xc0\xe5'S", '|\xc5|\x93\xf6G\xd9\xc3\x18\xcb\x9f\xf7\xb9$\x97\xe3(*\x8by', '\xd4\x07\x06\xb4.\x85/T\xc1\xe0\x19W\xfeq\xb3\x08\xca\x85\xe6\x89', '26.\xd8^\x9bQ\xc7[\xa0\x98s\x9b\x02x\x00\xab=x\xa9', '\xdb\xb4\xbb\xf5\xbet\xef\xb6@P\x19\x9d\x94\xb8\xbe\xdc\x17*\xe1\xf8', '^K\x04CHM\xebt\xac\x04\x8cH\\9\x1f\xe7\xdcc\xf4c', '\x7fA\xbe\xaeL\xff-2\xff\x1d\x95\x9c\x1bF\x82G\x03\xa1\xdd\xe1', '\xac\xe1\xcfi`\x07bWY\x8c*\xd1\x04}\x9e\xaf\x91\x96\xd2\xc2', '\xd9\xedD\x7fSE\x96\xfc\xac\xf1\xe9\x0e\xf1a\xf6\xdb\x13J\xc9\xa0', '\xec\xc8c\x90+\x9a\xc5\x801\x7f\xf5{MZ\xd1W\xaa\xfe\xa8\xe4', '1M\xb0\x80\x188v\x9b\xfd\x103zp\x86T\x1c\xf2\xf4\xe4M', '*{\x1e~\x83\x02-\xad"\x12c\xeb\x89>\x88-\xd5\xa3\x97\xb5', 'RM[\xf7\xf1\xbeXG\xccU\xe8\xd7\xe6\x9d\xab\xf5E4\x0f\x9c', 'G\xeep\x93\\\\\x97\xf2\x96\xc1\t\xf4\x82oV\x1bR\x1e\xb5\xbe', '3.*\x0e\xbbvp@\xd3Mh;\xbbH\xe4\xf5vN\xea)', '\xbd\x1e\xba9\x11\x11\xf8\xa2\x05\xb8\xa2)\xf6\x96<\xe6t\x05\x88\xf7', 'k\xb5\xe5\xa0)\x0f\xa7X\xda\xf1,\x12e{l\xeb\t\xcc4\xfd', '\xaen\x0c\x9d\x07\x81\xae.)Z\xbf&:o\xd1a\x19q\xa2\x85', '\xb0\xd5k\x955\xf2\x1d\xf4\xfcn\xdf\xaat\x00^]\xd4\x9e\xd6\xef', '\xebm7\xbf\x8c\x0bq\r\xb9\x13\x17\x9fE\x9a}/\\\xe19\x1d', 
'u\xe5\x04\x87\xe5\n7\x10\x8a\xd5\xdd\xcc\xe6Pb6\xc50\xa2W'], 'nfiles': 27, 'name': 'tower'} 
+1188691789.4 SEND_MSG 254.187.232.188 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2051, 'random peers': [{'ip': '202.204.43.77', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'ip': '43.117.128.189', 'similarity': 0, 'port': 7773, 'nfiles': 158, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '210.94.119.23', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '24.153.167.37', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '81.114.135.65', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '146.95.212.130', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARNZfv2M6KWAxEqfFcdG0bY1cV7mObZt5Rsx52QyAbkS25gWunh3rG5DqcECFvXbT9tSH3LHH7z6y2nB', 'port': 7762, 'similarity': 0}, {'ip': '190.102.38.81', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'port': 7766, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691792.7 CONN_ADD 51.118.147.185 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPGMgNwsurAyw1VontFTLMNw02wvvZpCEN0mBiHAAAsUN0TSR9/a/4jFElE0/yB32K3s694Z1yVicr1C 6 
+1188691792.7 RECV_MSG 136.41.81.193 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPGMgNwsurAyw1VontFTLMNw02wvvZpCEN0mBiHAAAsUN0TSR9/a/4jFElE0/yB32K3s694Z1yVicr1C 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 8, 'random peers': [{'ip': '229.122.237.190', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu', 'port': 7005, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'Shiny'} 
+1188691794.1 SEND_MSG 60.69.229.40 7772 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPGMgNwsurAyw1VontFTLMNw02wvvZpCEN0mBiHAAAsUN0TSR9/a/4jFElE0/yB32K3s694Z1yVicr1C 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2052, 'random peers': [{'ip': '49.32.15.128', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp', 'port': 7764, 'similarity': 0}, {'ip': '45.102.103.226', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'ip': '158.207.250.81', 'similarity': 0, 'port': 7773, 'nfiles': 158, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '67.221.218.12', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '23.126.246.92', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '79.7.62.40', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAePcm0yk3Nn3S0ir0XRtlrAXyBkpmb2hx/qP3GioAN6MeSh75pgR+oDfwHdMPXUUG8Wy6QeuZqeH+/cx', 'ip': '214.38.94.178', 'similarity': 0, 'port': 7773, 'nfiles': 250, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXNTe1C0wQ65HrmCNN6loRK2neW2VJyRg/Lr4qzLAXbudGqc5L+fJU5S3+rinBdRoscSZsgCRuD92TeI', 'ip': '24.147.136.111', 'similarity': 0, 'port': 7764, 'nfiles': 220, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691794.1 CONN_DEL 203.96.2.87 7766 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAN5vYcWQ0olarWlvbUxZ1uaMrGfB/4+7ZBqwslkcAfLDu8NFa/MaS3q3OdMiRIKu6j011AUr0ZWlCWIP overlayswarm:connection lost 
+1188691795.2 CONN_ADD 156.172.4.136 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQYqS8AhOPE1vDlHxEwo3U8aJX7LBR83x7SG8tG8AcMar9GDsaE08YUK+aL2yS1IX+PRA0zBEWW7cBog 6 
+1188691796.2 RECV_MSG 156.191.179.234 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQYqS8AhOPE1vDlHxEwo3U8aJX7LBR83x7SG8tG8AcMar9GDsaE08YUK+aL2yS1IX+PRA0zBEWW7cBog 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 15, 'random peers': [{'ip': '220.57.142.196', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAduAK0/ZTjdg/NPd8CD9Q17J10CXqpFyHN5M8m6fAFXBQflBZT/YdH1fYwizR/hnQE4hIKCQTfvKz1pA', 'port': 7004, 'similarity': 0}, {'ip': '4.210.38.31', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk', 'port': 7002, 'similarity': 0}, {'ip': '147.218.10.249', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu', 'port': 7005, 'similarity': 0}, {'ip': '254.98.64.217', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '131.223.194.92', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMXZSh4iM3lvXAJ3EGdO24+B1IzA0RgXlnq5Oq4UAc/3xB9R7dm2d/f5vRYkl00ur6W04fz8zPWR/IuL', 'port': 7767, 'similarity': 0}, {'ip': '193.1.93.75', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQ75ii63ba1zGvHd8Am1hLujksc5yc575R4T6I1RAbNKjt162TB/24WyzXTjqVyCHglFbBlKX6ToEuNl', 'port': 7763, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'illfelde-3pzdnd'} 
+1188691797.1 SEND_MSG 64.35.98.47 7770 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAQYqS8AhOPE1vDlHxEwo3U8aJX7LBR83x7SG8tG8AcMar9GDsaE08YUK+aL2yS1IX+PRA0zBEWW7cBog 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2055, 'random peers': [{'ip': '221.167.186.109', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp', 'port': 7764, 'similarity': 0}, {'ip': '176.181.196.252', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'ip': '154.55.185.193', 'similarity': 0, 'port': 7773, 'nfiles': 158, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '31.175.150.151', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '71.146.252.241', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '4.234.174.136', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '52.152.80.248', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACGR0n9N7KeXlNjVwukU/Jy8B64wS9JYk11s/hLxAaUPwGqn2Z5p8+/iEkccJbjTipxWeZy8lye+RG8G', 'port': 7766, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKo8Fhg6hCUVOCU5W0Y24YKJNixk400VmTFV8LDAAHl0tfe8piy2xRzuN83VlYiMz+wqhV7CMqtCGDZR', 'ip': '19.132.195.99', 'similarity': 0, 'port': 7766, 'nfiles': 436, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691800.0 CONN_ADD 12.71.79.240 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM53CMTzGC8XkXotNwq/6vGFGG/gauPStmTbY3tUAAKOCyNiU9x1tVePAkfdTMP2Quimqrz923eC9rIC 6 
+1188691800.1 CONN_ADD 249.144.204.189 7769 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAG3x28Nmv3xXbHFkklCYYDRpWjbkaC+sqJrqw0jAAA9ITdag/q/NU7aYE9mU8qlZkGWXz4PQjqVDVlQ3 6 
+1188691800.1 RECV_MSG 208.140.216.175 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM53CMTzGC8XkXotNwq/6vGFGG/gauPStmTbY3tUAAKOCyNiU9x1tVePAkfdTMP2Quimqrz923eC9rIC 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 39, 'random peers': [{'ip': '11.12.219.90', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk', 'port': 7002, 'similarity': 0}, {'ip': '154.184.25.178', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc', 'port': 7007, 'similarity': 0}, {'ip': '171.11.81.33', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU', 'port': 7008, 'similarity': 0}, {'similarity': 0, 'ip': '115.89.67.27', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9', 'oversion': 6, 'nfiles': 257, 'port': 7767}, {'ip': '73.140.183.244', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEADONqsOqvhCJH2XaIQJucZdw83Pd19pqXRIg/X/6AemhWGMOmy1t7cKNx7GjJptJMW6rKgqh2l9XTIfN', 'port': 7773, 'similarity': 0}, {'ip': '2.210.173.240', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '225.189.153.54', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWaFUkwZ8HkoF3cloV9j+Tawy6jz8T0UFnJlhgeSAeQbc47mvpGUJ8nowSv1Vi6TIYCvX8RugZSOHVYS', 'port': 7767, 'similarity': 0}, {'ip': '22.180.237.178', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcqYf/ypZKNYka3jtPrfjr+OfQnhJe366bvglbczAGEEcqRJhuRlf7hi87emoXqXpTv3vBYmcgbTBIvY', 'port': 7771, 'similarity': 0}], 'collected torrents': ['\x19%\x1f\xf975\xd2\x8f\xc2\xbf\xda\x1b\x0b\r\x9d\xc0\xa9\xa7\x9b\xd6'], 'nfiles': 1, 'name': 'Keith'} 
+1188691800.5 SEND_MSG 6.192.190.56 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM53CMTzGC8XkXotNwq/6vGFGG/gauPStmTbY3tUAAKOCyNiU9x1tVePAkfdTMP2Quimqrz923eC9rIC 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2060, 'random peers': [{'ip': '202.143.164.142', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp', 'port': 7764, 'similarity': 0}, {'ip': '193.39.75.127', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'ip': '66.194.43.252', 'similarity': 0, 'port': 7773, 'nfiles': 158, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '121.207.93.115', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '76.217.144.187', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '57.106.110.19', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '129.13.207.138', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARNZfv2M6KWAxEqfFcdG0bY1cV7mObZt5Rsx52QyAbkS25gWunh3rG5DqcECFvXbT9tSH3LHH7z6y2nB', 'port': 7762, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'ip': '48.120.3.210', 'similarity': 0, 'port': 7773, 'nfiles': 158, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691802.8 CONN_DEL 25.247.169.73 7764 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKgm00seD977usBtMzTjsn0jeAJexqFnAzYImOejAZ2WMvOygsRYvi8gnUmtiPN4utntmADnhkHf9r+k overlayswarm:connection lost 
+1188691806.2 CONN_ADD 189.91.188.9 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIra+QGcX57yYgquysIUe5Ax9DZq9LvvwhCB42pqANu4NW/A328p2H/i6D7GJ9aB2Eo33GCybjYLXeY1 2 
+1188691806.8 RECV_MSG 14.180.225.155 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIra+QGcX57yYgquysIUe5Ax9DZq9LvvwhCB42pqANu4NW/A328p2H/i6D7GJ9aB2Eo33GCybjYLXeY1 2 PASSIVE_BC {'name': '0-kc6a5tg24inpp', 'taste buddies': [], 'random peers': [], 'preferences': []} 
+1188691806.9 SEND_MSG 161.112.160.145 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIra+QGcX57yYgquysIUe5Ax9DZq9LvvwhCB42pqANu4NW/A328p2H/i6D7GJ9aB2Eo33GCybjYLXeY1 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '56.126.191.224', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp'}, {'ip': '115.235.205.13', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '118.87.22.59', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/'}, {'ip': '126.237.165.251', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '81.101.36.228', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '96.178.34.226', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}], 'preferences': []} 
+1188691808.4 CONN_ADD 231.47.86.41 7765 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAavuK5q3Zh7ob7SsTDQo5vFCA/0HlDfoMjLQT8q3ABQVqa3PCFYO6G7OLvd2EriJKQiMtT25aA00Yj4+ 6 
+1188691808.5 RECV_MSG 118.66.125.151 7765 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAavuK5q3Zh7ob7SsTDQo5vFCA/0HlDfoMjLQT8q3ABQVqa3PCFYO6G7OLvd2EriJKQiMtT25aA00Yj4+ 6 PASSIVE_BC {'preferences': [], 'ndls': 0, 'taste buddies': [], 'connectable': 0, 'npeers': 24, 'random peers': [{'ip': '56.232.99.189', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAFzfWHb/WPL+luFfXfUbtJGRpUnwbmyB0kH7t3UpAVSpKilym4Fzt2rS7HJTZyQ7yCI3c+xTRtMLZ0sc', 'port': 7007, 'similarity': 0}, {'ip': '54.248.85.72', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGZomjLNDu6i/5c/ATpsatWiL0P7huV/ixgzdvwlAU8AEYHp7ppyumydUg2MnoneHJ74H58yB+pUPSdu', 'port': 7005, 'similarity': 0}, {'ip': '105.25.216.233', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfizxRUl7S3Fec2cl1+uML6tORwnUIod5mh3soWVANxmu+flFNp1yayiLPjB+dWQ6Va77FXbHDkw5smd', 'port': 7006, 'similarity': 0}, {'ip': '17.106.117.6', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEASXq28ZXiH3YtMG4d2ShG4E3FuNaJ+taG23j4jNVAPiSjBEfG+Kt0aqqzqYuaai7wSeSSU1Y3JYm4lWo', 'port': 7764, 'similarity': 0}, {'ip': '215.212.119.211', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOCdNvgsa7J6sRgq//Rvh3w9WDKfIjVs5xuxfa4GAFO+2HUkDELPnUA+xCz3GHqNsONSINtVHbUJQjb9', 'port': 7764, 'similarity': 0}], 'collected torrents': [], 'nfiles': 0, 'name': 'AdamsLaptop'} 
+1188691808.8 SEND_MSG 94.1.200.235 7765 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAavuK5q3Zh7ob7SsTDQo5vFCA/0HlDfoMjLQT8q3ABQVqa3PCFYO6G7OLvd2EriJKQiMtT25aA00Yj4+ 6 PASSIVE_BC {'preferences': [], 'name': u'superpeer1', 'taste buddies': [], 'ndls': 0, 'npeers': 2062, 'random peers': [{'ip': '223.77.89.47', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp', 'port': 7764, 'similarity': 0}, {'ip': '138.153.49.105', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'ip': '202.232.17.86', 'similarity': 0, 'port': 7773, 'nfiles': 158, 'oversion': 6}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6', 'ip': '160.28.82.44', 'similarity': 0, 'port': 7772, 'nfiles': 289, 'oversion': 6}, {'ip': '174.220.139.88', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz', 'port': 7773, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX', 'ip': '183.136.235.134', 'similarity': 0, 'port': 7771, 'nfiles': 195, 'oversion': 6}, {'ip': '105.8.241.220', 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAfyrYHFupXDpAi7sjiEDjzVR4Akjl6If5ggYJqBUAREvrEeBoBt/svdUdLaU9ca+RkQJJFvbGpg2sWM9', 'port': 7767, 'similarity': 0}, {'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/', 'ip': '199.102.118.140', 'similarity': 0, 'port': 7773, 'nfiles': 158, 'oversion': 6}], 'collected torrents': [], 'nfiles': 0, 'connectable': 1} 
+1188691809.3 CONN_ADD 117.176.63.228 7767 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL9dEV8lmBqo/mBySZt6aoLmlm7S6XJIwDmuBidMAa2adwf6NyjD1QZbtZkjCT4qjaIUJU6X7XOgD2ac 6 
+1188691810.1 CONN_ADD 17.138.248.248 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABW3E2A6uOX6xW/8xK+p2pYfC2Ip8mviF6Wd2zA8ANlRa73gQRhdNSwMMx7IXVtnKez4usm8S2kJy9GX 2 
+1188691810.2 RECV_MSG 157.105.51.248 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABW3E2A6uOX6xW/8xK+p2pYfC2Ip8mviF6Wd2zA8ANlRa73gQRhdNSwMMx7IXVtnKez4usm8S2kJy9GX 2 PASSIVE_BC {'name': 'D6Q165C1', 'taste buddies': [{'ip': '184.145.78.91', 'age': 8329, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAf/Qgr2M4qWSCeCpUmD7q1Ys6lsBvxO5ryLk4ubJACPUNZO9vYh1+ZyCI8H90TjqaHHifP/kLElUNdeA', 'port': 7762, 'preferences': ['ZZXFmBrHcFG/OUG91p5l7M+al6g=', 'ei4wVaQ0CADqmpVCWgxRUa0jJIA=', 'deUf7eJ2/Gz8SOCm2/fc80RMgQw=', 'sDiM/wmG9rWNxKRPpO8jKJyRWdc=', 'dPQo3cdekpj91PNxVJG4quw6XOc=', 'cKJzGSiyMNSHAIgCSRdvFwv4TtI=', '7n1TzDbAhreJQJEGVDEYgvK8a4Y=', 'MB39kMlya3sH4YOhs/BVZe/Qjts=', '2P3HGVYmKPWMDNvPi7yKjCC49dI=', 'QrXBz8ofjWoZNkZcH8QrZdHhlRM=']}], 'random peers': [{'ip': '157.105.88.43', 'age': 9343, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZb/wBziwkXO4ksN9GN2slrISYvk9rCLNCFLlcCqAFiZ7UOZn78gKpOU2Ckqyg/TOPub4bBXEPS5LZyW'}, {'ip': '214.127.12.252', 'age': 22203, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUv3xIal/Q2QuKrTiikX6ei3tUOPOlxigrUGIWApAbLfM3zbUFk2DCJBUMkIvVCFIR/LQWFfQFpoUacd'}, {'ip': '171.6.140.39', 'age': 1924, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEATHShk9GsvmesB9RHOE8Mo37jDMKBy6rgfhvFfLQAOzQyC2ACSveEN6aE8cCiEfURbNtSw3wp/mX5MTO'}, {'ip': '216.140.169.241', 'age': 2948, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABmcc1B5PwiDx3xdlpo1D1Zv/JHO5xCLUW0BibcmAE2uYqYVxe7B+hjeZd4O6Rgxpx8h+KlktJNkMutB'}, {'ip': '40.146.86.146', 'age': 2572, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAP2+hAj2Gxv8GAdWrUNrENeSbYjo946x5afQRTYKAZOdlmeYx/PwZzQNNrcPt6lcWnTm6QD1O5fWJxD4'}, {'ip': '23.21.139.102', 'age': 306, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHccREfVdCfGOSW8JQdPovLpisLEOZX/PC/HoUO4AGT4bfb7ihF2HjZI5h8o5wPTIYe43ovYu2FVS4fO'}, {'ip': '97.255.231.136', 'age': 1223, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAHyy29hE3vexOvplkJe70PZ5f0r5a+CXEtzzMgzZAOQjQw78NCldun/PpiNBbXJftRsFgH0bJLyio7HY'}, {'ip': 
'25.237.77.20', 'age': 4643, 'port': 59736, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAbE4oWI0rzijk25+BxyDXPmLtT8c1Zq5PPY2F3e+ACUVUlE6IdhCzz2gruieqV41Hzq+McpDGr6R0Wqi'}, {'ip': '128.110.187.46', 'age': 4556, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACu4NmJkJWmNVWx1yJYP8Vp+HMeS2xapk4QjrlbuAODgM/Fw/5SFrs+thvDCnMKX4VbH38JoD9yoKJES'}, {'ip': '250.127.10.50', 'age': 14383, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAOpYDWlnIBrUTTyK9PZE6XI4sR3tPL/CGfB7fRMaAYzmEyQAQE2QRbFvri21Twbdqfm3UOe3VCwqWkP7'}], 'preferences': ['mrq4dM2K8RB2rQW79QdEI0i0TmY=', 'YoODF89MwG5BYT9SRdY/P8eXPfA=', 'CmTN1NfencMYuvYM3vHKb3FeOxM=']} 
+1188691810.2 SEND_MSG 37.0.190.181 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEABW3E2A6uOX6xW/8xK+p2pYfC2Ip8mviF6Wd2zA8ANlRa73gQRhdNSwMMx7IXVtnKez4usm8S2kJy9GX 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '83.138.62.62', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp'}, {'ip': '128.241.96.21', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '138.6.126.120', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/'}, {'ip': '156.108.212.169', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '26.44.173.205', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '115.235.115.53', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}], 'preferences': []} 
+1188691811.4 CONN_ADD 248.112.153.140 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY3SsAfzp8l15Vn8GiA3rKH7Sn77g/nBKIcwYfAqAKw5z6RWPLeRt/K8ypfyqFaytEp9ftZ1u3510/Qm 2 
+1188691811.4 RECV_MSG 161.144.177.150 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY3SsAfzp8l15Vn8GiA3rKH7Sn77g/nBKIcwYfAqAKw5z6RWPLeRt/K8ypfyqFaytEp9ftZ1u3510/Qm 2 PASSIVE_BC {'name': 'Laptop', 'taste buddies': [{'ip': '234.3.1.189', 'age': 6611, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJKtYoa/FT8lndA9lBnMGrhtDavih6lhukK6EUh7AG2i2OjAOutGkQJneRKGsXubC8eIU8GXFNOnhip1', 'port': 6881, 'preferences': ['re+wfQbzaSaI8cRIanHg60q4htk=', 'futmHX6GoSOqLt8Eo76D2fwPk4o=', 'pBW1lPqidXb6TUtyXlc7LaYcAbc=', '5Mo0PtPoC1lUKb9+QEwmp5UoKYg=', 'OEJP+P1f/q5izk8slkEXY/38XjE=', 'ikBGWwU1dx0ls/cqPjsBcPKH7bs=', 'DXZ2QU8l6Ce3Scwsvsx+CczV8Vk=', '2lIg8iNYeQV5FidM4DyL7jWwjtU=', 'AJnC6NroRk8W9mavja0tb866Z14=', 'bm89PHiJ8IRAXRFQMhkXirs+P2Q=']}, {'ip': '74.1.134.11', 'age': 26247, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKVTVshD/12DDh1Rg7LmWndAigX5k/daB0nHfPpWAM9X6EH/2fbvAmKwboaR818UfbEDqdBbIWDT0FI6', 'port': 6881, 'preferences': ['ZKGVf7w+ZmFhxqCvKossyfI1M/M=', 'nJR6u0KcjBD2JO5gyeT+4swRCsM=', 'RtNxtc8/7LA2DtbGxnKbzlYm3O8=', 'zSFbisJT969nWcdQsfG26oFkHOw=', 'QgHiNstWLbSwSBGMx1Xqf6Volb0=', 'TZxJw/cGWf5Kf9URHWamTrH+Z28=', '8NgES7kBz672pR6CWnaFTam0Quc=', 'cti72zdHCBgtqe6HGdeBjt1Lrss=', 'MWgSgU7UY97ImPde5exO+O2sKHM=', 'YJB60HEYkd5WboaTFDqUeNdVSUk=']}], 'random peers': [{'ip': '29.199.182.96', 'age': 3660, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMSnxjzPP8P+kQmAFSTRMfcob/nOs46HpQFNb6SyAESlfiyV3FihBVv3iblzfLI9iiYrE4obu0z64/jC'}, {'ip': '129.248.213.151', 'age': 18993, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACNgzOEWIDkn/v89ms9yM5kDa+knWVCFi0bm6MBvATqokeBiZZStFSqKj8PrIr/dMo4DpmikNSk72S63'}, {'ip': '121.143.46.165', 'age': 8426, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAF0QcMiDbp5FowXdOe455dsSrx5tMrm0Y3aB892mAZBr+LpXMKDpw8xkVSodQbelUPaebRNpfhsKIQBF'}, {'ip': '91.231.176.43', 'age': 903, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAeYVUXqqifYZ5yLcDzLM4nJYAYMQgjz1ij3qTM/cAVMSJ903rexs0+Oa0c3CR4D4OROP2cBTnpHWFvAj'}, {'ip': '81.223.122.162', 'age': 
2275, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAK3vuT3wd0cXJ4etwQsZceMVlqxwcDEan/Z7Hf/ZAcMDz5ZD4NCQWjHNT/3w2IN47lqaZmRq4f1nHffi'}, {'ip': '76.70.174.194', 'age': 26247, 'port': 7762, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAR97KVo3Ys92fxqe9o4j59ld498+Pt3MxOq2UGGWABb5w+U6VWx9LlHUoU/5H3sZls9g2cPLmAXnHHtp'}, {'ip': '169.2.32.228', 'age': 2724, 'port': 7766, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEARme3zHMzrKL8lykjKFpTZCFDvYUVy8D2R4GzadEAKcMvQYRS1c3965JH7PT6hQ4N5HFeDS+AjjLai5C'}, {'ip': '224.254.144.232', 'age': 14769, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAPKYnH2qxePPuWpHJ+nj4naJO5/5bLsN/ieKVxzUAQXYvzxE56hcRsJOTkiNjG+ZbyVUEUYPkTNceZKN'}, {'ip': '140.57.29.222', 'age': 2399, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJqHKDOXFKbSOR3QBHN1T2dkt/T6ZmwHUpMu0etYAEpnDWQi7lQybP8h5T1Yf1QwJUi8t3y4nIzBhDpf'}, {'ip': '42.246.219.187', 'age': 26247, 'port': 6881, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAMk6nzqbozSH4W/rVUUErBm02X86tIw9BRWUDqd+AYd+lAIJcLzcbaCO6kiewXKJ2u4RjWS7Y3Namjwn'}], 'preferences': ['Nt0vaVOINis5rbOzOOvD35l+BZU=', 'pBB0xNj1SOEL6rWT7KFCUd93QoM=', '2P9tGuIz0yBSPVZm4M4gpHeQQ/0=', 'OhriYUKPtmasC5EfjP+WRIzcZUc=', 'bGPQbrsEjJ45VtojJvGlkaEQwcc=', 'b3uItk1UdHJRpGn7rqLQzvw7pJ8=', 'onY9k8LCjx3fgI5FRqxic1rwn6g=', 'iMi1XMrUy12iZ7SfEXgMJp9Oq3I=', '/3AfeG9n3vfJ7gLGwkK+/XagU7s=', '/yS+S5NOwHvAzaKV7mMjKy/lMR4=', 'Uc4u1u+EbDvDMMQxhL8tf9ZPBak=', 'LV+kqqipcJ7LhHSUSiJyK1Pkqv4=', 'p3qwbFFa3oAafaLF2PTnRvwIJ1Q=', 'mruV8SXFfIoEaGpeTBAu1Ttd29I=', 'Iq+/Kv49QXgPutrDu6ssnA0hLZ0=']} 
+1188691811.7 SEND_MSG 96.1.54.161 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAY3SsAfzp8l15Vn8GiA3rKH7Sn77g/nBKIcwYfAqAKw5z6RWPLeRt/K8ypfyqFaytEp9ftZ1u3510/Qm 2 PASSIVE_BC {'name': u'superpeer1', 'taste buddies': [], 'random peers': [{'ip': '191.228.48.103', 'age': 0, 'port': 7764, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAGpolLsbKVrI8e6NjtCVr5Uf5aMHuIKjECIY+JSbAYMURDpttWkZDsgw5vRtBXvqGi67TFBjsPFaLFJp'}, {'ip': '70.92.117.100', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXxRWrDeE2ufFsHegkznPrGqNLkEHPc+bpzmH8utAQ8xCL1EE62rpTf0GTQc4gMC1Rfn70ry71bnUfOi'}, {'ip': '139.5.131.128', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAabTwtiVOVtjrgovRxlcFGQKC+JuOar4FOocEgvKAUcuzDPciJ6DHANvN4RW0PuJxY6Ljx89SxXg1ni/'}, {'ip': '124.165.144.137', 'age': 0, 'port': 7772, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYLyb5SB5UcFHJj5cTSb7Jhs2LlHFSh/Ct677t6xAXVS8RegAlga+Vurg/hIgPpBx5N57X7am5mk8Qw6'}, {'ip': '150.253.217.174', 'age': 0, 'port': 7773, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAZ3f03sJuwR81a1Mh93VuhbSRGqRUsKXpLWlZGADAfKxcoZVcivGMfLv0N0R0rvFGBy49FkVlaDmqQSz'}, {'ip': '223.58.57.92', 'age': 0, 'port': 7771, 'permid': 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAYf3brbq7kIBW3Xjyg1M3hiMukkx86JLaR94P+agAeREgn/0891ejS5OMZ+IfBPDNl+9auP0372aC7vX'}], 'preferences': []} 
+1188691813.0 BUCA_STA 9512  2075 2075 100  1586 12129  48 48  0 6 10 
+1188691813.0 CONN_DEL 41.182.8.181 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAXGP2UgzBPPwVHFQN7yk9Zi6Ugzhvh3VMKcOhzuIAOd8ji28YnAE6jJ/9jBa4pzSkQZEbcv5BBNGq9Pv overlayswarm:connection lost 
+1188691813.0 CONN_DEL 187.109.236.102 6881 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAKCPs99ohZOqkrGFaQl3OO9wus+QWa3EsFQvY30VACOufAewrc8zo1804JV13SpeXJqBMviVJY75fhxP overlayswarm:connection lost 
+1188691815.9 CONN_DEL 5.63.24.64 7768 MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAM53CMTzGC8XkXotNwq/6vGFGG/gauPStmTbY3tUAAKOCyNiU9x1tVePAkfdTMP2Quimqrz923eC9rIC overlayswarm:connection lost 
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/extend_hs_dir/dummydata.merkle.torrent b/instrumentation/next-share/BaseLib/Test/extend_hs_dir/dummydata.merkle.torrent
new file mode 100644 (file)
index 0000000..95a15d8
--- /dev/null
@@ -0,0 +1 @@
+d8:announce30:http://localhost:4901/announce13:creation datei1172047413e8:encoding5:UTF-84:infod6:lengthi2458880e4:name9:dummydata10:name.utf-89:dummydata12:piece lengthi262144e9:root hash20:\96Zqµ{¨Äé(6ö°Ï´ªÝ£\88ä-ee
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/extend_hs_dir/proxyservice.test.torrent b/instrumentation/next-share/BaseLib/Test/extend_hs_dir/proxyservice.test.torrent
new file mode 100644 (file)
index 0000000..3f732b6
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/extend_hs_dir/proxyservice.test.torrent differ
diff --git a/instrumentation/next-share/BaseLib/Test/log_parser.py b/instrumentation/next-share/BaseLib/Test/log_parser.py
new file mode 100644 (file)
index 0000000..d803f0f
--- /dev/null
@@ -0,0 +1,145 @@
+import sys
+import os
+
+def parse_log_line(line, line_num):
+    terms = line.split()
+    try:
+        timestamp = float(terms[0])
+        key = terms[1]
+        data = {'time':timestamp}
+    except KeyboardInterrupt:
+        raise KeyboardInterrupt
+    except:
+        return None
+    
+    if key == 'BUCA_STA':
+        data['nRound'] = int(terms[2])
+        data['nPeer'] = int(terms[3])
+        data['nPref'] = int(terms[4]) 
+        data['nConnCandidates'] = int(terms[5])
+        data['nBlockSendList'] = int(terms[6])
+        data['nBlockRecvList'] = int(terms[7])
+        data['nConnectionsInSecureOver'] = int(terms[8])
+        data['nConnectionsInBuddyCast'] = int(terms[9])
+        data['nTasteConnectionList'] = int(terms[10])
+        data['nRandomConnectionList'] = int(terms[11])
+        data['nUnconnectableConnectionList'] = int(terms[12])
+
+    elif key == 'CONN_TRY' or key == 'CONN_ADD' or key == 'CONN_DEL':
+        try:
+            data['ip'] = terms[2]
+            data['port'] = int(terms[3])
+            data['permid'] = terms[4]
+            if key == 'CONN_ADD' or key == 'RECV_MSG' or key == 'SEND_MSG':
+                data['oversion'] = terms[5]
+        except KeyboardInterrupt:
+            raise KeyboardInterrupt
+        except Exception, msg:
+            print >> sys.stderr, "Error in parse the log on line %d:"%line_num, Exception, msg
+            return None
+            
+    elif key == 'RECV_MSG' or key == 'SEND_MSG':
+        try:
+            data['ip'] = terms[2]
+            data['port'] = int(terms[3])
+            data['permid'] = terms[4]
+            data['oversion'] = terms[5]
+            data['MSG_ID'] = terms[6]
+            msg = ' '.join(terms[7:])
+            data['msg'] = eval(msg)
+            if key == 'RECV_MSG':
+                data['msg']['permid'] = data['permid']
+                data['msg']['ip']  = data['ip']
+                data['msg']['port'] = data['port'] 
+        except KeyboardInterrupt:
+            raise KeyboardInterrupt
+        except Exception, msg:
+            print >> sys.stderr, "Error in eval the msg on line %d:"%line_num, Exception, msg
+            return None
+    
+    elif key == 'RECV_QRY':
+        try:
+            data['permid'] = terms[2]
+            data['oversion'] = int(terms[3])
+            data['nqueries'] = int(terms[4])
+            msg = ' '.join(terms[5:])
+            data['msg'] = eval(msg)
+            data['query'] = data['msg']['q'][7:]
+        except KeyboardInterrupt:
+            raise KeyboardInterrupt
+        except Exception, msg:
+            print >> sys.stderr, "Error in eval the msg on line %d:"%line_num, Exception, msg
+            return None
+        
+    elif key == 'RPLY_QRY':
+        pass
+    
+    if len(data) == 1:
+        return None
+    
+    return key, data
+
+def parse_log_file(file_path):
+    file = open(file_path)
+    i = 0
+    for line in file:
+        i += 1
+        line = line.strip()
+        if not line.startswith('#'):
+            yield parse_log_line(line, i)
+    file.close()
+    
+def parse_log_file_readall(file_path, N_BREAK):
+    file = open(file_path)
+    i = 0
+    alldata = []
+    for line in file:
+        if N_BREAK > 0 and i >= N_BREAK:
+            break
+        i += 1
+        line = line.strip()
+        if not line.startswith('#'):
+            res = parse_log_line(line, i)
+            alldata.append(res)
+    return alldata
+
+def yield_files2load(file_paths):
+    for file_path in file_paths:
+        if os.path.isdir(file_path):
+            files_in_dir = os.listdir(file_path)
+            files2load = [os.path.join(file_path, afile) for afile in files_in_dir]
+        else:
+            files2load = [file_path]
+        for afile in files2load:
+            if afile.endswith('.log'):
+                yield afile
+
+
+def get_buddycast_data(file_path):
+    file = open(file_path)
+    i = 0
+    for line in file:
+        i += 1
+        line = line.strip()
+        if not line.startswith('#'):
+            ret = parse_log_line(line, i)
+            if ret is not None:
+                key, data = ret
+                if key == 'RECV_MSG':
+                    yield data['permid'], int(data['oversion']), data['msg']
+    file.close()
+
+        
+
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print >> sys.stderr, "Must specify the path or directory of logs"
+        sys.exit(1)
+    
+    i = 1
+    for file_path in yield_files2load(sys.argv[1:]):
+        print >> sys.stderr, "load", i, file_path
+        i += 1
+        #for ret in parse_log_file(file_path):
+        for ret in get_buddycast_data(file_path):
+            print "GOT",`ret`
diff --git a/instrumentation/next-share/BaseLib/Test/olconn.py b/instrumentation/next-share/BaseLib/Test/olconn.py
new file mode 100644 (file)
index 0000000..3a5ffe9
--- /dev/null
@@ -0,0 +1,107 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+
+from BaseLib.Test.test_permid import TestPermIDs
+from btconn import BTConnection
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.Utilities.Crypto import sha
+
+DEBUG=False
+
+cr_random_size = 1024
+
+class OLConnection:
+
+    def __init__(self,my_keypair,hostname,port,opensock=None,mylistenport=481,myoversion=None):
+        """ If opensock is not None, we assume this is a connection we
+            accepted, and he initiates the Challenge/Response
+        """
+
+        self.my_keypair = my_keypair
+        self.b = BTConnection(hostname,port,opensock,mylistenport=mylistenport,myoversion=myoversion)
+        if opensock:
+            self.b.read_handshake_medium_rare()
+            # Read challenge
+            msg = self.b.recv()
+            assert(msg[0] == CHALLENGE)
+            randomB = bdecode(msg[1:])
+            [randomA,resp1_data] = self.create_good_response1(randomB,self.b.get_his_id())
+            self.b.send(resp1_data)
+            # Read response2
+            msg = self.b.recv()
+            assert(msg[0] == RESPONSE2)
+        else:
+            self.b.read_handshake()
+            [rB,chal_data] = self.create_good_challenge()
+            self.b.send(chal_data)
+            resp1_data = self.b.recv()
+            if DEBUG:
+                print >>sys.stderr,"olconn: recv",len(resp1_data),"bytes"
+            resp1_dict = bdecode(resp1_data[1:])
+            resp2_data = self.create_good_response2(rB,resp1_dict,self.b.get_his_id())
+            self.b.send(resp2_data)
+            if DEBUG:
+                print >>sys.stderr,"olconn: sent",len(resp2_data),"bytes"
+
+    def get_my_fake_listen_port(self):
+        return self.b.get_my_fake_listen_port()
+
+    #
+    # Cut 'n paste from TestPermIDs 
+    #
+    def create_good_challenge(self):
+        r = "".zfill(cr_random_size)
+        return [r,self.create_challenge_payload(r)]
+
+    def create_good_response2(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_list = [rB,resp1_dict['rA'],hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_challenge_payload(self,r):
+        return CHALLENGE+bencode(r)
+
+    def create_response2_payload(self,dict):
+        return RESPONSE2+bencode(dict)
+
+
+    #
+    # Cut 'n paste from TestPermIDResponse1
+    #
+    def create_good_response1(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = "".zfill(cr_random_size)
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],rB,hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+    def create_response1_payload(self,dict):
+        return RESPONSE1+bencode(dict)
+
+
+
+    def send(self,data):
+        """ send length-prefixed message """
+        self.b.send(data)
+
+    def recv(self):
+        """ received length-prefixed message """
+        return self.b.recv()
+
+    def close(self):
+        self.b.close()
+        
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake.srt b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake.srt
new file mode 100755 (executable)
index 0000000..20c2450
--- /dev/null
@@ -0,0 +1,6 @@
+this is a fake srt
+
+only needed for testing
+
+cheers :)
+
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake0.srt b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake0.srt
new file mode 100755 (executable)
index 0000000..20c2450
--- /dev/null
@@ -0,0 +1,6 @@
+this is a fake srt
+
+only needed for testing
+
+cheers :)
+
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake1.srt b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake1.srt
new file mode 100755 (executable)
index 0000000..15e53d6
--- /dev/null
@@ -0,0 +1,11 @@
+this is a fake srt
+
+only needed for testing
+
+cheers :)
+
+asfasgb sagas gba
+
+asfasfas 24214 a 
+The checksum is different yuppy!
+
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake2.srt b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/fake2.srt
new file mode 100755 (executable)
index 0000000..b44bcd9
--- /dev/null
@@ -0,0 +1,12 @@
+this is a fake srt
+
+only needed for testing
+
+cheers :)
+
+dgsdjkbn gsdf h43 y35
+
+43uhy35 h0-jkh 54h
+safsdgsd 22211
+
+yet another checksum! -yay!
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/schema_sdb_v5.sql b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/schema_sdb_v5.sql
new file mode 100644 (file)
index 0000000..3eb0b83
--- /dev/null
@@ -0,0 +1,447 @@
+-- Tribler SQLite Database
+-- Version: 5
+--
+-- History:
+--   v1: Published as part of Tribler 4.5
+--   v2: Published as part of Tribler 5.0
+--   v3: Published as part of Next-Share M16
+--   v4: Published as part of Tribler 5.2
+--   v5: Introduced to test subtitles integration
+
+-- 
+-- See Tribler/Core/CacheDB/sqlitecachedb.py updateDB() for exact version diffs.
+--
+-- v4: ChannelCast is an extension of the concept of ModerationCast, with an additional integrity measure.
+--     'torrenthash' field is used to protect the integrity of the torrent file created by the publisher,
+--     from fake-tracker attack, by including sha1 hash of the dictionary corresponding to the entire torrent.
+--
+--     'InvertedIndex' table is used for more precise keyword matching than the substring search that was used previously.
+
+BEGIN TRANSACTION create_table;
+
+----------------------------------------
+
+CREATE TABLE BarterCast (
+  peer_id_from  integer,
+  peer_id_to    integer,
+  downloaded    numeric,
+  uploaded      numeric,
+  last_seen     numeric,
+  value         numeric
+);
+
+CREATE UNIQUE INDEX bartercast_idx
+  ON BarterCast
+  (peer_id_from, peer_id_to);
+
+----------------------------------------
+
+CREATE TABLE Category (
+  category_id    integer PRIMARY KEY NOT NULL,
+  name           text NOT NULL,
+  description    text
+);
+
+----------------------------------------
+
+CREATE TABLE MyInfo (
+  entry  PRIMARY KEY,
+  value  text
+);
+
+----------------------------------------
+
+CREATE TABLE MyPreference (
+  torrent_id     integer PRIMARY KEY NOT NULL,
+  destination_path text NOT NULL,
+  progress       numeric,
+  creation_time  integer NOT NULL,
+  -- V2: Patch for BuddyCast 4
+  click_position INTEGER DEFAULT -1,
+  reranking_strategy INTEGER DEFAULT -1
+);
+
+----------------------------------------
+
+CREATE TABLE Peer (
+  peer_id              integer PRIMARY KEY AUTOINCREMENT NOT NULL,
+  permid               text NOT NULL,
+  name                 text,
+  ip                   text,
+  port                 integer,
+  thumbnail            text,
+  oversion             integer,
+  similarity           numeric DEFAULT 0,
+  friend               integer DEFAULT 0,
+  superpeer            integer DEFAULT 0,
+  last_seen            numeric DEFAULT 0,
+  last_connected       numeric,
+  last_buddycast       numeric,
+  connected_times      integer DEFAULT 0,
+  buddycast_times      integer DEFAULT 0,
+  num_peers            integer,
+  num_torrents         integer,
+  num_prefs            integer,
+  num_queries          integer,
+  -- V3: Addition for local peer discovery
+  is_local            integer DEFAULT 0
+);
+
+CREATE UNIQUE INDEX permid_idx
+  ON Peer
+  (permid);
+
+CREATE INDEX Peer_name_idx
+  ON Peer
+  (name);
+
+CREATE INDEX Peer_ip_idx
+  ON Peer
+  (ip);
+
+CREATE INDEX Peer_similarity_idx
+  ON Peer
+  (similarity);
+
+CREATE INDEX Peer_last_seen_idx
+  ON Peer
+  (last_seen);
+
+CREATE INDEX Peer_last_connected_idx
+  ON Peer
+  (last_connected);
+
+CREATE INDEX Peer_num_peers_idx
+  ON Peer
+  (num_peers);
+
+CREATE INDEX Peer_num_torrents_idx
+  ON Peer
+  (num_torrents);
+
+----------------------------------------
+
+CREATE TABLE Preference (
+  peer_id     integer NOT NULL,
+  torrent_id  integer NOT NULL,
+  -- V2: Patch for BuddyCast 4
+  click_position INTEGER DEFAULT -1,
+  reranking_strategy INTEGER DEFAULT -1
+);
+
+CREATE INDEX Preference_peer_id_idx
+  ON Preference
+  (peer_id);
+
+CREATE INDEX Preference_torrent_id_idx
+  ON Preference
+  (torrent_id);
+
+CREATE UNIQUE INDEX pref_idx
+  ON Preference
+  (peer_id, torrent_id);
+
+----------------------------------------
+
+CREATE TABLE Torrent (
+  torrent_id       integer PRIMARY KEY AUTOINCREMENT NOT NULL,
+  infohash                text NOT NULL,
+  name             text,
+  torrent_file_name text,
+  length           integer,
+  creation_date    integer,
+  num_files        integer,
+  thumbnail        integer,
+  insert_time      numeric,
+  secret           integer,
+  relevance        numeric DEFAULT 0,
+  source_id        integer,
+  category_id      integer,
+  status_id        integer,
+  num_seeders      integer,
+  num_leechers     integer,
+  comment          text
+);
+
+CREATE UNIQUE INDEX infohash_idx
+  ON Torrent
+  (infohash);
+
+CREATE INDEX Torrent_length_idx
+  ON Torrent
+  (length);
+
+CREATE INDEX Torrent_creation_date_idx
+  ON Torrent
+  (creation_date);
+
+CREATE INDEX Torrent_relevance_idx
+  ON Torrent
+  (relevance);
+
+CREATE INDEX Torrent_num_seeders_idx
+  ON Torrent
+  (num_seeders);
+
+CREATE INDEX Torrent_num_leechers_idx
+  ON Torrent
+  (num_leechers);
+
+CREATE INDEX Torrent_name_idx 
+  ON Torrent
+  (name);
+
+----------------------------------------
+
+CREATE TABLE TorrentSource (
+  source_id    integer PRIMARY KEY NOT NULL,
+  name         text NOT NULL,
+  description  text
+);
+
+CREATE UNIQUE INDEX torrent_source_idx
+  ON TorrentSource
+  (name);
+
+----------------------------------------
+
+CREATE TABLE TorrentStatus (
+  status_id    integer PRIMARY KEY NOT NULL,
+  name         text NOT NULL,
+  description  text
+);
+
+----------------------------------------
+
+CREATE TABLE TorrentTracker (
+  torrent_id   integer NOT NULL,
+  tracker      text NOT NULL,
+  announce_tier    integer,
+  ignored_times    integer,
+  retried_times    integer,
+  last_check       numeric
+);
+
+CREATE UNIQUE INDEX torrent_tracker_idx
+  ON TorrentTracker
+  (torrent_id, tracker);
+  
+----------------------------------------
+
+CREATE VIEW SuperPeer AS SELECT * FROM Peer WHERE superpeer=1;
+
+CREATE VIEW Friend AS SELECT * FROM Peer WHERE friend=1;
+
+CREATE VIEW CollectedTorrent AS SELECT * FROM Torrent WHERE torrent_file_name IS NOT NULL;
+
+
+-- V2: Patch for VoteCast
+            
+CREATE TABLE VoteCast (
+mod_id text,
+voter_id text,
+vote integer,
+time_stamp integer
+);
+
+CREATE INDEX mod_id_idx
+on VoteCast 
+(mod_id);
+
+CREATE INDEX voter_id_idx
+on VoteCast 
+(voter_id);
+
+CREATE UNIQUE INDEX votecast_idx
+ON VoteCast
+(mod_id, voter_id);
+
+
+-- V2: Patch for BuddyCast 4
+
+CREATE TABLE ClicklogSearch (
+                     peer_id INTEGER DEFAULT 0,
+                     torrent_id INTEGER DEFAULT 0,
+                     term_id INTEGER DEFAULT 0,
+                     term_order INTEGER DEFAULT 0
+                     );
+CREATE INDEX idx_search_term ON ClicklogSearch (term_id);
+CREATE INDEX idx_search_torrent ON ClicklogSearch (torrent_id);
+
+CREATE TABLE ClicklogTerm (
+                    term_id INTEGER PRIMARY KEY AUTOINCREMENT DEFAULT 0,
+                    term VARCHAR(255) NOT NULL,
+                    times_seen INTEGER DEFAULT 0 NOT NULL
+                    );
+CREATE INDEX idx_terms_term ON ClicklogTerm(term);  
+
+
+
+
+
+--v4: Patch for BuddyCast 5. Adding Popularity table
+
+CREATE TABLE Popularity (
+                         torrent_id INTEGER,
+                         peer_id INTEGER,
+                         msg_receive_time NUMERIC,
+                         size_calc_age NUMERIC,
+                         num_seeders INTEGER DEFAULT 0,
+                         num_leechers INTEGER DEFAULT 0,
+                         num_of_sources INTEGER DEFAULT 0
+                     );
+
+CREATE INDEX Message_receive_time_idx 
+  ON Popularity 
+   (msg_receive_time);
+
+CREATE INDEX Size_calc_age_idx 
+  ON Popularity 
+   (size_calc_age);
+
+CREATE INDEX Number_of_seeders_idx 
+  ON Popularity 
+   (num_seeders);
+
+CREATE INDEX Number_of_leechers_idx 
+  ON Popularity 
+   (num_leechers);
+
+CREATE UNIQUE INDEX Popularity_idx
+  ON Popularity
+   (torrent_id, peer_id, msg_receive_time);
+
+
+
+-- v4: Patch for ChannelCast, Search
+
+CREATE TABLE ChannelCast (
+publisher_id text,
+publisher_name text,
+infohash text,
+torrenthash text,
+torrentname text,
+time_stamp integer,
+signature text
+);
+
+CREATE INDEX pub_id_idx
+on ChannelCast
+(publisher_id);
+
+CREATE INDEX pub_name_idx
+on ChannelCast
+(publisher_name);
+
+CREATE INDEX infohash_ch_idx
+on ChannelCast
+(infohash);
+
+----------------------------------------
+
+CREATE TABLE InvertedIndex (
+word               text NOT NULL,
+torrent_id         integer
+);
+
+CREATE INDEX word_idx
+on InvertedIndex
+(word);
+
+CREATE UNIQUE INDEX invertedindex_idx
+on InvertedIndex
+(word,torrent_id);
+--------------------------------------
+
+-- v5 Subtitles DB
+CREATE TABLE Metadata (
+  metadata_id integer PRIMARY KEY ASC AUTOINCREMENT NOT NULL,
+  publisher_id text NOT NULL,
+  infohash text NOT NULL,
+  description text,
+  timestamp integer NOT NULL,
+  signature text NOT NULL,
+  UNIQUE (publisher_id, infohash),
+  FOREIGN KEY (publisher_id, infohash) 
+    REFERENCES ChannelCast(publisher_id, infohash) 
+    ON DELETE CASCADE -- the fk constraint is not enforced by sqlite
+);
+
+CREATE INDEX infohash_md_idx
+on Metadata(infohash);
+
+CREATE INDEX pub_md_idx
+on Metadata(publisher_id);
+
+
+CREATE TABLE Subtitles (
+  metadata_id_fk integer,
+  subtitle_lang text NOT NULL,
+  subtitle_location text,
+  checksum text NOT NULL,
+  UNIQUE (metadata_id_fk,subtitle_lang),
+  FOREIGN KEY (metadata_id_fk) 
+    REFERENCES Metadata(metadata_id) 
+    ON DELETE CASCADE, -- the fk constraint is not enforced by sqlite
+  
+  -- ISO639-2 uses 3 characters for lang codes
+  CONSTRAINT lang_code_length 
+    CHECK ( length(subtitle_lang) == 3 ) 
+);
+
+
+CREATE INDEX metadata_sub_idx
+on Subtitles(metadata_id_fk);
+
+-- Stores the subtitles that peers have as an integer bitmask
+ CREATE TABLE SubtitlesHave (
+    metadata_id_fk integer,
+    peer_id text NOT NULL,
+    have_mask integer NOT NULL,
+    received_ts integer NOT NULL, --timestamp indicating when the mask was received
+    UNIQUE (metadata_id_fk, peer_id),
+    FOREIGN KEY (metadata_id_fk)
+      REFERENCES Metadata(metadata_id)
+      ON DELETE CASCADE, -- the fk constraint is not enforced by sqlite
+
+    -- 32 bit unsigned integer
+    CONSTRAINT have_mask_length
+      CHECK (have_mask >= 0 AND have_mask < 4294967296)
+);
+
+CREATE INDEX subtitles_have_idx
+on SubtitlesHave(metadata_id_fk);
+
+-- this index can boost queries
+-- ordered by timestamp on the SubtitlesHave DB
+CREATE INDEX subtitles_have_ts
+on SubtitlesHave(received_ts);
+
+-------------------------------------
+
+COMMIT TRANSACTION create_table;
+
+----------------------------------------
+
+BEGIN TRANSACTION init_values;
+
+INSERT INTO Category VALUES (1, 'Video', 'Video Files');
+INSERT INTO Category VALUES (2, 'VideoClips', 'Video Clips');
+INSERT INTO Category VALUES (3, 'Audio', 'Audio');
+INSERT INTO Category VALUES (4, 'Compressed', 'Compressed');
+INSERT INTO Category VALUES (5, 'Document', 'Documents');
+INSERT INTO Category VALUES (6, 'Picture', 'Pictures');
+INSERT INTO Category VALUES (7, 'xxx', 'XXX');
+INSERT INTO Category VALUES (8, 'other', 'Other');
+
+INSERT INTO TorrentStatus VALUES (0, 'unknown', NULL);
+INSERT INTO TorrentStatus VALUES (1, 'good', NULL);
+INSERT INTO TorrentStatus VALUES (2, 'dead', NULL);
+
+INSERT INTO TorrentSource VALUES (0, '', 'Unknown');
+INSERT INTO TorrentSource VALUES (1, 'BC', 'Received from other user');
+
+INSERT INTO MyInfo VALUES ('version', 5);
+
+COMMIT TRANSACTION init_values;
+
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/subs_languages.csv b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/subs_languages.csv
new file mode 100644 (file)
index 0000000..2261bc6
--- /dev/null
@@ -0,0 +1,32 @@
+ara,Arabic 
+ben,Bengali 
+ces,Czech
+dan,Danish
+deu,German
+ell,Greek
+eng,English
+fas,Persian
+fin,Finnish
+fra,French
+hin,Hindi 
+hrv,Croatian
+hun,Hungarian
+ita,Italian
+jav,Javanese
+jpn,Japanese
+kor,Korean
+lit,Latvia 
+msa,Malay
+nld,Dutch
+pan,Panjabi
+pol,Polish
+por,Portuguese
+ron,Romanian
+rus,Russian
+spa,Spanish
+srp,Serbian
+swe,Swedish
+tur,Turkish
+ukr,Ukranian
+vie,Vietnamese
+zho,Chinese
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/wrong_subs_languages.1.csv b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/wrong_subs_languages.1.csv
new file mode 100644 (file)
index 0000000..ad2f31b
--- /dev/null
@@ -0,0 +1,32 @@
+ara,Arabic 
+ben,Bengali 
+ces,Czech
+dan,Danish
+deu,German
+ell,Greek
+eng,English
+fas,Persian
+fin,Finnish
+fra,French
+hin,Hindi 
+hrv,Croatian
+hun,Hungarian
+ita,Italian
+jav,Javanese
+jpn,Japanese
+kor,Korean
+lit,Latvia 
+msa,Malay
+nld,Dutch
+pan,Panjabi
+pol,Polish
+por,Portuguese
+ron
+rus,Russian
+spa,Spanish
+srp,Serbian
+swe,Swedish
+tur,Turkish
+ukr,Ukranian
+vie,Vietnamese
+zho,Chinese
diff --git a/instrumentation/next-share/BaseLib/Test/subtitles_test_res/wrong_subs_languages.2.csv b/instrumentation/next-share/BaseLib/Test/subtitles_test_res/wrong_subs_languages.2.csv
new file mode 100644 (file)
index 0000000..05681af
--- /dev/null
@@ -0,0 +1,32 @@
+ara,Arabic 
+ben,Bengali 
+ces,Czech
+dan,Danish
+deu,German
+ell,Greek
+eng,English
+fas,Persian
+fin,Finnish
+fra,French
+hin,Hindi 
+hrv,Croatian
+hun,Hungarian
+ita,Italian
+jav,Javanese
+jpno,Japanese
+kor,Korean
+lit,Latvia 
+msa,Malay
+nld,Dutch
+pan,Panjabi
+pol,Polish
+por,Portuguese
+ron,Romanian
+rus,Russian
+spa,Spanish
+srp,Serbian
+swe,Swedish
+tur,Turkish
+ukr,Ukranian
+vie,Vietnamese
+zho,Chinese
diff --git a/instrumentation/next-share/BaseLib/Test/test.bat b/instrumentation/next-share/BaseLib/Test/test.bat
new file mode 100644 (file)
index 0000000..2ef57d2
--- /dev/null
@@ -0,0 +1,80 @@
+\r
+set PYTHONPATH=..\..\r
+\r
+REM python test_bsddb2sqlite.py # Arno, 2008-11-26: Alea jacta est.\r
+REM python test_buddycast.py # currently not working due to missing DataHandler functions, 2008-10-17\r
+REM python test_friend.py # Arno, 2008-10-17: need to convert to new DB structure\r
+REM python test_torrentcollecting.py # currently not working due to missing functions, 2009-12-04\r
+python test_TimedTaskQueue.py\r
+python test_bartercast.py\r
+python test_buddycast2_datahandler.py\r
+python test_cachingstream.py\r
+python test_closedswarm.py\r
+python test_connect_overlay.py singtest_connect_overlay\r
+python test_crawler.py\r
+python test_dialback_request.py\r
+python test_extend_hs.py\r
+python test_friendship_crawler.py\r
+python test_g2g.py\r
+python test_gui_server.py\r
+python test_merkle.py\r
+python test_multicast.py\r
+python test_osutils.py\r
+python test_permid.py\r
+python test_permid_response1.py\r
+python test_remote_query.py\r
+python test_seeding_stats.py\r
+python test_social_overlap.py\r
+python test_sqlitecachedb.py\r
+python test_status.py\r
+python test_superpeers.py \r
+python test_url.py\r
+python test_url_metadata.py\r
+python test_ut_pex.py\r
+python test_video_server.py\r
+python test_threadpool.py\r
+python test_miscutils.py\r
+\r
+CALL test_buddycast_msg.bat \r
+CALL test_dialback_conn_handler.bat\r
+CALL test_dialback_reply_active.bat\r
+REM # CALL test_dlhelp.bat       Arno, Disabled replaced with ProxyService\r
+REM # See warning in test_friendship.py\r
+CALL test_friendship.bat        \r
+CALL test_merkle_msg.bat\r
+CALL test_overlay_bridge.bat\r
+CALL test_rquery_reply_active.bat\r
+CALL test_secure_overlay.bat\r
+CALL test_sqlitecachedbhandler.bat\r
+CALL test_vod.bat\r
+CALL test_na_extend_hs.bat\r
+CALL test_channelcast.bat\r
+CALL test_subtitles.bat\r
+REM # CALL test_proxyservice.bat # Arno, not finished\r
+CALL test_proxyservice_as_coord.bat\r
+\r
+REM Takes a long time, do at end\r
+python test_natcheck.py\r
+\r
+REM ##### ARNO\r
+REM # wait till arno's fixes are merged\r
+REM # python test_buddycast5.py\r
+\r
+REM #### NITIN\r
+REM # broken...\r
+REM # python test_searchgridmanager.py\r
+\r
+REM ########### Not unittests\r
+REM #\r
+REM # 2010-02-03 Boudewijn: The stresstest works, but does not contain any\r
+REM # actual unittests... it just takes a long time to run\r
+REM # python test_buddycast4_stresstest.py\r
+REM #\r
+REM # 2010-02-03 Boudewijn: Doesn't look like this was ever a unittest\r
+REM # python test_tracker_checking.py\r
+\r
+REM ########### Obsolete\r
+REM #\r
+REM # 2010-02-03 Boudewijn: OLD, not using anymore\r
+REM # python test_buddycast4.py \r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test.sh b/instrumentation/next-share/BaseLib/Test/test.sh
new file mode 100644 (file)
index 0000000..b93accf
--- /dev/null
@@ -0,0 +1,84 @@
+#!/bin/sh -x
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+export PYTHONPATH=../..:"$PYTHONPATH"
+
+# python test_bsddb2sqlite.py # Arno, 2008-11-26: Alea jacta est.
+# python test_buddycast.py # currently not working due to missing DataHandler functions, 2008-10-17
+# python test_friend.py # Arno, 2008-10-17: need to convert to new DB structure
+# python test_torrentcollecting.py # currently not working due to missing functions, 2009-12-04
+python test_TimedTaskQueue.py
+python test_bartercast.py
+python test_buddycast2_datahandler.py
+python test_cachingstream.py
+python test_closedswarm.py
+python test_connect_overlay.py singtest_connect_overlay
+python test_crawler.py
+python test_dialback_request.py
+python test_extend_hs.py
+python test_friendship_crawler.py
+python test_g2g.py
+python test_gui_server.py
+python test_merkle.py
+python test_multicast.py
+python test_osutils.py
+python test_permid.py
+python test_permid_response1.py
+python test_remote_query.py
+python test_seeding_stats.py
+python test_social_overlap.py
+python test_sqlitecachedb.py
+python test_status.py
+python test_superpeers.py 
+python test_url.py
+python test_url_metadata.py
+python test_ut_pex.py
+python test_video_server.py
+python test_threadpool.py
+python test_miscutils.py
+
+./test_buddycast_msg.sh
+./test_dialback_conn_handler.sh
+./test_dialback_reply_active.sh
+# ./test_dlhelp.sh            # Arno, Disabled replaced with ProxyService
+./test_friendship.sh          # See warning in test_friendship.py
+./test_merkle_msg.sh
+./test_overlay_bridge.sh
+./test_rquery_reply_active.sh
+./test_secure_overlay.sh
+./test_sqlitecachedbhandler.sh
+./test_vod.sh
+sh ./test_na_extend_hs.sh # sh should be superfluous
+sh ./test_channelcast.sh
+./test_subtitles.sh
+# ./test_proxyservice.sh  # Arno not finished
+./test_proxyservice_as_coord.sh
+
+
+# Takes a long time, do at end
+python test_natcheck.py
+
+##### ARNO
+# wait till arno's fixes are merged
+# python test_buddycast5.py
+
+#### NITIN
+# broken...
+# python test_searchgridmanager.py
+
+########### Not unittests
+#
+# 2010-02-03 Boudewijn: The stresstest works, but does not contain any
+# actual unittests... it just takes a long time to run
+# python test_buddycast4_stresstest.py
+#
+# 2010-02-03 Boudewijn: Doesn't look like this was ever a unittest
+# python test_tracker_checking.py
+
+########### Obsolete
+#
+# 2010-02-03 Boudewijn: OLD, not using anymore
+# python test_buddycast4.py 
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_TimedTaskQueue.py b/instrumentation/next-share/BaseLib/Test/test_TimedTaskQueue.py
new file mode 100644 (file)
index 0000000..78865bf
--- /dev/null
@@ -0,0 +1,80 @@
+import unittest
+from time import sleep
+
+from BaseLib.Utilities.TimedTaskQueue import TimedTaskQueue
+
+class TestTimedTaskQueue(unittest.TestCase):
+    
+    def setUp(self):
+        pass
+        
+    def tearDown(self):
+        pass
+    
+    def test_addTask(self):
+        self.queue = TimedTaskQueue()
+        self.count = 0
+        self.queue.add_task(self.task3a, 3)
+        self.queue.add_task(self.task0, 0)
+        self.queue.add_task(self.task3b, 3)
+        self.queue.add_task(self.task2, 1)
+        sleep(6)
+        assert self.count == 11
+        del self.queue
+        
+    def task0(self):
+        self.count += 1
+        assert self.count == 1
+    
+    def task2(self):
+        self.count += 2
+        assert self.count == 3
+    
+    def task3a(self):
+        self.count += 4
+        assert self.count == 7 or self.count == 11
+    
+    def task3b(self):
+        self.count += 4
+        assert self.count == 7 or self.count == 11
+
+    def test_addTask0FIFO(self):
+        self.queue = TimedTaskQueue()
+        self.count = 0
+        self.queue.add_task(self.task0a, 0)
+        self.queue.add_task(self.task0b, 0)
+        self.queue.add_task(self.task0c, 0)
+        self.queue.add_task(self.task0d, 0)
+        sleep(6)
+        assert self.count == 4
+        del self.queue
+
+    def task0a(self):
+        assert self.count == 0
+        self.count = 1
+        
+    def task0b(self):
+        assert self.count == 1
+        self.count = 2
+
+    def task0c(self):
+        assert self.count == 2
+        self.count = 3
+
+    def task0d(self):
+        assert self.count == 3
+        self.count = 4
+    
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestTimedTaskQueue))
+    
+    return suite
+        
+def main():
+    unittest.main(defaultTest='test_suite')
+
+    
+if __name__ == '__main__':
+    main()     
+            
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/test_as_server.py b/instrumentation/next-share/BaseLib/Test/test_as_server.py
new file mode 100644 (file)
index 0000000..48b4736
--- /dev/null
@@ -0,0 +1,74 @@
+# Written by Arno Bakker, Jie Yang
+# see LICENSE.txt for license information
+
+import unittest
+
+import os
+import sys
+import tempfile
+import random
+import shutil
+import time
+from traceback import print_exc
+
+from M2Crypto import EC
+
+from BaseLib.Core.Session import *
+from BaseLib.Core.SessionConfig import *
+
+
+DEBUG=False
+
+class TestAsServer(unittest.TestCase):
+    """ 
+    Parent class for testing the server-side of Tribler
+    """
+    
+    def setUp(self):
+        """ unittest test setup code """
+        self.setUpPreSession()
+        self.session = Session(self.config)
+        self.hisport = self.session.get_listen_port()
+        self.setUpPostSession()
+
+    def setUpPreSession(self):
+        """ Should set self.config_path and self.config """
+        self.config_path = tempfile.mkdtemp()
+
+        self.config = SessionStartupConfig()
+        self.config.set_state_dir(self.config_path)
+        self.config.set_listen_port(random.randint(10000, 60000))
+        self.config.set_buddycast(False)
+        self.config.set_start_recommender(False)
+        self.config.set_torrent_checking(False)
+        self.config.set_superpeer(False)
+        self.config.set_dialback(False)
+        self.config.set_social_networking(False)
+        self.config.set_remote_query(False)
+        self.config.set_internal_tracker(False)
+        self.config.set_bartercast(False)
+        self.config.set_multicast_local_peer_discovery(False)
+        # Assume all test scripts are run from Tribler/Test
+        self.config.set_install_dir(os.path.abspath(os.path.join('..','..')))
+
+        self.my_keypair = EC.gen_params(EC.NID_sect233k1)
+        self.my_keypair.gen_key()
+
+    def setUpPostSession(self):
+        """ Should set self.his_keypair """
+        keypair_filename = os.path.join(self.config_path,'ec.pem')
+        self.his_keypair = EC.load_key(keypair_filename)
+
+    def tearDown(self):
+        """ unittest test tear down code """
+        if self.session is not None:
+            self.session.shutdown()
+            print >>sys.stderr,"test_as_server: sleeping after session shutdown"
+            time.sleep(2)
+        try:
+            shutil.rmtree(self.config_path)
+        except:
+            # Not fatal if something goes wrong here, and Win32 often gives
+            # spurious Permission Denied errors.
+            print_exc()
+        
diff --git a/instrumentation/next-share/BaseLib/Test/test_bartercast.py b/instrumentation/next-share/BaseLib/Test/test_bartercast.py
new file mode 100644 (file)
index 0000000..b3b43c8
--- /dev/null
@@ -0,0 +1,219 @@
+# Written by Arno Bakker, Michel Meulpolder
+# see LICENSE.txt for license information
+
+import unittest
+import os
+import sys
+import time
+from BaseLib.Core.Utilities.Crypto import sha
+from types import DictType
+from M2Crypto import Rand,EC
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+DEBUG=True
+
+
+class TestBarterCast(TestAsServer):
+    """ 
+    Testing BARTERCAST message of the BarterCast extension V1
+    """
+    
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        Rand.load_file('randpool.dat', -1)
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        # Enable buddycast
+        self.config.set_buddycast(True)
+        self.config.set_start_recommender(True)
+        self.config.set_bartercast(True)
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())        
+        self.myhash = sha(self.mypermid).digest()
+
+    def tearDown(self):
+        """ override TestAsServer """
+        TestAsServer.tearDown(self)
+        try:
+            os.remove('randpool.dat')
+        except:
+            pass
+
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        # 1. test good BARTERCAST
+        self.subtest_good_bartercast()
+
+
+        # 2. test various bad BARTERCAST messages
+        self.subtest_bad_not_bdecodable()
+        self.subtest_bad_not_dict1()
+        self.subtest_bad_not_dict2()
+        self.subtest_bad_empty_dict()
+        self.subtest_bad_wrong_dict_keys()
+
+        
+
+    #
+    # Good BARTERCAST
+    #
+    def subtest_good_bartercast(self):
+        """ 
+            test good BARTERCAST messages
+        """
+        print >>sys.stderr,"test: good BARTERCAST"
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = self.create_good_bartercast()
+        s.send(msg)
+        while True:
+            resp = s.recv()
+            print >>sys.stderr,"test: reply message",getMessageName(resp[0])
+            if resp[0] == KEEP_ALIVE:
+                continue
+            else:
+                break
+        self.assert_(resp[0] == BARTERCAST)
+        self.check_bartercast(resp[1:])
+        time.sleep(10)
+        # the other side should not have closed the connection, as
+        # this is all valid, so this should not throw an exception:
+        s.send('bla')
+        s.close()
+
+    def create_good_bartercast(self):
+
+        print "Creating good bartercast message..."
+
+        top_peers = ["permid1", "permid2"]
+        data = {}
+        
+        for permid in top_peers:
+
+            data_to = 100
+            data_from = 200
+            data[permid] = {'u': data_to, 'd': data_from}
+        
+        bartercast_data = {'data': data}
+
+        ###
+        for id in range(0,10):
+            permid = 'permid'+str(id)
+            data_to = 100
+            data_from = 200
+            data[permid] = {'u': data_to, 'd': data_from}
+        bartercast_data = {'data': data}
+        ###
+
+        print "Bartercast_data: ", bartercast_data
+        
+        return self.create_payload(bartercast_data)
+
+
+    def create_payload(self,r):
+        return BARTERCAST+bencode(r)
+
+    def check_bartercast(self,data):
+        d = bdecode(data)
+        
+        print "Received data:"
+        print d
+        
+        self.assert_(type(d) == DictType)
+        self.assert_(d.has_key('data'))
+        self.check_bartercast_data(d['data'])
+
+
+    def check_bartercast_data(self,d):
+        self.assert_(type(d) == DictType)
+        print "test: bartercast_data: keys is",d.keys()
+
+
+
+    # Bad BARTERCAST
+    #    
+    def subtest_bad_not_bdecodable(self):
+        self._test_bad(self.create_not_bdecodable)
+
+    def subtest_bad_not_dict1(self):
+        self._test_bad(self.create_not_dict1)
+
+    def subtest_bad_not_dict2(self):
+        self._test_bad(self.create_not_dict2)
+
+    def subtest_bad_empty_dict(self):
+        self._test_bad(self.create_empty_dict)
+
+    def subtest_bad_wrong_dict_keys(self):
+        self._test_bad(self.create_wrong_dict_keys)
+
+    def _test_bad(self,gen_soverlap_func):
+        print >>sys.stderr,"test: bad BARTERCAST",gen_soverlap_func
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = gen_soverlap_func()
+        s.send(msg)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        self.assert_(len(s.recv())==0)
+        s.close()
+
+    def create_not_bdecodable(self):
+        return BARTERCAST+"bla"
+
+    def create_not_dict1(self):
+        bartercast = 481
+        return self.create_payload(bartercast)
+
+    def create_not_dict2(self):
+        bartercast = []
+        return self.create_payload(bartercast)
+
+    def create_empty_dict(self):
+        bartercast = {}
+        return self.create_payload(bartercast)
+
+    def create_wrong_dict_keys(self):
+        bartercast = {}
+        bartercast['data'] = {'permid1': {}}
+        return self.create_payload(bartercast)
+
+
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestBarterCast))
+    
+    return suite
+
+def sign_data(plaintext,keypair):
+    digest = sha(plaintext).digest()
+    return keypair.sign_dsa_asn1(digest)
+
+def verify_data(plaintext,permid,blob):
+    pubkey = EC.pub_key_from_der(permid)
+    digest = sha(plaintext).digest()
+    return pubkey.verify_dsa_asn1(digest,blob)
+
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast.py b/instrumentation/next-share/BaseLib/Test/test_buddycast.py
new file mode 100644 (file)
index 0000000..2b40fb3
--- /dev/null
@@ -0,0 +1,198 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+
+# Arno, pychecker-ing: the addTarget and getTarget methods of JobQueue are
+# no longer there, this code needs to be updated.
+
+# 2008-06-24: add OverlayBridge equiv to DataHandler() constructor calls.
+
+import os
+import sys
+import unittest
+
+
+from BaseLib.__init__ import LIBRARYNAME    
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, str2bin
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import *
+from BaseLib.Core.BuddyCast.buddycast import DataHandler
+from bak_tribler_sdb import *
+
+LIB = 0
+AUTOCOMMIT = 0
+BUSYTIMEOUT = 5000
+
+SQLiteCacheDB.DEBUG = False
+
+class Session:
+    def __init__(self):
+        self.sessconfig = {}
+    
+    def get_permid(self):
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        return fake_permid_x
+    
+    def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], objectID = None):
+        pass
+    
+
+class FakeLaunchmany:
+    
+    def __init__(self, db):
+        self.peer_db = PeerDBHandler.getInstance()
+        self.superpeer_db = SuperPeerDBHandler.getInstance()
+        self.torrent_db = TorrentDBHandler.getInstance()
+        self.mypref_db = MyPreferenceDBHandler.getInstance()
+        self.pref_db = PreferenceDBHandler.getInstance()
+        self.friend_db =  FriendDBHandler.getInstance()
+        self.listen_port = 6881
+        self.session = Session()
+
+    def get_ext_ip(self):
+        return '127.0.0.1'
+
+class FakeOverlayBridge:
+    def add_task(self, foo, sec=0, ident=None):
+        foo()
+
+class TestBuddyCastDataHandler(unittest.TestCase):
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.initDB(db_path, busytimeout=BUSYTIMEOUT)
+        launchmany = FakeLaunchmany(db)
+        overlay_bridge = FakeOverlayBridge()
+        self.datahandler = DataHandler(launchmany,overlay_bridge)
+                
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+        self.datahandler.peers = None
+        del self.datahandler
+            
+    def loadData(self, npeers = 2500):
+        self.datahandler.updateMyPreferences()
+        self.datahandler.loadAllPeers(npeers)
+        #self.datahandler.loadAllPrefs(npeers)
+                    
+    def test_updateMyPreferences(self):
+        self.datahandler.updateMyPreferences()
+        assert self.datahandler.myprefs == [126, 400, 562, 1074, 1279, 1772, 1812, 2271, 2457, 2484, 3359, 3950]
+        
+        self.datahandler.updateMyPreferences(10)
+        assert self.datahandler.myprefs == [126, 400, 562, 1074, 1279, 1772, 1812, 2271, 2457, 3359]
+            
+        assert len(self.datahandler.owners[3359]) == 21
+        assert len(self.datahandler.owners[2484]) == 0
+        assert len(self.datahandler.owners[400]) == 8
+            
+    def test_updateAllPeers_Prefs(self):
+        self.datahandler.loadAllPeers()
+        for p in self.datahandler.peers:
+            assert len(self.datahandler.peers[p][2]) == 0
+        assert len(self.datahandler.peers) == 3995
+        
+        npeers = 2500
+        self.datahandler.peers = None
+        self.datahandler.loadAllPeers(npeers)
+        for p in self.datahandler.peers:
+            assert len(self.datahandler.peers[p][2]) == 0
+        assert len(self.datahandler.peers) == npeers
+        
+        # Statistics: loadAllPeers takes 0.015 sec on test db
+        #                                0.5 sec on Johan's db
+        #                                0.03 second on loading 2500 peers from Johan's db
+
+    def test_updateAllPrefs(self):
+        self.loadData(2500)
+        n = 0
+        for p in self.datahandler.peers:
+            assert len(self.datahandler.peers[p]) == 3
+            n += len(self.datahandler.peers[p][2])
+        assert n == self.datahandler.nprefs
+
+        self.datahandler.peers = None
+        self.loadData(None)
+        n = 0
+        for p in self.datahandler.peers:
+            assert len(self.datahandler.peers[p]) == 3
+            n += len(self.datahandler.peers[p][2])
+        assert n == self.datahandler.nprefs
+            
+#        Statistics: 2500 peers preferences covers 91% of all preferences
+#        self.datahandler.peers = None
+#        for i in range(100, 4000, 100):
+#            self.datahandler.loadAllPrefs(i)
+#            print i, self.datahandler.nprefs, '%.2d%%'%(100*self.datahandler.nprefs/60634)
+
+    """        Statistics of memory usage (KB)
+                    Full Test DB     Full Johan's DB     2000 peers from Johan's DB
+            Init:    11,520            12,912            12,912
+        LoadPeers:   12,656            23,324            12,532
+        LoadPrefs:   17,792            50,820            18,380
+    """
+        
+    def test_updateAllSim(self):
+        init_bak_tribler_sdb()
+        self.loadData(2500)
+        pid = 3582
+        oldsim = self.datahandler.peer_db.getOne('similarity', peer_id=pid)
+        assert abs(oldsim-21.941432788)<1e-4, oldsim
+        n_updates = self.datahandler.updateAllSim()    # 0.296 second for Johan's db, 0. 188 second for test db
+        assert n_updates == 2166
+        sim = self.datahandler.peer_db.getOne('similarity', peer_id=pid)
+        assert abs(sim-17.9844112279)<1e-4, sim
+        
+    def test_adddelMyPref(self):
+        self.datahandler.overlay_bridge = FakeOverlayBridge()
+        self.loadData()
+        pid = 3582
+        self.datahandler.updateAllSim()
+        oldsim = self.datahandler.peer_db.getOne('similarity', peer_id=pid)
+        tids = sample(range(4000),10)
+        for tid in tids:
+            infohash = self.datahandler.torrent_db.getInfohash(tid)
+            
+            self.datahandler.addMyPref(infohash)
+            torrents = self.datahandler.pref_db._getTorrentOwnersID(tid)
+            assert self.datahandler.owners[tid] == set(torrents), (self.datahandler.owners[tid], set(torrents))
+            assert tid in self.datahandler.myprefs
+            sim = self.datahandler.peer_db.getOne('similarity', peer_id=pid)
+            assert abs(sim-oldsim)>1e-4, (sim, oldsim)
+            
+            self.datahandler.delMyPref(infohash)
+            assert tid not in self.datahandler.owners.keys()
+            assert tid not in self.datahandler.myprefs
+            sim = self.datahandler.peer_db.getOne('similarity', peer_id=pid)
+            assert abs(sim-oldsim)<1e-4, (sim, oldsim)
+            
+            oldsim = sim
+            
+    def test_get_dns_from_peerdb(self):
+        permid_str_id_1 = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAA6SYI4NHxwQ8P7P8QXgWAP+v8SaMVzF5+fSUHdAMrs6NvL5Epe1nCNSdlBHIjNjEiC5iiwSFZhRLsr'
+        permid = str2bin(permid_str_id_1)
+        self.loadData(2500)
+        assert self.datahandler.get_dns_from_peerdb(permid) == ('68.108.115.221', 6881)
+        
+    def test_numbers(self):
+        self.loadData(2500)
+        npeers = self.datahandler.get_npeers()
+        ntorrents = self.datahandler.get_ntorrents()
+        nmyprefs = self.datahandler.get_nmyprefs()
+        assert npeers == 2500
+        assert ntorrents == 4483
+        assert nmyprefs == 12
+        
+        
+def test_suite():
+    init_bak_tribler_sdb()
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestBuddyCastDataHandler))
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite')
+
+if __name__ == '__main__':
+    main()
+    
+    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast2_datahandler.py b/instrumentation/next-share/BaseLib/Test/test_buddycast2_datahandler.py
new file mode 100644 (file)
index 0000000..be8ab2d
--- /dev/null
@@ -0,0 +1,242 @@
+# Written by Jie Yang
+# see LICENSE.txt for license information
+
+# Arno, pychecker-ing: the addTarget and getTarget methods of JobQueue are
+# no longer there, this code needs to be updated.
+
+# 17/02/10 Boudewijn: this test reads a superpeer log to get actual
+# buddycast messages.  However, these messages were wrtten to the log
+# using readableBuddyCastMsg(...) and are NOT made back into normal
+# buddycast messages.  This causes some buddycast messages to be
+# silently dropped.
+
+import os
+import sys
+import unittest
+from traceback import print_exc
+from shutil import copy as copyFile, move
+from time import sleep
+import base64
+import math
+
+from BaseLib.Core.defaults import *
+from BaseLib.Core.BuddyCast.buddycast import DataHandler, BuddyCastFactory
+from BaseLib.Core.CacheDB.CacheDBHandler import *
+from BaseLib.Category.Category import Category
+from BaseLib.Utilities.TimedTaskQueue import TimedTaskQueue
+from BaseLib.Core.Statistics.Crawler import Crawler
+from bak_tribler_sdb import *
+
+
+STATE_FILE_NAME_PATH = os.path.join(FILES_DIR, 'tribler.sdb-journal')
+S_TORRENT_PATH_BACKUP = os.path.join(FILES_DIR, 'bak_single.torrent')
+S_TORRENT_PATH = os.path.join(FILES_DIR, 'single.torrent')
+
+M_TORRENT_PATH_BACKUP = os.path.join(FILES_DIR, 'bak_multiple.torrent')    
+M_TORRENT_PATH = os.path.join(FILES_DIR, 'multiple.torrent')    
+BUSYTIMEOUT = 5000
+
+
+def init():
+    init_bak_tribler_sdb()
+
+    db = SQLiteCacheDB.getInstance()
+    db.initDB(TRIBLER_DB_PATH, busytimeout=BUSYTIMEOUT)
+    
+    print >>sys.stderr,"OPENING DB",TRIBLER_DB_PATH
+    
+    #db.execute_write('drop index Torrent_relevance_idx')
+    TorrentDBHandler.getInstance().register(Category.getInstance('..'),'.')
+
+
+class FakeSession:
+    sessconfig = {}
+    def get_permid(self, *args, **kargs):
+        return base64.decodestring('MG0CAQEEHR/bQNvwga7Ury5+8vg/DTGgmMpGCz35Zs/2iz7coAcGBSuBBAAaoUADPgAEAL2I5yVc1+dWVEx3nbriRKJmOSlQePZ9LU7yYQoGABMvU1uGHvqnT9t+53eaCGziV12MZ1g2p0GLmZP9\n' )
+
+    def get_moderationcast_moderations_per_have(self, *args, **kargs):
+        return 100
+
+    def add_observer(self, *args, **kargs):
+        pass
+
+    def get_votecast_recent_votes(self):
+        return sessdefaults['votecast_recent_votes']
+    
+    def get_votecast_random_votes(self):
+        return sessdefaults['votecast_random_votes']
+
+
+class FakeLauchMany:
+    
+    def __init__(self):
+        self.session = FakeSession()
+        self.crawler = Crawler.get_instance(self.session)
+        
+        self.my_db          = MyDBHandler.getInstance()
+        self.peer_db        = PeerDBHandler.getInstance()
+        self.torrent_db     = TorrentDBHandler.getInstance()
+        self.torrent_db.register(Category.getInstance(),'.')
+        self.mypref_db      = MyPreferenceDBHandler.getInstance()
+        self.pref_db        = PreferenceDBHandler.getInstance()
+        self.superpeer_db   = SuperPeerDBHandler.getInstance()
+        self.friend_db      = FriendDBHandler.getInstance()
+        self.bartercast_db  = BarterCastDBHandler.getInstance()
+        self.bartercast_db.registerSession(self.session)
+        self.secure_overlay = FakeSecureOverlay()
+#        torrent_collecting_dir = os.path.abspath(config['torrent_collecting_dir'])
+        self.listen_port = 1234
+
+        self.channelcast_db = ChannelCastDBHandler.getInstance()
+        self.channelcast_db.registerSession(self.session)
+
+        self.votecast_db = VoteCastDBHandler.getInstance()
+        self.votecast_db.registerSession(self.session)
+        self.simi_db        = SimilarityDBHandler.getInstance()
+        self.pops_db = PopularityDBHandler.getInstance()
+
+    def get_ext_ip(self):
+        return None
+    
+    def set_activity(self, NTFY_ACT_RECOMMEND, buf):
+        pass
+    
+class FakeThread:
+    def join(self):
+        pass
+    
+class FakeSecureOverlay:
+    def get_dns_from_peerdb(self, permid):
+        return None    
+    
+class FakeOverlayBridge:
+    
+    def __init__(self):
+        self.thread = FakeThread()
+                    
+    def add_task(self, task, time=0, id=None):
+        if task == 'stop':
+            return
+        task()
+
+
+class TestBuddyCastDataHandler(unittest.TestCase):
+    
+    def setUp(self):
+        # prepare database
+
+        launchmany = FakeLauchMany()
+        self.overlay_bridge = TimedTaskQueue() 
+        #self.overlay_bridge = FakeOverlayBridge()
+        self.data_handler = DataHandler(launchmany, self.overlay_bridge, max_num_peers=2500)
+
+    def tearDown(self):
+        self.overlay_bridge.add_task('quit')
+        
+    def test_postInit(self):
+        #self.data_handler.postInit()
+        self.data_handler.postInit(1,50,0, 50)
+        #from time import sleep
+        
+class TestBuddyCast(unittest.TestCase):
+    
+    def setUp(self):
+        # prepare database
+
+        launchmany = FakeLauchMany()
+        self.overlay_bridge = TimedTaskQueue() 
+        #self.overlay_bridge = FakeOverlayBridge()
+        superpeer=False # enable it to test superpeer
+        self.bc = BuddyCastFactory.getInstance(superpeer=superpeer)
+        self.bc.register(self.overlay_bridge, launchmany, None, 
+                 None, None, True)
+
+    def tearDown(self):
+        self.overlay_bridge.add_task('quit')
+        print "Before join"
+
+    def remove_t_index(self):
+        indices = [
+        'Torrent_length_idx',
+        'Torrent_creation_date_idx',
+        'Torrent_relevance_idx',
+        'Torrent_num_seeders_idx',
+        'Torrent_num_leechers_idx',
+        #'Torrent_name_idx',
+        ]
+        for index in indices:
+            sql = 'drop index ' + index
+            self.data_handler.torrent_db._db.execute_write(sql)
+            
+    def remove_p_index(self):
+        indices = [
+        'Peer_name_idx',
+        'Peer_ip_idx',
+        'Peer_similarity_idx',
+        'Peer_last_seen_idx',
+        'Peer_last_connected_idx',
+        'Peer_num_peers_idx',
+        'Peer_num_torrents_idx'
+        ]
+        for index in indices:
+            sql = 'drop index ' + index
+            self.data_handler.peer_db._db.execute_write(sql)
+
+    def local_test(self):
+                
+        self.remove_t_index()
+        self.remove_p_index()
+                
+        from BaseLib.Test.log_parser import get_buddycast_data
+        
+        #start_time = time()
+        #print >> sys.stderr, "buddycast: ******************* start local test"
+        costs = []
+        self.data_handler.postInit(updatesim=False)
+        for permid, selversion, msg in get_buddycast_data(os.path.join(FILES_DIR,'superpeer120070902sp7001.log')):
+            message = bencode(msg)
+            #print 'got msg:', permid, selversion, message
+
+            try:
+                s = time()
+                self.bc.gotBuddyCastMessage(message, permid, selversion)
+                cost = time()-s
+                costs.append(cost)
+            except:
+                print_exc()
+                break
+
+            print 'got msg: %d %.2f %.2f %.2f %.2f' %(len(costs), cost, min(costs), sum(costs)/len(costs), max(costs))
+        # with all indices, min/avg/max:  0.00 1.78 4.57 seconds
+        # without index, min/avg/max:  0.00 1.38 3.43 seconds  (58)
+        print "Done"
+       
+    def test_start(self):
+        try:
+            self.bc.olthread_register(start=False)
+            self.data_handler = self.bc.data_handler
+            self.local_test()
+            print "Sleeping for 10 secs"
+            sleep(10)
+            print "Done2"
+            
+        except:
+            print_exc()
+            self.assert_(False)
+    
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestBuddyCastDataHandler))
+    suite.addTest(unittest.makeSuite(TestBuddyCast))
+    
+    return suite
+
+    
+def main():
+    init()
+    unittest.main(defaultTest='test_suite')
+
+if __name__ == '__main__':
+    main()
+    
+    
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast4_stresstest.py b/instrumentation/next-share/BaseLib/Test/test_buddycast4_stresstest.py
new file mode 100644 (file)
index 0000000..b87592d
--- /dev/null
@@ -0,0 +1,228 @@
+# Written by Nicolas Neubauer, modified from test_bartercast.py\r
+# see LICENSE.txt for license information\r
+\r
+import random\r
+import unittest\r
+import os\r
+import time as T\r
+from M2Crypto import Rand,EC\r
+\r
+from BaseLib.Test.test_as_server import TestAsServer\r
+from BaseLib.Core.BitTornado.BT1.MessageID import *\r
+from BaseLib.Core.BuddyCast.buddycast import BuddyCastFactory\r
+from BaseLib.Core.simpledefs import *\r
+from BaseLib.Core.Utilities.Crypto import sha\r
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import *\r
+\r
+\r
+def list_zipf_values(exponent, num_of_values):\r
+    list_of_values = []\r
+    b = 2 ** (exponent - 1)\r
+    while len(list_of_values) < num_of_values:\r
+        value = genvalue(b, exponent)\r
+        if value != None:\r
+            list_of_values.append(value)\r
+        return list_of_values\r
+\r
+def genvalue(b, exponent):\r
+    U = random.uniform(0,1)\r
+    V = random.uniform(0,1)\r
+    X = math.floor(U ** (-(1/(exponent - 1))))\r
+    T = (1 + (1/X)) ** (exponent - 1)\r
+    upper_bound = T/b\r
+    value = V*X*((T-1)/(b-1))\r
+    if value <= upper_bound:\r
+        return value\r
+\r
+\r
+class TestBuddyCast(TestAsServer):\r
+    """ \r
+    Testing BuddyCast 4 protocol interactions:\r
+      * clicklog exchange messages\r
+    """\r
+    \r
+    def setUp(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUp(self)\r
+        Rand.load_file('randpool.dat', -1)\r
+\r
+    def setUpPreSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPreSession(self)\r
+        # Enable buddycast\r
+        self.config.set_buddycast(True)\r
+\r
+    def setUpPostSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPostSession(self)\r
+\r
+        self.mypermid = str(self.my_keypair.pub().get_der())\r
+        self.hispermid = str(self.his_keypair.pub().get_der())        \r
+        self.myhash = sha(self.mypermid).digest()\r
+        \r
+        self.buddycast = BuddyCastFactory.getInstance(superpeer=True)\r
+        self.buddycast.olthread_register(True)\r
+        \r
+            \r
+    def tearDown(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.tearDown(self)\r
+        try:\r
+            os.remove('randpool.dat')\r
+        except:\r
+            pass\r
+\r
+\r
+    # copied from test_bartercast.py up to here\r
+\r
+    def test_all(self):\r
+        """ \r
+            I want to start a Tribler client once and then connect to\r
+            it many times. So there must be only one test method\r
+            to prevent setUp() from creating a new client every time.\r
+\r
+            The code is constructed so unittest will show the name of the\r
+            (sub)test where the error occured in the traceback it prints.\r
+        """\r
+        self.queryfile = "test_buddycast4_stresstest_queries.txt"\r
+        self.queries = []\r
+        f = open(self.queryfile,"r")\r
+        oldline = ""\r
+        for line in f.xreadlines():\r
+            line = unicode(line[0:-1], 'UTF-8')\r
+            if line==oldline:\r
+                continue\r
+            if line.strip=="":\r
+                continue\r
+            oldline= line\r
+            self.queries.append(line)\r
+            # print repr(line)\r
+            \r
+        print "queries: %d" % len(self.queries)\r
+        self.stresstest()\r
+        \r
+    def mean(self,l):\r
+        return sum(l)/(0.0+len(l))\r
+\r
+    def stresstest(self):\r
+        search_db = self.session.open_dbhandler(NTFY_SEARCH)\r
+        term_db = self.session.open_dbhandler(NTFY_TERM)\r
+        pref_db = self.session.open_dbhandler(NTFY_PREFERENCES)        \r
+        \r
+        num_torrents = 250000\r
+        num_peers = 65000\r
+        num_torrents_per_user = 25\r
+        query_ids = range(len(self.queries))\r
+        queries_per_torrent = 10\r
+        # set up a number of possible queries for each torrent\r
+        print "setting up queries"\r
+        torrent_terms = dict([(torrent_id, random.sample(query_ids, queries_per_torrent)) \r
+                              for torrent_id \r
+                              in xrange(num_torrents)])\r
+        \r
+        # log= open("c:\\log.txt", "w")\r
+        \r
+        for peer_id in xrange(num_peers):\r
+            store_times = []\r
+            complete_times = []\r
+            relterm_times = []\r
+            \r
+            # if peer_id%10==0:\r
+            #     print peer_id            \r
+            #     log.flush()\r
+                \r
+            \r
+            # put a slight long-tail distribution over torrents such that we get more frequently and less\r
+            # frequently clicked torrents.\r
+            # this causes the clicklo lookup distribution to spread in the graph \r
+            torrent_ids = [min(int(num_torrents*random.betavariate(1,.3)), num_torrents-1) \r
+                           for x \r
+                           in range(num_torrents_per_user)]\r
+            \r
+            query_ids = dict([ (torrent_id, random.choice(torrent_terms[torrent_id])) \r
+                                 for torrent_id \r
+                                 in torrent_ids])\r
+            query_terms = dict([ (torrent_id, self.queries[query_id].replace(","," ").split(" ")) \r
+                                 for (torrent_id, query_id)\r
+                                 in query_ids.items()])\r
+            all_term_lists = query_terms.values()\r
+            all_terms = []\r
+            for term_list in all_term_lists:\r
+                for term in term_list:\r
+                    if not term in all_terms:\r
+                        all_terms.append(term)\r
+            #print all_terms\r
+            before= T.time()\r
+            term_db.bulkInsertTerms(all_terms)\r
+            after = T.time()\r
+            termtime = after-before             \r
+\r
+            \r
+            for torrent_id in torrent_ids:\r
+                terms = query_terms[torrent_id]\r
+                before = T.time() \r
+                try:\r
+                    search_db.storeKeywords(peer_id, \r
+                                           torrent_id, \r
+                                           terms=terms, \r
+                                           commit=False)\r
+                except:\r
+                    print terms\r
+                    raise\r
+                after = T.time()\r
+                store_times.append(after-before)\r
+\r
+                    \r
+            before=T.time()\r
+            search_db.commit()\r
+            after=T.time()\r
+            commit_time = after-before\r
+\r
+            for torrent_id in torrent_ids:\r
+                for term in query_terms[torrent_id]:\r
+                    if len(term)==0:\r
+                        continue\r
+                    before = T.time()\r
+                    t = term_db.getTermsStartingWith(term[0])\r
+                    after = T.time()\r
+                    complete_times.append(after-before)\r
+                    \r
+                    before = T.time()\r
+                    #print "torrent_id, term_id: %d, %d" % (term_db.getTermID(term), torrent_id)\r
+                    A = search_db.getRelativeTermFrequency(term_db.getTermID(term), torrent_id)\r
+                    #print A\r
+                    after = T.time()\r
+                    relterm_times.append(after-before)                                    \r
+                    \r
+            print "\n\n\nOVERALL: %f" % (termtime+sum(store_times)+commit_time)\r
+            print "term time: %f" % termtime\r
+            print "storage: %f (%f)" % (sum(store_times), self.mean(store_times))\r
+            print "commit: %f" % commit_time\r
+            print "completion: %f" % self.mean(complete_times)\r
+            print "retrieval: %f" % self.mean(relterm_times)\r
+            termsindb = term_db.getNumTerms()\r
+            print "terms: %d" % termsindb\r
+                      \r
+        #     log.write("%d\t%f\t%f\t%f\t%f\t%f\t%f\t%d\n" % (peer_id,\r
+        #                                                      termtime+sum(store_times)+commit_time,\r
+        #                                                      termtime, \r
+        #                                                      sum(store_times),\r
+        #                                                      commit_time, \r
+        #                                                      self.mean(complete_times), \r
+        #                                                      self.mean(relterm_times),\r
+        #                                                      termsindb))\r
+            \r
+\r
+                \r
+        # log.close()            \r
+                \r
+            \r
+def test_suite():\r
+    suite = unittest.TestSuite()\r
+    suite.addTest(unittest.makeSuite(TestBuddyCast))\r
+    #suite.addTest(unittest.makeSuite(TestBuddyCastNonServer))\r
+    return suite\r
+\r
+if __name__ == "__main__":\r
+    unittest.main()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast4_stresstest_queries.txt b/instrumentation/next-share/BaseLib/Test/test_buddycast4_stresstest_queries.txt
new file mode 100644 (file)
index 0000000..bd89ad7
--- /dev/null
@@ -0,0 +1,61735 @@
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+acoustic guitar 
+blackened 
+blackened.mp3 
+mature 
+shol 
+s6 
+superbad 
+pan's labyrinth 
+
+what i've done 
+miami vice 
+numb/encore 
+star trek 
+chavez 
+touhou 
+nadine jansen 
+big breasts 
+r. kelly 
+r. kelly 
+r. kelly 
+transformers 
+reggae 
+the nun 
+the saint 
+huge tits 
+huge tits 
+huge tits 
+osx 
+file maker pro 
+the starlite desperation 
+entourage season 4 
+pornoraphy 
+pornography 
+sex 
+
+
+
+pornagraphy 
+porngraphy 
+porngraphy 
+
+evochron 
+evochron 
+bourne ultimatum 
+bourne ultimatum 
+resident evil extinction 
+medal of honour airborne 
+handjob 
+handjob 
+g3 
+yeah yeah yeah 
+steve vai 
+die bedrohung 
+k3 
+2063 
+house german 
+scrubs german 
+need for speed 
+windows vista 
+bangbus 
+divx 
+amateur 
+cum 
+divx 
+sinbad 
+south park 
+mongrain 
+mongrain 
+friday 
+ozzy 
+bioshock 
+barbara 
+fun 
+citty 
+kitty 
+relax 
+gay 
+soliloquists 
+danny elfman 
+poison 
+metallica 
+the last of the mohicans 
+
+poison 
+metallica 
+metallica 
+metallica 
+
+300 
+gaber 
+
+rosetta 
+entourage 
+entourage 4 
+forced 
+lolita 
+top gear 
+ita 
+the three musketeers 
+pc 
+dusty rhodes 
+ita 
+carbon 
+dusty rhodes westminster 
+xxx 
+trieste 
+boobs 
+porn 
+bergamas 
+nordio 
+ween 
+lacucaracha 
+sammi 
+gay 
+little bri 
+little britain 
+janey do 
+it crowd 
+search files, youtube and liveleak 
+desenho chaves 
+search files, youtube and liveleak 
+jenna 
+dita 
+dita 
+dita 
+youtube 
+youtube 
+xxx 
+hairypotter 
+high school musical 2 
+greek 
+greek.s 
+greek 5 
+greek 6 
+greek 7 
+greek 8 
+greek 9 
+greek 10 
+greek 11 
+battlestar gallactica 
+battlestar galactica 
+resident 
+simpson 
+famaly guy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+daily show 
+bob dylan 
+weired al 
+weird al 
+rubberfunk 
+
+rubberfunk 
+rescue me s0e4e13 
+rescue me s0e4e13 
+rescue me s0e4e13 
+rescue me s0e4e13 
+my first sex teacher 
+my first sex teacher 
+avi 
+divx 
+movies 
+sandviken 
+lol 
+tuneup advisor 
+tune-up advisor 
+advisor 
+regcure 
+search files, youtube and liveleak 
+divx ita 
+divx 
+divx ita 
+grillo 
+florin salem 
+it 
+ita 
+lost 
+lost it 
+rob thomas 
+intervideo windvd 8 
+
+
+juno 
+juno 
+pro7 
+ärzte 
+mac 
+osx 86 
+nina 
+nina mercedez 
+search files, youtube and liveleak 
+private 
+xxx 
+die ärzte 
+virgin 
+virginity 
+robot chicken 
+axxo 
+pthc 
+
+pthc 
+pedo 
+creampie 
+hentai 
+preteen 
+webcam 
+young 
+lsm 
+ls magazine 
+lsm 
+axxo 
+sandra 
+sandra teen 
+vcl 
+vclvlc 
+vlc 
+axxofiles, youtube and liveleak 
+elvis 
+ihra drag racing 
+ihra professional drag racing 
+joe cocker 
+ihra professional drag racing 
+ihra professional drag racing 
+morecambe wise 
+twenty four 
+twenty four season 6 
+twenty four season 6 
+make a memory 
+kelly clarkson 
+feist 
+isomagic 
+alcohol 
+search files, youtube and liveleak 
+muscle cam 
+magic iso 
+bring the noise 
+muscle cam 
+muscle cam 
+chris cam 
+mr roboto 
+hung huge 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+chapelle oprah 
+chapelle oprah interview 
+chapelle oprah 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hung huge 
+the brave one 
+war 
+ufc 
+sqirt 
+squirt 
+porn 
+porn 
+conspiracy 
+conspiracy 
+conspiracy 
+
+a 
+a 
+wing chun 
+300 
+die 
+nelly 
+nelly 
+heavenly sword 
+heavenly 
+perquisite 
+kraftwerk 
+kraftwerk 
+underworld 
+underworld 
+underworld 
+underworld 
+underworld 
+underworld 
+kraftwerk 
+underworld 
+tangerine dream 
+minimal music 
+oceans thiteen 
+oceans thirteen 
+fist 
+vlc 
+vlc 
+vlc 
+vlc 
+vlc 
+vlc 
+vlc 
+vlc 
+vlc 
+christina branco blof 
+lameirinhas 
+lameirinhas 
+lameirinhas 
+the. 
+angelina jolie 
+paris hilton 
+search files, youtube and liveleak 
+plyboy 
+cristina aguilera 
+a4proxy 
+proxyway 
+hidemyip 
+hide ip 
+house 
+smallville 
+smallville 
+ghostsurf 
+heroes 
+sex video 
+dead parrot 
+wicked 
+the gopod the bad the wicked 
+the good the bad the wicked 
+the good the bad the wicked 
+wicked productions 
+wicked productions 
+lesbian 
+shana 
\83ã¤ã\83\86 
+proxyway 
+economic concepts 
+economic concepts 
+economic concepts 
+hitman blood money reloaded 
+hitman blood 
+search files, youtube and liveleak 
+streettivo 
+streettivo 
+cricket 
+cricket 
+cricket 
+pakistan 
+house 
+ita 
+italy 
+discografia 
+finley 
+ministry of sound 
+sound 
+sound 
+sound 
+prison break 
+dexter 
+take 
+take 
+dexter 
+the queen 
+gympl 
+gina wild 
+gina wild 
+tickling 
+silvia saint 
+mathematica 
+mathematica 
+pornstar 
+pornstar 
+dover 
+kitzeln 
+media player 
+science fiction 
+arcade 
+game 
+weeds season 3 
+weeds 
+incest 
+taboo 
+taboo 
+sex 
+
+
+taboo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fun 
+fun 
+fun 
+lustiges 
+lustiges 
+search files, youtube and liveleak 
+dead parrot 
+talking heads 
+nero 8 
+police 
+kean 
+captain sensible 
+pil 
+public image limited 
+24 
+die 
+24 
+24 3 
+idiocracy 
+puppet mastaz 
+southpark 
+fs9 
+mediadefender 
+dexter 
+mediadefender 
+sensual 
+prank 
+prank 
+mediadefender 
+
+mediadefender 
+mediadefender 
+mediadefender 
+axxo 
+the one 
+"the one" 
+the one jet li 
+primary colors 
+
+ice cube 
+hindi 
+24 s06e09 
+24 s06e10 
+24 s06e11 
+24 s06e13 
+24 s06e22 
+24 s06e23 
+sa leao 
+southpark 
+orn 
+porn 
+rayman 
+korn 
+korn 
+korn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+a 
+woodcock 
+woodcock 
+woodcock 
+woodcock 
+balls of fury 
+504 boyz i can tell 
+armageddon empires 
+squirt 
+wet pant 
+wet 
+bobbi 
+
+peep 
+fantasy wars 
+fantasy wars 
+
+private 
+dead parrot 
+dead parrot 
+celeb 
+dead parrot 
+underage 
+twelve 
+dead parrot 
+
+mediadefender 
+mediadefender 
+ladytron 
+teach_my 
+teach 
+c# 
+tits 
+tits 
+softcore 
+woman 
+women 
+food 
+wu tang 
+rza 
+vai 
+fucking 
+funn 
+poo 
+daria 
+daria 
+daria 
+daria 
+katie fey 
+superman 
+swinger 
+katie fey 
+katie fey 
+wächter der nacht 
+fucked 
+nochnoi dozor 
+the bourne ultimatum 
+william orbit 
+ozric tentacles 
+
+chinese teens 
+sucks 
+adult sins 
+the bourne ultimatum 
+sucks cock 
+paris hilton 
+ivanovic 
+gay 
+gay 
+
+wu tang 
+blue states 
+
+orgy 
+two 
+two 
+brand new 
+kreatief 
+blowjob 
+sex 
+brand new 
+ricj 
+rock 
+rock your body 
+alternative 
+tiesto 2007 
+the bourne ultimatum 
+i wish 
+rock your body 
+wantens 
+tone loc wild thing 
+reckless 
+bodywork 
+david lee roth 
+filmes portugueses 
+patio das cantigas 
+take on me 
+jamiroquai 
+jamiroquai little l 
+need for speed carbon 
+ubuntu 
+stargate 
+leave britney alone 
+adobe photoshop 
+stickam 
+teens 
+cam girls 
+linux 
+prey 
+prey 
+yiff 
+the beatles i need you 
+adobe 
+bleach 
+finale 
+finale 2008 
+one peice 
+one piece 
+galactica 
+text editor 
+tycoon 
+text editor 
+resident evil 
+ditor 
+text 
+beryl 
+
+
+blade runner 
+adobe 
+transformers 
+
+phish 
+amateur 
+indian 
+indian 
+indian 
+indian 
+desi 
+desi 
+indian 
+desi 
+desi 
+phish 
+phish 
+
+phish 
+olivia monijca 
+desi 
+lesbian 
+olivia mojica 
+amateur 
+enemy territory 
+indian 
+buerets 
+arab 
+arab 
+knoppix 
+lela 
+dude where's my car 
+24 
+vanessa hudgens 
+death 
+
+jamie cull 
+jamie 
+cullen 
+harry 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+java 
+thinking in java 
+thinking in java 
+search files, youtube and liveleak 
+traviata 
+traviata pavarotti 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+frangoulis 
+28 settimane dopo 
+28 settimane dopo 
+28 settimane dopo 
+28 settimane dopo 
+28 settimane dopo 
+search files, youtube and liveleak 
+28 settimane dopo 
+one piece vegapunk 
+handjob 
+handjob 
+handjob 
+handjob 
+weeds 
+akon 
+karma police 
+rabbit in your headlights 
+daily show 
+colbert report 
+upskirt 
+abba 
+dawson miller 
+abba 
+abba 
+abba 
+abba 
+abba 
+abba 
+upskirt 
+simpsons movie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anal 
+rock 
+rock 
+metal 
+metal 
+pop 
+pop 
+pop 
+pop 
+upskirt 
+ivete sangalo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle latin boy 
+muscle boy 
+upskirt 
+muscle boy video 
+muscle boy video 
+310 to yuma 
+oasis 
+oasis 
+dvd 
+film 
+film 
+film 
+film 
+mature 
+adobe cs3 ita 
+simple life 
+casting 
+casting +mature 
+casting +mature 
+casting and mature 
+tomtom 
+over 50 
+over50 
+hairy 
+moss 
+sharepoint 
+vintage 
+survivor 
+funny 
+dragon ball 
+linkin park 
+search files, youtube and liveleak 
+milf 
+italian 
+linkin park 
+dragon ball 
+dragon ball 
+dragon ball 
+trailer 
+pantyhose 
+stardust 
+far cry 
+madonna 
+britney spears 
+voetbal 
+psp metal gear solid 
+unreal tournament apple 
+unreal tournament 
+lsbian 
+doom 3 
+search files, youtube and liveleak 
+doom 3 mac 
+lesbian 
+apple doom 3 
+apple unreal 
+counter strike 
+search files, youtube and liveleak 
+counter strike os x 
+unreal tournament os x 
+unreal tournament for apple 
+unreal tournament for mac 
+umbrella rihanna 
+masturbation 
+rihanna 
+as 
+a 
+briana banks 
+banks 
+webcam 
+jacinta 
+webcam 
+norton 
+norton 
+norton 
+norton 
+norton 
+http://www.freerapidaccount.com/free/?r=1057 
+http://www.freerapidaccount.com/free/?r=1057 
+http://www.freerapidaccount.com/free/?r=1057 
+fanny lu 
+fanny lu 
+me enamora - juanes 
+me enamora 
+juanes 
+juanes 
+stone age 
+search files, youtube and liveleak 
+pjk 
+microsoft windows vista 
+pjk 
+pjk rbv 
+horses 
+horses 
+pjk 
+pjk 
+pjk 
+vista 
+ps2 
+venus 
+search files, youtube and liveleak 
+das streben nach glück 
+eagles of death metal 
+
+
+mediadefender 
+search files, youtube and liveleak 
+serch dragon ball 
+serch dragon ball 
+aniston 
+courtney cox 
+puffy 
+puffies 
+voab 
+heroes 
+dragon ball 
+naaktstrand 
+neger 
+negerlul 
+penis 
+big breasts 
+small breasts 
+rush 
+radiohead 
+fanny lu 
+van halen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rangers v stuttgart 
+rangers v stuttgart 
+lt 
+rus 
+[lt] 
+ita 
+boeijen 
+kite 
+search all files 
+manowar 
+meat loaf 
+boeijen 
+boeijen 
+cashback 
+leather boots 
+.net 
+.net 
+.net 
+
+evening gloves 
+
+
+azores 
+azores 
+azores 
+azores 
+azores 
+angra 
+ps2 
+dvd 
+dvd 
+dvd 
+blanzing 
+terceira 
+i 
+pico 
+pico 
+genesis 
+britney 
+heroes 
+heroes.s01e3 
+heroes.s01 
+heroes 
+azores 
+abby 
+lover 
+piss 
+nero 
+compilation 
+nude 
+nudist 
+fkk 
+scat 
+xxx 
+britney 
+britney 
+puke 
+elephants dream 
+modul 8 
+one piece 
+drunk 
+beach 
+beach 
+beach 
+celeb 
+celeb 
+celeb 
+mpg 
+wmv 
+l word 
+the l word 
+core rhythm 
+musical 
+shane carmen 
+you tube 
+you tube 
+the l word 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+avril lavigne 
+the l word italiano 
+the l word 
+l word 
+christina aguilera 
+l word 
+tiziano ferro 
+twain 
+twain 
+bob 
+bob 
+l word, youtube and liveleak 
+, youtube and liveleak 
+hairspray 
+rugby 
+eva mendes 
+panda antivirus 
+virtumonde 
+preeteens 
+thursday 
+fist 
+faust 
+emma 
+chopin 
+handicapt 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mandarin 
+downtown 
+downtown matsumoto 
+foo 
+matsumoto 
+matsumoto comedy 
+bourne 
+search files, youtube and liveleak 
+foo fighters 
+lela star 
+suse 
+amater 
+amater 
+amater 
+amater 
+amater 
+amater 
+amater 
+search files, youtube and liveleak 
+korea 
+unreal tournament 
+unreal tournament 
+unreal tournament 
+unreal tournament 
+fifa 08 
+xxx 
+domino 
+thai 
+hardcore 
+java 
+young teen 
+search files, youtube and liveleak 
+porn 
+californication 
+weeds 
+weeds 
+sylvia saint 
+jenna 
+jameson 
+blowjob 
+samantha 
+def leppard 
+lee aaron 
+thunder dirty love 
+charlie 
+darrian 
+red blue 
+ozzy 
+dexter 
+lavoie 
+halo 
+hazzard 
+lynda carter 
+baywatch 
+heroes 
+heroes season 
+hee haw 
+mandrell 
+alba 
+derek jeter 
+boy edgar 
+yankees 
+beyonce 
+shania 
+mcphee 
+kiss 
+lingerie 
+lynch mob river of love 
+charlie 
+charlie blowjob 
+football 
+playboy 
+recomended 
+dutch 
+dutch tvseries 
+public tv 
+mtv 
+public tv 
+public tv 
+kbs 
+britney 
+britney 
+dora 
+jeeg 
+nero 
+jericho 
+korea 
+linkin park 
+john hiatt 
+milburn 
+panda antivirus 2008 
+these are the facts 
+panda antivirus 2007 
+korea 
+search files, youtube and liveleak 
+guitarsynth 
+guitarsynth 
+guitar synth 
+guitar synth 
+these are the facts 
+freundin 
+teen 
+search all files 
+lomu 
+korea 
+rape 
+merenghe 
+merengue 
+jolie 
+moto gp 2007 ps2 
+tomtom 
+fwyh 
+black 
+avi 
+bioshock 
+nas my way 
+one mic 
+nas one mic 
+vasco 
+basic 
+zucchero 
+zucchero 
+zucchero 
+ebony 
+american choppers 
+mothers 
+mothers 
+harry 
+milf 
+dalla 
+luccio dalla 
+psp 
+psp 
+wwe 
+weeds 
+medal of honor 
+wallpaper 
+pink floyd 
+surf 
+film 
+lebanese 
+ultimate 
+vs php 
+blue crush 
+nude 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porn 
+anime 
+hentai 
+razor1991 
+razor1911 
+anime 
+heroes 
+heroess01 
+heroess01 
+heroes.s01 
+prison break 
+galactica 
+viv thomas 
+invasion 
+prison break 
+cricket 
+movie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+election 
+search files, youtube and liveleak 
+kank 
+hum tum 
+hum tum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+movie 
+lomu 
+lomu 
+close combat 
+das streben nach glück 
+scopata 
+search files, youtube and liveleak 
+debbie does dallas 
+amateur 
+amateur 
+amatoriale 
+amatoriale 
+thai vcd 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+spiaggia 
+pompino 
+pompino 
+pompino 
+pompino 
+fucking 
+pompino 
+pompino 
+pompino 
+pompino 
+amteur 
+foo fighters 
+amateur 
+foo fighter 
+britney spears 
+topa 
+luci rosse 
+nude 
+watch 
+nuda 
+korea 
+search all files 
+search all files 
+search all files 
+عÙุ± Ø§Ø¯Ù�¨ 
+kctvjeju 
+proxy way 
+anonimity 
+proxy 
+film porno 
+amy winehouse 
+jannes 
+69 
+jannes 
+seredova 
+heroes 
+proxyway 
+search files, youtube and liveleak 
+korea 
+korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+korea 
+boots crush 
+squirt 
+dildo 
+insertion 
+eureka 720p 
+eureka 
+
+eureka 
+latex 
+shrek 3 
+latex balloon 
+blow jobs 
+nero8 
+nero8 software 
+nero8 software 
+the.it.crowd 
+pirati dei caraibi 
+pirati dei caraibi 
+tribler 
+tribler 
+the.it.crowd s02e01 
+the.it.crowd s02e01 
+erlang 
+blowjob 
+blowjob 
+blowjob 
+sandoval 
+sandoval 
+arturo sandoval 
+arturo sandoval 
+search files, youtube and liveleak 
+radio recorders 
+radio recorders 
+fantozzi 
+007 
+fantastici 4 
+fantastici 4 
+fantastici 4 
+pozzeto 
+pozzetto 
+the it crowd 
+renato pozzetto 
+the it crowd 
+winrar 
+winrar 4 
+german 
+house german 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+sadomaso 
+monty 
+monty python 
+monty python 
+search files, youtube and liveleak 
+bbc 
+osama bin laden 
+bondage 
+leo can dive 
+tnt jackson 
+halo 
+search files, youtube and liveleak 
+tnt jackson 
+andre van duin 
+blowjob 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+unreal germa 
+unreal german 
+corset 
+unreal 
+inflatable 
+unreal 
+inflatable latex 
+guitrasynth 
+guitarsynth 
+guitarsynth 
+boots 
+fetish 
+mina 
+mirc 
+mirc 
+pantyhose 
+m irc 
+mirc 
+du super controler 
+airbag test 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fetish 
+brandweer 
+sony mhc5900 
+sony mhc5900 
+search files, youtube and liveleak 
+brandweer 
+brandweer 
+love 
+walker brothers 
+brandweer 
+dion 
+brandweer 
+.rar 
+marta sui tubi 
+marta sui tubi 
+route 66 
+brandbestrijding 
+search files, youtube and liveleak 
+sampletank 
+sampletank 
+frankie goes to hollywood 
+sezen aksu 
+rolling stones 
+50 cent 
+nagios 
+zenoss 
+nod32 
+mfö 
+top 40 
+kardashian 
+hilton 
+claymore 
+anime 
+linux 
+heroes 
+search files, youtube and liveleak 
+weeds 
+lexington steele 
+breakfast 
+breakfast at tiffany's 
+divertenti 
+pixar 
+sigle pixar 
+sigle pixar 
+sigle cinema 
+sigle cinema 
+sigle deramworks 
+sigle dreamworks 
+sigle dreamworks 
+sigle shrek 
+sigle shrek 
+sigle film cinema 
+sigle film cinema 
+sigle pixar 
+sigle pixar 
+sigle pixar lamps 
+sigle pixar 
+sigle pixar 
+pixar 
+pixar 
+colonne sonore 
+colonne sonore 
+colonne sonore film 
+ina deter 
+baglioni 
+baglioni 
+baglioni 
+baglioni 
+cutting edge 
+cutting edge 
+guitar 
+guitar hero 
+guitar hero ps2 
+guitar hero 1 ps2 
+aman 
+top chef 
+atlantis 
+aman 
+guitar hero iso 
+aman 
+atlantis 
+ps2 iso 
+manhunt 2 
+stargate 
+stargate atlantis 
+acronis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+norton 
+britney spears 
+inzest 
+inzest 
+inzest 
+gay 
+the way i are 
+super controller 
+house german 
+scubs 
+scrubs 
+stargate 
+candyman 
+star trek german 
+candyman 
+candyman 
+no te pido la luna 
+justin timberlake 
+sexy back 
+sexy back 
+sexy back 
+spice girls 
+chakaron 
+i see girls 
+gwen stefani 
+search files, youtube and liveleak 
+borsato 
+borsato 
+avril lavigne 
+belinda 
+superbad 
+ggg 
+xxx 
+anal 
+deutsch 
+anal 
+prison break 
+prison break 
+suck 
+xp 
+vista 
+heroes 
+hot 
+crysis 
+knocked up 
+jesse james 
+jesse james robert ford 
+fuck 
+planet terror 
+kim holland 
+dennis deyoung 
+hot strip 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+spiderman3 
+spiderman 3 
+axxo 
+nuance 
+csi 
+private 
+divx-ita 
+maradona 
+pirati 
+private video 
+cs3 
+ndesign 
+indesign 
+private 
+
+bbw 
+search files, youtube and liveleak 
+nfl 
+gay porn 
+steelers vs fortyniners 
+hard eight 
+3.10 to yuma 
+balls of fury 
+metalocalypse 
+revolver 
+rubber soul 
+foo fighters 
+we are marshall 
+we are marshall 
+ebony 
+ebony 
+bang bros 
+toast crack 
+pthc 
+pedo 
+kacey 
+illegal teen 
+kecey 
+kacey 
+ableton 
+balls of fury 
+balls of fury 
+
+
+divx 
+divx 
+divx 
+divx 
+terrohrtom 
+norton antivirus 2008 ita 
+norton antivirus 2008 
+norton antivirus 2007 
+resident evil 
+the maitrix 
+the maitrix 
+microsoft office 2007 
+zscudetto 2007 
+scudetto 2007 
+scudetto 2007 
+scudetto 
+scudetto 
+scudetto 
+scudetto2007 
+scudetto 2007 
+search files, youtube and liveleak 
+simpson 
+simpson 
+simpson 
+casablanca 
+casablanca 
+casablanca 
+porno 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sigmund freud 
+sigmund freud 
+microsoft front page 
+microsoft front page 
+microsoft front page 2007 
+microsoft front page 2007 
+microsoft front page 2007 
+microsoft front page 2007 
+microsoft front page 2007 
+adult 
+harry potter order of the pheonix 
+adult 
+anal 
+anal 
+casablanca 
+search files, youtube and liveleak 
+nicola cancedda 
+mr woodcock 
+axxo 
+sevp 
+knopfler 
+samurai champloo 
+ratatouille 
+ratatouille german 
+island of lust 
+ratatouille 
+harry potter german 
+harry potter und der orden des phönix 
+s.e.v.p. 
+search files, youtube and liveleak 
+indiscreet passions 
+grand prix 
+filme 
+krezip pinkpop 
+krezip pinkpop 
+pinkpop krezip 
+ubuntu 
+ugly betty 
+film 
+muzika 
+muzika 
+indiscreet passions 
+indiscreet passions 
+gay 
+gay 
+anal 
+dragon ball z 
+area 51 
+search all files 
+search all files 
+search all files 
+search all files 
+cum 
+search files, youtube and liveleak 
+indian beauties 
+indian beauties 
+pregnant 
+r@ygold 
+smart 
+love 
+naked 
+porn 
+south park 
+rally 
+rally 
+rally 
+rally 
+sherk 
+sherk 3 
+sherk 3 
+matrix 
+ocean 12 
+braiana banks 
+jena jameson 
+superbad 
+superbad 
+superbad 
+superbad 
+superbad 
+blue sky mining 
+asstrafic 
+linux 
+porn 
+search files, youtube and liveleak 
+movies 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+moto 
+baglioni 
+games 
+eros ramazzotti 
+villeneuve 
+duri a morire 
+giochi 
+the best 
+gp 2007 
+gp f1 1978 
+the best 
+lara 
+lara 
+gp f1 1978 
+gp f1 1978 
+smith 
+gp f1 1978 
+gp f1 1978 
+smith 
+noiva 
+smith 
+smith 
+smith 
+smith 
+x 
+eva 
+bare minerals 
+manamana 
+ass 
+md formulations 
+search files, youtube and liveleak 
+the practice ita 
+mina 
+anal lick 
+in tribunale con lynn 
+mina 
+basketball 
+prendilo 
+pane e tulipani 
+terminator 
+itunes 
+veritas backup 
+shemale 
+shemale 
+anal 
+chaienn 
+pregnant 
+i now pronounce you chuck and larry 
+jessica biel 
+david 
+masturbate 
+
+mika 
+queen 
+pompini 
+female 
+search files, youtube and liveleak 
+rape 
+indian rape 
+indian rape 
+indian rape 
+indian rape 
+indian rape 
+little rape 
+little rape 
+rape 
+games 
+splinter cell 
+splinte
+splinte 
+psv 
+splinter cell 
+battlefield bad company 
+battlefield bad company 
+stargate atlantis 
+lektor 
+lektor 2007 
+muzyka 2007 
+techno 2007 
+akell hyde 
+stargate atlantis 
+stargate atlantis 
+potter 
+hip hop 
+blakes 7 
+blakes 7 
+survivor 
+doggy 
+avs video tools 
+avs video converter 
+avs video converter 
+avs video converter 
+doggy style 
+doggy style 
+kill 
+smith 
+ger 
+converter 
+war 
+dr who 
+pussy 
+zecchino doro 
+doctor who 
+doctor who invasion 
+dragonball z il legendario super sayan 
+dragonball z il legendario super sayan 
+dragonball z il legendario super sayan 
+dragonball z il film 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+openbsd 
+rammstein 
+rammstein 
+tera patrick 
+it crowd 
+rambo 
+rambo 
+rambo 
+i guerrieri della notte 
+i guerrieri della notte 
+doctor who invasion 5 
+dragon wars 
+doctor who invasion 6 
+slim shady lp 
+slim shady lp 
+slim shady lp 
+slim shady 
+slim shady 
+eminem 
+watch 
+newsom 
+nelly 
+countrygrammer 
+country grammer 
+country grammer 
+country grammer 
+nuance 
+chuck and larry 
+dvd ita 
+dvd ita 2007 
+dvx ita 2007 
+mauro nardi 
+wow 
+world of warcraft the burning crusade 
+crash 
+paul de leeuw 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mana 
+marco antonio solis 
+mana 
+harry potter order of the pheonix 
+harry potter order of the pheonix dvd 
+harry potter order of the pheonix dvd 
+teen 
+kkk 
+harry potter order of the pheonix 
+step up 
+manowar 
+shrek 3 
+pc game ita 
+manowar gods made metal 
+samim 
+aliens vs predator 
+massage 
+egy makulátlan elme örök ragyogása 
+zene 
+musik 
+zene 
+mika 
+sesso 
+search files, youtube and liveleak 
+mana 
+adobe 
+nds 
+nintendo ds 
+nintendo ds 
+nes 
+juiced 2 nds 
+juiced 2 nds 
+joe satriani 
+eric clapton 
+divx 
+divx ita 
+guys gone wild 
+search files, youtube and liveleak 
+casa 
+casa 
+casa 
+amore 
+arturo gatti 
+best buds get tagged 
+corbin fisher 
+st. vincent 
+gay 
+gay 
+bang bros 
+bang bros 
+karaoke 
+it crowd 
+sicko 
+pirates of the caribbean 
+htid 
+softimage 
+3d studio max 
+linux xp 
+bang bros 
+cameltoe 
+sexe 
+search files, youtube and liveleak 
+german 
+live tv 
+search files, youtube and liveleak 
+porno 
+sex 
+prison break 
+gay 
+wheel of fortune 
+wheel of fortune 
+korea 
+chearleader 
+xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ben hur 
+ben hur 
+editorial 
+computer editorial 
+how to build a computer editorial 
+how to build a computer 
+how to build a computer 
+robinson 
+house 
+russian 
+mature 
+ps2 
+ps2 moto gp 
+moto gp 
+ps2 
+ps2 ita 
+ps2 it 
+guitar hero ps2 
+harry potter 
+fantastici quattro e silver surfer ita 
+fantastici quattro e silver surfer 
+transformers it 
+fantastici quattro it 
+fantastici quattro 
+notte 
+fantastic four 
+fantastic four it 
+harry potter it 
+harry potter ita 
+harry potter ita 
+xxx asia 
+xxx cina 
+xxx tokyo 
+asian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+schooltv 
+schooltv brandpreventie 
+schooltv 
+schooltv-brandpreventie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anita meijer 
+schooltv-brandpreventie 
+schooltv-brandpreventie 
+schooltv-brandpreventie 
+schooltv-brand 
+eric clapton 
+schooltv 
+search files, youtube and liveleak 
+premonition 
+premonition eng 
+asian teen 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+883 
+883 
+883 
+883 
+ps2 
+harry potter ita 
+spiderman 
+spiderman 
+spiderman 
+grow grow grow 
+grow grow grow 
+bdsm 
+k3 
+k3 
+suburban nights 
+hard fi 
+goo goo dolls 
+iris 
+sex 
+sleepers 
+brad pitt 
+carmen electra 
+angeline jolie 
+hard fi 
+goo goo dolls 
+boris kovac 
+dom lomp 
+opposites 
+fiction plane 
+bruce willis 
+die hard 
+die hard 
+die hard 
+die hard 4 
+die hard 4 
+angeline jolie 
+angeline jolie 
+angeline jolie 
+angelina jolie 
+elle mcpherson 
+stolen tape 
+homemade videos 
+tatort 
+tatort 
+tatort 
+doggie style 
+madonna 
+madonna 
+madonna 
+madonna 
+madonna 
+syllart 
+search files, youtube and liveleak 
+
+street 
+street 
+fatman 
+fatman 
+blue 
+street 
+street line 
+street lan 
+fatman scoop 
+fatman scop 
+porn 
+porn 
+porn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+prison break 
+prison break ita 
+chiara 
+sara 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bourne 
+mediadefender 
+when the mountains tremb 
+when the mountains tremble 
+house german 
+dutch sub 
+german 
+gloria 
+vrt 
+food 
+appletv 
+it crowd 
+bon jovi 
+u2 
+shemale 
+sean paul 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+big titts 
+big titts 
+titts fuck 
+fuck 
+ass 
+ass 
+jenna 
+kingdom under fire 
+kingdom under fire 
+kingdom under fire xbox 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+manciano 
+doggy stile 
+doggy 
+doggy 
+anita meijer 
+anita meijer 
+margie ball 
+andress 
+creedence clearwater 
+streisand 
+avinu 
+streisand 
+search files, youtube and liveleak 
+300 
+300 
+phill collins 
+black christmas ita 
+black christmas 
+vasco rossi 
+vasco rossi 
+divx ita 
+phil collins 
+pearl harbor 
+pearl harbor 
+cs3 mac 
+cs3 mac 
+cs3 
+cs3 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+desperado 
+desperado dvx 
+desperado dvix 
+desperado ita 
+desperado ita 
+dvd ita 
+heroes 
+heroes 2x 
+cd foongids 
+foongids 
+gids 
+cycling 
+blowjob 
+dragonball z il film 
+veronica zemanova 
+veronica zemanova 
+veronica zemanova 
+porno 
+sex 
+xxx 
+dvd ita 
+xxx 
+xxx 
+xxx 
+xxx 
+dvd ita 
+mad world 
+metallica 
+keba 
+heroes 
+hazes 
+hazes 
+hazes 
+hazes 
+hazes 
+hazes 
+l'ultima legione 
+boy on boy 
+crtani 
+crtani xxx 
+divx 
+xxx 
+divx ita 
+dragana 
+divx ita 
+divx ita 
+dragana 
+dragana 
+shrek 3 
+film ita 
+it crowd 
+dvd ita 
+the great world of sound 
+closing escrow 
+self medicated 
+kill bill 
+loose change 
+spss 
+spss 
+spss 
+dvd ita 2007 
+gigi d'agostino 
+sean paul 
+kelis 
+dvd ita 
+bones ita 
+dvd ita 
+jimmy reed 
+can't turn you loose 
+divx ita 
+divx ita 2007 
+divx film ita 2007 
+shrek 3 
+superbad 
+shrek 3 film 
+the simpson movie 
+the simpson movie ita 
+porno 
+the simpson ita 
+i simpson il film 
+il sette e l'otto 
+ficarra e picone 
+ficarra e picone 
+il sette e l'otto 
+oceans thirteen 
+oceans thirteen 
+oceans thirteen ita 
+divx ita 
+film ale e franz 
+dutch 
+doctor 
+infermiere 
+pornostar 
+pornstar 
+movie ita 2007 
+movie ita 2007 
+lesbian foot 
+movie 2007 
+film 2007 
+lesbian 
+ed2k://|file|[divx%20ita]%20porky%20college%202%202007%20by%20minyc%20dj.avi|1408829714|6923a49c1d90a5e4677ba62e6394a2a0|/ 
+film ita 
+parky college 
+porky college 
+porky college film 
+porky college film 
+lesbo 
+dvd ita 
+film ita 
+xxx 
+youporn 
+porno 
+sex 
+nl subt 
+pop 
+teen 
+skirt dance 
+flipsasa 
+lightroom 
+les miserables 
+phantom of the opera 
+shrek 3 
+shrek 3 ita 
+shrek ita 
+simon and garfunkel 
+simon and garfunkel 
+simon and garfunkel 
+simon and garfunkel 
+dvd ita 
+dvd il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+ficarra e picone 
+ficarra e picone 
+ale e franz 
+dvd-ita 
+dvd-ita 
+games 
+divx ita 
+porn 
+divx 
+il dolce e l'amaro 
+games 
+il bacio che aspettavo 
+il bacio che aspettavo 
+manuale d'amore 2 
+album 
+manuale d'amore 
+manuale d'amore 
+manuale d'amore 2 
+manuale d'amore 2 
+night of the living homeless 
+manuale d'amore 2 
+south park 
+southpark 
+americas army 
+linux 
+ubuntu 
+open office 
+doctor who 
+photoshop cs3 
+resident evil 
+serif 
+draw plus 
+cartao 
+cartao de visita 
+dvd ita 
+dvd ita 
+ita 
+ita 2007 
+xvid ita 
+sexcams 
+sexcams 
+sexcams 
+sexcams 
+sexcams 
+tania russof 
+ccorel 
+gorillaz 
+sublime 
+resident evil 
+house 
+mika 
+severance ita 
+vi dichiaro marito e marito 
+premonition ita 
+ita 
+it crowd 
+search files, youtube and liveleak 
+laura 
+cazzi 
+hard 
+clasical 
+vista 
+twin peak 
+twin peax 
+twin 
+twinpix 
+twinpeax 
+vista 64
+vista 64 
+vista 64 
+vista 64 
+twin peaks 
+pes 
+search files, youtube and liveleak 
+xxx 
+pro evolution soccer 
+tanita 
+tanita 
+tanita tikaram 
+el polaco 
+red hat 
+mi pc 
+red hat linux 
+webcam 
+instrumenals 
+instrumentals 
+teen 
+beos 
+3gp 
+arina 
+matrix os 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+kelly joyce 
+game pc ita 
+game pc ita 
+game pc 2007 
+game pc 
+photoshop cs3 
+game 2007 
+game pc 
+tits 
+game pc 
+game pc 
+pes 7 
+pes 
+pes 
+pc game 
+pc game ita 
+pc game trailer 
+game trailer 
+alvaro vitali 
+lesbo 
+simpson 
+simpson ita 
+ita 
+shrek 3 
+shrek 3 ita 
+shrek ita 
+skinny 
+u2 
+skinny 
+big clit 
+search files, youtube and liveleak 
+spiderman 3 
+rammstein 
+rammstein 
+moviemania 
+commodore 64 
+rammstein 
+amiga 
+blues brothers 
+electric six 
+electric six 
+wagner 
+wagner 
+wagner 
+wagner 
+wagner 
+battles atlas 
+ferrari 
+amiga emulator 
+arkanoid 
+arkanoid 
+ho'oponopono 
+ass 
+dancing 
+booty meat 
+booty shake 
+ass 
+ass shake 
+lapdance 
+blowjob 
+booty meat 
+booty meat 
+lolita 
+teen 
+indra 
+soccer 
+playboy 
+soccer 
+bangbros 
+pussy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kylee reese 
+asian 
+halloween 1978 
+halloween 1978 
+halloween 1978 
+chuck 
+chuck -pronounce 
+heroes -s01 
+pthc 
+loose change 
+sarah 
+heroes 
+heroes s02 
+ghostsurf 
+ghostsurf 
+ghostsurf 
+spears 
+css 
+php 
+php 
+porn 
+private xxx 
+animal 
+zoo sex 
+zoo sex 
+sex farm 
+evergreen 
+evergreen 
+andy william 
+boulevard 
+search files, youtube and liveleak 
+ligabue 
+scarface 
+biagio antonaccci 
+biagio antonacci 
+gigi finizio 
+lucio dalla 
+finizio 
+foto 
+video porno 
+eva henger 
+freedom 
+colonne sonore 
+monella 
+biancaneve sotto i nani film porno 
+biancaneve sotto i nani film porno 
+pinocchi porno 
+pinocchio porno 
+film porno 
+scarface ita 
+biancaneve film porno 
+sesso in classe 
+professoresse nude 
+the simpson 
+the simpson italiano 
+porno 
+eva henger 
+film porno 
+film porno 
+candy store 
+candy dulfer 
+film italiano 
+bob sinclair 
+love is gone 
+gemelli diversi 
+napoli 
+gemelli diversi 
+yoga 
+simpson film 
+ramazzotti 
+tutto 
+оÑÃ\90¾Ð±Ð¾Ðµ Ð¼Ð½ÐµÐ½Ð¸Ðµ 
+minority report 
+fmp 
+fullmetal 
+fullmetal 
\83\99ãÄ\9eãÅ¥ãÄ\9eãÅ» 
+çÄ\84修正 
+office 
+çÄ\84修正 
+電マ 
+電マ 
+i simpson il film 
+distretto 
+distretto 
+distretto 
+distretto 
+distretto 
+distretto 
+distretto di polizia 6 
+see dick run 
+fun with dick e jane 
+ghost raides 
+rocky 
+giochi playstation 2 
+playstation 2 
+distretto di polizia 6 
+tre metri sopra al cielo 
+playstation 2 giochi 
+fonogramma 
+anniÅÂ\86a vanniÅÎ\94Â\81 
+cultura moderna 
+cultura moderna 
+cultura moderna 
+cultura moderna gioco 
+cultura moderna gioco 
+cultura moderna gioco 
+gran turismo 
+moto gp ps2 
+spider man ps2 
+shrek 3 
+spider man ps2 
+jazz 
+metal gear ps2 
+new amsterdams 
+new amsterdams 
+search all files 
+new amsterdams 
+new amsterdams 
+new amsterdams 
+new amsterdams 
+pdf 
+kde4 
+music 
+bourne 
+chopin 
+chopin 
+classic 
+classic 
+classic 
+classic 
+classic 
+css 
+leg 
+photography 
+
+cock 
+real hot 
+czech 
+cream 
+girlsoutwest 
+seventeen 
+seventeen 
+seventeen 
+anal 
+anal 
+boobs 
+boobs 
+mediadef 
+mediadefender 
+public 
+mediadefender 
+public 
+defender 
+kaze no stigma 
+darker than black 
+tokyo majin 7 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+"family tree maker 2008 
+"family tree maker 2008" 
+"familytree maker 2008" 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+chopin 
+http://www.liveleak.com/view?i=c66_1176846833 
+no reservations 
+batman 
+wedding daze 
+bourne ultimatum 
+bourne 
+wedding daze 
+no reservations 
+dr steel 
+"family tree maker 2008" 
+"family tree maker 2008" 
+family tree maker 2008 
+sosft 
+sisoft 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+search files, youtube and liveleak 
+sex 
+accordion 
+defender 
+habitat 
+habit 
+krezip 
+ryan adams 
+the others 
+chessbase 
+search files, youtube and liveleak 
+acronis migrate 
+teen 
+prison break 
+brotherhood 
+search files, youtube and liveleak 
+californication 
+counter strike source 
+dvd ita 2007 
+dvd ita 2007 
+dutch 
+dvd ita 
+l'ultima legione dvd 
+mp3 
+xvid 
+xvid ita 
+xvid de luxe 
+italian subbed 
+zwartboek 
+high school musical 
+high school musical ita 
+xvid imc 
+tettone 
+tettone 
+tettone 
+dutch movie 
+lesbian 
+britney 
+soft erotic 
+soft porn 
+soft porn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xvid ita 
+xvid 
+xvid 
+xvid 
+fantasmes 
+bar hopping hotties 2 
+bar hopping hotties 
+kama sutra 
+emmanuelle 
+objects of her desire 
+confessions of a groupie 
+confessions of a groupie 
+naked encounters 
+sin city diaries 
+fantasmes 
+my first female lover 
+bound cargo 
+playboy 
+oral 
+teenage cavegirl 
+teenage cavegirl 
+amateurs 
+erotic 
+womb raider 
+porn 
+sex 
+lesbian 
+lesbian 
+playboy 
+a woman's touch 
+paprika 
+passion cove 
+torrentsmd.com 
+torrentsmd.com 
+paid in full 
+paid in full 
+paid in full 
+paid in full 
+nas illmatic 
+toots 
+thielemans 
+death note 
+mike 
+search files, youtube and liveleak 
+ocean 
+hot fuzz 
+hot fuzz ita 
+anita blonde 
+sylvia saint 
+youtube 
+timbaland 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sybian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+original flavor 
+roling stones 
+feat boy slim 
+fat boy slim 
+fat boy slim 
+fat boy slim 
+fat boy slim 
+dragon ball z 
+dragonball z 
+dragonball z 
+dragonball z 
+ricochet 
+pink floyd 
+pink floyd 
+pink floyd 
+pink floyd 
+depeche mode 
+depeche mode 
+depache mode 
+depache mode 
+depache mode 
+depache mode 
+depache mode 
+depache mode 
+lamas 
+lamas 
+lamas 
+rhcp 
+thai 
+oil 
+orgasm 
+tnt 
+teen 
+halo 
+luca giurato 
+loretta goggi 
+jaguar 
+navtech 
+terminator 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kamasutra 
+fr 
+pirates 
+pirates of silicone 
+proof 
+terror 
+720p 
+720p king kong 
+king kong 
+king kong xvid 
+720p 
+hdtv 
+jade 
+jade 
+1080p 
+search files, youtube and liveleak 
+dragon baal 
+1080i 
+pantyhose 
+omas 
+mature 
+lessen to love 
+level 2 
+granny 
+playstation 2 
+fifa 2008 
+bmx playstation 2 
+playstation 2 
+i listen to love 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+oma 
+tomb raider anniversary 
+tomb raider anniversary ps2 
+spider man 3 ps2 
+tomb raider anniversary ps2 
+dragon ball z ps2 
+dragon ball z psp 
+giochi psp 
+giochi psp 
+psp 
+ps2 
+navteq 
+brazzers 
+brassens 
+brassens 
+brassens 
+brassens 
+bush 
+grannis 
+grannies 
+grandma 
+grandmother 
+grandmother 
+grandmother 
+vieilles 
+the police 
+search files, qcad 
+search files, ubunt 
+the police 
+sting 
+sting 
+ubuntu cube 
+ubuntu 
+gary moore 
+ubuntu plugin 
+q cad 
+dream theater 
+ubuntu 
+search files, youtube and liveleak 
+incest 
+heroes s02 
+mom 
+mature.nl 
+mature 
+old 
+grandmother 
+grandma 
+grandmas 
+lusty grandmas 
+lusty 
+mature woman 
+alt 
+gilmour 
+fortzen 
+vieille 
+femme mure 
+femme 
+femme 
+mure 
+xxx 
+bj 
+flow 
+blow 
+handjob 
+hand job 
+stroke 
+jobs 
+search files, youtube and liveleak 
+jobs 
+jobs 
+jobs 
+suck 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+50 cent 
+good luck chuck 
+the game plan 
+dragon wars 
+office 2007 
+walking tall 2 
+walking tall 2 
+wrong turn 2 
+drivergenius 
+photoshooting 
+soccer 
+best of fashion tv 
+sex 
+女åÄ\92 
+女åÄ\92 
+sex 
+victoriassecret 
+natacha peyre photoshooting 
+natacha peyre 
+fotoshooting 
+kader loth 
+fotoshooting 
+prison break 
+prison break hr 
+inxs 
+inxs 
+rock 
+zz top 
+zz top 
+sex 
+angelina jolie 
+pamela 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ghulam ali 
+ghulam ali 
+ghulam ali 
+ghulam ali download 
+ghulam ali download 
+planet earth 
+blue planet 
+heros 
+heros s02e01 
+muft.tv 
+ghulam ali 
+ghulam ali 
+mmovie 
+family guy 
+house season 4 
+house alone 
+house s4ep1 
+torchwood 
+paris 
+sapphic 
+midwset 
+midwest 
+midwest 
+ahmadinejad 
+bartoli 
+private 
+simpson 
+simpson ita 
+serenity 
+serenity ita 
+serenity ita 
+openid 
+maria - the album 
+maria - the album 
+maria 
+bartoli 
+shrek 
+pirates 
+search files, youtube and liveleak 
+wam 
+wet and messy 
+piss 
+pee 
+simpson 
+wam 
+selen 
+trasformer 
+sompson 
+i simpson 
+i simpson ita 
+zetsubou 
+corso di ballo 
+(divx) corso di ballo - lezioni di salsa portoricana - lezione di base.mpg 
+(divx) corso di ballo - lezioni di salsa portoricana - lezione di base.mpg 
+(divx) corso di ballo - lezioni di salsa portoricana - lezione di base.mpg 
+i simpson 
+moana 
+rizzo 
+caraibi 
+gessica 
+300 
+novita 
+*.* 
+dvd 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+dvd 
+dvd ita 
+pirati dei caraibi 
+dvd ita 
+dvd ita 
+dvd ita 
+dvd ita 
+friends 
+tra 
+friends ita 
+nansy 
+nansy 
+madonna 
+sparagna 
+sparagna 
+friends ita 
+friends ita 
+divx ita 
+divx ita friends 
+foo 
+sex 
+heroes 
+search files, youtube and liveleak 
+xbox 
+dreamcast 
+xbox 
+divx ita 
+divx ita 
+divx ita 
+prison break 
+tango 
+legal 
+legaltorrents 
+legaltorrents 
+bbc ansi 
+bbc 
+bbc ansi 
+ansi 
+ansi bbc 
+bbc 
+matia bazar grido 
+aqua teen hiung 
+aqua teen hiun 
+aqua teen hiun 
+aqua teen hiun 
+aqua teen hunger force 
+clubfanatix 
+mac os 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+compiz 
+wolfram 
+pirati dei caraibi 
+carnivale 
+vmware 
+madonna 
+mature 
+mature 
+gta 6 
+gta sesto 
+hack 
+malmsteen 
+malmsteen 
+yngwie 
+malmsteen 
+creative cow 
+schaak 
+german 
+xxx 
+cum 
+girls 
+seinfeld 
+girls 
+sigur ros 
+sigur ros 
+erik mongrain 
+slut 
+maple 
+maple 
+quantum 
+seinfeld 
+seinfeld 
+rey leon 
+licencia para casarse 
+queenn 
+queen 
+todo poderoso 
+todo poderoso 
+slayer 
+search files, youtube and liveleak 
+prison break 
+prison break.s03 
+prisonbreak.s03 
+prisonbreak 
+prison.break.s03 
+prison.break.s03 
+prison.break.s0 
+prison.break.s 
+prison.break. 
+prison.break 
+prison break s03 
+prison break s03 
+prison break s0302 
+prison break s03 02 
+dragonball z 
+chevelle 
+chevelle 
+medieval 2 total war 
+medieval 2 total war ita 
+prison break s03 02 
+prison break s03 
+prison break s03 
+teens 
+longman 
+xxx 
+lara pausini dove l'aria è polvere 
+laura pausini dove l'aria è polvere 
+private 
+private 
+chevelle 
+swedish 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+greatest opera show on earth 
+greatest opera show on earth 
+greatest opera show on earth 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+paard 
+sinterklaas 
+sinterklaas 
+usa maps 
+usa maps 
+usa maps 
+usa maps 
+usa map 
+us map 
+us map 
+acrobat 8 professional 
+larell hardy 
+laurell hardy 
+laurel hardy 
+laurel hardy 
+laurel hardy 
+chess 
+potter phoenix 
+seinfeld 
+resident evil 
+sims 2 
+beatles 
+ratatouille 
+wedding daze 
+inter sampdoria live 
+inter sampdoria live 
+sugarbabes 
+grönemayer 
+grönemeyer 
+grönemeyer 
+720p 
+laurel hardy 
+tango 
+300 
+dj tiesto 
+dvx ita 
+daddy yankee 
+daddy yankee 
+riverdance 
+riverdance 
+amateur 
+pharrell 
+ti 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+strumpfhose 
+strumpfhose 
+personal branding 
+family guy star wars 
+fantastici 4 
+slingblade 
+lanois 
+cash 
+the war 
+fantastic four 
+cum guzzlers 
+gay bareback 
+search files, youtube and liveleak 
+most evil 
+most evil 
+brotherhood 
+most evil 
+gay bare 
+gay spiderman 
+house 
+grindhouse 
+planet terror 
+axxo 
+mastadon 
+ubuntu 
+the brave one 
+the brave one 
+claymore 
+search files, youtube and liveleak 
+dirty rich 
+dirty sexy money 
+teen 
+steeleye span 
+steeleye span 
+vista 
+hitzone 43 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 
+joan of arcadia 
+joan of arcadia 
+joan of arcadia 
+joan of arcadia 
+lost 
+shine mitchell 
+friday night lights 
+groningen 
+sonata arctica 
+sonata arctika 
+sonata arctica 
+star trek 
+walt disney 
+tanja savic 
+tanja savic 
+tanja savic 
+salsa 
+lenny bruce 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+
+search files, youtube and liveleak 
+python 
+samba 
+www.orkut.com 
+search files, youtube and liveleak 
+www.orkut.com 
+www.orkut.com 
+alicia keys 
+turnstall 
+kd 
+thurn 
+mom and son 
+walt disney 
+kurnstall 
+kt turnstall 
+bioshock 
+meatloaf 
+vinyl 
+1080 
+1080 
+search files, youtube and liveleak 
+playboy magazine 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+naruto 
+britney spears 
+tango 
+borland 
+segreto sahara 
+porno 
+porno 
+elisa 
+hd 
+atlantis hd 
+stargate hd 
+vivaldi 
+atlantis hd 
+hd 
+segreto sahara 
+manuela arcuri 
+vivaldi 
+electric six 
+wheels of steel 
+wheels of steel 
+wheels of steel 
+casino royal 
+pc game 
+csi 
+csi 
+csi 
+c.s.i 
+gta 
+moto gp 
+i soprano 
+porno 
+ac/dc 
+sex 
+settlers the game 
+hentai 
+hentai 
+maura 
+hentai 
+hentai 
+riverdance 
+hentai school 
+robin trower 
+hentai rape 
+immoral sisters 
+immoral sisters 
+hentai 
+hentai rape sex 
+hentai rape 
+hentai sex 
+rape hentai 
+piano 
+schoolgirl hentai 
+hentai movie 
+immoral sisters 
+schoolgirl sex 
+schoolgirl sex 
+sex 
+hentai 
+the bourne ultimatum 
+the office 
+the bourne ultimatum 
+simon and garfunkel 
+harry potter 
+hot 
+busty 
+sigur ros 
+search files, youtube and liveleak 
+demonoid 
+anal 
+grillo 
+bukkake 
+dummies 
+torrent 
+bittorrent 
+faccial 
+faccial 
+facial 
+bittorrent 
+xxx 
+dummies 
+xxx 
+xxx 
+facial 
+facial 
+facial 
+facial 
+torrent 
+torrent 
\8f�¢é\87\91å\88\9a 
\8f�¢é\87\91å\88\9a 
+blade trinity 
+24 
+24 
+24s06e17 
+24s06e17 
+28 weeks later 
+24 
+24 
+24 
+24 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+mr woodcock 
+shoop da woop 
+mac os x 
+harry potter mac 
+harry.potter mac 
+harry potter 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/-jmgjdesjwy"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/-jmgjdesjwy" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+bionic woman 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/-jmgjdesjwy"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/-jmgjdesjwy" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+http://br.youtube.com/watch?v=-jmgjdesjwy 
+styx 
+billy squier 
+complete discography 
+greatest hits 
+bourn 
+bionic 
+bionic 
+transformers 
+jericho 
+teen 
+asian 
+elton john 
+santana 
+coldplay 
+coldplay 
+bleach 
+mysql 
+best of 
+elton 
+mindmapping 
+wicked 
+vivid 
+vivid 
+bittorrent 
+dummies 
+vso 
+bittorrent 
+run fat boy 
+kvcd 
+matrix 
+aa 
+ab 
+free energy 
+asian 
+mp4 
+.mp4 
+.mp4 
+encoure 
+fossett 
+leave britney alone 
+japanese 
+tit 
+tits 
+asian 
+latina 
+
+algebra 
+algebra 
+course 
+course 
+240x320 
+320x240 
+(320 
+(320x240) 
+porno 
+musica 
+ipaq 
+you kill me 
+you kill me 
+elephants 
+knocked up 
+the 11th hour 
+fataal 
+the 11th hour 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fossett 
+fossett 
+legal torrents 
+van der graaf 
+iceclimbing 
+kiwi flight 
+fossett 
+fossett 
+eurosong 
+fantastic four 
+luba hegre 
+tina hegre 
+neverwinter 
+tina 
+virtual 
+bionic woman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mediadefender 
+lexx 
+prison break 
+24 
+ayo 
+dj sy 
+mylene farmer 
+mylene farmer 
+mylene farmer 
+sublisoft 
+dylan 
+nova 
+bill moyers 
+cum 
+atb hold you 
+xxx 
+history of disbelief 
+tony rice 
+atb hold you 
+atb hold you 
+atb hold you 
+atb hold you 
+atb hold you 
+atb hold you 
+dylan 
+hold you 
+dylan 
+atb 
+jenna jameson 
+hold you 
+linkin park 
+hold you 
+hold you 
+hold you 
+hold you atb 
+hold you atb 
+hold you atb 
+spanish 
+linkin park 
+you tube 
+atb 
+simpson 
+simpson 
+mom 
+l 
+a 
+a 
+lily 
+pontiac gto 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+van halen 
+mr. brooks 
+mr. brooks 
+eastern promises 
+dragonball z 
+search files, youtube and liveleak 
+lilly thai 
+dragonball z 
+dragonball z 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball z 
+gto 
+metalocalypse 
+tony moser 
+indesign 
+indesign 
+smartdraw 
+milton ericksson 
+milton erickson 
+hypnos 
+search files, youtube and liveleak 
+diner spinner 
+half life 
+joe rogan 
+powerthirst 
+powerthirst 
+venditti 
+ramazzotti 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+acdc 
+acdc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+gianluca grignani 
+gianluca grignani 
+to love somebod 
+to love somebody 
+gianluca grignani 
+3gp hard 
+3gp xxx 
+vasco 
+gianni celeste 
+gianni togni 
+2007 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+dvd rip ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kenner 
+kinzie kenner 
+photo edit 
+photoshop 
+rape 
+rape bride 
+bestman bride 
+bride 
+incest 
+incest drug 
+incest rape 
+incest 
+rape 
+dolly parten 
+celine dion 
+search files, youtube and liveleak 
+sandra und andres 
+rape 
+veritas backup 
+rape 
+rape 
+rape 
+jennifer lopez 
+britney rears 
+britney spears 
+search files, youtube and liveleak 
+policy 
+policy 
+the policy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+asian 
+tia tanaka 
+blowjob 
+asian blowjob 
+blowjob 
+jap 
+jap 
+jap 
+jap 
+rape 
+public 
+sex 
+sex 
+sex 
+porn 
+amateur 
+flag 
+gils 
+girls 
+girls 
+girls 
+girls 
+sheen and rockets 
+sheena and rockets 
+sheena 
+hentai 
+sex tape 
+sims 2 
+photoshop 
+lucky star 
+lucky star 
+adobe cs3 
+zetsubou 
+lucky star 
+ultraman mebius 
+house 
+purcell 
+russian girls 
+rocco 
+pick up artist 
+lucky star 
+lucky star raw 
+zetsubou 
+zetsubou sensei 
+debian 
+search files, youtube and liveleak 
+debian etch 
+debian iso 
+powerpc linux 
+stalin 
+silvia 
+tt5 
+silvia 
+star club 
+rebecca 
+anarchy 
+anarchy 
+blowjob 
+burst city 
+teacher 
+russia 
+silvia saint 
+hellsing 
+hellsing 
+hellsing raw 
+vmware 
+knoppix 
+pc88 
+ayayo 
+ayayo 
+negima 
+negima 
+luciano pavarotti 
+nerima 
+resident evil 
+chechen 
+corel 
+jessica rizzoi 
+jessica rizzoi 
+jessica rizzo 
+jessica rizzo 
+jessica rizzo 
+porno 
+helvis 
+elvis presley. 
+elvis presley 
+bolin 
+bolin 
+linux 
+free energy 
+search files, youtube and liveleak 
+traci lords 
+tracey 
+addams 
+trac addams 
+tracy addams 
+tracey addams 
+gabrielle 
+austin kincaid 
+segreto sahara 
+resident evil 
+ramones 
+photoshop 
+killing 
+spears 
+aha 
+dance hall days 
+linux 
+titans 
+bolin 
+tania russof 
+jenna jameson 
+underage 
+iphone 
+teen 
+teen cum 
+teen cum 
+freaks of nature 
+bolin 
+monster cock 
+moray 
+moray valve 
+moray valve 
+freaks of nature 
+madonna 
+madonna 
+starcraft 
+jesus christ super star 
+jesus christ super star 
+jesus christ super star 
+search files, youtube and liveleak 
+south park 
+mickey mouse 
+stargate sg1 
+stargate sg1 
+stargate sg1 
+stargate sg1 
+black cock 
+avast 4.7 professional 
+avast 4 
+eastern promises 
+desi 
+leviation 
+ana ivanovic 
+tesla coil 
+induction coil 
+dream theater 
+rainbow 
+kicking coil 
+hookup wire tesla 
+eros ramazzotti 
+del amitri 
+madonna 
+madonna 
+erasure 
+erasure 
+erasure 
+erasure 
+joni mitchell 
+erasure 
+dexter 
+puke 
+è¨ï¿½Â\97æ\88\91å\97\8e 
+james bond 
+james bond 
+james bond 
+james bond 
+ubuntu 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+parietti 
+scarlett 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+napoli 
+napoli 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+adobe 
+search files, youtube and liveleak 
+paris 
+japan 
+matlab 2007a 
+matlab 2007a 
+xxx movies 
+high school musical 
+search files, youtube and liveleak 
+nadine 
+scarlett 
+ellen 
+ellen 
+brass 
+tinto brass 
+grace 
+tirol 
+tirol 
+britney 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pc game 
+tiziano ferro 
+xxx 
+dunstan 
+fifa 
+fifa psp 
+psp fifa 08 
+moric 
+niplpe 
+nipple 
+heroes ita 
+heroes ita 
+almodovar 
+search files, youtube and liveleak 
+crash test 
+chienese crash test 
+evan almighty 
+adobe indesign 
+adobe indisign 
+clapton 
+underage 
+300 
+om 
+om shanti 
+chal 
+mountains of makkah 
+trailers 
+trailers 
+patner 
+war 
+lula 
+battle 
+tycoon 
+medieval 
+age of 
+vista 
+paganini 
+paganini 
+300 
+wagner 
+zuppy 
+. 
+ita 
+333 
+battle 
+battle 
+ling 
+king 
+corso 
+corso 
+corso 
+desi 
+sail 
+ubuntu 
+gta cops and gangsters 
+300 
+wi-fi 
+gta 
+wi 
+print 
+rise 
+300 
+knight 
+lula 
+napoletana 
+trans 
+grey 
+battle 
+star wars 
+ita 
+transformers 
+miss potter 
+transformers 
+gatos que hablan 
+blazing angels 2 
+tits 
+jan smit 
+f16 
+south park 
+into the wild 
+foo fighters 
+simpsons 
+resident evil 
+resident evil german 
+resident evil 3 
+nesox 
+email 
+get high 
+männer wie wir 
+容祖兒 
+proxyway 
+avg antispyware 
+fanfare 
+haanstra 
+madonna 
+novarip 
+vibrazioni 
+vibrazioni 
+madonna 
+once upon time west 
+blazing angels 2 
+collin mcrea 
+collin mcrea dirt 
+colin mc rea dirt 
+dtm race driver 3 
+neutron 
+big trouble 
+michael jackson janet 
+come and get it 
+debian 
+2pac 
+rock 
+2pac.mp3 
+punk 
+changes.mp3 
+changes.mp3 
+stalin 
+stalin 
+stalin 
+anime 
+anime 
+macross 
+guren 
+lagan 
+transformes 
+transformers 
+tank 
+jgsdf 
+oceans 13 
+chechen 
+shrek 
+halo3 
+halo 3 
+games 
+sukhoi 
+tokyo wars 
+tokyo wars 
+test 
+flyboys 
+flyboys 
+flyboys 
+toprisoner 
+prisoner 
+deutsch spanisch kurs 
+deutsch spanisch 
+spanisch 
+u.f.o 
+501 verbs 
+501 
+vista 
+knyacki 
+high voltage 
+inu 
+inu 
+snakeman show 
+spanish verbs 
+snakeman show 
+snakeman show 
+sonhouse 
+seena 
+u-15 
+dears 
+momoi 
+poemi 
+flcl 
+anarchy 
+anarchy 
+anarchy 
+arb 
+moudoku 
+japan 
+michro 
+michro 
+hikage 
+hikage 
+hikage 
+travler 
+traveler 
+501 spanish verbs 
+legia 
+winrar 
+hamilton 
+heroes 
+trans 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+no results 
+notpresentonanysystemfile 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666
666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch1111111111111111111111111111111112222222222222222222222222222222222222222333333333333333333333333333333333333333344444444444444444444444444444555555555555555555555555555666666666666666666666666666
66666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666 
+alive 
+xxx 
+indian movies 
+big black 
+big black 
+kean 
+katja kean 
+hentai 
+planet terror 
+beethoven gilels 
+katja kean 
+young 
+hamilton 
+david hamilton 
+parker posey 
+1000 
+gay boy 
+milf 
+adobe indesing 
+adobe indising 
+dreamweaver cs3 
+dreamweaver cs3 
+adobe 
+terminator 
+terminator 
+blond 
+open source 
+finger 
+masturbate 
+esc 1989 
+database 
+adobe 
+slutty hot 
+lemon angel 
+adobe indesing 
+adobe indesing 
+haruhi 
+cream lemon 
+missaka 
+adobe indesing 
+hamas 
+slutty 
+black 
+syoko 
+aum 
+amateur 
+eurowings 
+ryders 
+adobe cs3 
+burst city 
+burst city 
+burst city 
+crazy skb 
+60 anpo 
+anpo 
+ashampoo 
+lemon tea 
+lemon angel 
+redacted 
+xanadu 
+peanuts 
+izumiya 
+timers 
+nadeshico 
+excel saga 
+poemi 
+islael 
+kou machida 
+machida 
+hirohito 
+paprika 
+dainippon 
+doors 
+animal sex 
+horse 
+shemale 
+horse dick 
+wife 
+hairy 
+hairy 
+hentai 
+terry tate 
+indiana jones 
+terry tate 
+terry tate 
+little children 
+kusturika 
+non siamo soli 
+ihra drag racing 
+fados carlos saura 
+preg 
+preggo 
+preggo 
+search files, youtube and liveleak 
+kanye west 
+nelly furtado 
+nelly furtado 
+juno 
+little miss sunshine 
+justim timberland 
+justim timberland 
+justim timberland 
+pimsleur 
+woodwork 
+bob baumeister 
+search files, youtube and liveleak 
+ju 
+ju 
+ju 
+plaquinha magica 
+plaquinha magica 
+heleem 
+heleem 
+filmes 
+filmes 
+maksim 
+sex 
+deja.vu 
+deja 
+james blunt 
+search files, youtube and liveleak 
+guitar 
+guitar lessons 
+guitar lessons 
+guitar lessons 
+isso pro 
+fifa 
+isspro 
+isspro evolution 
+gina 
+pro evolution 
+santana 
+die siedler vi 
+divx 
+divx ita 
+house 
+linux 
+sopranos 
+hoi 
+deja 
+public nudity 
+* 
+gay dad 
+ebony feet 
+prison break s03e03 
+prison break s03e04 
+bind 9 
+bind9 
+o'reilly 
+nadine 
+big tits 
+renato zero 
+renato zero 
+vetorizar corel 12 
+vetorizar 
+high school musical 2 
+corel 12 
+lafee 
+pink floyd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+corel 12 vetor 
+vetor 
+video 200 
+video 2000 
+les 
+ls 
+heros ita 
+britney 
+nude cel 
+harry potter 
+nadine 
+one three hill 
+amsterdam 
+webcam 
+webcam 
+lindsay lohan 
+lohan 
+lohan 
+3js 
+ilahi 
+3j's 
+itil 
+smallville 
+itil 
+itil 
+smallville 
+smallville full 
+ich und ich 
+mango 
+mature 
+mature porn 
+mature porn 
+mature porn 
+porn 
+porno video 
+search files, youtube and liveleak 
+lesbian 
+lesbian 
+topless 
+paris hilton 
+search files, youtube and liveleak 
+porno 
+thompson-necu izdat ja 
+thompson-necu izdat ja 
+thompson-necu izdat ja 
+thompson 
+necu izdat ja 
+necu izdat ja 
+thompson-necu izdat ja 
+roff 
+roff 
+nero 
+ubuntu 
+desperate hosewive 
+desperate housewive 
+desperate housewive s3 fr 
+desperate housewive s3 
+naruto fr 
+naruto 
+narut 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+linux xp 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+linux 
+video 
+video xxx 
+video xxx 
+7.10 final 
+7.10 final 
+xxx 
+7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+xxx 
+gusty 7.10 
+parol pou ri 
+parol pou 
+parole pou 
+morne a l'eau 
+guadeloupe 
+search files, youtube and liveleak 
+lv 
+black 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search all files 
+search all files 
+300 
+the simposon ita 
+search files, youtube and liveleak 
+salsa 
+die hard 4 
+binic woman 
+bionic woman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+journeyman 
+pirates 
+pirates worlds 
+reason 
+jim carrey 
+jim carrey 
+porn 
+carrey 
+nl 
+prison break 3x01 
+bourne 
+fat women 
+fat womens 
+fat 
+search files, youtube and liveleak 
+fat 
+fat 
+suse 
+ana reverte 
+ita dvd5 
+nude 
+eva longoria home video hot milf 
+eva longoria 
+85 ben amin 
+ben amin 
+prison break 
+ben amin 
+ben amin 
+ben amin 
+ben amin 
+sherine 
+anna vissi 
+lilian haddad 
+katia abbas 
+warwa 
+najwa sultan 
+najwa 
+chevel 
+mazicana 
+boys 
+oscar athie 
+oscar athie 
+oscar athie 
+pasteles verde 
+los pasteles verde 
+los iracundos 
+santo & yohnny 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+medieval 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+paulina rubio 
+paulina rubio 
+paulina rubio 
+thalia 
+rihanna 
+rihanna 
+rihanna 
+rihanna 
+rihanna 
+cumshot 
+cumshot 
+cumshot 
+scientist 
+stargate atlantis 
+stargate atlantis ita 
+stargate atlantis ita 
+stargate atlantis ita 
+stargate atlantis 
+ghost 
+fantasmi 
+fantascienza 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+crisys 
+crysis 
+search files, youtube and liveleak 
+halo 2 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+suriname 
+ita 
+lamu 
+sport 
+live 
+pompino 
+tv 
+suriname 
+suriname 
+suriname 
+ita 
+bang brothers 
+live sport 
+live tv 
+pornography 
+chuck und larry 
+porno 
+rotterdam 
+ani di franco 
+ani di franco 
+spellbound 
+spellboundance 
+mauro astolfi 
+suriname 
+tori amos 
+tori amos 
+tribler 4.1.4 
+tribler 
+tribler 
+tribler 
+superbad deutsch 
+superbad german 
+superbad 
+superbad ger 
+mauro astolfi 
+mauro astolfi 
+dr house 4x 
+aap 
+aap 
+aap 
+die 
+die 
+die 
+die 
+hustler 
+pthc 
+preteen 
+kinderficker 
+kinderficker 
+könig der wellen 
+hindi 
+johnny gaddar 
+johnny gaddar 
+sex 
+die 
+antiarp4.3.1 
+die 
+antiarp4.3.1 
+antiarp 
+hallo jelle 
+poetry 
+spoken words 
+spoken word 
+audio book 
+couple 
+hardcore 
+black 
+group 
+group 
+transfoermer 
+transformers 
+group 
+simpsons 
+kinderliedjes 
+kerst 
+la ink 
+linux 
+police 
+la ink 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+la ink 
+la sposa cadavere 
+die 
+die 
+la sposa cadavere 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+dashboard 
+dashboard 
+bellucci 
+kiss 
+kiss 
+kiss 
+kiss 
+kiss 
+kiss 
+kiss 
+kiss 
+kingdom 
+pinnacle studio 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+horror 2007 
+horror 2007 
+horror 2007 
+horror 2007 
+horror 2007 
+horror 2007 divx 
+divx horror 2007 
+divx horror 
+divx ita horror 
+die 
+latex 
+alles is liefde 
+latex 
+latex 
+dvd 
+close combat 
+kiss 
+rubber 
+gattaca 
+him 
+kiss 
+ku 
+kut 
+slikken 
+sperm 
+teen 
+amateur xxx 
+die 
+transformer 
+transformer 
+lost 
+fetish 
+lyx 
+adobe indisign 
+game 
+game 
+tombraider 
+commando 
+dreamwreaver cs3 
+dreamwreaver 
+dreamwreaver 
+adobe 
+serj tankian 
+to feggaraki 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stardust 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+daron malakian 
+serj tankian 
+stardust 
+mika 
+mika 
+akon 
+serj tankian 
+die 
+to feggaraki 
+halo 2 
+halo 2 
+greece 
+desparate housewives 
+transformer 
+school 
+to feggaraki 
+to feggaraki 
+nino d angelo 
+senza giacca e cravatta 
+harry potter 
+autodesk autocad 
+autodesk autocad lt 
+gemelli diversi 
+insieme 
+dj francesco 
+o'reilly 
+o'reilly 
+o'reilly 
+o'reilly 
+salta 
+gigi d alessio 
+bob seger 
+dove sei 
+gigi finizio 
+bob seger 
+bob seger 
+wwe 
+ninananna 
+ni na nanna 
+alabama 
+alabama 
+alabama 
+alabama 
+office 2003 br 
+battlestar galactica 
+ancilia 
+cable guy 
+aero flux 
+surf up 
+ww2 
+sex4idiots 
+jordenvandijk 
+the crow 
+hot action cofee 
+counter strike 
+scarface 
+here comes the pain 
+jackass the game 
+smackdown just bring it 
+smackdown vs raw pc 
+fever for the flava 
+stuntman ignition 
+stuntman ignition game 
+gay older 
+here comes the pain game 
+gay daddy 
+gay dad 
+smackdown vs raw 2008 game 
+gay old 
+pro evolution soccer 2008 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+she male 
+rise and fall 
+pro evolution soccer 2008 game 
+hi dear delaila 
+hi dear delaila 
+sex 
+dear delaila 
+education 
+dear 
+dear 
+dear 
+dear 
+dear mr president 
+dear mr president 
+fetish 
+dear delaila 
+dear delilah 
+dear delilah 
+there delilah 
+hey there delilah 
+hey dear delilah 
+scooter 
+special d 
+old harrys game 
+cum drinking 
+clips 
+soriety 
+soriety 
+hazing 
+hazing 
+paris hilton 
+little children 
+eyes wide shot 
+nod 32 ita 
+search files, youtube and liveleak 
+sean kingston 
+sean kingston 
+sean kingston 
+ub40 
+50cent 
+50cent 
+50cent 
+50cent 
+50cent 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tull 
+tull 
+tull 
+tull 
+eminem 
+a 
+sting 
+dog 
+c# 
+c# 
+.net 
+sql 
+dalla 
+lisa 
+lisa 
+elisa 
+elisa 
+dj 
+fear and loathing in las vegas 
+fear and loathing in las vegas spanish 
+fear and loathing in las vegas espanol 
+fear and loathing in las vegas 
+the red violon 
+the red violin 
+the red violin 
+the red violin 
+the red violin 
+le violon rouge 
+dirty dancing 2 
+windows 
+kama sutra: a tale of love 
+the motorcycle diaries 
+the motorcycle diaries 
+the life is beautiful 
+v for vendetta 
+spy game 
+casablanca 
+cidade de deus 
+pontello 
+faust 
+dildos 
+candy 
+bonn 
+taxi driver 
+big fish 
+pedofilia 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+amores perros 
+memoirs of a geisha 
+la cage aux folles 
+asp.net 
+malua 
+elvis 
+elvis 
+frans bouwer 
+adobe 
+adobe bridge keygen 
+adobe bridge 
+adobe bridge 
+adobe bridge 
+tv west 
+search files, youtube and liveleak 
+tv west 
+yes-r 
+yes-r 
+aziz el berkani 
+search files, youtube and liveleak 
+sean kingston 
+pokertracker 
+latex 
+it's always 
+it's always sunny 
+cable guy 
+battlestar galactica 
+windows 2003 
+bianca buchamp 
+windows 2003 r2 
+ancilia tilia 
+lolita 
+lost s04 
+sex 
+russian 
+lolita 
+fist 
+pre-teen 
+preteen 
+childsex 
+teensex 
+interacial 
+interracial 
+interracial 
+surf's up: i re delle onde 
+shakespeare 
+nude 
+dfx 
+bbc the planet earth 
+gay sex wrestling 
+gay sex wrestgay sexling 
+gay sex 
+gay sex 
+muscle wrestling 
+booty 
+muscle wrestling 
+muscle wrestling 
+transformers 
+muscle wrestling 
+gay hump 
+francais 
+fr 
+slightly stoopid 
+pinnacle studio 11 ultimate 
+spiderman 3 
+until death van damme 
+until death 
+air force two 
+xxx 
+xxx asian 
+dvd fab platinum 
+xxx asian teen 
+xxxjapanteen 
+xxx japan teen 
+russian incest 
+sensual 
+nudist 
+russian 
+nudist 
+lolita 
+russian xxx 
+russian incest 
+asian teen 
+nicky 
+zett 
+gurren 
+neighbor 
+preteen 
+16 teen 
+love teen 
+baby doll 
+coitus 
+sexy 
+pthc 
+incest 
+r@ygold 
+interracial sex 
+interracial 
+caramel 
+pthc 
+kinder 
+kinderporn 
+svay pak 
+cambodia 
+passwordlist 
+lolita 
+war 
+mpg 
+ecdl 
+emma shapplin 
+learning 
+learning 
+rush hour 
+barbie 
+ita i soliti ignoti 
+i soliti ignoti 
+learning 
+lara fabian 
+lara fabian 
+learning 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+il gladiatore 
+web 
+mpg 
+mpg 
+mpg 
+mpg 
+mpg 
+how i met your mother s3e1 
+how i met your mother 301 
+how i met your mother 301 
+how i met your mother 
+mpg 
+mpg 
+mpg 
+mpg 
+
+johnny mneumonic 
+white girl dancing 
+search files, youtube and liveleak 
+esx 
+mpg 
+diamonds are forever 
+james bond 
+mpg 
+mpg 
+ps2 
+search files, youtube and liveleak 
+porno 
+porno 
+dutch 
+serj tankian 
+windows 
+ocean 13 
+ubuntu 
+kaspersky 
+autoccad 
+autocad 
+power dvd 
+power dvd 
+power dvd 
+power dvd 
+pooh 
+pooh in concerto 
+bokito 
+il trionfo dell'amore 
+manuale d'amore 
+ls 
+windows 
+dj 
+dalla 
+video angels 
+james bond jr 
+dragon ball 
+dragon ball digitally remastered 
+dragon ball remastered 
+dragon ball remastered 
+dragon ball remastered 
+james bond 
+never say never again 
+psp 
+james bond .cbr 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+.cbr 
+cbr 
+bond cbr 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cbr 
+007 cbr 
+spy cbr 
+free running 
+free running 
+free running 
+free running 
+big heat 
+big booty white girls 
+never say never again 
+warcraft 
+big booty white girls 
+diamonds are forever 
+the kingdom 
+creampie 
+breaking benjamin 
+das fünfte element 
+diva 
+diva song 
+diva 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bronskbeat 
+bronskibeat 
+bronski 
+tvu:\\53060 
+beata dubasova 
+karmen 
+dance 
+pavarotti 
+aloha shooby-doo 
+hight school musical 2 
+hight school musical 2 
+hight school musical 2 
+bear vs shark 
+bear vs shark 
+terror hawk 
+terrorhawk 
+bear vs shark 
+beck 
+beck 
+jpg 
+porno 
+achi 
+a chi 
+kut 
+sidera 
+sidera 
+bkini 
+ridera 
+bikini 
+nude 
+diavolo biondo 
+diavolo biondo 
+spiderman 
+axxo 
+tits 
+dite a laura che l amo 
+dite a laura che l amo 
+boobs 
+di te a laura che l amo 
+dirte laura che l amo 
+laura che l amo 
+franco 1 franco4 
+ho scrito tamo sulla sabia 
+ho scrito tamo sulla sabia 
+ho scritto tamo sulla sabia 
+ho scritto tamo sulla sabbia 
+ho scritto t amo sulla sabbia 
+tanta voglia di lei 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gino d angelo 
+nino d angelo 
+senza giacca e cravatta 
+i ricordi dell cuore 
+i ricordi dell cuore 
+ricordi dell cuore 
+amedeo minghi 
+i ricordi del cuore 
+ricordi del cuore 
+clowns und helden 
+und helden 
+ndw 
+neue deutsche welle 
+das modul 
+norton 
+prison break 
+prison break 1 
+prison break 1 episode18 
+prison break 1 episode17 
+prison break 1 
+prison break 1 
+prison break season 1 
+prison break flight 
+crocodile dundee 
+prison break go 
+prison break 21 
+crocodile dundee 
+g-unit 
+reason 
+g-unit 
+reason 
+g-unit 
+moby 
+the office 
+orkester sound bank 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tenacious d pick 
+tenacious d pick 
+tenacious d pick 
+g-unit 
+tusenee 
+anita blonde 
+spiderman 3 
+mythbusters 
+gpethes erben 
+goethes erben 
+goethes erben 
+asia 
+heatha hunter 
+heather hunter 
+teen 
+resident evil: extinction 
+könige der wellen 
+wächter des tages 
+planet terror 
+resident evil: extinction 
+mai dire candid 
+search files, youtube and liveleak 
+fifa 
+moby 
+beastiality 
+windows xp 
+nero 7.10 
+nero 
+search files, youtube and liveleak 
+private 
+trivial pursuit 
+sims 
+wedding 
+wedding date 
+wedding date 
+wedding date 
+koksijde 
+koksijde 
+sims 
+prison break 
+eros ramazzotti 
+il buio nell'anima 
+300 
+300 ita 
+hed kandhi 
+bourne ultimatum 
+switch 
+dance 
+hits 
+freeloader 
+mika 
+regi 
+xxx 
+search files, youtube and liveleak 
+moskovskiy gigolo 
+moskovskiy gigolo 
+regi 
+dance 2007 
+reason 
+muschi 
+muschi 
+fotze 
+sex 
+xbox 
+xbox fifa 
+fifa 2007 
+fifa 2007 xbox 
+desperate 3 
+desperate 3 stagione completa 
+gouden kooi 
+desperate housewives 3 stagione completa 
+desperate housewives 3 stagione 
+desperate housewives 3 
+desperate housewives 3 ita 
+desperate housewives 3 ita 
+desperate housewives 3 
+amatuer 
+amateur 
+lesbian 
+desperate housewives 3 ita 
+desperate 3 ita 
+sandra 
+desperate pioggia 
+korn 
+beatles 
+beatles 
+mega mindy 
+de gouden kooi 
+big brother 
+shrek 
+sports 
+ps3 
+playstation 
+naughty 
+paris letterman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tenacious d pick 
+
+letterman paris 
+pick of destiny 
+andre hazes 
+sex 
+mpg 
+dmg 
+andre hazes 
+andre hazes 
+andre hazes 
+andre hazes 
+andre hazes 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+muziek 
+software 
+software 
+audio 
+search files, youtube and liveleak 
+films 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+visio 
+teijema 
+fedex 
+caribean 
+boy nude 
+boy nude 
+boy nude 
+young boy 
+this boy's life 
+spanish 
+autos locos 
+8 
+shemale fuck guy 
+pedo boy 
+surfs up 
+the simpsons 
+friends 
+straightheads 
+dawn patrol 
+jesse james 
+ratatouille 
+1200 
+til lies do us part 
+southpark 
+last legion 
+scientist 
+bjork 
+lost so3e17 
+search files, youtube and liveleak 
+guitar lessons 
+ac/dc 
+the office 
+the office the injury 
+http://torrents.thepiratebay.org/3725005/top_1000_pop_hits_of_the_80s_(4.32gb)__.3725005.tpb.torrent 
+dexter 
+denis chamber 
+threes company 
+the prisoner 
+sexy 
+teens 
+wil ferell 
+sexy 
+barbara dennerlein 
+china xxx 
+milly d'abbraccio 
+milly d'abraccio 
+milly 
+selen 
+veronika 
+anna 
+russian incest 
+backgammon 
+pablo neruda 
+poesie 
+hungarian 
+stanlio 
+search files, youtube and liveleak 
+fantasmi 
+teen 
+stanlio 
+stanlio 
+teen in school 
+snowboard 
+naruto 
+naruto folge 1 
+cage fighting fury 
+midi 
+ps2 
+asian 
+last legion 
+the brave one 
+the brave one dvdrip 
+last legion 
+rape 
+saw dvdrip 
+lilly the world 
+search files, youtube and liveleak 
+netball 
+sisster 
+sister 
+povia 
+zucchero 
+henti 
+hentai 
+gynslip 
+gymslip 
+forced th 
+forced tg 
+forced 
+femdom 
+swollow 
+swollow 
+swallow 
+nappy 
+long feet girl 
+transformed 
+pool change 
+pool 
+pool girl 
+pigtails 
+panties 
+panties 
+shopping gitl 
+shopping girl 
+shopping gitl 
+shopping girl 
+shopping girl 
+ptff 
+feet cartoon 
+feet cartoon 
+anime feet xxx 
+kiss feet anime 
+http://www.fulldls.com/download-others-31710-test_drive_unlimited_pal_multi5_ps2dvdkudos.torrent 
+http://www.fulldls.com/download-others-31710-test_drive_unlimited_pal_multi5_ps2dvdkudos.torrent 
+chet baker 
+snuffx 
+snuffx xxx 
+vagina torture 
+mfx feet 
+xxx extreme 
+ptff 
+child feet 
+feet mature 
+feet soles 
+you are the music in me 
+mikayla feet 
+search files, youtube and liveleak 
+teacher 
+hot mom 
+search files, youtube and liveleak 
+padova 
+search files, youtube and liveleak 
+sex 
+squrting 
+squrting xxx 
+squrti xxx 
+squirt xxx 
+squirt xxx you tube 
+pumping pussy 
+pump pussy 
+mac & bumble 
+mac & bumble 
+layla 
+stretch 
+speculum 
+speculum 
+votze 
+muschi 
+pussy 
+acrbat 8 
+open 
+garfield 
+mcdonalds 
+garfield 
+garfield 
+yanni 
+jesse cook 
+axxo 
+dvdrip 
+dvdrip 2007 
+vasco rossi 
+surf'up 
+aerosmith 
+babyshambles 
+forro 
+pis 
+flamengo 
+ccnp 
+642 891 
+linkin park 
+linkin park 
+timbaland, timberlake, 50 cent 
+timbaland 
+timberland 
+timberlake 
+ponygirl 
+spears gimme 
+spears gimme gimme 
+javascript:deliverjwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny');search files, youtube and liveleak 
+jwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny') 
+jwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny') 
+pussycat 
+pussycat dolls 
+spacehog 
+tatu 
+sex 
+apple dvd studio 
+osx 
+apple 
+halo 
+diy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+precious 
+etheridge 
+nero 
+alice bondage 
+ballbusting 
+leash 
+puppyplay 
+ponyplay 
+speaking 
+resturant empire 
+empire 
+afromania 
+gurren 
+php 
+photoshop 
+batman 
+imaishi 
+tarzan 
+battle athletes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+daiundoukai 
+* 
+* 
+moana 
+drunk 
+sex 
+superman 
+lois lane 
+lois 
+returns 
+night cam 
+night cam 
+spy cam 
+spacehog 
+windows vista 
+hits 2007 
+versuz 
+bacardi 
+new 
+corrupção 
+corrupção 
+mix 
+steel 
+steel 
+steel 
+highschool 
+rounders 
+frankie goes 
+veronica 
+maksim 
+radio veronica 
+maksim 
+maksim 
+maksim 
+radio caroline 
+psp 
+.mp4 
+n73 
+resident evil 
+route 
+nero 
+chess 
+dirt 
+symbian 
+crash 
+ghostrider 
+stunt 
+dj pagal 
+command 
+swat 
+disney 
+fatherland 
+lightness 
+disney 
+llead 
+ulead 
+code pack 
+upskirt 
+sister 
+naughty america 
+milf 
+granny 
+mature 
+older 
+mother 
+in law 
+spycam 
+sister 
+toilet 
+ringtone 
+realtone 
+handyman 
+battle 
+amiga 
+cloanto 
+forever 
+barry white 
+barry white 
+barry white 
+tomtom 
+hip hop hood 
+15 
+vlc 
+mature 
+incest 
+incest 
+sharon stone 
+sharon stone 
+autocad 
+winxp black 
+black xp 
+pig tails 
+games skirt 
+change 
+hentia 
+hentia 
+hentia 
+ecdh 
+elliptic 
+elliptic curve cryptology 
+search all files 
+prive 
+pink 
+voyeur 
+search files, youtube and liveleak 
+change room 
+lockeroom 
+hellman 
+lockeroom 
+changingroom 
+showeroom 
+showe 
+shower 
+change room 
+change room 
+change room 
+hidden movies 
+hidden movies 
+handjobs 
+handjobs 
+handjobs 
+the kingdom 
+handjobs 
+handjobs 
+the brave one 
+browulf 
+the dark knight 
+30 days of night 
+dragon wars 
+imagine me & you 
+hitman 
+the invasion 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+we're getting nowhere 
+imagine me & you 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+childsex 
+planet 
+pink handjobs 
+fucking 
+fucking machines 
+device bondage 
+sex submission 
+hogties 
+hogtied 
+imagine me & you 
+search files, youtube and liveleak 
+hogties 
+hogtied 
+hogtied 
+fucking machine 
+uninettuno 
+pippo 
+linux 
+heroes 
+fantozzi 
+lupin 
+sybian 
+animal 
+stardust 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+foto 
+a 
+a 
+a 
+a 
+die hard 4 
+divx 
+divx 
+jetzt oder nie jodie 
+jetzt oder nie jodie 
+jetzt oder nie 
+encarta 
+ms office languages 
+ms office languages 
+ms office 
+lost 
+ms.office.language 
+ms.office.language.pack.italian 
+ms.office 
+ms office 
+mirc 
+ts 
+layla 
+incest 
+shemale 
+asian 
+å°Ã¦¾¤å\9c\93 
+tiesto 
+radiohead 
+oops 
+tomtom 
+pedo 
+tottenham 
+mature black 
+mature black 
+mature 
+celeb 
+appleby 
+portugal 
+orgasm the faces of estacy 
+davideo 
+vivid 
+de andrè 
+jerry lewis 
+picasa 
+de andre 
+windows xp 
+windows vista 
+criminal minds 
+when a man loves a woman 
+reason 
+dying to dance 
+gba rom 
+i'm a boy anorexic 
+nina harthly 
+minotaur 
+nina harthly 
+nina harthly 
+korn 
+deutsch 
+squ 
+spiderman 
+annie lenox 
+annie lennox 
+hd dvd 
+annie lennox 
+ferrari 
+720p 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sport 
+hd dvd movies 
+wrestling 
+catfight 
+hd dvd 
+catfight 
+batman begins hd 
+slipknot 
+casting 
+darering 
+poker 
+sexgame 
+assparade 
+sexgame 
+mixed fight 
+assparade 
+dww 
+fkk 
+rare silk 
+casting 
+ballet 
+nipple slip 
+erotic 
+greg lake 
+spong 
+james gang 
+barbara dennerlein 
+five finger 
+five finger 
+five finger 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+speed up my pc 
+shrek 
+black ps2 
+atreyu 
+atreyu 
+kill bill ru 
+as i lay dying 
+kurasawa 
+clubland extreme 
+clubland 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kagemusha 
+shichinin 
+samurai 
+samurai 
+the heartbreak kid 
+tool 
+avatar 
+tool 
+xbox360 
+xbox360 halo 3 
+tool music 
+snowboard 
+xbox360 halo 3 
+prison break season 3 
+robot chicken 
+robot chicken season 3 
+boondock 
+the boondock 
+boondocks cartoon 
+franky tunes 
+family guy 
+password list 
+junior songfestival 2007 
+rapidshare 
+torrent 2.0 
+roisin murphy 
+airport tycoon 
+happy feet jumpstyle 
+force 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+trample 
+face 
+adaware 
+adaware 
+trampling 
+trampling 
+trampling 
+search files, youtube and liveleak 
+visual basic 6 
+visual basic 6 
+visual basic 6 
+vb6 
+queen 
+vb6 
+heroes 
+heroes ita 
+heroes 11 ita 
+heroes 11 ita 
+pro evo 
+pro evo 08 
+pro evo 08 
+pro evo 08 
+porno hentai 
+pro evo 
+airport tycoon 3 
+airport tycoon 3 
+pee 
+pis 
+666 
+pis 
+ggg 
+thompson 
+thompson 
+pee 
+ggg 
+dvd ita 2007 
+heroes 
+heroes ita 
+mauro nardi 
+lucio battisti 
+alles 
+dvx ita 
+harry potter ita 
+herbalife 
+codegear 
+search files, youtube and liveleak 
+adult 
+roboform 
+roboform 
+stardust memories 
+annie hall 
+annie hall 
+piss 
+hwk 
+hwk 
+ufs 
+surf's up ita 
+surf's up ita 
+surf's up ita 
+she wants revenge 
+saras soft 
+sarassoft 
+sarassoft 
+sarassoft 
+n-box 
+n-box 
+this is forever 
+in rainbows 
+dave gahan kingdom 
+ita divx 
+dave gahan hourglass 
+divx ita 2007 
+surf's up: i re delle onde 
+jenna 
+porn 
+beastiality 
+mac 
+anal teen 
+anal teen 
+world of warcraft 
+elan 
+madona 
+games 
+warcraft 
+amv 
+loli 
+womens wrestling 
+tampa 
+womens pro wrestling 
+brother bear 
+bob and doug 
+search files, youtube and liveleak 
+abby winters 
+abby winters 
+nwwl 
+viv thomas 
+girlfriends films 
+twilight women 
+rsp2psharespy 
+ultimato burner 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+smallville 
+satellite tv 
+smallville 
+fifa 
+fifa 2006 
+winning elleven 
+cable tv 
+tv 
+nero 8 
+chama 
+internet satellite tv player 
+dubai 
+craig david 
+sexo 
+eva green 
+eva green 
+eva green os sonhadores 
+copo de colera 
+la fabbrica illuminata 
+vista 
+vista 
+eva green 
+eva green dreamers 
+lindberg 
+christian lindberg 
+eva green dreamers scene 
+de la nux 
+joseph alessi 
+os 
+p v de la nux 
+mac os 
+nicole graves 
+office 
+office 
+office2007 
+office2007 
+office2007 
+nicole graves 
+nicole graves 
+rita coolidge 
+secret 
+public nudity 
+naked 
+topless 
+jimmy rosenberg 
+brawl nintendo 
+korea 
+diablo 
+planetes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pthc 
+search files, youtube and liveleak 
+ivana 
+pedo 
+grillo 
+francesco e chiara 
+korea 
+placebo 
+prince 
+prince live 
+amateur home 
+amateur home 
+amateur home 
+amateur 
+prince 
+dvd ita 2007 
+porn] 
+porn 
+dvd 
+metallica 
+metallica 
+porn 
+raped 
+black 
+raven riley 
+nicole graves 
+next door nikki 
+next door nikky 
+next door nikky 
+autodesk animator 
+next door nikki 
+next door 
+kate's playground 
+kate 
+heroes ita 
+tottenham 
+interpol 
+heros ita 
+heroes ita 
+dvd ita 2007 
+search files, youtube and liveleak 
+live 
+cum 
+sexy 
+couple 
+cleevage 
+cleevage 
+cleevage 
+elisa 
+pop 
+lavasoft 
+pop 
+the mouse hunt 
+goes mi heart 
+goes mi heart 
+goes mi heart 
+amateur 
+goes mi heart 
+goes my heart 
+goes my heart 
+24 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+eric johnson 
+steve morse 
+team galaxy 
+malmsteen 
+kate bush 
+queen 
+abba 
+gimme more 
+ls 
+ls 
+ls magazine 
+ls magazine 
+valeria marini 
+15 y 
+14y 
+14 y 
+web cam teen 
+web cam preteen 
+valeria marini 
+eva henger 
+ambrus 
+gitarre 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fiest 
+feist 
+feist 
+anal 
+feist 
+health 
+health 
+nutritional 
+centerfold 
+centerfold 
+centerfold 
+giles 
+geiles 
+jill 
+jaime pressly 
+click 
+cameltoe 
+cameltoe 
+cameltoe 
+guns n' roses 
+snowboard 
+metalica 
+metallica 
+metallica 
+nothing else matters 
+tobias regner 
+i stlii burn 
+i still burn 
+lolita models 
+lolita models 
+13 y 
+eat pre 
+eat pre 
+young 
+young 
+painkiller 
+judas priest 
+pairs skating 
+windows vista 
+amateur 
+homemade 
+kuschelrock 
+reign 
+adamski 
+meeuwis 
+dune 
+dune 
+do you know 
+do you know 
+op zoek naar evita 
+feist 
+ist 
+fist 
+fist 
+feist 
+exdesi 
+xxx 
+xxx 
+xxx 
+13 
+eminem 
+exdesi 
+exdesi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+babel 
+a mighty heart 
+feist 
+squirt 
+hot party 
+mmf 
+mff 
+mff 
+george michael 
+iwork 
+laura pausini 
+laura pausini 
+snatch 
+fight club 
+a mighty heart 
+anal 
+breast 
+pocketwijzer 2007 
+search files, youtube and liveleak 
+rocco 
+breast 
+breast 
+george michael 
+haze 
+samatha 
+samantha 
+silvia 
+spanking 
+99bb 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+foto 
+barbie sexy 
+barbie sexy 
+nomadi 
+nomadi 
+totò 
+totò 
+xfiles 
+toto' 
+totò 
+totò 
+mollo tutto 
+harry potter 
+harry potter ita 
+300 
+300 iso 
+300 
+masterbation 
+girls gone wild 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vasco rossi 
+poweriso 
+google earthn 
+google earth 
+bitdefender 
+bitdefender total security 
+transformers ita 
+crossroads festival 
+crossroads festival 
+harry potter 
+harry potter ita 
+vasco rossi 
+female masterbation 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+girls gone wild 
+bimaxx 
+bisexual 
+daniel bedingfild 
+uitzending gemist 
+billy talent 
+ice baby 
+top20 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tuning car 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+feist 
+fiat punto 
+i fell good 
+i want a break fee 
+i want a break free 
+higt school musical 
+punto tuning 
+wallpaper 1280 
+disco 
+prison break 
+queen 
+sex 
+pop goes my heart 
+porno 
+drifter 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+twsited sisterç 
+twsited sister 
+twsted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+divx ita rush hour 
+long feet girl 
+korn 
+molotov 
+pop goes my heart 
+download molotov 
+download molotov 
+pop goes my heart 
+download molotov 
+download molotov 
+pop goes my heart 
+pop goes my heart 
+large tongue 
+pop goes my heart 
+biggest xxx 
+dvx ita 
+dvx ita 
+snuff 
+snuffx 
+snuff xxx 
+large vagina 
+large pussy 
+bebe long tongue 
+bebe long tongue 
+babe long tongue 
+live for speed 
+save ryan 
+convert 
+babe long tongue 
+shemale 
+big dick 
+shemale 
+shemale 
+squirt 
+dramatic chipmunk 
+model 
+dramatic chipmunk 
+defrag 
+dramatic chipmunk 
+o&o 
+dramatic chipmunk 
+dramatic chipmunk 
+juanita 
+search files, youtube and liveleak 
+lou reed 
+heroes 
+omnipage 16 
+omnipage 
+dvdshrink 
+autocad 2008 
+autocad 2008 
+omnipage 
+omnipage 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+it crowd 
+jenna haze 
+lisa 
+amanda peat 
+betty 
+christy 
+naughty 
+legal 
+barely legal 
+campbell 
+anetta 
+erica 
+erica 
+sunny 
+amy 
+vivid 
+private 
+search files, youtube and liveleak 
+dr who 
+brozman 
+brozman lumiere 
+brozman 
+brozman 
+lumiere 
+lumiere bro 
+tanita tikaram 
+tanita tikaram 
+tanita tikaram 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+a 
+tanita tikaram 
+tanita tikaram 
+anime 
+earth world 
+slevin 
+ita slevin 
+donnie 
+magix music maker 
+sexy 
+sexy girl 
+sexy love 
+sexy love 
+sexy make love 
+make love 
+sex love 
+amsterdam 
+search files, youtube and liveleak 
+gay 
+gay 
+hard 
+hard 
+porno 
+sesso 
+green 
+asia 
+heroes s02e03 
+gateway 
+chuck s01e0 
+chuck s01e03 
+manuale 
+babes xxx 
+manuale d'amore 
+babe xxx 
+babe long tongue 
+manuale d'amore 
+greys anatomie 
+greys anatomie 
+search files, youtube and liveleak 
+dvd 
+dvd ita 
+long tongue 
+un impresa da dio 
+sweet tongue 
+sweet long tongu 
+sweet long tongue 
+sweet feet 
+50 cent 
+black label 
+sixx 
+nin 
+nine 
+nine 
+black label 
+nero 
+nero 8 keygen 
+foo fighters 
+german 
+dio 
+exceed 
+britney 
+skat 
+gladiator 
+gladiator 
+adobe indesign cs3 premium 
+adobe indesign cs3 premium italiano 
+vanessa del rio 
+fonts 
+search files, youtube and liveleak 
+ita divx 
+pandora tomorrow 
+blonde 
+adobe creative cs3 premium 
+adobe creative cs3 premium italiano 
+adobe creative cs3 premium ita 
+adobe creative cs3 premium 
+adobe creative cs3 premium it 
+adobe creative cs3 premium ita 
+nena 
+nena 
+tits 
+nomadi 
+rolling stones 
+pussy 
+phoneix 
+oblivion multiplication bug 
+oblivion multiplication 
+oblivion 
+oblivion 
+oblivion 
+cumshot 
+vagina 
+berkeyley course 
+berkeyley course 
+berkeyley course 
+e-book 
+filerecovery 3.1 
+filerecovery 3.1 
+filerecovery 3.1 
+e-book 
+filerecovery 3.1 
+file recovery 3.1 
+file recovery 
+filerecovery 
+file recovery 
+porn 
+gay porn 
+one piece 
+ita 
+train 
+game pc 
+game pc 
+ita 
+spoed 
+search files, youtube and liveleak 
+alma chua 
+titanic 
+titanic 
+zoo 
+pc games 
+divx ita enterprise 
+divx ita enterprise 
+pc adult games 
+beastiality 
+divx ita enterprise 
+snake 
+dog 
+horse 
+enterprise ita 
+pc games 
+train 
+ratatouille 
+meluha 
+melua 
+anal 
+train model 
+virutal 
+virtual 
+pinnacle studio 11 
+stardust 
+nero 8 keygen 
+album 
+vangelis 
+syngress 
+books 
+cherry ghost 
+grey anatomy 
+chateau 
+young harlots 
+preteen 
+animal 
+train crach 
+johnny cash john henry 
+johnny cash john henry 
+johnny cash john henry 
+rape 
+cherry ghost people 
+dylan dog 
+300 
+300 
+shrek terzo 
+prison break s03e03 
+complete 
+completa 
+xbox 360 pes 08 
+xbox 360 
+madame butterfly 
+los simpson 
+300 
+300 spa 
+alcudia 
+the kingdom 
+death sentence 
+trade 
+dragon naturally speaking 
+xxx 
+dragon naturally speaking serial 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+the kingdom 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+alles is liefde 
+shoot 'em up 
+codec 
+codec 
+codec 
+codec 
+codec 
+rape 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+feet 
+foot 
+tintin en america 
+red dragon - before the silence 
+8mm 
+the agency 
+the agency 
+korn 
+the missing 
+porno 
+johnny cash john henry 
+johnny cash cd 
+massive r & b 
+massive r & b winter collection 
+300 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+2600 
+isazi 
+activation xp sp2 
+hacker 
+webcam 
+brasilian 
+brasilian 
+brasilian 
+brazilian 
+harry potter 
+harry potter and the order of the phoenix 
+wwe 
+tna 
+wwe 
+xxx 
+wwe 
+wwe 
+wwe 10 
+match 
+goo goo 
+deadwood 
+xxx 
+shrek 
+pirates of the carribean 
+adobe photoshop 
+fuck 
+sex party 
+sex party 
+parkour 
+parkour 
+johny cash 
+office:mac 
+search files, youtube and liveleak 
+gay 
+film 
+gay dad 
+dad son 
+father figure 
+gay 
+craymo 
+gay 
+resident evil extinction 
+colton ford 
+anal 
+project vector 
+fall out boy 
+gay 
+gay 
+simple plan 
+femdom 
+gay 
+high heels 
+beethoven 
+la mamma 
+la figa 
+ocean thirteen 
+capoeira 
+foto 
+foto 
+search files, youtube and liveleak 
+xbox 
+animal 
+scott 
+tony scott 
+ulrich 
+southpark 
+scientology 
+ita divx 
+divx 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+prison break 303e01 
+prison break s03e01 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+.mp3 
+prison break s03e01 heb 
+.exe 
+prison break s03e01 
+× ×�×Â\98 
+where the boys 18 
+where the boys 
+styles yamaha 
+anal 
+italy anal 
+italy 
+italy 
+psr 
+diva 
+simulator 
+the frames 
+python 
+stage 
+hiroko 
+php 
+ubuntu 
+psr 8000 
+napoli xxx 
+amateur 
+napoli 
+napoli 
+napoli 
+foggia 
+ragazze 
+sexy girl 
+divx ita 
+webcam italy 
+telecamera nascosta 
+php 
+explosions in the sky 
+ebony 
+black 
+miles davis 
+cs3 
+star wars kid 
+php 
+php 
+gladiator 
+vno ncw 
+black 
+creampie 
+wii ps3 
+php 
+28 weeks later 
+madonna 
+the gossip 
+lost 
+lost sub nl 
+lost sub dutch 
+wii ps3 
+tolle 
+byron katie 
+columbo 
+course of miracles 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+youtube 
+saw 4 
+saw 4 parody 
+leg 
+art of noise 
+art of noise 
+mvc 
+xxx 
+csi 
+beauty 
+beauty 
+girls 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pregnant 
+search files, youtube and liveleak 
+pregnant 
+lee ann rimes 
+studente 
+leann rimes 
+barely legal 
+le fate ignoranti 
+le fate ignoranti 
+ragazze a beverly hills 
+search files, youtube and liveleak 
+http://www.youtube.com/ 
+eckharrt tolle 
+eckharrt tolle 
+eckhart tolle 
+http://www.youtube.com/ 
+http://www.inbox.lv/ 
+http://www.inbox.lv/ 
+fifa 2008 
+fifa 2008 
+search files, youtube and liveleak 
+china 
+chinesse 
+chineese 
+chinese 
+korea 
+terror 
+thai 
+helo 
+pc games 
+rat 
+radiohead 
+radiohead 
+pamela 
+music 
+jannes 
+macromedia 
+serial 
+snow 
+prison break 
+prison break 304 
+snow 
+saw 
+trombones 
+trombones 
+trombones 
+trombones 
+trombone 
+trombone 
+snow queen 
+snow queen 
+snow 
+top gear 
+lost 3.14 
+lost 3 
+lost 
+lost season 3 
+massive attack 
+audio books 
+lost 3 
+heroes 
+ipod 
+stargate 
+lost episode 14 
+lost episode 14 
+lost season 
+keiser chiefs 
+just jinger 
+big brother 
+mac 
+results 
+katie 
+mature 
+bangbros 
+game video 
+biggest 
+biggest 
+big 
+ines 
+big tits 
+mein kampf 
+mein kampf 
+verboten 
+verboten 
+banned 
+wwe 
+katie melua 
+taxi4 
+"king of kong" 
+footloose 
+milk inc 
+regi 
+regi pinxten 
+milk inc 
+search files, youtube and liveleak 
+toca race 3 
+tokio hotel 
+toca race 3 
+fergie 
+fergie 
+heroes 
+ome henk 
+ome henk 
+preuteleute 
+spanish 
+pc wolfenstein 
+pc wolfenstein 
+pc olimpics 
+pc olympics 
+pc submarine game 
+pc silent hunter 
+pc silent hunter 
+pc submarine 
+pc submarine hunter 
+pc tour off duty 
+umbarella 
+madworld 
+r.kelly 
+search files, youtube and liveleak 
+katie melua star 
+search files, youtube and liveleak 
+katie+malua+star 
+katie+malua 
+katiemalua 
+katie malua 
+katie melua 
+katie melua star 
+porn 
+katie melua 
+hamburg 
+hamburg 
+sailboot katie 
+crash 
+u pana boga w ogródku 
+alex agnew 
+sheila 
+sybian 
+hayden 
+hayden panettiere 
+hayden panettiere 
+hayden panettiere 
+jenna jameson 
+jenna jameson 
+amateur 
+shemale 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+hayden panettiere 
+jordan 
+seu merda 
+resident evil 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+sex 
+sex 
+sex 
+sex 
+girls 
+knoked up 
+young 
+insomniac 
+certe notti 
+creepa kid 
+silverhawks 
+wife 
+groningen 
+4400 
+clapton 
+mark knopfler 
+scooter wheely on motorway 
+gay 
+multiagent systems 
+multiagent systems 
+multiagent systems 
+kamal 
+vista wallpapers 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ebook 
+aisha 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jade 
+microsoft office 2007 
+agent 
+search files, youtube and liveleak 
+team america 
+james uk group 
+an introduction to multiagent systems 
+madness complete 
+fist 
+faust 
+brutal 
+rape 
+fist 
+michael wooldridge 
+introduction to multiagent system 
+multiagent systems - a modern approach to distributed artificial intelligence 
+james blunt 
+kate melua 
+quantum leap 
+james morrison 
+pablo montero 
+east west quantum leap 
+east west quantum leap 
+quake 3 
+try teens 
+preteen 
+search files, youtube and liveleak 
+dr who 
+pedo 
+chamillioaire 
+carmen 
+jimi hendrix 
+in rainbows 
+barenaked ladies 
+opus iii 
+new order 
+the simpsons 
+red drawf 
+chemistry 
+house of ass 
+xana 
+bi apple 
+the elegant spanking 
+black glove 
+tristan 
+search files, youtube and liveleak 
+sport 
+private 
+madonna 
+cream 
+osx 
+lesbian 
+search files, youtube and liveleak 
+asian 
+porno 
+porno russian 
+hostel 
+csi 
+la fabbrica illuminata 
+luigi nono 
+ncis 
+russian 
+renato zero 
+renato zero il cielo 
+vanishing point 
+era glaciale 
+leonard bernstein norton 
+leonard bernstein 
+leonard bernstein 
+classical 
+website builder 
+chucl 
+chuck s01e03 
+knocked up 
+blok 
+het blok 
+het blok net 5 
+mika 
+bravo hits 
+fifa 08 pc games 
+fifa 08 pc game 
+fifa 08 
+pc games 
+fifa 
+jan 
+jan smit 
+smit 
+bzn 
+bzn 
+pavarotti 
+romeo and juliet 
+satellite tv player 
+satellite tv titanium 
+satellite tv titanium 
+peter pan 
+internet satellite tv player 
+peter pan full 
+pissing 
+tv players 
+videofonino 
+tv 
+maial college 
+search files, youtube and liveleak 
+http://www.afhakers.nl/media.asp?x=6695#afhurl 
+heros season s02e02,03 
+heros season s02e02e03 
+heros season s02e02e03 
+heros season s02 
+heros season 2 
+het blok 
+rambo ii 
+rambo ii 
+blok 
+fall in love with 
+fall in love with 
+fall in love with 
+fall in love with 
+fall in love with 
+blok 
+heros season 2 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+visual studio 2005 
+search files, youtube and liveleak 
+heros season 2.complete[2007]dvd 
+heros season 2.complete[2007] 
+visual studio 2005 
+prison break 
+heros season 2.complete[2007]dv... 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+heros.season.2.complete[2007]dv... 
+search files, youtube and liveleak 
+vasco rossi 
+chuck s01e03 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lino banfi 
+ita 
+ps2 
+massive r&b 
+google 
+paris 
+google 
+google 
+ps2 
+google video 
+simpson 
+herman van veen 
+sandee 
+mondo marcio 
+mondo marcio 
+mondo marcio 
+rambo 
+bare 
+playboy 
+mondo marcio 
+mondo marcio 
+mondo marcio 
+topless 
+lino banfi 
+rambo 
+lino banfi 
+mondo marcio 
+mario merola 
+mario merola 
+mario merola 
+mario merola 
+silver stallone 
+silver stallone 
+animali 
+company man 
+heroes s01e17 
+heroes s01e17 
+machete ita 
+playboy 
+red hot chilly peppers 
+red hot chilly peppers 
+red hot chilly peppers 
+search files, youtube and liveleak 
+laura 
+face of death 
+skate 
+se 
+csi 
+stevie ray vaughan 
+omnipage 16 activator 
+massive r&b 
+massive r&b 
+massive r&b 
+madness 
+surf's up: i re delle onde 
+sms - sotto mentite spoglie 
+porn 
+call of duty 2 
+search files, youtube pedoand liveleak 
+pedo 
+pedo 
+ma 
+porn 
+bi 
+pdf 
+haitian tv 
+tivice 
+rage against 
+ti vice 
+tivice 
+sex 
+how to have sex 
+lessons 
+cum inside 
+gay 
+bisex 
+fuckingmachine 
+sex 
+breed 
+sex tape 
+gay inside 
+crampie gay 
+breed 
+bred 
+internal 
+sec 
+sex 
+asian 
+machine 
+group 
+education 
+trois couleur 
+decalogue 
+moving castle 
+the simpsons movie 
+transformers 
+howl's moving castle 
+surfs up 
+the kingdom 
+exploited 
+day watch 
+hustler 
+school 
+dvd 
+grindhouse 
+pirates 
+dvd 
+audio cd creator 
+rape 
+mp3 
+the best 
+soundtrack 
+avi 
+prince 
+johnny cash 
+fith 
+mestre bimba 
+mestre bimba a capoeira ilumunada 
+a capoeira ilumunada 
+capoeira 
+war 
+cfosspeed 
+schranz 
+hide ip 
+hide ip 
+sweet caroline 
+sweet caroline 
+caroline 
+sweet_caroline 
+sweet caroline 
+hide ip 
+john denver 
+hide ip 
+best off 
+ps2 
+ps2 
+suzuki bach cantatas 
+ebpny 
+ebony 
+limp bizkit 
+foo fighters 
+lindsay lohan 
+gide ip 
+hide ip 
+mask surf 
+lustiges 
+lustiges 
+lustiges 
+tao 
+john denver 
+carpenters 
+k3 
+tits 
+madonna 
+nnw 
+bbw 
+art 
+bbw 
+windows xp professional 
+bondage 
+motoqueiro fantasma 
+office 2007 
+ghost rider 
+resident evil 4 ita 
+resident evil 4 italiano 
+resident evil 4 it 
+resident evil 4 
+pussy red 
+shaun the sheep 
+shaun the sheep 
+adult beach 
+shaun the sheep 
+shaun the sheep 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+planet terror ita 
+beach cam 
+nudist beach 
+startrek 
+elisa 
+james blunt 
+morrisin 
+morrison 
+james morrison 
+james morrison 
+resident evil extinction ita 
+mp3 
+mp3 
+mp3 
+ita 
+jericho 
+clive barker's jericho 
+hand to 
+hand to mouth 
+ita 
+ita 
+3d studio max 
+resident evil ita 
+avi ita 
+resident evil ita avi 
+resident evil 
+resident evil ita 
+grindhouse.planet.terror.2007.italian.md.dvdrip.xvid-gold 
+grindhouse.planet.terror.2007.italian.md.dvdrip.xvid-gold 
+planet terror ita 
+planet terror ita 
+ita 
+infernal affairs ost 
+johnnie to 
+ps2 ita 
+xbox ita 
+planet terror 
+far cry 
+resident evil ita 
+resident evil 
+soft image 
+maya 
+stardust ita 
+stardust ita 
+hide ip 
+hide ip 
+stardust 
+isolee 
+search files, youtube and liveleak 
+isolee 
+radiohead 
+stardust ita 
+hide ip 
+hide ip 
+ita 
+ita 
+pcmesh 
+easy hide 
+mp3 
+britney spears 
+search files, youtube and liveleak 
+anonymizer 
+anonymous 
+ghost 
+ita 
+drive snapshot 
+r-drive image 
+valeria marini 
+hide ip 
+il 7 e l'8 
+fear ita 
+hide ip 
+ikke 
+f.e.a.r ita 
+f.e.a.r 
+porno 
+ultraiso 
+hide ip 
+hide my ip 
+befour 
+ip hider 
+umbrella 
+rihanna 
+hide ip 
+ich und ich 
+gib uns nicht auf 
+matthias reim 
+xavier naidoo 
+poweriso 
+zara whites 
+search files, youtube and liveleak 
+video 
+pictures 
+hidden tiger 
+my name is earl 
+fiest 
+old man leudeck 
+guns of navarone 
+pc silent hunter 
+pc silent hunter 
+pc silent hunter 
+ps2 
+onkelz 
+teen 
+aniamal 
+animal 
+animal 
+animal 
+animal 
+animal 
+animal 
+nudist 
+nudism 
+lolita 
+nudism 
+young 
+zoophilla 
+zoophilla 
+zoo 
+pussy 
+fisting 
+dutch commandos 
+commandos 
+commandos 
+adobe 
+adobe indesing 
+adobe 
+mysql 
+php/mysql 
+jack johnson 
+jack johnson 
+jack johnson 
+jack johnson 
+adobe master 
+adobe premium 
+adobe standard 
+adobe standard 
+adobe 
+piano 
+mehldau 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+commando daffy 
+action girls 
+commando 
+commando 
+dutch commandos 
+dutch commandos 
+dutch commandos 
+jenna 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ps2 
+pillu 
+shakira 
+fukalot 
+kari sweets 
+planet katie 
+teen 
+jenna haze 
+surf up 
+ita dvix 
+il signore degli anelli 
+i simpson 
+gigi finizio 
+gost riders 
+resident evil ita 
+resident evil 
+gosth riders 
+ghost riders 
+ita 
+ordine della fenice 
+wow 
+wow illidan 
+battlestar galactica 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+overlocked 
+xxx 
+anal 
+prison break 
+prison break ita 
+blockhead 
+caligula 
+tiesto 
+abschied nehmen 
+inconvenient truth 
+strip 
+teen 
+elvis presley 
+30 rock 
+dexter 
+ps2 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anal 
+anal 
+anal 
+anal 
+natursekt 
+pee 
+mom 
+milf 
+yutube 
+you tube 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube 
+rmvb 
+granular 
+anna 
+guitar 
+search files, youtube and liveleak 
+fast and furious 
+seeker 
+manuale di java 
+manuale di java 
+the ghost and the darkness 
+white teens 
+teens 
+simon 
+crazy 
+c.r.a.z.y. 
+hot fuzz 
+simon 2004 
+bitreactor.to 
+bitreactor.to 
+bitreactor.to 
+granular 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gag 
+gag 
+bitreactor.to 
+08 
+08 pc 
+18yo 
+abba 
+corton 
+carton 
+carton 
+carton 
+flash 
+fifa 
+radiohead 
+transformers 
+granular 
+granular 
+granular 
+operation kingdom 
+otorrent 
+torrent 
+torrent kongdom 
+torrent kingdom 
+50 cent 
+operation: kingdom 
+operation: kingdom 
+torrent 
+dvdrip 
+dvdrip german 
+feyenoord 
+pes8 
+capture one pro 
+porn 
+xxx 
+capture one pro 
+scf 
+canon digital pro 
+euromaster 
+euromaster 
+amsterdam 
+brother bear 
+brother bear 
+mini disco 
+high school musical 2 
+high school musical 2 
+granular 
+tna 
+earth 
+knocked up 
+milf 
+dean martin 
+paganini 
+minz 
+minz 
+nero 
+milf 
+the simpsons movie 
+dusche 
+dusche 
+dusche 
+dusche 
+dusche 
+privat 
+creatief met word 
+creatief met word 
+bf2 
+battlefield 2 
+tribler 
+firmin 
+preuteleute 
+spiderman 3 ita 
+lost 
+fantastici 4 
+giuliana lampis 
+giuliana 
+giordanelli 
+sassari 
+sassari 
+sassari 
+sassari 
+granular 
+carbonia 
+korn 
+pedo 
+pedo 
+blowjob 
+tribler 
+house 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+russian 
+pretty 
+ass 
+wwe 
+tna 
+russia 
+search all files 
+search all files 
+kean 
+cs3 
+perfume: the story of a murderer 
+perfume the story of a murderer 
+shrek 3 
+cs3 production 
+rape 
+windows xp dutch 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+windows vista 
+windows vista 
+partition 
+http://www.youtube.com/watch?v=6yehunaljpg&feature=bz303 
+funkeiras rebolando 
+oceans 13 
+brick lace 
+brick lace 
+we are 
+the way i are 
+sertanejas 
+search files, youtube and liveleak 
+love is wicked 
+love is wicked 
+tribler 
+tribler_4.1.7 
+barbara miranda santos 
+bes cristiano ronaldo 
+erotic 
+comedy 
+28 semaines plus tard 
+big dicks 
+blonde ass 
+round ass 
+monster cock 
+kathy barry 
+kathy barry 
+kathy barry 
+search files, youtube and liveleak 
+tribler_4.1.7 
+tribler 
+iluzionista 
+iluzionist 
+illusionist 
+beatles 
+era 
+fellowship of the ring 
+lord of the ringd 
+lord of the rings 
+soundtrack 
+soundtrack rings 
+soundtrack lotr 
+soundtrack king 
+soundtrackretun 
+soundtrack retrun 
+soundtrack return 
+soundtrack lord 
+soundtrack ring 
+soundtrack ring 
+fellowship 
+fellowship 
+fellowship 
+tolkie 
+tolkien 
+ebook 
+rings 
+rings 
+rings 
+kryl 
+karel 
+karel 
+nohavica 
+era 
+metalica 
+metal 
+corss 
+corrs 
+the cors 
+dylan 
+creampie 
+dylan 
+telugu 
+simpson 
+at five in the afternoon 
+at five in the afternoon 
+panj e asr 
+torrents 
+at five in the afternoon torrent 
+shrek terzo 
+divx ita 
+porno 
+psp 
+ita 
+encarta 2007 
+creampie 
+bones 
+rosanna fratello 
+celentano 
+celentano 
+adriano celentano 
+grindhouse 
+green day 
+cum 
+die ärzte 
+fuck 
+cff 
+paris hilton 
+brittany spears 
+search files, youtube and liveleak 
+britney spears 
+ruggeri 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+james last 
+jurasic 
+the beatles 
+the beatles 
+search files, youtube and liveleak 
+foto 
+tina turner 
+eros ramazotti 
+jan smit 
+nick en simon 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+andre hazes 
+trasformazione vista 
+windows vista 
+harry potter 
+l'ordine della fenice 
+il dolce e l'amaro 
+tuba büyüküstün 
+catacoms 
+tuba buyukustun 
+tuba buyukustun 
+catacombs 
+asi 
+ıhlamurlar altında 
+ıhlamurlar altında 
+ıhlamurlar altında 
+ıhlamurlar altında 
+i fantastici quattro 
+i fantastici quattro e silver 
+i fantastici quattro e silver 
+i fantastici quattro e silver 
+transformers 
+nba 
+nba pc 
+08 pc 
+黑目曈 
+黑目曈 
+黑目曈 
+黑目曈 
+黑目曈 
+水電工 
+éé\83½³¢ 
+av女åÄ\92 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+av 
+schoolgirl 
+school girl 
+schoolware 
+baby 
+hentia 
+fazz music 
+fazz music 
+fazz music 
+fazz music 
+linkin park 
+linkin park 
+pamela 
+radio tracker 
+deutsch 
+radio tracker 
+britney, youtube and liveleak 
+britney 
+britney spears 
+friday the 13th 
+a nightmare on elm street 
+pc risk 
+pc games silent hunter4 
+need for speed 
+need for speed pro street 
+pc games silent hunter4 
+need for speed pro street pc 
+muscle men posing 
+pc silent hunter4 
+pc silent hunter4 
+silent hunter4 
+muscle boy 
+muscle boy 
+muscle boy 
+muscle boy 
+muscle boy 
+muscle boy 
+muscle boy 
+muscle boy 
+muscle boy 
+muscle boy 
+baglioni 
+baglioni 
+iron maiden 
+caparezza 
+caparezza 
+kayne wayne sheperd 
+tokio hotel 
+kanny wayne shepherd 
+kenny wayne shepherd 
+akvis enhancer 
+adobe acrobat 
+munich city nights 
+munich city nights 
+munich city nights 
+hardrock 
+hardrock 
+hardrock 
+av 
+ds9 
+ds9 
+hard 
+seemannslieder 
+gangbang 
+throat 
+beyounce 
+la danze del cowboy 
+la danze del cowboy 
+la danze del cowboy 
+la danze del cowboy 
+medal of honor 
+medal of honor pc dvd 
+magix music maker 
+magix music maker 
+magix music maker 
+magix music maker 
+akon 
+dont matter 
+akon 
+a bay bay 
+truespace7.5 
+beyounce 
+truespace7 
+truespace 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+eva cash 
+t pain 
+gohst rider 
+3d studio max 
+ghost rider 
+ghost rider german dvd rip 
+ghost rider ger 
+sean kingston 
+zbrush 
+don't matter 
+don't matter 
+akon 
+this is why i'm hot 
+magix music maker 
+akon 
+qbase 4.0 
+ayo technology 
+eve 
+rihana 
+rihana 
+rihanna 
+300 
+haanstra 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+wwe 
+film 
+wwf 
+3d studio max 
+boob 
+tribler 
+german 
+bbc 
+physics 
+sport 
+game 
+spss 
+gis 
+linux 
+ubuntu 
+get me bodied 
+cubase studio 
+cubase studio 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+natural 
+liefde 
+j.lo 
+bitreactor 
+hot 
+hot 
+dj 
+dj 
+dj 
+ea games 
+pc dvd 
+new 
+tomtom 
+dadyyanki 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+torrent.to 
+cubase studio 
+cubase studio 
+d bo 
+d bo san souci 
+d-bo 
+bushido 
+pcdvd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+film 
+film 
+film 
+film 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+nino fiorello 
+mia martini 
+mia martini 
+mia martini 
+mia martini 
+mia martini 
+mia martini 
+mia martini 
+mia martini 
+mia martini 
+mia martini 
+musica 
+commedia 
+commedia 
+orror 
+angelica bella 
+film 
+tribler 
+films 
+anita dark 
+anita blond 
+bella 
+blond 
+blond 
+bruna 
+bionda 
+school 
+school 
+blond 
+girl 
+anita dark 
+ramazzotti 
+ramazzotti eros 
+paprika 
+culo 
+la figa 
+angelica 
+angela 
+sara 
+300 
+get me bodied 
+film azione 
+azione 
+azione 
+commedia sexi 
+commedia sexi 
+azione 
+erica egresia 
+erica egresia 
+erica egresia 
+erica egresia 
+mia martini 
+ghana 
+ghana 
+twi 
+amakye 
+nino d'angelo 
+nino d'angelo 
+sara tommasi 
+jessica alba 
+pompini 
+pompini 
+pompini 
+sucked 
+pompini 
+pompini 
+porn 
+italian 
+sesamstraat 
+italiane 
+italian girl 
+films 
+films 
+magazzino 
+magazzino 
+magazzino 
+school 
+home 
+pamela anderson 
+blow 
+hand 
+home 
+all 
+italian 
+italiane 
+italian film 
+rape 
+italian 
+handjob 
+italian 
+donna 
+bella 
+sara 
+porn 
+handjob 
+italian porn 
+hot 
+adult 
+nederlands 
+dutch 
+granular 
+mixw 
+hard 
+sola 
+house 
+stardust 
+2061 
+impresa da dio 
+resident evil extintion ita 
+resident evil extintion 
+resident evil extintion italiano 
+molto incinta 
+sonata arctica 
+impresa da dio 
+asstraffic 
+asstraffic 
+kein bund fürs leben 
+bitreactor 
+harry potter e l'ordine della fenice 
+celentano 
+adriano celentano 
+ficarra e picone 
+room 1408 
+golf r32 
+adobe lightroom 
+ita 
+resident evil 
+resident evil ita 
+molto incinta 
+stardust ita 
+common 
+mariah carey 
+sonata arctica 
+stardust 
+call of duty demo 
+vasco 
+ita 
+arnhem 
+winterswijk 
+nl 
+amsterdam 
+eindhoven 
+arnhem 
+arnhem 
+ita 
+fuck 
+nude 
+nba 08 
+nba 08 pc 
+nba 08 
+nascar 
+burnout 
+arnhem 
+aquarium 
+busbnies 
+business continuity 
+"business continuity" 
+tartaruga 
+zecchino d'oro 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+duffle bag boy 
+duffle bag boy 
+duffle bag boy 
+duffle bag boy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+superman 
+brahms 
+brahms 
+casablanca 
+cream 
+the fountainhead 
+angelica bella 
+ritorno al futuro 
+to kill a mockingbird 
+xxx 
+trojan 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+marley 
+gatto 
+gatto 
+gatto 
+dvdshrink 
+gatto documentario 
+antivirus 
+ma che bella fattoria 
+zuigt 
+troy 
+windows xp 
+zuigt 
+zuigt 
+pirates 
+windows vista 
+tomtom mobile nokia n73 
+tomtom mobile nokia n73 
+tomtom mobile nokia n73 
+sleeping policeman 
+tomtom mobile nokia 
+sleeping policeman 
+sleeping policeman 
+sleeping policeman 
+sleeping policeman 
+tomtom mobile nokia 
+michael clayton 
+sleeping policeman 
+sleeping policeman 
+waitress 
+maroc 
+the grift 
+marocco 
+marocco 
+marocco 
+star trek 
+wwe 
+il vento fa il suo giro 
+greys 
+handjob 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+trhote 
+deepthroat 
+deepthroat 
+screen saver 
+norton 360 
+norton 360 
+norton 360 
+grey's anatomy 
+simpsons 
+youtube 
+youtube 
+windows vista 
+abba 
+abba 
+zecchino d'oro 
+44 gatti 
+matrix 
+signore degli anelli 
+signore degli anelli 
+signore degli anelli 
+matrix 
+le due torri 
+ritorno del re 
+molto incinta 
+le ragioni dell' aragosta 
+underage 
+nude teen 
+nude rpeteens 
+nude preteens 
+nude underage 
+sex 
+hot summer in the city 
+"hot summer in the city" 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+google earth 
+kid nation 01 
+kid nation 02 
+kid nation 
+kid nation s01 
+kid nation s01.e03 
+pirates 
+handbook 
+beckman 
+marketing 
+marketing 
+astro 
+astro 
+stream flash video server 
+stream flash 
+blade runner 
+charlie the unicorn 
+invasion 
+divx ita 
+flight of the conchords 
+720p 
+flight of the conchords 
+720p 
+hd 
+good luck chuck 
+the game plan 
+search files, youtube and liveleak 
+nod32 
+zuigt 
+apartment 
+japanese 
+lovers guide 
+korean 
+asian mpg 
+asian 
+jquery 
+search files, youtube and liveleak 
+gravity 
+lactation 
+creation 
+navigatore nokia 
+navigatore nokia 
+navigatore nokia 
+navigatore nokia 
+navigatore nokia 
+navigatore nokia 
+navigatore nokia 
+search files, youtube and liveleak 
+music 
+music 
+belami 
+belami 
+belami 
+linux 
+jordenvandijk 
+nero 8 
+jordenvandijk 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+dvd cose molto cattive 
+dvd-r ita 
+search files, youtube and liveleak 
+dvd-r ita 
+dvd 
+dutch sub 
+dutch sub 
+barbara tucker 
+barbara tucker 
+barbara tucker 
+dutch sub 
+dutch sub 
+barbara tucker 
+barbara tucker 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dvd 
+dvd 
+austin powell 
+austin powell 
+dvd austin powell 
+german top 
+dvd austin powell 
+dvd austin powell 
+porco dio 
+bitreactor.to 
+bitreactor.to 
+bitreactor 
+hide ip 
+hide ip 
+risk2 
+proxyway 
+mask surf 
+dj 
+welch 
+gillian welch 
+highschool 
+highschool 
+matrix 
+beyonce 
+heroes 
+ggg 
+helen duval 
+helen duval 
+kim holland 
+handyman 
+sfondi nokia 
+sfondi nokia 
+sfondi nokia 
+sfondi nokia 
+girl 
+girl 
+girl 
+cazzo 
+mariniers kapel 
+marinierskapel 
+cazzo 
+keng 
+keng leiden 
+concordia leiden 
+k&g leiden 
+keng leiden 
+amigo leiden 
+dvs katwijk 
+fountainhead 
+big fish 
+simona ventura 
+divx 07 
+divx 07 ita 
+xvid 07 ita 
+molto incinta 
+invasion ita 
+next ita 
+film 
+film ita 
+rrar 
+search files, youtube and livereelleak 
+reelleak 
+reel 
+rbf 
+new found glory 
+alla deriva 
+4 mesi,3 settimane e due giorni 
+porn 
+jim rohn 
+fuck 
+mom 
+rolling stones 
+trompetterkorps 
+michelle marsh 
+freaks of cock 
+huge cock 
+huge cock 
+cock 
+euro 2008 qualifying matches 
+psp 
+shakira 
+mango 
+fantastici 4 
+euro 2008 
+soldier destiny's child 
+psp 
+psp iso 
+yasmine 
+2008 euro soccer 
+harry potter psp 
+wayne dyer 
+saw 4 
+dance music 
+gigi d'alessio 
+porno 
+black books 
+divx ita 
+divx ita 07 
+lo chiamavano trinità 
+lo chiamavano trinità 
+dark messiah 
+search files, youtube and liveleak 
+dvb viewer 
+dvb viewer 
+dvb viewer 
+dvb viewer 
+top 40 
+top 40 2007 
+wheels 
+xvid ita 07 
+shawn lee's ping pong orchestra 
+soulfood & jadoo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lali puna 
+lali puna 
+kaya project-elixir 
+stargate 
+simpsons staffel 
+simpsons folge 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+karger trifft 
+kargar trifft 
+kargar trifft 
+web cam 
+comedy 
+dutch movie 
+dutch 
+heros 
+comedy 
+ocean 13 
+vista 
+navteq 
+navtec 
+navtech 
+vdo 
+dayton 
+http://link.brightcove.com/services/link/bcpid18617176/bclid18584623/bctid229701048 
+http://link.brightcove.com/services/link/bcpid18617176/bclid18584623/bctid229701048 
+dayton 
+bokito 
+navtech 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+comedy 
+vdo 
+sonata arctica 
+44 gatti 
+dragonhart 
+divx ita 
+damien rice 
+damien rice 
+damien rice 
+damien rice 
+damien rice 
+damien rice 
+shemale 
+gay 
+shemale 
+gay 
+gay homemade 
+damien rice 
+gay homemade 
+gay 
+manga 
+damien rice 
+damien rice 
+damien rice 
+gay cum inside ass 
+gay 
+living things 
+living thing 
+bowling for soup 
+shemale bukkake 
+shemale 
+divx 
+divx ita 
+bob sinclar 
+gay mpeg 
+bob sinclar 
+queendom of eros 
+sora aoi 
+av 
+search files, youtube and liveleak 
+gay mp4 
+jav 
+av 
+baantjer 
+baantjer 
+baantjer 
+gay mpeg 
+gay mpeg 
+gay mpeg 
+gay mpeg 
+mpeg 
+queer as folk 
+gay mpeg 
+texas chainsaw massacre 
+pc silent hunter 
+gooische vrouwen 
+gooise vrouwen 
+rescue me s04e02 
+march of the penguins 
+chuck 
+search files, youtube and liveleak 
+sleep don't weep 
+sleep don't weep 
+sleep don't weep 
+sleep don't weep 
+il prescelto 
+sleep don't weep 
+film il prescelto 
+il prescelto 
+transformers 
+transformers 
+transformers 
+transformers 
+transformers 
+transformers 
+transformers italiano 
+transformers ita 
+prison break 
+porn 
+sonata artica 
+sonata arctica 
+montecarlo night 
+montecarlo night 
+sonata arctica 
+montecarlo 
+montecarlo 
+montecarlo 
+montecarlo 
+musiche popolari 
+musiche popolari 
+musiche popolari 
+sonata arctica 
+sesamstraat 
+sesamstraat 
+sesamstraat 
+musiche popolari 
+musiche popolari 
+canzoni popolari 
+search files, youtube and liveleak 
+dog 
+dog 
+dog 
+bike 
+bike 
+bike 
+bike 
+bike 
+sex 
+sex 
+sex 
+sex 
+cat 
+beck the information 
+boostspeed 
+u2 
+rem 
+adobe 
+prison break 
+\ 
+gay 
+normaal 
+gay mpeg 
+gay live 
+gay live 
+gay mpeg 
+father daughter 
+la linea 
+gay homemade 
+lewis black 
+zappa 
+miles 
+jarrett 
+steven wright 
+be my escape 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+sex 
+wired 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+scrubs german 
+pixelperfect 
+paris 
+andria 
+talvin singh 
+gay 
+direttoregenerale als bat 
+direttore generale als bat 
+webcam 
+rolling stones 
+giovanni 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+comedy 
+i see you.com 
+i see you.com 
+akon 
+007 
+telfon telaviv 
+avril 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rise of legends 
+telfon telaviv 
+mature 
+transformers 
+maya 
+maya 
+office 
+transformers 
+heroes 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jeff dunham 
+pirates of caribean 
+mary 
+ftv 
+metart 
+jenny rom 
+pirates of caribean 
+shemale 
+miriam 
+gonzalez 
+10cc 
+grindhouse 
+ita grindhouse 
+dvd grindhouse 
+10cc 
+search files, youtube and liveleak 
+dvd ita 
+search files, youtube and liveleak 
+stewart 
+grindhouse 
+er 
+er.1401 
+emergency room 
+emergency room 
+emergency room 
+emergency room 
+emergency room 
+emergency room 
+s14e03 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+windows 
+office2007 
+molto incinta 
+divx ita 
+s14e03 
+s14e03 
+s13e03 
+ratt 
+search files, youtube and liveleak 
+s14e03 
+silverchair 
+s14e03 er 
+ratatouille 
+acdsee 9 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+logos 
+bourne ultimatum 
+chemical brothers 
+l'ultima legione 
+ultima legione 
+amores perros 
+search files, youtube and liveleak 
+house 
+divx ita 
+tribler 
+britney 
+search files, youtube and liveleak 
+ostro sledovane vlaky 
+vlaky 
+vlaky ostro 
+vlaky ostre 
+vlaky ostre 
+vlaky ostre 
+vlaky 
+menzel 
+jiri menzel 
+observed trains 
+observed trains 
+observed trains 
+rocco 
+rocco 
+rocco 
+rocco 
+sindleruv seznam 
+schindleruv seznam 
+lord of the rings 
+lord of the rings 
+lord of the rings 
+lord of the rings extended 
+rocco 
+rocco teen 
+rocco 
+brokesky 
+brookesky 
+ivana 
+anal plug teen 
+plug teen 
+toy teen 
+dutch 
+oblivion 
+ena 
+enea 
+enae 
+era 
+eradion 
+dion 
+the last legion 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+transformers 
+xbox 
+xbox360 emulator 
+xbox emulator 
+xbox e 
+xbox 
+vista 
+msn 
+rescue me 
+elfman 
+elfman 
+danny elfman 
+darkened 
+chris brown 
+chris browncole 
+cole 
+keyshia cole 
+kiss kiss 
+nero8 
+nero8 
+castellina pasi 
+adobe photoshop cs3 for mac 
+sting 
+s 
+sting 
+sting 
+sting 
+4 hour work week 
+four hour work week 
+the police 
+four hour work week 
+vivid 
+sex 
+barca 
+simpsons 
+fluch der karibik 
+fluch der karibik 
+fluch der karibik 3 
+fluch der karibik 3 
+fluch der karibik 3 
+fluch der karibik 3 
+search files, youtube and liveleak 
+pirates of caribean 
+pirates of caribean 
+pirates of caribean 
+pirates of caribean 
+marc almond 
+pirates of caribean 
+pirates of caribean 
+apocalypse now 
+pirates of caribean 
+tomtom 6 
+tomtom 7 
+battisti 
+thai cuties 
+platoon 
+platoon 
+platoon 
+battisti 
+kate bush 
+battisti 
+battisti 
+mina 
+michael stipe 
+mina 
+your ghost 
+tettone 
+tettone 
+tettone 
+tettone 
+tettone 
+tettone 
+big boobs 
+big boobs 
+tomtom 6 
+harry potter 
+harry potter italian 
+celentano 
+adriano celentano 
+morandi 
+baglioni 
+natale a new york 
+natale a new york 
+natale a new york 
+natale a new york 
+natale a new york de sica 
+vacanze di natale a neew york 
+vacanze di natale a neew york 
+natale a miami 
+natale a miami 
+vacanze di natale a miami 
+bella 
+marcella bella 
+300 
+il gladiatore 
+il gladiatore italiano 
+il gladiatorein italiano 
+il gladiatore in italiano 
+il gladiatore 
+la grande abbuffata 
+film 
+film in italiano 
+big breats 
+big breasts 
+italian film 
+striptease 
+beatles 
+beatles 
+the queen 
+strip 
+bondage 
+abba 
+i pooh 
+nina private session 
+the pooh 
+nina hartley 
+private sessions 
+the pooh italian 
+audio italian 
+the pooh 
+battisti 
+private sessions 
+mina 
+nina hartley 
+battisti 
+sherek 3 
+4 hour working week 
+shrek 3 
+shrek 3 italiano 
+shrek 3 italiano 
+shrek 3 italian 
+shrek 3 
+dramweaver cs3 
+search files, youtube and liveleak 
+weeds 
+weeds s03e09 
+heroes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bjork 
+bourne 
+harry potter 
+search files, youtube and liveleak 
+così fan tutte 
+heroes ita 
+mozart 
+bach 
+beethoven 
+debussy 
+ravel 
+stardust 
+beatles 
+stardust ita 
+nero 8 
+n.e.r.d. 
+nerd 
+pharell 
+neptunes 
+13yo girl 
+siza 
+brian setzer 
+schubert 
+schumann 
+monteverdi 
+segovia 
+lost 
+lucia 
+paco lucia 
+fitzgerald 
+ellington 
+basie 
+west 
+prison break s02 
+prison break s03 
+nero 
+nero linx 
+nero 3 
+nero v3 
+video files 
+avril lavigne 
+nero linux 
+nero linux 
+nero ahead 
+ahead 
+pro evolution 
+fifa 08 
+fifa 08 pc 
+tony hawk pro skater 4 
+fifa 08 pc english 
+spiderman 3 ita 
+brian setzer 
+winx 
+winx 
+mark knopfler 
+winx-ita 
+winx 
+bg audio 
+cronache di provincia 
+tenc 
+whore 
+barbie torrents 
+blowjob 
+barbie 
+mackay 
+mackay 
+transformer 
+transformers 
+divx 
+dvd 
+pinnacle 
+search files, youtube and liveleak 
+vista 
+xp swe 
+vista 
+transformers proper 
+rescue me 
+search files, youtube and liveleak 
+the queen 
+vista 
+vista 
+vista 32 
+dvd ita 
+vista 32 
+celentano 
+grindhouse 
+ita 
+autocad 
+nero 8 
+harry potter 
+big breast 
+big boobs 
+dvd ita 
+massive meloons 
+massive melons 
+massive melon 
+long tongue 
+rescue me season 
+tettone 
+vista 32 64 
+vista 32 64 
+vista 
+office 
+porno big boobs 
+office 
+scopata big boobs 
+office2007 
+video porno 
+akon 
+akon 
+akon 
+dailyshow 
+akon 
+akon 
+office2007 
+office2007 
+office2007 
+office2007 
+office2007 
+akon boss ac 
+office 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+office 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+office 
+akon boss ac 
+akon boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+akon 
+office 
+office 
+akon boss 
+office 
+nip and tuck 
+nip & tuck 
+nip. tuck 
+nip 
+nip.tuck. 
+nip tuck 
+nip tuck ita 
+italian 
+italian 
+ita 
+3 somes mmf 
+3 somes mff 
+sex 
+sex threesome 
+mark 
+ubuntu 
+pro evolution 
+fifa 08 eng 
+lightroom 
+video 
+youtube 
+youtube 
+hardcore penguin 
+hardcore penguin 
+hardcore penguin 
+hardcore penguin 
+hardcore penguin 
+drama 
+hardcore penguin 
+melue 
+melua 
+joplin 
+kim karda 
+sex 
+drama prairie 
+drama prairie 
+elvis 
+drama prairie 
+nora jones 
+drama prairie 
+rob de nijs 
+rob de nijs 
+rob de nijs 
+gerard van maasakker 
+tomtom 
+kabouterbond 
+ozi explorer 
+works 
+rika aiuki 
+rika aiuchi 
+travel 
+ita 
+psr2700 
+psr2700 yamaha 
+yamaha 
+yamaha psr 
+yamaha psr 
+yamaha psr 
+vista crack 
+vista crack 
+iicd 
+nero 
+nero burning 
+dvd ita 
+italian 
+two and a half men 
+search files, youtube and liveleak 
+vista 
+heroes 
+tell me more dutch 
+carrie underwood 
+blondes 
+blondes 
+booty 
+ass 
+perfect ass 
+brunette 
+blonde 
+mark knopfler 
+pink floyd 
+chiavari 
+chiavari 
+bari 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+trani 
+stones 
+bushido 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nba live 
+nba live pc dvd 
+fleetwood 
+bollywood 
+eagles of death metal 
+sextapes 
+sjaakie 
+german 
+eagles of death metal 
+küzey rüzgari 
+eagles of death metal 
+absolute linux 
+absolute 
+logan´s run 
+nick simon 
+fleetwood 
+thai 
+search files, youtube and liveleak 
+porno 
+küzey rüzgari 
+saban 
+sex 
+türksex 
+cazzo 
+sesso 
+porn 
+sextape 
+tette 
+amatoriale 
+que deus 
+shab deurne 
+shab deurne 
+shab deurne 
+absloute 
+abslute 
+absolute 
+robert wyatt 
+cancion de julieta 
+cancion de julieta 
+hip hop 
+hold that train conductor 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+peter gunn 
+point me at the sky 
+work song 
+skandal 
+türk 
+reding 
+otis 
+rolling 
+star wars theme 
+star wars theme 
+star wars 
+star wars youtube 
+sesamstraat 
+miles 
+pop art 
+golf pcgame 
+golf pc game 
+schiffer 
+neu 
+star wars theme 
+death sentence 
+death sentence german 
+stolen 
+desperate house wifes 
+mary had a little lamb 
+milf 
+bush 
+borsato 
+bullock 
+jon gindick 
+symphony of hope 
+search files, youtube and liveleak 
+prison break 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+korn 
+cradle 
+hentai 
+winx 
+winx 
+desperate housewifes 
+sponge bob 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fgdfg 
+adobe indesign cs2 
+piccole canaglie 
+migone 
+stargate 
+dvd ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+curtis mayfield 
+hd 
+curtis mayfield 
+search files, youtube and liveleak 
+jeunesse chinoise 
+summer palace 
+lavigne 
+moana 
+moana 
+pamela anderson 
+italian 2007 
+linux 
+windows vista 
+2007 italian 
+saracav 
+forced 
+paris 
+paris .avi 
+paris .av 
+paris .a 
+paris .a 
+paris .a 
+paris .a 
+paris 
+6.75 
+6.75 
+ufo 
+six feet under 
+simpsons 
+death in june 
+surf's up - i re delle onde 
+surf's up 
+behind the mask 
+surf's up ita 
+i re delle onde 
+invasion 
+behind the mask death in june 
+behind the mask douglas 
+blutharsch 
+graumahd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anna 
+search files, youtube and liveleak 
+liz vicious 
+suicide girls 
+simpsons 
+suspension 
+ufo 
+lesbo 
+zero tolerance 
+lesbo 
+babes 
+colonna sonora heroes 
+heroes 
+heroes 
+heroes song 
+babes 
+babes 
+exploited college 
+maya 2008 
+exploited college girls 
+rogue wave 
+rogue wave 
+rogue wave 
+heroes 1x15 
+heroes 1x15 corri 
+heroes 1x15 
+babes 
+olga 
+sexy 
+tna 
+search files, youtube and liveleak 
+girls 
+pamela 
+pamela anderson 
+pamela anderson 
+martina stella 
+jessica 
+300 
+sony vegas 
+zzz 
+modest mouse 
+cocconut records 
+nighttiming 
+nighttiming 
+cocconut 
+risiko 
+troia 
+xxx 
+risiko 
+encyclopedia brittanica 
+encyclopaedia brittanica 
+encyclopaedia brittanica 
+jan smit 
+jan smit 
+jan smit 
+jan smit 
+jan smit 
+jan smit 
+jan smit 
+jan smit 
+jan smit 
+jan smit 
+magix 
+7.10 
+magix radio 
+radio software 
+dvd 
+software 
+abba 
+flight of the concords 
+flight of the concords hbo 
+hbo: one night stand flight of the concords 
+hbo: one night stand 
+jim reeves 
+estelle joseph 
+alkohol120 
+osx 
+osx adobe 
+alcohol120 
+scurbs 
+keygen 
+allkeygen 
+allsoftware 
+allsoftware 
+ritorno al futuro 
+plug&playsoftware 
+plug&playsoftware 
+plug&play 
+plug&play 
+software 
+software 
+software 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+il gladiatore 
+fetish 
+ass 
+ass 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+domination 
+domination 
+top 100 juckebox 
+top 100 jukebox 
+top 100 jukebox 
+top 100 jukebox 
+top 100 jukebox 
+xxx 
+search files, youtube and liveleak 
+asia 
+search files, youtube and liveleak 
+bound for glory 
+bryan adams 
+bryan adams 
+tna 
+bryan adams 
+bryan adams 
+adams 
+adams 
+resident evil 
+dl.tv 
+chines 
+taiwan 
+arsch 
+jack 
+youtube 
+liveleak 
+halo 
+microsoft 
+halo 
+computer 
+spiegel 
+stern 
+bbc 
+windows vista ultimate 
+spiegel 
+pro7 
+china 
+windows vista ultimate activator 
+robin hood 
+endgame 
+monday night raw 
+the death of cartman southpark 
+monday night raw 
+monday night raw 
+heroes 
+heroes 02 04 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dvx ita 
+dvx ita 
+dvx ita 
+dvx ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+saving face 
+naruto 
+naruto 251 
+naruto 4th movie 
+samurai champloo song 
+fmily guy 
+family guy ep 156 
+family guy 156 
+family guy 13 
+family guy 
+cartoon newwork 
+tommy walsh diy guide 
+prison break 
+windows vista 
+winrar 
+strippers 
+femalestrippers 
+female strippers 
+csi 
+csi ita 
+senza traccia 
+sony vegas 
+zombie holocaust 
+csi miami 
+csi miami ita 
+sony vegas 
+whore cum 
+ghost recon 2 
+eros ramazzotti 
+oracle 
+cameltoe 
+divx - ita 
+divx - ita 300 
+divx - ita 300 
+divx ita 
+ita 
+ita avi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lost italian 
+lost 
+lost ita 
+suicide girls 
+nokia 
+dvd ita 
+domplayer 
+domplayer 
+domplayer 
+prison break 
+magicisomaker 
+magic iso maker 
+booty 
+monica 
+monica 
+monica 
+booty 
+booty 
+booty 
+workshop manual volvo v40 
+workshop manual volvo v40 
+isobuster 
+windows xp home 
+magic iso 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+sitter 
+search files, youtube and liveleak 
+heroes 
+heroes s02 e04 
+janzen 
+xxx 
+dvdxxx 
+video xxx 
+dvx xxx 
+rocco 
+avido 
+http://torrents.thepiratebay.org/3843897/techno.fucker.cum.party.[english].xxx.dvdrip.xvid.3843897.tpb.torrent 
+search files, youtube and liveleak 
+portable apps 
+pinacle 
+pinaclle 
+pinaclle 
+search files, youtube and liveleak 
+brian setzer 
+brian setzer 
+rogue assassin 
+rogue 
+assassin 
+lions for lambs 
+the kingdom 
+adobe osx kegen 
+adobe osx keygen 
+the invasion 
+osx 
+rendition 
+cartoni animati 
+cartoni animati 
+maxic 
+magix 
+cartoon 
+mac 
+macos 
+mac.os.x.tiger 
+mac.os.x.tiger 
+mac.os.x.tiger 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+101 amateurs 
+amateurs audition 
+mac.os.x. 
+mac.os.x. 
+mac.os.x. 
+mac.os.x. 
+mac.os.x.tiger 
+mac.os.x.tiger 
+mac.os.x.tiger 
+search files, youtube and liveleak 
+alles is liefde 
+divx ita 
+fft 
+pirate deluxe 
+il custode 
+neuken 
+il custode 
+border town 
+border town ita 
+border town ita 
+divx ita 
+pijpen 
+pijpen 
+piss 
+passions 
+pirate luxe 
+tanya hyde 
+dumeter 
+ska 
+shakira 
+nba 08 
+nba 08 pc 
+half life 2 
+rushhour 3 
+stormbreaker 
+rescue me 
+eva 
+milf 
+trei hetcher 
+teri hetcher 
+.avi 
+.avi 
+.avi 
+.avi 
+a chi dice 
+fugalo 
+wwe 
+bzn 
+bzn 
+breath easy 
+tiziano ferro 
+magiciso 
+twins 
+twins 
+celorico 
+casey 
+casey parker 
+shane world 
+surfer girls 
+casey parker 
+casey puerto 
+surfer girls shane 
+surfer girls 
+surfer girls 
+casey parker 
+bnn 
+bnn 
+teens 
+teen 
+abba 
+dutch sub 
+purcell 
+dutch sub 
+dutch sub 
+dutch sub 
+eros ramazzotti 
+http://torrents.thepiratebay.org/3844495/wwe_monday_night_raw_-_october_15_2007.3844495.tpb.torrent 
+tna 
+absolute 
+granular 
+creature comforts 
+hollander 
+holland 
+lolita 
+playboy 
+superman 
+superman return 
+test 
+beastiality 
+fist 
+project pitchfork 
+die hard 4 ita 
+andre rieu 
+superman return italiano 
+machined 
+machine 
+simpson 
+cs3 workshop 
+workshop 
+tokio hotell 
+creature comforts 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+simpson italiano 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+simpson film italiano 
+simpson film 
+adobe manual 
+desperados 
+adobe docs 
+adobe dox 
+black girls 
+black girls 
+epica 
+epica the divine conspiracy 
+mekong delta fear 
+divx ita 
+seamstraat 
+sesamstraat 
+mero nepal 
+king kong 
+independence day 
+mero nepal 
+mero nepal 
+bisex 
+fifa 2008 
+stargate 
+mero nepala 
+bisexual 
+spiderman2 
+prey 
+dutch 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ilife 08 
+search files, youtube and liveleak 
+ilife 08 
+beethoven 
+bach 
+nightingale 
+saw4 
+saw 
+mac 
+orphaned land 
+orphaned land 
+orphaned land 
+metallica 
+metallica 
+osx 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+orphaned land 
+kataklysm 
+kataklysm 
+kataklysm 
+exploitedteens 
+overkill 
+overkill wrecking everything 
+wrecking everything 
+king kong 
+nightingale 
+lord of war ita 
+lord of war 
+romancing the stone 
+lord of war ita 
+search files, youtube and liveleak 
+b'day 
+ilife 08 
+search all files 
+search all files 
+search all files 
+matrix 
+rome 
+californication 
+jeff dunham 
+paolo nut 
+californication 
+nightingale+ 
+nightingale 
+rome 
+search files, youtube and liveleak 
+banfi 
+ggg 
+rome 
+pedo 
+sex 
+scat 
+vomit 
+porca 
+peto 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+buddha's lost children 
+ggg 
+tibetan book of the dead 
+kiss 
+search files, youtube and liveleak 
+een eigen huis 
+xxx 
+rene froger 
+redlight 
+porn 
+hirsi 
+honda 
+sperma 
+scat 
+scat 
+scat 
+rome french 
+devs 
+discrete event systems 
+discrete event systems 
+rome 
+rome french 
+swing 
+swing jazz 
+blouble 
+bouble 
+bouble 
+swing 
+buble 
+tna bound for glory 
+setzer 
+porno valley 
+porno valley 
+vista 
+nema raja bez rodnog kraja 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ratatouille 
+sinovi manjace 
+animal 
+animal 
+animal 
+porno 
+webcam 
+webcam 
+car 
+cars 
+wwe 
+tna 
+avi 
+ableton 
+the hallowed ground 
+the hallowed ground 
+house 
+transformers 
+absolute 
+wwe 
+tna 
+tribler 
+ebony feet 
+squirt 
+. 
+heroes 
+house 
+wwe 
+tna 
+lawrence of arabia 
+a chi mi dice 
+breath easy 
+heroes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+creature comforts 
+pireates 
+worlds end 
+corel draw x3 
+wizard 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+disney 
+30 days of night 
+dltv 
+ecw 
+wwe 
+eztv 
+zzz 
+800 
+nwwl 
+the seeker 
+broodwar 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+foo fighters 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+resident evil 
+resident evil axxo 
+ratatouille.dvdrip 
+ratatouille 
+windows vista 
+we own the night *xvid 
+we own the night 
+hostel part ii ita 
+hostel part ii ita 
+hostel part ii ita 
+hostel part ii ita 
+simpson 
+simpson ita 
+hostel ita 
+porn 
+run fat boy run 
+hitman 2 
+alles is liefde 
+southpark 
+lil boosie 
+nero 8 
+bubble gum 
+rasheeda 
+rasheeda 
+wipe me down 
+bubble gum 
+rasheeda 
+nero 
+driver detective 
+tambourine 
+beyonce 
+search files, youtube and liveleak 
+mr bean 
+little women 
+little women 
+little women 
+little women 
+little women 
+little women 
+kelly rowlands 
+avril lavigne 
+elephant man 
+shaba ranks 
+shaaba ranks 
+shaba ranks 
+shaba ranks 
+shaba ranks 
+shaba ranks 
+shaba ranks 
+lover man 
+clture 
+culture 
+reggae 
+high school 
+burning spear 
+pro evolution 
+burning spear 
+raven riley 
+raven riley 
+raven riley 
+rugby 
+pro rugby 
+rugby 
+boys to men 
+boys to men 
+prison break s03e05 
+heroes s02e04 
+chuck s01e04 
+billy joel 
+the shield 
+modo 301 
+jonny wilkinson 
+far cry 
+hit & run 
+the simpsons hit & run 
+search files, youtube and liveleak 
+michael jackson 
+cabaret 
+eddy murpy 
+mac os x 10.4.8 [jas amd intel sse2 sse3 ppf1 + ppf2] - for intel and amd/sse2 or sse3 
+mac os x 10.4.8 [jas amd 
+trailer 
+trailer hd 
+navigon 
+tomtom 
+gsm 
+fifa 08 psp 
+fifa 08 psp ita 
+generation x 
+managing generations 
+fifa 08 pc 
+generations 
+fifa 08 pc ita 
+colombo 
+baby boomers 
+dutch 
+search files, youtube and liveleak 
+lou reed 
+lou reed 
+lou reed 
+damien rice 9 
+lou reed 
+lou reed perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+tiken 
+tiken 
+tiken 
+tiken 
+tiken 
+tiken 
+tiken jah fakoly 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+corel 
+perfect day 
+ali b j o 
+ali b met g o 
+ali b met 
+gio 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+pixel 
+pixelperfect 
+pixelperfect-- 
+pixelperfect--00 
+pixelperfect--00 
+pixel 
+foo fighters 
+high definition 
+patsy cline 
+patsy cline 
+patsy cline 
+foo fighters hyde park 
+foo fighters hyde park motorhead 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+nitin sawhney 
+nitin sawhney 
+nitin sawhney nadia 
+nitin sawhney nadia 
+nitin sawhney nadia 
+the cranes 
+the cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+gare du nord pablo 
+gare du nord pablo 
+gare du nord 
+gare du nord 
+gare du nord 
+gare du nord 
+nitin sawhney 
+nitin sawhney 
+u.n.k.l.e 
+u.n.k.l.e 
+unkle 
+marie kiss la joue 
+marie kiss la joue 
+marie kiss la joue 
+air 
+jarod 
+air 
+reagan 
+air sexy boy 
+air sexy boy 
+american dad season 
+coldcut 
+softcam.key 
+coldcut 
+coldcut 
+druivensuiker 
+druivensuiker 
+druivensuiker 
+druivensuiker 
+playboy 
+playboy 
+playboy 
+playboy 
+tricky 
+tricky 
+billy joel 
+just jack 
+just jack 
+jarod camaleonte 
+ez3kiel 
+wax tailor 
+alessandro fiorello 
+wax tailor 
+wax tailor 
+wax tailor 
+alessandro fiorello 
+alessandro fiorello 
+alessandro fiorello 
+alessandro fiorello 
+beth gibbons 
+alessandro fiorello 
+alessandro fiorello 
+wrong trousers 
+crysis 
+toilet 
+rape 
+un chilo 
+hunde 
+un chilo zucchero 
+hunde 
+un chilo zucchero 
+la uno bianca 
+laura pausini 
+gianni nanni 
+franco staco 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+laura pausini 
+adobe 
+50 cent 
+xxx 
+sjaak 
+a broken heart 
+heart 
+silent hunter 
+leopard 
+hypnosis 
+absolute 
+granular 
+granular 
+scrubs 
+zzz 
+scrubs ita 
+hypnography 
+hypnography 
+hypnography 
+laura pausini 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ilife 08 
+elena paparizou 
+gloria guida 
+gloria guida 
+hot blond 
+fenech 
+cars 
+biagio antonacci 
+exdesi 
+search files, youtube and liveleak 
+sport ro 
+sport ro 
+sport ro 
+sport ro 
+partytv 
+party tv 
+riccardo scamarcio 
+my number one 
+nr. one 
+number one 
+elena paparizou 
+
+nba 08 
+lapo elkann 
+2k8 
+chak de india 
+partner 
+partner desi 
+sworn to a great divide 
+soilwork 
+hitch 
+.divx 
+.divx 
+.divx 
+sweetes girl 
+babylon 
+ecw 
+mike oldfield 
+mike oldfield 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+madona 
+madona 
+madona 
+madona 
+madona 
+madona 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fanning 
+golden compass 
+deep throa 
+beowulf 
+heat 
+stevie ann 
+beowulf 
+nagraj 
+superman 
+spiderman 
+ladyboy 
+thai 
+korea 
+china 
+chinesse 
+chinese 
+russian 
+ukraine 
+czech 
+tiny 
+bangbus 
+florida 
+international 
+contest 
+win 
+lolita 
+gang 
+kingdom 
+ladyboy 
+german dvd rip 
+ladyboy 
+alison krauss 
+down to the river to peay 
+down to the river to pray 
+go to sleep you little 
+spiderman 
+operation mindcrime 
+man of constant sorrow 
+man of constant sorrow 
+search files, youtube and liveleak 
+ford mustang 
+queesryche 
+empire 
+queensryche empire 
+go to sleep you 
+pirati dei caraibi 
+led zepplin 
+led zeppelin 
+rowan moore 
+rowan moore 
+king crimson 
+king crimson discography 
+king crimson 2 
+king crimson 2 
+king crimson 2 
+lolicon 
+.ita 
+.italian 
+dee desi 
+brown fucking machines 
+rape 
+sound forge 
+sistem works 
+norton 
+norton 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+carlitto way 
+worthing 
+brighton hove albion 
+hot fuzz 
+dj zone 
+dj zone 53 
+k pax 
+wild hogs 
+k pax 
+k-pax 
+divx 
+wild hogs 
+indian 
+divx 
+astronomy 
+astronomy 
+starry night 
+meade 
+lx 200 r 
+lxd 200 r 
+meade lx 200 r 
+meade lx 200 
+house 
+house s04 
+pirates 
+gossip girl 
+corset 
+xtorrent 
+lrock 
+lopez 
+beyonce 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dog 
+house s04 
+house s04 e05 
+k pax 
+meade 
+dog 
+rock 
+dog 
+todo en uno 8 
+todo en uno 8 
+todo en uno 8 
+todo en uno 8 
+todo en uno 8 
+todo en uno 8 
+todo en uno 8 
+band of brothers 
+zzz 
+harry potter 
+the killers 
+search files, youtube and liveleak 
+the kingdom 
+porno 
+zzz 
+high school musical 
+chemistry 
+cnn 
+kama sutra 
+kama sutra 
+kama sutra 
+angel & airwaves 
+kama sutra 
+kama sutra 
+kama sutra 
+kama sutra 
+kama sutra 
+gloria guida 
+kama sutra 
+kama sutra 
+kama sutra 
+kama sutra 
+videos caseiros 
+videos caseiros 
+videos caseiros 
+videos caseiros 
+videos caseiros 
+videos 
+click 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+300 
+marco paolini 
+marco paolini 
+stardust 
+pan's labyrinth 
+paolini 
+paolini 
+genesis 
+genesis 
+genesis 
+marco paolinio 
+marco paolinii 
+marco paolini 
+vajont 
+vajont 
+west side story 
+tribler 
+vista unattendeb 
+vista unatended 
+vista 
+vista unatended 
+vista unattended 
+incinta 
+moltoincinta 
+cyberlink 
+cyberlink ita 
+song 
+un'impresa da dio 
+cyberlink 
+tweakvi 
+vistaclean 
+vistaclean 
+poweriso 
+vistaclean 
+vista clean 
+fantozzi 
+king crimson 2 
+search files, youtube and liveleak 
+erba di casa mia 
+erba di casa mia 
+adriano celentano 
+adriano celentano 
+adriano celentano 
+celentano 
+celentano chi non lavora 
+pierino 
+beatles 
+winrar italiano + crac 
+unattended 
+unattended 
+vista 
+vista unattended 
+adobe photosh0p pro v10 
+adobe photosh0p pro 
+adobe photosh0p 
+xp 
+vista 
+axxo 
+ass 
+cum 
+search files, youtube and liveleak 
+2007 
+2007 
+panzer pc game 
+panzer game file 
+porn 
+single 
+panzer elite action 
+porn 
+milf 
+nude 
+sexy 
+anal 
+sex 
+*.wmv 
+king crimson 
+heroes ita 
+axxo 
+leaving on a jet plane 
+24 ita 
+leaving on a jet plane 
+divx 24 ita 4x 
+search files, youtube and liveleak 
+divx 24 ita 4x 
+divx house ita 
+ncis 5 
+marco paolini 
+jaco pastorius slang 
+lo svarione degli anelli 
+jaco pastorius slang 
+pink floyd 
+jaco pastorius 
+battlefield gun sound 
+victor vooten 
+victor wooten 
+pulp fiction 
+pulp fiction ita 
+teen 
+pulp fiction 
+cemento armato 
+magic music maker 
+teen 
+one night in paris 
+sex 
+teen 
+second love 
+pedo 
+boy 
+pain of salvation second love 
+pain of salvation second love 
+pain of salvation second love 
+boy 
+boy gay 
+boy gay 
+pedo 
+pedo 
+pedophilia 
+ubuntu 
+cute 
+ubuntu 
+pain of salvation second love 
+pain of salvation second love 
+tribler 
+concha buika 
+tribler 
+tribler 
+muscle men 
+tribler_4.1.7.exe 
+muscle men 
+muscle men 
+muscle men 
+muscle men 
+muscle men 
+legaltorrent 
+eigh tlives down 
+eight lives down 
+muscle men 
+muscle men 
+muscle men 
+muscle men 
+u2 
+styx 
+kansas 
+1914 shells of fury 
+1914 shells of fury 
+secretary day 
+film riccardo scamarcio 
+film roul bova 
+film alvaro vitali 
+biagio antonacci 
+canzoni biagio antonacci 
+canzone iris biagio antonacci 
+nek 
+for downling canzoni di nek 
+download porno 
+download film moana pozzi 
+moana pozzi 
+alles is liefde 
+pain of salaion 
+pain of salvation 
+pain of salvation hallelujah 
+search files, youtube and liveleak 
+the heardbreak kid 
+the heartbreak kid 
+19942 
+1942 
+superman 
+trade 
+ps3 
+pirates of caeibean 
+pirates of caeibean 
+ass to 
+aqu 
+fetish 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+trade 2007 
+pirates of caribean 
+sicko 
+indesign 
+black adder 
+malcolm in the middle 
+black books 
+two and a half men 
+massive r&b 
+dom 
+domination 
+mouth 
+palmaria 
+ass to mouth 
+protal 
+chinese 
+russian 
+sweden 
+private 
+search files, youtube and liveleak 
+faust 
+food 
+model 
+video 
+nanni 
+palmaria 
+rocco 
+media center 
+saturnino 
+shark tale 
+xilisoft 
+search files, youtube and liveleak 
+xxx 
+massive r&b 
+frans bauer 
+super converter 
+frans 
+nederlands 
+blowjob 
+generation 
+hardcore 
+rape 
+ass 
+lucy pinder 
+norton 
+kansas 
+ratatouille 
+blof 
+prince 
+jenna jameson 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+zzz 
+heroes 
+csi 
+roots 
+james 
+james uk 
+blondie 
+chuck berry 
+heroes sub 
+aerosmith - crying 
+u2 
+james best of 
+u2 best of 
+matrix 
+football manager 2008 
+uomo ragno 
+uomo ragno 
+uomo ragno 
+phil collins 
+tette 
+tette 
+tette 
+tettone 
+tettone 
+tettone 
+cher 
+cher best of 
+rod stewart 
+deja vu 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+highheels 
+highheels 
+high heels 
+high heels 
+high heels 
+high 
+nero dvd 
+hook 
+search files, youtube and liveleak 
+shanghai 
+beijing 
+china 
+nero 6 dvd 
+vietnam 
+phili 
+thai 
+thailand 
+taiwan 
+nero dvd 
+nero dvd 
+roger waters 
+vista vmdk 
+vmdk 
+nero 
+nero dvd 
+wwe 
+serious business 
+the serious business of happiness 
+serious business of happiness 
+happiness 
+centerfold 
+centerfold dictator 
+soulja boy 
+estelle 
+swiss beatz 
+swiss beatz 
+swiss beatz 
+tna 
+british 
+harry potter 
+nesn 
+linux 
+smallville 
+nero 
+nero 
+www.torrentspy.com 
+russian 
+roger waters 
+smallville 
+torchwood 
+shipudden episode5 
+shipudden episode 5 
+shipudden episode 5 
+russian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lolita 
+search files, youtube and liveleak 
+the 4400 
+pthc 
+kabul 
+search files, youtube and liveleak 
+dark shadows 
+smiling in a war zone 
+dark side 
+darkside 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+map 
+vmdk 
+hd italy 
+fights 
+el cantante 
+2007 
+paswoord 
+wachtwoord 
+de wereld draait door 
+bus 
+blond 
+black 
+private 
+bus 
+celeb 
+blond 
+xxx 
+heroes 1 
+heroes so1 
+heroes.so1 
+heroes.s01 
+heroes.s01 
+heroes. 
+sn 
+sn 
+the wall 
+nude 
+house 
+lost 
+legend 
+alain delon 
+winrar 3.71 
+iphone 
+300 
+transformers 
+iphone 
+search files, youtube and liveleak 
+heros 
+heroes 
+milf 
+small breast 
+puffy 
+rape 
+extreme 
+teen 
+amateur 
+amateur 
+search files, youtube and liveleak 
+a 
+v 
+v 
+v 
+vista 
+ultimate 
+ultimate 
+drunk 
+sex 
+die hard 
+ultimate 
+32bit 
+gouden kooie 
+gouden 
+ultimate32bit 
+ultimate32bit 
+ultimate32bit 
+ultimate 32bit 
+ultimate 32bit 
+ultimate 32-64bit 
+ultimate 32-64bit 
+vista ultimate 32-64bit 
+vista ultimate 32-64bit 
+vista 32-64 
+vista 32-64 
+vista 32-64 
+oops 
+microsoft vista 32-64 
+microsoft vista 32-64 
+microsoft windows vista 32-64 
+clit 
+fortwo 
+shrek 3 
+sex 
+teen 
+shrek3 
+lolita 
+teen 
+young 
+die hard 
+reaper 
+cissp 
+cissp 
+orange box 
+orange box 
+mandingo 
+transformers 
+nostradamus 
+radiohead 
+the life and times 
+girls 
+girls 
+girls 
+three kings 
+giantess 
+orange box 
+orange box 
+search files, youtube and liveleak 
+the 
+blink 
+blink 
+blink 
+blink 
+gothic 
+gothic 
+s01e01 
+the unit 
+linkin park bleed it out 
+crackaddict 
+crack addict 
+crack addict 
+massive 
+windows 
+floor 
+floorplan 
+turbofloorplan 
+young 
+overdeveloped 
+electric torture 
+gay 
+bjork 
+bjork 
+shrek ita 
+shrek ita 
+amateur 
+kalifornia ita 
+kalifornia ita 
+creampie 
+kalifornia ita 
+marco paolini 
+marco paolini 
+marco paolini 
+creampie 
+search files, youtube and liveleak 
+metal 
+queen stone age 
+pink floyd 
+steve jensen 
+steve jensen 
+queen stone age 
+surfs up 
+queen 
+teen 
+2007 
+ubuntu 7.10 
+2007 
+ubuntu 7.10 
+2007 
+ambient 
+ncis 
+ncis 5 
+ncis 5 
+csi 4 
+criminal minds 
+criminal minds 3 
+spiderman 3 
+qi 
+turistas 
+windows xp for mac 
+le colline hanno gli occhi 
+le colline hanno gli occhi 
+sunshine 
+sunshine ita 
+sunshine 
+superman 
+when nignt is falling 
+grey's anatomy 
+norton 
+blue chart 
+flowers 
+search files, youtube and liveleak 
+vista 
+öblivion 
+öblivion 
+öblivion 
+friends subtitles 
+delphi components 
+3d studio 
+dune 
+öblivion 
+megan monroe 
+milf hunter 
+milf hunter 
+blow 
+the ring 
+transformers 
+spiderman 3 
+transformers 
+vista 
+kira reed 
+nederland 
+nederlands 
+strictly 
+norton internet security 
+playboy calender 
+prada 
+ita 
+gay 
+sex videos 
+dr katz 
+search files, youtube and liveleak 
+dr kats 
+high stakes poker 
+porno holocaust 
+ea sports 
+ea games 
+raven riley 
+xxx 
+saturnino 
+smith ken 
+smith ken 
+smith ken 
+smith ken 
+smith ken 
+search files, youtube and liveleak 
+eva longoria 
+radiohead 
+dido 
+blunt 
+java 
+search files, youtube and liveleak 
+film italiani 
+need for speed carbon 
+need for speed carbon pc 
+io canto 
+io canto 
+io canto 
+io canto 
+io canto 
+l'era glaciale 
+non aprire cuela porta 
+sms 
+batman begin 
+nuns 
+non aprite quella porta 
+non aprite quella porta 
+porno 
+porno 
+porno 
+porno 
+joga bonito 
+futbol 
+film - dvx ita - non aprite quella porta - l'inizio 2006 - horror 
+el invensible 
+el invensible 
+el invensible 
+film dvx ita 
+film dvx 
+pc game 
+fifa 08 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+grey's anatomy 
+nederlands naakt 
+nederlands 
+nederlands 
+nederlands 
+nederlands 
+tyfla 
+space 1999 
+xxx 
+ufo 
+eastenders 
+spider pork 
+spider pork 
+dragon ball 
+starsisters 
+spooks 
+network security portable reference 
+illustrated tcp-ip - a graphic guide to the protocol suite 
+illustrated tcp-ip - a hacking the cable modem (2006) 
+illustrated tcp-ip - a hacking the cable modem (2006) 
+illustrated tcp-ip - a hacking the cable modem 
+illustrated tcp-ip - a hacking 
+illustrated tcp-ip - a hacking 
+hacking 
+salsa 
+an awfully big adventure 
+an awfully big adventure 
+mesmer 
+tomtom 
+an awfully big adventure 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+frankie martinez 
+frankie martinez 
+frankie martinez 
+salsa ny 
+salsa ny 
+salsa ny 
+salsa ny 
+salsa ny 
+salsa 
+severage 
+tagli al personale 
+tagli al personale 
+tagli al personale 
+tagli al personale 
+un impresa da dio 
+molto incinta 
+the simpson 
+search files, youtube and liveleak 
+king singers 
+sms 
+montezuma 
+hitch 
+hitch 
+partner 
+sotto mentite spoglie 
+twain 
+cemento armato 
+reeves 
+handel 
+grieg 
+2pac - hit em up 
+pikket 
+akon 
+picket 
+pickets 
+kanye west - stronger 
+flying 
+hairspray 
+lolita 
+kama sutra 
+film ita 
+ubuntu 
+bath 
+film spanish 
+lesbian 
+bush 
+ubuntu 
+kanye west 
+kanye west 
+kanye west - stronger 
+2pac - hit 
+fatlip - the salmon dance 
+akon - swete 
+akon 
+kanye west - the best life 
+kanye west-the best life 
+kanye west 
+onyx - slam harder 
+slam harder 
+fm concepts 
+stacy keibler 
+candy 
+osama 
+osama bin laden 
+iraq 
+kurtlar vadisi 
+kurtlar vadisi 
+balkenende 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+hendrik ido ambacht 
+vista 
+mac os x 
+mac os x 
+bikini 
+prison break 3 5 
+jimi blue 
+holland 
+justin timberlake 
+ayo technology 
+justin timberlake 
+dark rising 
+search files, youtube and liveleak 
+lesbian 
+lesbian 
+search files, youtube and liveleak 
+young harlots 
+search files, youtube and liveleak 
+lesbian 
+search files, youtube and liveleak 
+house mixtape 
+rocky 
+beats 
+instrumental 
+vanoza 
+vanoza 
+vivere 
+vanoza 
+search all files 
+der eisige tod 
+search files, youtube and liveleak 
+system of a down 
+ratatouille 
+disco boys 
+top 100 
+top 1000 
+rnb 
+rnb4u 
+yelle 
+wolfgang petry 
+va house 
+www.bitreactor.to 
+torrent 
+torrent 
+iceage2 
+ice age 2\ 
+ice age 2 
+slut 
+roddy frame 
+gone baby gone 
+ice age 
+ice age 
+hair spray 
+bitreactor.to 
+bitreactor.to 
+sal solo 
+search files, youtube and liveleak 
+high school musical 
+enttarnt 
+the bucket list 
+high school musical dvd rip 
+enttarnt 
+enttarnt 
+enttarnt 
+jpop 
+postal 
+j-pop 
+j-pop 
+japan 
+postal 
+postal 
+stalin 
+ratatouille 
+babes 
+omanko 
+playboy february 
+manko 
+playboy 
+yaoi 
+enttarnt 
+syoujyo 
+yaoi 
+planet terror 
+planet terror deutsch 
+der kleine könig macius 
+pure rouge 
+der sternwanderer 
+age of empires 
+pure rouge 
\81\93ã©ã\82\82 
+imperialism 
+caeser 
+gurren 
+swinger 
+der sternwanderer 
+die drei räuber 
+die vorahnung 
+die vorahnung 
+enttarnt 
+lynda.com tutorial 
+linux+tutorial 
+linux+ tutorial 
+ltutorial 
+the invasion 
+ufo 
+the invasion 
+ufo 
+space 1999 
+the invasion 
+könige der wellen 
+u.f.o 
+u.f.o 
+enttarnt 
+enttarnt 
+chuck und larry - wie feuer und flamme 
+nero dvd 
+chuck und larry - wie feuer und flamme 
+xmlspy 
+ratatouille 
+spider man 3 
+transformers 
+transformers 
+transformers 
+transformers 
+transformers 
+zwartboek 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+harry potter und der orden des phönix 
+next 
+das perfekte verbechen 
+das perfekte verbechen 
+das perfekte verbechen 
+achmed the terrorist 
+evan allmächtig 
+flyboys - helden der lüfte 
+high school musical 2 
+how i met your mother 
+motel 
+roots 
+disturbia 
+are you afraid of the dark 
+are you afraid of the dark voiceover 
+die liebe in mir 
+hände weg von mississippi 
+rennschwein rudi rüssel 2 
+s.h.i.t. - die highschool gmbh 
+goodbye bafana 
+spider man 3 
+spider man 3 german 
+dezember boy german 
+dezember boys german 
+dezember boys 
+dezember boys 
+dezember boys 
+ratatouille german 
+enttarnt german 
+slim shady lp 
+slim shady 
+for a friend 
+the chronic 
+chronic 
+the chronic 
+ice cube 
+friday 
+ren som 
+the office 
+resident evil extinction 
+atheism 
+dirt 
+heheryt 
+sum 41 
+google 
+system 
+search files, youtube and liveleak 
+sex 
+karaoke 
+gay 
+orkestbanden 
+beddingfield 
+search files, youtube and liveleak 
+red vs blue 
+die hard 
+die hard fr 
+die hard french 
+transformer french 
+family guy 
+family guy 
+alaura 
+mac os x 
+l'arc en ciel 
+tits 
+larc en ciel 
+muse 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rise the vieno 
+mrs robinson 
+naruto shippuuden 28 
+naruto shippuuden 29 
+mrs robinson 
+mrs robinson 
+mrs. robinson 
+me and julio down by the schoolyard 
+me and julio down by the schoolyard 
+me and julio down by the schoolyard 
+hey jude 
+let it be 
+google 
+firefly serenity 
+firefly 
+metalocalypse 
+metalocalypse 
+the office 
+criminal minds 
+metalocalypse season 
+half life 2 
+scarface 
+half life 2 orange box 
+psv 
+foo pretender 
+pretender 
+search all files 
+metalocalypse 
+muse 
+prison break 
+muse 
+photoshop video 
+photoshop tutorial 
+photoshop video 
+photoshop sphere 
+photoshop 
+photoshop video 
+30 days of night 
+gay 
+homeworld 
+demonoid 
+young harlots 
+trasformer 
+.mp3 
+metalocalypse 
+metalocalypse 
+metalocalypse 
+melissa 
+quicken 
+shrek 
+search files, youtube and liveleak 
+reno 911 
+nederland 
+nederlands 
+office space 
+metalocalypse 
+naked 
+nacked 
+naked 
+eros 
+breast 
+amateur 
+pics 
+house 
+video 
+search files, youtube and liveleak 
+the officec 
+the office 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+final fantasy 
+final fantasy movie 
+uefa chempions league 
+final fantasy 
+high school musical 
+charles darwin 
+search files, youtube and liveleak 
+dawkins 
+atlantis 
+sunset 
+eden 
+eden 
+wicked 
+queen 
+breasts 
+jordan lee 
+breasts 
+ubuntu 7.10 
+babysitter 
+babysitter 
+maid 
+massage 
+nurse 
+redhead 
+marshall 
+marshall mathers 
+marshall mathers 
+marshall mathers lp 
+breast exam 
+breast 
+linux 
+99 red 
+99 
+99 
+99 
+99 
+lost 
+lennox 
+cerberus 
+prey 
+invisible 
+private 
+forest of death 
+private 
+k3 
+barletta 
+rosetta stone 
+women loving women 
+search files, youtube and liveleak 
+breast 
+michael jackson 
+nos 
+kagbeni] 
+kagbeni 
+kagbeni 
+kagbeni 
+nederlands 
+najib 
+netwerk 
+search files, youtube and liveleak 
+big 
+amateur 
+anal 
+playboy 
+domplayer 
+hd 
+rooney 
+rape 
+bond 
+dom player 
+axxo player 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+va house 
+dana 
+breasts 
+pregnant 
+dog 
+kitchen 
+public 
+breasts 
+massage 
+dorm 
+sister 
+massage 
+search files, youtube and liveleak 
+a 
+a 
+x 
+remixland 
+remix 
+usenext 
+tribler 
+andreas dorau 
+c64 
+kaspersky 7 
+search files, youtube and liveleak 
+preteen 
+lolita 
+kaspersky 7 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+christofer hiding 
+christoffer hiding 
+christoffer idol 
+chris daughtry 
+http://tracker.prq.to/announce 
+angina-kamo kako delo 
+angina-kamo kako delo 
+angina-kamo kako delo 
+rtatouille 
+ratatouille german 
+ratatouille german 
+angina 
+aff 
+angina 
+angina 
+angina 
+angina 
+angina 
+angina 
+angina kamo kako delo 
+angina kamo kako delo 
+angina kamo kako delo 
+angina kamo kako delo 
+angina kamo kako delo 
+angina kamo kako delo 
+angina kamo kako delo 
+angina kamo kako delo 
+angina kamo kako delo 
+boobs 
+knock 
+svensk 
+angina kamo kako delo 
+angina 
+supernatural 
+lost 
+lost ita 
+lost 
+resident evil ita 
+angina 
+angina 
+ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+andre hazes 
+tom tom 6 
+surf's up 
+wicked jazz 
+wicked jazz 
+sicko 
+star trek 
+search files, youtube and liveleak 
+pirates of the carribean 
+breast 
+redhead 
+xxx 
+aria 
+porno 
+hey there delilah 
+antm 
+jeff dunham 
+jethro 
+crysis 
+blowjob ninja 
+skye edwards 
+kispál és a borz 
+facial 
+shrek 
+cum 
+picture 
+opera wii 
+wii 
+mc tali 
+nephew 
+szabadság szerlem 
+return to house haunted hill 
+facial 
+facial 
+dr house 
+cytherea 
+heroes 
+senza parole 
+heroes s2e5 
+vasco 
+heroes s2 e5 
+transformers 
+transformers hun 
+lily allen 
+transformers cz 
+transformers divx 
+batép 
+batép 
+*.mp3 
+*.mp3 
+sex 
+shoot 'em up 2007 
+shoot 'em up 
+supra 
+supra 
+gps 
+windows 
+popolus 3 
+rail simulator 
+ubuntu 
+linspire 
+php 
+sex 
+incest 
+x51r 
+windev 
+windev 
+windev 
+windev 
+windev 
+asus notebook 
+windev 
+windev 
+windev 
+windev 
+windev 
+windev 
+windev 
+pcsoft 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+windows 
+cognos 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+overpowered 
+the office 
+"velvet rose" 
+"velvet rose" 
+"velvet rose" 
+"velvet rose" 
+"russian rape" 
+voyeur 
+girls gone wild 
+south park 
+twig 
+stargate 
+diggnation 
+diggnation 
+bizottság 
+diggnation 
+topo 
+waszlavik 
+rail 
+home video 
+trance 
+yaoi 
+yaoi 
+resident evil 
+porn 
+the seeker 
+xxx 
+perfect keylogger 
+levees 
+levees 
+when the levees broke 
+when the levees broke requiem four parts 
+bocelli 
+vasco rossi 
+vasco rossi 
+mystery 
+privateville 
+private ville 
+big fish 
+bruce springsteen 
+robin van persie 
+senza parole 
+space 1999 
+blake's 7 
+the mighty boosh 
+amorf ördögök 
+toon boom 
+abdullah chhaddeh 
+abdullah chhaddeh 
+abdullah chhaddeh 
+toon boom 
+abdullah chhadeh 
+toon boom mac 
+toon boom 
+all the way 
+all the way 
+blood diamonds 
+blood diamonds 
+blood diamonds 
+mp3 
+toon boom .dmg 
+.dmg 
+search files, youtube and liveleak 
+search all files 
+nin 
+baris manco 
+baris manco 
+baris manco 
+hitler 
+baris manco 
+xxx 
+kemal sunal 
+hitler 
+wehrmacht 
+bbw porn 
+porn 
+porn bbw 
+assfuck 
+search files, youtube and liveleak 
+the.magnificent.butcher.(dvd.rip.rus.by.gigon) 
+the simpson movie 
+film 
+filprison break 
+prison break 
+prison break ita 
+stardust ita 
+rude girls 
+rude girls 
+running 
+strip 
+bang bros 
+baby got boobs 
+my sisters got friend 
+my sisters hot friend 
+naughty america 
+toon boom 
+rape 
+surf's up 
+surfs up 
+surf's up 
+incest 
+drunk 
+orgy 
+office 
+nero ita 
+italiano 
+italian 
+nero 
+nero ita 
+battlefield 
+jaco pastorius 
+battlefield 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pearl jam 
+gray-man 
+tug 
+candid 
+handel 
+vanoza 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vanoza 
+electric blue 
+gray-man 
+xxx 
+karma 
+tutti frutti 
+hentai 
+gossip girl 
+pocket tv 
+pocket 
+[pocket] 
+[pocket] 
+[pocket] 
+pocket 
+pocket 
+pocket 
+live tv 
+live tv 
+live tv 
+pocket tv 
+chris rea 
+half life 
+ufo 
+coldplay 
+chris rea 
+chris rea 
+half life 
+oktoberfest 
+coldplay parachutes 
+half life 
+hungrian 
+dfx audio 
+parachutes 
+horse 
+horse 
+clawfinger 
+asterix 
+linux 
+de niro 
+in flames 
+rammstein 
+piss 
+piss 
+ac dc 
+dire straights 
+search files, youtube and liveleak 
+snehurka 
+south park 
+yuma 
+lilu 
+taxidriver 
+futurama 
+tankcsapda 
+cairo 
+"amy reid" 
+amy reid 
+hongkong movie 
+mirigy 
+nike 
+caliban 
+like it hot 
+harry potter goblet 
+harry potter half 
+the painkillers 
+sum 41 
+sum 41 
+sum 41 
+sum41 
+sum 41 
+trainspotting 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tomtom one 
+house s04 
+the office s04 
+flac 
+jazz 
+junoon 
+cuore 
+donna 
+stefania sandrelli 
+horse 
+stefania sandrelli 
+laura antonelli 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+wii 
+wii rockstar 
+wii pal 
+wii pal 
+house s04 
+rascal 
+movie 
+lessons 
+lessons 
+daft 
+kraftwerk 
+zagar 
+zagar 
+drawn s04 
+drawn s03 
+house s04 
+tera patrick 
+the office s04 
+office mac 
+search files, youtube and liveleak 
+ebony addiction 
+about face pov 
+search files, youtube and liveleak 
+scrubs s04 
+scrubs s03 
+scrubs s02 
+scrubs s01 
+scrubs 
+simpsons s19 
+teen 
+teen porn 
+kispál 
+lunch 
+search files, youtube and liveleak 
+ferry corsten junk 
+family guy 
+vista 
+search files, youtube and liveleak 
+die hard 4 
+tiesto 
+resident evil extinction 
+family guy 
+teen 
+resident evil extinction 
+tiesto 
+milf 
+ocean's 13 
+heroes 
+south park 
+family guy 
+funny 
+dog sex 
+bondage 
+honda crf230 
+pacman on crack 
+pacman on crack - blood hound gang 
+blood hound gang 
+lordi 
+red dwarf 
+hand 
+gay 
+hogtied 
+kink 
+panties 
+p.o.v. 
+pov 
+voyeur 
+spy 
+babe 
+trainspotting 
+blow 
+oral 
+suck 
+avenged 
+naked 
+natural 
+pretty 
+faitless 
+faithless 
+tv 
+search files, youtube and liveleak 
+surfs up 
+nicsak ki besz 
+death note 
+homem aranha 
+homem aranha 
+homem aranha 
+homem aranha 
+homem aranha 
+homem aranha 
+gungrave 
+not another teen movie 
+le chevalier eo 
+jackass2 
+le chevalier eon 
+search files, youtube and liveleak 
+simpsons 
+search files, youtube and liveleak 
+tits 
+tits 
+boobs 
+porn 
+tits 
+sex 
+sex 
+games 
+stuff 
+the girl next door 
+the girl next door 
+the girl next door movies 
+the girl next door movies 
+knocked up 
+knocked up 
+girls 
+sesame street 
+chimene badi 
+chimene badi 
+chimene badi 
+musicale 
+mázs 
+java 
+ubuntu 
+lynda.com 
+smallville 
+ufo 
+call of duty 
+usmle 
+the bourne 
+sex 
+girs 
+girls 
+regclean 
+uninstaller 
+http://video.google.com/videoplay?docid=3896964583024194307search files, youtube and liveleak 
+http://video.google.com/videoplay?docid=3896964583024194307search files, youtube and liveleak 
+linkin park 
+donna 
+cemento armato 
+flachau 
+rocco 
+dallas 
+search files, youtube and liveleak 
+deep house 
+sylvian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+robbie konijn 
+robbie konijn 
+robbie konijn 
+queensryche 
+weather report 
+weather report 
+weather report 
+lisa lipps 
+comedy 
+hofi 
+fripp 
+fripp 
+sylvian 
+handjob 
+geoff tate 
+superbad 
+battlefield 
+busty dusty 
+loloferrari 
+maxi mount 
+chelsea charms 
+plenty up top 
+big clit 
+triky 
+vanessa del rio 
+triky 
+triky 
+big clit 
+genesis 
+lolo ferrari 
+majom 
+nero8 
+massive atack 
+gib 
+massive atack 
+massive atack 
+300 ita 
+sex 
+battisti 
+chinesepod 
+transcript 
+nella 
+300 ita 
+heroes 
+chinese 
+l'ultima legione 
+halo 3 
+annácska 
+kelemen anna 
+i fantastici 4 
+anna kelemen 
+méhkirálynÅÂ\91 
+300 
+thai 
+thai 
+michel clayton 
+good night and god luck 
+good night and good luck 
+mancha de rolando 
+domino 
+domino ita 
+la spada nella roccia 
+pornó 
+pornó 
+goog night and goog luck 
+porn 
+non aprite quella porta 
+scary movie 
+scary movie ita 
+300 ita 
+molto incinta 
+300 
+300 ita 
+star wars ita 
+star wars 
+search all files 
+search all files 
+search all files 
+sex 
+la spada nella roccia 
+game 
+xxx 
+awr 
+suzuki 
+suzuki 
+suzuki 
+suzuki 
+ebook 
+search files, youtube and liveleak 
+monteverdi 
+monteverdi 
+monteverdi 
+tool 
+opeth 
+superbad 
+showder 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porn 
+metallica 
+oprah 
+mpl-studios 
+met-art 
+squirt 
+squirt 
+modflow 
+franz ferdinand 
+franz ferdinand 
+ulver 
+landdesktop 
+land desktop 
+xxx 
+sunshine 
+divx ita sunshine 
+007 
+007 casino royal 
+ankara 
+ankara 
+ankara 
+ankara 
+ankara 
+antonioni 
+nero 8 
+antonioni 
+search files, youtube and liveleak 
+duran duran 
+sylvia saint 
+search files, youtube and liveleak 
+magyar 
+shakespeare sisters 
+shakespeare sisters 
+shakespeare sisters 
+army of lovers 
+windows 
+windows 
+fallen 
+extinction 
+halo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+trough it all 
+"bill evans" 
+mills brothers 
+mills brothers 
+duke ellington 
+iconix 
+iconix 
+uml 
+uml 
+uml 
+the wire 
+rihanna 
+avril lavigne 
+black sun empire 
+"bill evans" 
+amateur 
+"bill evans" 
+"bill evans" 
+"bill evans" 
+"bill evans" 
+e-book 
+"bill evans" 
+"charles mingus" 
+age of empire hun 
+age of empire hun 
+age of empire hun 
+age of empire 
+age of empire 
+mp3 
+300 
+system of a down war 
+system of a down 
+system of a down 
+"charles mingus" 
+splash 
+hun 
+katie melua 
+szabadon 
+madonna 
+baez 
+hungarian 
+michelle wilde 
+bros 
+inxs 
+inxs 
+tokyo drift 
+trust 
+martin donovan 
+once were warriors 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+karib 
+pirates 
+ida 
+vista 
+keygen 
+galaxy hitchhikers 
+galaxy hitchikers 
+oasis 
+hitchikers 
+emil rulez 
+steam 
+portal 
+lesbian 
+shemale 
+mature 
+kaiji 
+incest 
+regulate 
+portal 
+portal 
+xxx 
+anime 
+tranny 
+whisper of the heart 
+pacman 
+tranny 
+whisper of the heart divx 
+pacsteam 
+mike oldfield 
+david 
+whisper of the heart 
+shemale 
+milf 
+whisper of the heart take me home 
+search files, youtube and liveleak 
+whisper of the heart country road 
+goth 
+portal 
+model 
+gofast 
+paccafe 
+anal 
+mac 
+lady 
+catfight 
+lesb 
+catfight 
+search files, youtube and liveleak 
+http://tracker.megashara.com:2710/b21eb57047ec49648358693885c15fa0/announce 
+born to run 
+xxx 
+postal2 
+prison break 
+belladonna 
+katsumi 
+prison break s03e05 
+clara 
+nin 
+portal 
+portal 
+nine inch 
+nine inch 
+south park 
+portal 
+youtube video files 
+youtube video files 
+south park s11e10 
+handcuffs 
+handcuffs 
+bondage 
+msp 
+afetishpage 
+girls cuffed 
+south park s11e10 
+paccafe 
+pacda 
+pacsafe 
+crack 
+vista 
+shrek 3 hungarian 
+shrek 3 hun 
+shrek 3 
+gÅÂ\91zös 
+playboy 
+playboy 
+playboy 
+candy 
+candy m 
+szkáj 
+skywalker lajos 
+skywalker lajos 
+nero linux 
+heroes linux 
+strip 
+arsen 
+urine 
+pissing 
+pissing 
+hdtv 
+deep purple 
+colbert 
+deep purple 
+deep purple 
+deep purple 
+heroes 
+top gun 
+hey you 
+pantyhose 
+angel 
+buffy le campane dell'inferno 
+buffy le campane dell'inferno 
+beggars opera 
+squirt 
+bing crosby 
+clip 
+footlight parade 
+childs play 
+mike oldfield 
+tottenham 
+beggars opera 
+prison break 
+boots 
+bondage 
+it crowd 
+vnv nation 
+come back pearl jam 
+pearl jam 
+come back 
+vnv nation 
+winning eleven 11 
+vnv nation 
+winning eleven 11 
+u2 
+immagine in cornice 
+image in a frame 
+pearl jam 
+vnv nation 
+immagine in cornice 
+radiohead 
+curvead air 
+ac mega 
+curvead air 
+curvead air 
+curvead air 
+curvead air 
+deep purple 
+deep purple 
+battlestar season 
+pynk floyd 
+pink floyd 
+cum 
+matrix 
+matrix 
+matrix reloaded 
+bmw 
+led zeppelin 
+rosetta 
+bmw navigation 
+rosetta 
+rosetta 
+metallica 
+slayer 
+slayer 
+matthew garrison 
+barbie 
+barbie ita 
+mercedes navigation 
+david lynch 
+la foresta magica 
+prison break 
+need for speed 
+need for speed pro street 
+akos 
+republic 
+trixie 
+search files, youtube and liveleak 
+jojo mayer 
+lost 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+machine head 
+trixie 
+topanga 
+the silence of the lambs 
+the silence of the lambs 
+republic 
+fodera 
+akos 
+deep purple 
+vad fruttik 
+beatles 
+hypnotized 
+bruce 
+mario bart 
+heat 
+sibelius 
+license to wed 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rolling stones 
+sex 
+film 2007 ita 
+vst 
+doors 
+pogues 
+toto 
+styx 
+strawbs 
+sicko 
+big rack attack 
+big rack attack 
+rory gallagher 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+machine head 
+search files, youtube and liveleak 
+moody blues 
+spooky tooth 
+van der graf generator 
+adriano celentano film 
+ufo 
+procol harum 
+atomic rooster 
+chitarra classica 
+rolling stones 
+deep purple 
+cartoni animati 
+led zeppelin 
+kansas 
+terminator 
+catene film amedeo nazzari 
+curvead air 
+catene film amedeo nazzari 
+amedeo nazzari filmi 
+amedeo nazzari filmi 
+curved air 
+cries 
+barry ryan 
+painful 
+steeleye span 
+blackmores night 
+yes 
+tekken2 iso 
+tekken2 iso 
+king crimson 
+tekken2 iso 
+tekken2 iso 
+tekken2 iso 
+long toes 
+vanilla fudge 
+gentle giant 
+credence clearvater revival 
+credence clearwater revival 
+sasha 
+sasha 
+black rain 
+celebrity porn 
+bbc 
+doors 
+pink floyd 
+jackas 3 
+windows xp sp2 nl 
+you got the key 
+heroes 
+house 
+love is the key 
+an inconvenient truth 
+google 
+google 
+google 
+cute 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jetro tull 
+cartoon 
+the comebacks 
+rude girls 
+monrose 
+hentai 
+jetro tull 
+jimi blue 
+big.rack 
+search files, youtube and liveleak 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+the beatles 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+justin timberlake 
+il destino di un guerriero 
+ferry corsten 
+ferry corsten junk 
+ferry corsten junk 
+gray-man 
+drawn together 
+manu chao 
+il destino di un guerriero 
+mettalica 
+manu chao 
+manu chao 
+manu chao 
+eldest 
+britney spears 
+xxx hd 
+xxx2007 
+american girl in paris 
+jack johnson 
+brook april 
+american dad # 
+american dad 
+il gladiatore 
+american dad german 
+search files, youtube and liveleak 
+scared 
+mtv scared 
+film ita 
+prison.break 
+prison.break 
+prison break 
+mtv scarred 
+paper monster 
+dave gahan paper monster 
+dave gahan papermonster 
+dave gahan paper monster 
+paper monsters 
+south park 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rauch 
+rauch 
+black 
+brook april 
+mature 
+jazz 
+nightwish amaranth 
+nightwish 
+nightwish 
+manhunt 
+spongebob 
+bangbros 
+lesbian 
+heat 
+teen 
+shivering isles 
+halo 2 xp 
+shivering isleshivering isles 
+shivering isleshivering isles 
+shivering isle 
+shivering isles 
+breakdance 
+shivering isles 
+ederbringhausen 
+bromskirchen 
+annika 
+teen 
+naked 
+amateur 
+yannik 
+sahra 
+kevin 
+luca 
+sims 
+hamman 
+hamam 
+wild hogs 
+befor 
+all for one 
+ein engel fliegt in die nacht 
+ben 
+glashaus 
+monrose 
+dragon wars 
+einen stern 
+jugendliche 
+gangsta 
+peer groups 
+pussy 
+peer gruppen 
+the invasion 
+seeker 
+terror 
+shoot em u 
+trixie 
+topanga 
+little april 
+teen 
+lama 
+trixie 
+golf 
+1408 
+nl sub 
+search files, youtube and liveleak 
+little april 
+search files, youtube and liveleak 
+dark star one 
+e book 
+mehkiralyno 
+kelemen 
+anna 
+14 
+dutch sub 
+dutch 
+teen 
+nederlands 
+ned. 
+ondertiteld 
+buurvrouw 
+neighbour 
+spy 
+voyeur 
+peep 
+strip 
+bathroom 
+panties 
+beast 
+atlantis 
+arte dokumentation 
+thirakwa 
+atlantis s03 
+atlantis s04 
+dusche 
+umkleide 
+atlantis s04 xvid 
+umkleidekabine 
+private 
+pornochic 
+olivia mojica 
+il ruffiano 
+özpetek 
+vsti 
+michele strogoff 1975 
+lino banfi 
+business 
+michele strogoff 
+business 
+ita 
+la frontiera del drago 
+nero 
+winrar 
+search files, youtube and liveleak 
+manifold 
+porn 
+nexus 
+nexus jupiter 
+nexus jupiter 
+black sluts 
+black 
+atlantis s04 
+sakuraba rampage 
+sakuraba 
+rampage jackson 
+pride 
+pilates 
+u2 
+bbc 
+culture club 
+culture club 
+culture club 
+culture club 
+atlantis s04 04 
+lolita 
+preteen 
+florida teen 
+florida 
+sandra 
+nastia 
+brad pitt 
+bel ami 
+bel ami 
+bel ami 
+bel ami 
+culture club 
+belami 
+ogniem i mieczem 
+fish 
+stargate 
+stargate 
+stargate 
+stargate atlantis 
+saw4 
+brad pitt 
+brainiac 
+brainiac 5 
+alazai 
+hellraiser 
+search files, youtube and liveleak 
+hellraiser ii 
+him 
+phil eagles 
+phil eagles 
+florida 
+florida teen 
+sweet melonee 
+melonee 
+modelingdvds 
+toxicity 
+hellraiser iii 
+radiohead in rainbows 
+radiohead in rainbows 
+nastia mouse 
+florida teen 
+tricosta 
+axx0 
+pthc 
+teen model 
+bmw navigation 
+gimme more 
+prison break 
+prison break s03e05 
+heroes s03e05 
+heroes s02e05 
+heroes s02e05 
+heroes 
+hitman 
+teen 
+justice - dance 
+justice - dance 
+justice - dance 
+justice - dance 
+tottenham 
+horse 
+google 
+shrek 3 
+shrek 3 rip 
+transformers 
+club life 028 
+weeds 
+kyle 
+28 weeks later 
+lineage 
+pearl jam 
+hoax 
+sex 
+walt disney 
+walt disney ita 
+walt disnay 
+walt disnay 
+walt disnay 
+walt disnay 
+horror ita 
+horror ita 
+whores 
+ptsc 
+pthc 
+pthc 
+ls magazine 
+black sluts 
+black whores 
+black sex 
+naruto 
+avatar 
+avatar 
+avatar 
+30 rock 
+arrested development 
+crime and punishment 
+avena lee 
+shaving 
+shaves 
+shaved 
+japanese 
+asian 
+japanese 
+shaving 
+shaving 
+shaving 
+shaving 
+exam 
+lamb of god 
+psp 
+yello 
+belew 
+belew 
+king crimson 
+bob marley 
+bob marley 
+bob marley 
+bob marley 
+bob marley 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+californication 
+bourne 
+halloween 
+halloween ii 
+halloween 2 
+halloween 4 
+house 
+teen russian 
+the shining 
+bepi 
+scrubs 
+preteen 
+fiatal 
+csisztu 
+csisztux 
+magyar 
+bang bros 
+poltergeist 
+dawn of the dead 
+scrubs 
+achool 
+alchool 
+40 inches plus 
+big tits round asses 
+btra 
+young
+college fuck fest 
+milf 
+alcohol 
+halloween 
+halloween 
+return of michael myers 
+celine dion 
+celine dion 
+muse 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+girls 
+girls 
+the evil dead 
+search files, youtube and liveleak 
+mom 
+jethro 
+vivid 
+sasha grey 
+daemon tools 
+gloria guida 
+gloria guida 
+gloria guida 
+teagan presley 
+gloria guida 
+teagan 
+bigfish 
+curb 
+daemon 
+daemon 
+stardust 
+avere vent'anni 
+alien vs predetor 
+alien vs predetor movie 
+alien vs predetor movie 
+alien vs predetor movie 
+transformer il film ita 
+transformer ita 
+film ita 
+film ita 
+film ita 
+la divina commedia 
+transformer ita 
+spartiti chtarra classica 
+spartiti chtarra classica 
+heroes ita 
+film ita 
+film ita 
+film ita 
+renato rascel film 
+heroes stagione2 ita 
+heroes stagione 2 ita 
+heroes stagione 1 ita 
+heroes stagione completa ita 
+heroes stagione completa 
+transformer ita 
+wumpscut 
+prison break 3 
+prison break 3 sub ita 
+prison break 3 ita 
+prison break 3 sub ita 
+prison break 3 completa 
+prison break 3 
+resident evil 
+zagar 
+zagar 
+zagar 
+chris cunningham 
+heroes 
+grand funk 
+* 
+* 
+sanskrit 
+sanskrit 
+sanskrit 
+sanskrit 
+sanskrit 
+sanskrit 
+vedic 
+vedic 
+vedic 
+diventerai una star 
+umbrella 
+finley 
+queen 
+queen greatest hits 
+kitt 
+all night long 
+lionel riche 
+k.i.t.t. 
+k.i.t.t. 
+kitt 
+love today 
+love today 
+horror ita 
+horror ita 
+thriller avi 
+thriller avi ita 
+thriller avi 
+guns and roses 
+saturnino 
+in search of sunrise 6 - ibiza 
+hide and seek 
+elect the dead 
+kitt 
+touran 
+finding beauty in negative spaces 
+imogen heap 
+search files, youtube and liveleak 
+ruzsa magdi 
+prison break 
+ruzsú 
+rúzsa 
+visual webdev 
+winaso 
+winaso 
+import-export 
+search files, youtube and liveleak 
+divx heroes ita 
+william valkenburg 
+könig der wellen 
+könig der wellen 
+star trek 
+die vorahnung 
+guitar pro keygen 
+guitar pro key 
+a href='http://torrentz.ws/torrent/156966/hokkabaz-2006-dvd-rip-xvid-61' title='hokkabaz (2006) dvd-rip xvid-61 torrent ownload'>hokkabaz (2006) dvd-rip xvid-61</a> 
+die alten rittersleut 
+a href='http://torrentz.ws/torrent/156966/hokkabaz-2006-dvd-rip-xvid-61' title='hokkabaz (2006) dvd-rip xvid-61 torrent ownload'>hokkabaz (2006) dvd-rip xvid-61</a> 
+search files, youtube and liveleak 
+divx heroes ita 
+heroes ita 
+heroes ita 1x01 
+die blechbüchsenarmee 
+die blechbüchsenarmee 
+die blechbüchsen 
+king arthur 
+search files, youtube and liveleak 
+barbarella 
+heroes ita 1x0 
+heroes ita 
+vasco rossi 
+lecce 
+dialetto leccese 
+pirates 
+tette 
+troy 
+troy hans zimmer 
+troy 
+kalifornia 
+troy 
+troja 
+teen 
+sesso 
+spogliarello 
+spogliarello 
+ratatouille ita 
+streep tease 
+streep tease 
+streep tease 
+streep tease 
+search all files 
+ray charles 
+sex 
+good 
+global 
+andrew sisters 
+andrew sisters 
+nella 
+nella redtube 
+kings singers 
+boroka balls 
+search files, youtube and liveleak 
+kisvakond 
+andrew sisters 
+reon 
+lost 
+lili carati 
+canaglie 
+al volante 
+al volante 
+ford 
+una ford 
+una ford 
+al volante di una ford 
+succo d'arancia 
+canaglie 
+prison 
+sigla 
+sigla 
+sigla 
+house ita 
+monkey island 
+legend of kyrandia 3 
+legend of kyrandia 
+space quest 
+kelly brook 
+lord of the rings 
+axxo 
+pirates of the caribean world's end 
+pirates of the caribbean world's end 
+jennifer lopez 
+jennifer lopez 
+jennifer lopez 
+picture ace 
+search files, youtube and liveleak 
+scooter 
+nsked 
+naked 
+scooter 
+scooter 
+ita 
+nl 
+feist 
+high school musical 2 
+hungarian 
+high school musical 2 ita 
+half life 
+search files, youtube and liveleak 
+sarah 
+doom 
+phil 
+half life 
+half life 
+half life 
+half 
+half-life 
+half-life 
+half life 
+half life 
+half life 
+half life 
+half life 
+half life 
+half life 
+monrose 
+jimi blue 
+alex c 
+du hast den geilsten arsch der welt 
+geilster arsch der welt 
+alex c. 
+another world 
+bachman 
+planet terror 
+schönster arsch der welt 
+schönsten arsch der welt 
+teen feet 
+sha 
+lame immortelle 
+evan almighty scr avi 
+the question is what is the question 
+evan almighty full 
+monrose 
+brasileirinhas 
+desperate hous 
+battlestar gala 
+erotico 
+landa 
+sims2 
+sims2 
+sims2 
+2007 
+2007 
+search files, youtube and liveleak 
+windows 
+ghost 
+phantom limb 
+wehrmacht 
+sha 
+beckett 
+public 
+gay 
+justin timberlake 
+gay 
+gay 
+folsom 
+jimi blue 
+stardust 
+search files, youtube and liveleak 
+rtl klub 
+serial mom 
+acrobat 
+acrobat 8 ita 
+shower 
+porntube 
+porntube 
+porntube 
+monrose 
+gtr2 
+domo mia 
+the thing game 
+dopeman 
+domo mia 
+hitman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dopeman 
+girls 
+top gun 
+movies 
+harry potter 
+harry potter dutch 
+spiderman 
+dutch subtitels 
+movies with dutch subtitels 
+dutchmovies 
+dutch movies 
+dutch movies 
+dutch movies 
+dutch movies 
+movies 
+disney 
+disney movies 
+livingcolour 
+s 
+a 
+mai thuli bhaichu re 
+asian 
+blowjob 
+von löwen und lämmern 
+ndour 
+n'doure 
+worldmusic 
+worldmusic 
+world music 
+world music 
+arabic 
+arabic 
+limp bizkit 
+bulan 
+bulan 
+gabriel 
+billy talent 
+peter gabriel 
+prison break 
+game 
+transformers 
+the king of kong 
+simpsons 
+anneke grohnlo 
+ben x 
+anneke gronloh 
+ben x 
+plop 
+freddy got fingered 
+freddy finger 
+carny 
+girl next door 
+girl next door 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+battlefield 
+battlefield 
+home alone 
+brave one 
+vier minuten 
+resident evil 
+painted viel 
+knocked up 
+encarta 
+evening 
+honey 
+honey 
+spiderman 
+spiderman in lingua italiana 
+spiderman in lingua italiana 
+rocky 2 
+georgia rule 
+office 
+film honey 
+office 2003 
+film honey 
+film honey 
+hors de prix 
+honey con jessica alba 
+dvd 
+honey 
+honey 
+horror 
+horror 
+horror 
+horror 
+horror 
+horror 
+horror 
+grindhouse 
+spiderman 
+olè 
+notte prima degli esami 
+next 
+venerdi 13 
+porn 
+porn 
+porn 
+porn 
+venerdi 13 
+venerdi 13 
+venerdi 13 
+max pezzali 
+adobe after effects 
+adobe a 
+adobe 
+adobe 
+avril lavigne 
+adobe 
+2007 
+#1 dvd ripper v6.01 - [papi raton] 
+adobe after 
+#1 dvd ripper v6.01 
+sean paul 
+hip hop 
+black porn 
+nero 
+godfather 
+black porn 
+perfume 
+porn 
+porn 
+jessica rizzo 
+mr bean 
+porn 
+porn 
+50 cent 
+american gangster 
+musica araba 
+araba 
+canzoni arabe 
+musica danza del ventre 
+heroes 1 18 fft 
+naruto 60 
+joggs 
+naruto 60 episode dubbed english 
+big bow 
+justice dance 
+big tits 
+heroes 
+heroes ita 
+heroes 18 ita 
+eastern 
+gay 
+eating out 
+naruto 60 episode dubbed english 
+naruto episode 60 (3/3) english dubbed 
+silver surfer 
+rape 
+naruto episode 60 (3/3) english dubbed 
+naruto episode 60 english dubbed 
+naruto episode 61 english dubbed 
+naruto episode 61 english dubbed 
+siberia 
+syberia 
+nederlands 
+dutch 
+50cent 
+50cent 
+50cent 
+eating out 
+hamam 
+the hamam 
+dont taze me 
+gay 
+linkin park 
+search files, youtube and liveleainvasionk 
+invasion 
+imogen heap 
+sopranos 
+sopranos season 1 
+sopranos 
+porn 
+girl masturbating 
+movie 
+into the wild 
+into the wild 
+anthology 
+anthology 
+super anthology 
+home alone 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+opeth 
+david gilmour 
+skateboarding 
+porn 
+tia tanaka 
+ass parade 
+opeth 
+circa survive 
+john mayer-kid a 
+kid a 
+heroes 
+ok computer 
+kinski 
+kinski 
+kinski 
+kinski 
+kinski 
+kinski 
+kinski 
+kinski 
+pocketpc 
+freedom project 
+family guy hdtv 
+family guy 
+family guy s06 
+family guy s06 
+nannie diaries 
+nanny diaries 
+diamond 
+mighty heart 
+import/export 
+import/export 
+operation: kingdom 
+the kingdom deutsch 
+the kingdom ger 
+the kingdom 2007 
+the kingdom 2007 ger 
+keira knightley 
+the kingdom 2007 
+enttarnt 
+abbyy 
+abbyy 
+abbyy 
+abbyy corporate 
+abbyy corporate 
+abbyy 
+rendition 
+porn 
+dp anal 
+crow 
+crow angelica 
+angelica 
+crow 
+crow 
+crow 
+crow 
+trailer 
+trade 
+plas 
+plas 
+piss 
+puke 
+rambo trailer 
+podcast 
+vidcast 
+search files, youtube and liveleak 
+itaù 
+ita 
+teens 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+young 
+search files, youtube 
+search files, youtube 
+search files 
+search files 
+house ita 
+search files 
+traveler 
+heros 
+atb 
+ita 
+puke 
+vomut 
+vomit 
+scat 
+thugsjob 
+babysit 
+thugsjob 
+thugs job 
+thugs 
+il gladiatore in italiano 
+il gladiatore 
+il gladiatore in dvd 
+film il gladiatore 
+kodomo 
+matrix 
+dub 
+downtempo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+leaf 
+aaliyha 
+aaliyah 
+rapsodia romana 
+search files, youtube and liveleak 
+saw 4 
+2pac 
+2pac 
+search files, youtube and liveleak 
+xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nero 
+cum facial 
+cum compilation 
+ea sports 
+fifa manager 
+python tutorial 
+python tutorial 
+python tutorial 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+gay 
+gay 
+gay 
+gay 
+gay 
+diggnation 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+blackadder 
+matrix 
+matrix ita 
+ratatuille 
+ratatouile 
+ratatouile 
+ratatouile 
+ratatouile 
+kent 
+kent ingenting 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+blackadder season 2 episode 2 
+grindhouse 
+ed edd eddy 
+grindhouse 
+ed edd eddy ita 
+sternwanderer 
\83Ù\83Å\81ãÅ\9fã\83\8bãĵãÄ°ã³ã\83\89 
\83ãÂ\83�Â\83Â\94ãĵã\83\9bãĵã³ã\83γÂ\83�Â\83�Ä\9fã\83\91ãÄ´ãĵã\83\89 
+chuck und larry 
+ghostrider 
+catia crack 
+catia 
+heroes 
+home of the brave ita 
+baabrika 
+super demo 
+super 
+timeattack 
+time attack 
+jorma 
+jorma 
+intervideo 
+jorma kaukonen 
+jorma 
+search files, youtube and liveleak 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing 
+simpson 
+simpson 
+sleepy kitten 
+simpson 
+simpson 
+kari tapio 
+kari tapio 
+kari tapio ja ville valo 
+kari tapio 
+simpson 
+simpson 
+simpson 
+simpson 
+kari tapio ville valo 
+pohjantähden 
+because sun 
+simpson 
+simpson 
+la famiglia simpson 
+la famiglia simpson 
+in generale 
+simpson 
+16 stagione dei simpson 
+stagioni dei simpson 
+stagioni simpson 
+i simpson 
+ship simulator 
+search files, youtube and liveleak 
+shakatak 
+nero 
+windows xp corporate 
+ndubz 
+ndubz 
+windows xp pro corporate ita 
+n dubz 
+ghost rider 
+naruto 
+emulatore ps2 
+search files, youtube and liveleak 
+cisor sistor 
+depeche mode 
+sto para 5 
+the pursuit of happyness 
+the pursuit of happyness 
+windvd 
+exdesi 
+enur ft. natasja -calabria 
+johny gaddar 
+enur ft. natasja -calabria 2007 
+enur ft. natasja -calabria 2007 
+50 cent 
+enur 
+vanaja 
+dutch pop 
+dutch porcupine 
+film ita 
+billboard 
+holland porcupine 
+porcupine tree zoetermeer 
+porcupine tree 
+heros 5 
+heroes 5 
+heroes s2e5 
+eating out 
+heroes s2e5 
+heroes s2e2 
+heroes s2e2 
+spiderman 3 ita 
+heroes 
+steven wilson 
+daily show 
+colbert 
+colbert.report.10.22 
+no-man 
+twilight zone 
+i simpsons 
+i simpsons ita 
+i simpsons 
+history channel 
+traveler 
+shoot 'em up (2007) dvdrip xvid.avisearch files, youtube and liveleak 
+shoot 'em up 
+8mm 
+agnes18 
+arab 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sixth sense 
+resident evil 
+ladies tailor 
+300 ita 
+search files, youtube and liveleak 
+albano 
+cinema 
+rock it 
+search files, youtube and liveleak 
+mal enseñado 
+leo mattioli 
+leo mattioli 
+future weapons 
+katie melua 
+carolina 
+reggeto 
+reggeton 
+pearl jam verona 
+ojalai 
+jesica 
+jesica 
+jesica 
+mal enseñado 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+spiderman 
+numbers 
+search files, youtube and liveleak 
+boobs 
+search files, youtube and liveleak 
+muppets 
+muppet 
+300 
+osx mac 
+search files, youtube and liveleak 
+beatles 
+harrison 
+vista 
+antivirus 
+antivirus 
+greg trooper 
+californication 
+webcam 
+nokia 
+baby when the light 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+3d 
+mature 
+crank that 
+utility 
+apologize 
+west stronger 
+*.iso 
+incest 
+*.iso 
+madonna 
+search files, youtube and liveleak 
+naruto 
+naruto 
+nude 
+xbox 360 
+dave berry 
+ros stewart 
+rod stewart 
+family guy 6 
+lost 
+lost 
+heroes 5 
+beppie kortekaas 
+zwartboek 
+paul de leeuw 
+paul de leeuw 
+paul de leeuw 
+kim carson 
+kim carson 
+blowjob compilation 
+blowjob compilation 
+blow job compilation 
+blow job 
+saw 4 
+compilation 
+pedo 
+transormer 
+transformer 
+transformer ita 
+pearl 
+hou 
+cheed 
+coheed 
+sex 2007 
+pearl 
+pearl 
+xxx 2007 
+orgy 2007 
+plain white 
+james blunt 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+musica orientale 
+orientale 
+nicchigni 
+genere 
+traveler 
+mac osx 
+mac osx 
+mac osx 
+mac osx 
+mac osx 
+mac osx 
+xvid 
+six feet under 
+six feet under 2 
+search files, youtube and liveleak 
+mac osx 
+mac osx 
+tribler 
+cold duck complex 
+ahmad 
+xxx 
+pope town 
+smokin 
+search files, youtube and liveleak 
+musa 
+prison break 
+search files, youtube and liveleak 
+jorge falconcantante 
+jorge falconcantante 
+jorge falconcantante 
+private castings 
+woodward 
+casting 
+private castings 
+frisky dingo 
+liz vicious 
+coheed 
+acoustic guitar 
+heroes 
+magic tricks 
+oring 
+jenna jamison 
+jenna jamison 
+dr. dre 
+eminem 
+eminem 
+blowjob 
+fuck 
+kaiji 
+aika 
+little non 
+power of balance 
+1984 
+burst city 
+hikage 
+star club 
+knyacki 
+knyacki 
+sandy mp3 
+sandy 
+partition magic 
+billboard 
+thomas the train 
+.avi 
+alex jones 
+wiso 
+internet tv 
+dj 
+heroes 
+trailer 
+trailer 
+colbert 
+ratatouille 
+search files, youtube and liveleak 
+jessica ross 
+anal 
+pregnant 
+lattex 
+jessica ross 
+elena grimaldi 
+sonia eyes 
+rapes 
+james blunt 
+bizzare 
+bizzare 
+dildo 
+toys 
+hungarian 
+magyar 
+hungary 
+giant 
+xxx 
+anna 
+pink prison 
+prison 
+pov 
+hard 
+bdsm 
+lesbian 
+fist 
+metallica 
+metallica 
+metallica 
+chamical brothers 
+rush hour 3 
+sex 
+priya rai 
+priya rai 
+amy reid 
+lela star 
+jenna haze 
+blowjob 
+rush hour 3 
+ita 
+priolo 
+troia 
+prison break so3e05 
+prison break so3 
+prison break so3 
+search files, youtube and liveleak 
+nero 8 keygen. 
+dvd ita 
+lil wayne 
+lil wayne 
+lil wayne 
+search files, youtube and liveleak 
+lil wayne 
+search files, youtube and liveleak 
+photo 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+nero 8 keygen. 
+nero 8 keygen. 
+nero 8 keygen. 
+birdman 
+nero 8 
+birdman 
+birdman 
+search all filesswinger 
+swinger 
+fisting 
+fisting 
+fisting 
+virtually 
+devil woman 
+painkiller 
+painkiller 
+ricky gervais 
+painkiller 
+painkiller 
+language tribler 
+boogerd 
+asimo 
+motown 
+dance 
+faithless 
+femjoy 
+memory 
+nl subs 300 
+across the universe 
+prison break s3e1 
+prison break s3e2 
+prison break s3e1 
+jiban 
+jiban 
+angel dark 
+angeldark 
+prison break s3e3 
+angel dark 
+dark 
+teagan 
+prison break s3e3 
+sasha grey 
+madness 
+vivid 
+the haunted 
+vivid 
+friends 
+jesse jane 
+keutenberg 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+facial 
+cum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+teagan 
+the who 
+take that 
+tears for fears 
+adam ant 
+housemartins 
+gaisha 
+geisha 
+neverne bebe 
+arrested development 
+rihanna 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+101 80s hits 
+simple 
+incest 
+search files, youtube and liveleak 
+momo 
+madonna 
+saw 4 
+saw 4 
+exdesi 
+search files, youtube and liveleak 
+l'ultimo inquisitore 
+l'ultimo inquisitore 
+ita 
+hairy 
+ita catwoman 
+divx ita 
+bd company 
+nude 
+superbad 
+superbad 
+artickmonkeys 
+articmonkeys 
+swits 
+swits 
+genesis 
+arctic monkeys 
+coda 1.0.5 
+search files, youtube and liveleak 
+babysitters 
+sachin deb burman 
+tribler 
+babysitters 
+babysitters teaches lolita 
+lolita 
+the oc 
+pes 6 
+natale a new york 
+al di là di tutti i limiti 
+lolita 
+chappelle 
+tribler 
+porn 
+carnhy 
+carny 
+full game 
+blades of glory 
+family guy 5 
+family guy 5 
+family guy 5 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+guns and roses 
+biggie smals 
+big 
+notourios big 
+rob zombie 
+lif ather death 
+life ather death 
+shoot em up 
+rob zombie 
+rob zombie live 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+chad baker 
+chat baker 
+yr 
+lolita 
+search files, youtube and liveleak 
+lolita 
+pocket pc 
+pocket pc spiele 
+lolita 
+lolita 
+homemade 
+amateur 
+south park ita 
+elephant man 
+beenie man 
+ita 2007 
+albanien 
+dr house 
+dr. house 
+house.s04 
+house 
+airpcap 
+mario barth 
+james blunt 
+bruce springsteen 
+ratpack 
+nightwish 
+monrose 
+coda 1.0.5 
+mark knopfler 
+rihanna 
+annett louisan 
+annie lennox 
+soundtrack 
+ich + ich 
+bob dylan 
+nelly furtado 
+timbaland 
+david gilmour 
+linkin park 
+jennifer lopez 
+mika 
+benedetti dall signore 
+benedetti 
+benedetti 
+enzo greggio 
+rammstein 
+rostenstolz 
+rosenstolz 
+reinhard mey 
+reinhard may 
+the housemartins 
+katie melua 
+laith al-deen 
+laith al deen 
+vertical horizon 
+simpsons 
+vertical horizon 
+soundtrack 
+roxio 
+nero 
+nl 
+mandriva 
+suse 
+driver magic 
+driver magic 
+privat 
+privat 
+driver magic 
+nackt 
+blonde raped 
+driver magic 
+nackt muschi 
+frau 
+led zepelin 
+led zeppelin 
+i simpson il film 
+auto 
+k 2000 
+haite 
+ginger lynn 
+nackt 
+il padrino 
+hey you pink floyd 
+il padrino il film 
+roxio 
+sex 
+nackt 
+sex 
+homepage 
+altenkessel 
+hey you 
+nashville 
+south park 
+neverne bebe 
+heroes 
+heroes 1x18 ita 
+heroes 1 18 ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+big tit 
+big tit 
+heroes 
+xxx tit 
+heroes 
+mark ronson 
+pink 
+lemon popcycle 
+lemon popsicle 
+pink 
+pink 
+home made gangbang 
+gangbang 
+gangbang 
+gangbang 
+gangbang 
+gangbang 
+gangbang 
+kameleon 
+kameleon schippers 
+nl subs 
+exagerado 
+lolita 
+exagerado - cazuza 
+animusic 
+masturbate 
+madonna 
+small jackets 
+teendreams 
+capitan harlock 
+capitan harlock 
+capitan harlock 
+capitan harlock 
+capitan harlock 
+i simpsons film 
+teendreams 
+i simpsons il film 
+simpsons il film 
+simpsons in italiano 
+i simpsons in italiano 
+ifeelmyself 
+i simpsons 
+i simpsons.italia 
+simpsons.italia 
+simpsons 
+valentino rossi 
+valentinorossi 
+catacomb 
+film.italia 
+film italia 
+abby winters 
+film italia 
+film italia 
+film italia 
+telefilm italia 
+telefilm 
+mac os x leopard 
+search files, youtube and liveleak 
+resident evil 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jennefer lopez 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+j. lopez 
+lopez 
+friends hot mom 
+sufi 
+sufi 
+naughty america 
+big boobs 
+big tits 
+psp 
+fuck tit 
+teachers pet 
+gay 
+gay sex 
+gay hard 
+gay sex movies 
+avatar 
+bending battle 
+movies 
+porn movies 
+gay sex 
+mago de oz 
+ita 2007 spiderman 3 
+spiderman 3 ita 
+cartoni animati 
+cartoni animati 
+camera cafè 
+camera cafè 
+ita 
+camera ita 
+south park ita 
+tropa de elite 
+avatar 
+two for the money 
+recoil sub ambiente 
+recoil sub ambient 
+recoil sub 
+busty dusty 
+sarennalee 
+big clit 
+brother bear 
+brother bear 2 
+weeds 
+hellsing 
+hellsing 
+sex 
+bookworm 
+bookworm bitches 
+bookworm bitches 
+bookworm bitches 
+bookworm 
+girl 
+office 
+30 rock 
+kid nation 
+kid nation 5 
+teen 
+modest mouse 
+search files, youtube and liveleak 
+beatles 
+the pigeon detectives 
+momsonfilm 
+banged 
+the pigeon detectives 
+ally mcbeal 
+barton fink 
+blu notte 
+no country for old men 
+no country for old men 
+no country for old men 
+no country for old men 
+no country for old men 
+beatles 
+beatles 
+beatles 
+megaherz 
+nazis 
+search files, youtube and liveleak 
+weißwurscht 
+weißwurscht 
+weißwurscht 
+beatles 
+*ffice 
+ffice 
+wurscht 
+halloween 
+sexy 
+busty 
+big tits 
+boobs 
+fuck 
+titty 
+titty fuck 
+homemade 
+homemade porn 
+porn 
+porn 
+porn 
+porn 
+porn 
+search files, youtube and liveleak 
+* 
+kaena 
+internet tv 
+middeleeuwen 
+veldslagen 
+battlefield 
+veldslagen 
+veldslagen 
+battlefield 
+bandiera rossa 
+faccetta nera 
+kiss the girl 
+ashley tisdale 
+crazy frog, pop corn 
+crazy frog 
+crazy frog pop corn 
+crazy frog 
+vagabondo 
+io vagabondo che son io 
+vagabondo remix 
+provenzano 
+provenzano dj 
+gabri ponte 
+gabri ponte 
+gabri ponte 
+gabri ponte 
+danny boy 
+girl friend 
+girl friend 
+my old kentucky home 
+my old kentucky home 
+nashville 
+la gazolina 
+chemical brothers 
+la gazolina 
+it il pagliaccio 
+it il pagliaccio 
+it il pagliaccio 
+it il pagliaccio 
+it il pagliaccio 
+gem boy 
+gerrard 
+pavarotti 
+borat 
+full metal jacket 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+renato zero 
+rem 
+pirati dei caraibi 
+pirati dei caraibi 
+pirates of caribbean 
+kaylani lei 
+sabrine maui 
+jdj 
+ocean's 13 
+ocean's 13 
+ocean's 13 
+search files, youtube and liveleak 
+video kings 
+naruto ita 
+brazzers 
+* 
+riosin murphy 
+roisin murphy 
+pirates of caribbean 
+crazy frog 
+leopard 
+back4win 
+fifa 08 crack 
+swe 
+maryha carey 
+naruto dub episode 77 
+preteen 
+xp fast 
+fast 
+phil collins 
+mathieu 
+matthieu 
+azkenax 
+orgy 
+gordon haskell 
+gordon haskell 
+anal 
+love 
+time lover 
+time 
+elton 
+movi 
+movi 
+steve wonder 
+movi 
+resident evil 
+resident evil extinction 
+carlito's way 
+kaede 
+natasha bedingfielde 
+sean kingston 
+fergie 
+duncan dhu 
+jimi blue 
+im lovin 
+im lovin jimi blue 
+im lovin jimi blue 
+jmij blue im lovin 
+saturday night live 
+avril 
+saturday night live 
+vitor e leo 
+vitor e leo 
+vitor e leo 
+vitor e leo 
+u2 
+paul mcartney 
+mango 
+live and let die 
+brazzer 
+brazzer 
+mango 
+mango 
+mango 
+enola gay 
+search all files 
+search all files 
+mango 
+ti amo cosi 
+ti amo cosi 
+ti amo cosi 
+telugu 
+mango 
+mango 
+mango ti amo cosi' 
+mango ti amo cosi' 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tits 
+atb 
+sophie b 
+sophie b howkins 
+prison break ita 
+dschingiskhan 
+nerd porn 
+nerdy porn 
+nerdy porn 
+nerd 
+nerds 
+naked nerds 
+naked nerd 
+anna 
+voyeur - hidden camera 
+220 kg bankdrücken 
+220 kg bankdrücken 
+220 kg bankdrücken 
+220 kg bankdrücken 
+220 kg bankdrücken 
+220 kg bankdrücken 
+220 kg bankdrücken 
+tiger 
+cheetah 
+lions men 
+220 kg bankdrücken 
+bankdrücken 
+rambo 4 trailer 
+search files, youtube and liveleak 
+genesis 
+genesis 
+sting 
+winrar 
+still alive 
+dan le sac 
+desperate housewifes 
+search all files 
+desperate housewifes 
+pc 
+fifa 2005 
+pc fifa 2005 
+desperate housewifes 
+desperate housewifes 
+desperate housewifes 
+full pc games 
+pc gallop racer 
+pc gallop racer 
+pc gallop racer 
+pc gallop racer 
+os x 
+muse 
+produkt key windows xp 
+produkt key windows xp 
+norton ghost 
+my friends hot mom 
+gabilondo 
+gabilondo 24 
+gabilondo 23 
+sailormoon 
+pass the marijuana 
+baker 
+prodigy 
+tits 
+bad brains 
+faithles 
+faithles 
+btra 
+horse 
+horse 
+chemical brothers 
+dog 
+elton 
+chemical brothers 
+elton 
+superbad 
+shoot em up 
+masturbacion 
+firts time 
+firts time 
+firts time 
+firts time 
+firts time 
+portal 
+teens 
+masturbacion 
+lost 
+firts time 
+girls 
+wild 
+girls 
+mp3 
+shakira 
+shak 
+u2 
+hairy 
+tv internet 
+fiction plane 
+bitter forces and lame race horses 
+cs3 
+elo 
+elooffice 
+elo office 
+wiso börse 
+harry potter 
+microsoft office 2007 
+hörbücher 
+schuld und sühne 
+donna leon 
+lluis llach 
+adobe 
+adobe acrobat 
+lluis llach 
+italiensich 
+latein 
+ragtime 
+microsoft office 
+italienisch 
+hörbuch 
+adobe 
+adobe acrobat 
+lluis llach 
+coldplay 
+coldplay 
+saegusa 
+saegusa 
+tropa de elite 
+teen 
+allamvizsga 
+harry potter und der 
+donna leon 
+elo 
+elo digital 
+elo office 
+microsoft office 
+microsoft 
+cecilia bartholy 
+coldplay 
+coldplay 
+coldplay 
+moby 
+moby 
+adobe acrobat 
+adobe photoshoü 
+adobe photoshop 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the alamo 
+the alamo 
+the alamo 
+the alamo 
+turtles 
+purple turtles 
+pedofilian 
+naturist 
+naturist 
+naturist 
+naturist 
+naturist 
+dancing 
+dancing 
+dancing 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+arctic tale 
+2007 
+2007 
+2007 
+2007 
+funny 
+puppy 
+dog 
+eating 
+dragon 
+simpsons 
+simpsons 
+jesus 
+a 
+2007 
+2007 
+1408 
+housewifes 
+lesbian 
+interkey 
+rejected cartoons 
+groove 
+go 
+rejected cartoons 
+rejected cartoons 
+teen 
+lolita 
+pedo 
+preteen 
+logos 
+libronix 
+libronix 
+hebrew 
+greek 
+biblical 
+bart 
+nusamamp 
+nusammap 
+nusammap 
+interkey 
+southpark 
+thumb 
+emperor's new groove 
+tengo thumb 
+tengo 
+tengo 
+tengo 
+tengo thumb 
+search files, youtube and liveleak 
+11 
+ratta 
+rattatouille 
+xmc 
+mxc 
+deadliest catch 
+pthc 
+naughty office 
+scrubs 
+rammstein 
+naughty teachers 
+teachers pet 
+teachers 
+teachers id like to fuck 
+scrubs 
+mashing pumpkins halloween 
+mashing pumpkins hallowe'en 
+mashing pumpkins hallowe'en mashup 
+the bridge at remagen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+eminem 
+james uk band 
+james uk band 
+paul potts 
+modest mouse 
+outkast 
+modest mouse 
+modest mouse 
+modest mouse 
+modest mouse 
+modest mouse 
+modest mouse 
+modest mouse 
+modest mouse 
+psycodrama 
+psychodrama 
+psych odrama 
+psycho drama 
+trillian password recovery 
+instant messenger password recovery 
+how to recover trillian passwords 
+trillian 
+radiohead 
+radiohead 
+radiohead 
+sex 
+naked 
+screensaver 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+program 
+search files, youtube and liveleak 
+radiohead 
+game 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+radiohead live 
+radiohead live 
+stereophonics you gotta go there to come back 
+filmek 
+stereophonics 
+placebo 
+the spinto band 
+radiohead live 
+belle sebastian 
+kings of leon live 
+kings of leon 
+lma manager 
+trillian 
+scarface 
+starcraft 
+dipset 
+radiohead in rainbows 
+radiohead in rainbows 
+me 
+lily thai 
+takedown 
+coldplay 
+coldplay 
+coldplay 
+coldplay 
+coldplay 
+stereophonics 
+mars volta 
+mars volta frances the mute 
+mars volta amputechture 
+mars volta live 
+mars volta ep 
+mars volta live 
+omar rodriguez 
+se dice bisonte no bufalo 
+mars volta deloused 
+mars volta comatorium 
+stereophonics 
+oasis 
+the deadly snakes 
+zbrush 3 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+football manger 2008 
+football mangaer 2008 
+football manager 2008 
+lma manager 2008 
+lma manager 
+meet the twins 
+where the boys 
+all 
+footballl manager 2008 
+ja so warn's 
+utube 
+utube 
+utube 
+utube 
+wmv 
+wmv 720 
+footballl manager 2008 
+rattatoi 
+ratatouille 
+ratatouille[2007]dvdrip[eng]-axxo 
+in the army now 
+ratatouille-axxo 
+ratatouille swe 
+missoni 
+tits 
+footballl manager 2008 
+in the army now status quo 
+sofa 
+breast 
+xbox 360 
+xbox 360 
+figa 
+.iso 
+tittyland 
+ass 
+uk sluts 
+uk amature 
+uk amateur 
+hot fuzz 720p 
+hot fuzz 
+slip 
+hot fuzz dvdr 
+pompini 
+bocchini 
+bryan adams 
+perizoma 
+roberta missoni 
+bad manners 
+transformers 720 
+eva 
+eva enger 
+eva 
+eva enger 
+luana 
+boobs 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+niples 
+carol 
+carol gold 
+starch 
+oil 
+oil 
+clipfischer 
+clipfisher 
+clipfisher 
+ratatouille 
+ratatouille 
+ratatouille swe 
+ratatouille dvd 
+ratatouille dvdr 
+wmv 720 
+transformers 720 
+boston tea 
+boston tea 
+boston 
+boston tea 
+villaricca 
+villaricca kartattak 
+napoli 
+raffaello 
+raffaello 
+gigi d'alessio 
+villaricca kartattak 
+i simpson 
+napoli 
+roma 
+eros ramazzotti 
+raffaello 
+paris hilton 
+my 
+15 anni 
+15 anni 
+15 
+dragon ball 
+raffaello 
+gone baby gone 
+raffaello 
+raffaello 
+prison break complete 
+bitreactor.to 
+www.bitreactor.to 
+"prison break" 
+kein bund fürs leben 
+terminal 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mccartney blackbird 
+the streets 
+beatles-blackbird 
+beatles-blackbird 
+its too late 
+the streets 
+the sims 
+the streets 
+oh no! 
+oh no oh my 
+spinto band 
+search files, youtube and liveleak 
+mars volta de loused 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mars volta amputechture 
+mars volta live 
+mars volta ballroom 
+monrose 
+dj size 
+mars volta 
+blur 
+oasis live 
+oasis live knebworth 
+oasis 
+oasis 
+oasis knebworth 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+omar rodriguez 
+mars volta de loused 
+highlander 
+eminem 
+eminem 
+fütisent 
+fütisent 
+fütisent 
+fütisent 
+fuftisänt 
+eminäm 
+oasis live 
+modest mouse 
+porno 
+bagno 
+hitman 2 
+curtis 
+video 
+curtis 
+n dubz 
+bionda 
+blonde 
+russel peter 
+pigon detectives 
+pijpen 
+lma manager 
+lul 
+football manager 
+bloot 
+lma managr 
+lma manager 
+world racing 2 
+world racing 
+world racing 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+abba 
+fate/stay night 
+fate/stay night 
+fate/stay night 
+fate/stay night, .avi 
+gossip 
+standing in the way of control 
+fate/stay night 
+artic monkeys 
+gossip 
+girls, kiss 
+favourite worst nightmare 
+gossip 
+standing in the way of control 
+the sims 
+sims 
+lucy pinder 
+porn 
+ipod 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+menice too society 
+menice too society 
+hentai 
+cartoon sex 
+football manager 
+medium 
+anita meier 
+anita meijer 
+anita meyer 
+vinyl 
+meier 
+full 
+the simspons 
+simspsons 
+the simpsons movie 
+teen 
+preteen 
+haze 
+aurora 
+snow 
+medium 
+roy 
+roy 
+roy 
+r@y 
+search files, youtube and liveleak 
+xxx 
+brothers sister 
+teen 
+anal 
+vegas 
+model 
+music 
+led zeppelin stairway 
+my name is earl 
+gigolo 
+4400 
+dirt 
+mac os x leopard 
+nader 
+windows xp 
+office 2007 
+sasha train 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kiss 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+k 
+ipartition 
+jericho 
+boston 
+myggan 
+jericho 
+bioshock 
+quake 
+quake 
+ubuntu 7.10 dvd 
+rush hour 3 
+bones 
+bones season 3 
+tv 
+online tv 
+taiwan online tv 
+taiwan tv 
+chinese tv 
+chinese tv 
+wizard of oz 
+wizard of oz 
+rush hour 3 
+ong bak 
+mo5712 
+vdo dayton 
+leopard 
+300 
+leopard 
+leopard 
+leopard 
+batmam 
+search files, youtube and liveleak 
+allie 
+girlfriend 
+sex 
+sex 
+msn 
+msn 
+msn 
+msn 
+girl 
+babe 
+lil troy 
+british 
+lil troy 
+all murray 
+xxx 
+rocco 
+asstraffic 
+xxx 
+teen 
+all murray 
+teenage 
+sex teenage 
+honey 
+thai 
+thai vedio 
+thai vedio 
+thai vedio 
+teenage sex 
+sex 
+zion 
+addison-wesley 
+guys 
+zion 
+wisin 
+daddy yankee 
+search files, youtube and liveleak 
+pet na tupi 
+filmes 
+preteen 
+pedo 
+10yo 
+10yo 
+10yo 
+10yo 
+10yo 
+10yo 
+preteen 
+preteen 
+preteen 
+preteen 
+preteen 
+preteen 
+preteen 
+teen 
+teens 
+kiddie 
+teen 
+alternative 
+porn 
+pron 
+porn 
+sex 
+google 
+google 
+i now pronounce you chuck and larry 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+antwerpen 
+car 
+jazzsinger 
+air crash 
+air crash 
+air crash 
+air crash 
+"left luggage" 
+nadia ali 
+time after time 
+vrt 
+time after time 
+animalsex 
+hotel californa 
+animalsex 
+animalsex 
+hotel california 
+animalsex 
+dog love 
+dog cum 
+suck dog 
+asian porn 
+asian porn 
+extrem sex 
+jenna jameson 
+animal sex 
+24 
+animal cum 
+love cum 
+pompini 
+pompini 
+pompini 
+pompini 
+pompino 
+pompino 
+facial 
+japan 
+asian 
+monday noght raw 22 october 2007 
+suck 
+dvd ita 
+bus 
+train 
+monday noght raw 22 october 2007 
+abuse 
+wwe monday night raw - october 22 2007 
+private 
+big cock 
+animalsex 
+animal sex 
+bestiality 
+dog love 
+dog xxx 
+the simpsons movie 
+dog cum 
+halloween 
+halloween h2o 
+halloween h20 
+halloween resurrection 
+suck dog 
+sex 
+hello 
+akon 
+suck horse 
+the protector - la legge del muay thai 
+sex horse 
+divx ita - the protector 
+video films 
+video films 
+18 
+animal 
+gay brutal 
+gay brutal 
+gay rape 
+rape 
+mature 
+mature xxx 
+little 
+suck cock 
+balls of fury 
+freeporn.to 
+freeporn 
+disney 
+love 
+gay 
+call of duty 
+kloaka 
+call of duty 4 
+cloaca 
+babe 
+call of duty3 
+call of duty 
+young 
+vista 
+girls kiss 
+call of duty pc dvd 
+call of duty pc 
+young 
+colin mc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+xxx italian 
+xxx ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+zoofgilia 
+zoofilia 
+zoofilia 
+sex 
+oral gay 
+sperm 
+bukkake 
+bukkake xxx 
+xxx 
+reaping 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+asian 
+j 
+polo hofer 
+hofer 
+hofer 
+adult 
+adult 
+cum oral 
+xxx oral 
+xxx 
+ryan dan 
+cazzo 
+gigi 
+gigi 
+uncensored 
+katie melua 
+blowjob 
+babes 
+xxx 
+triber 
+bob dylan 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+abba 
+axxo ultima 
+andre nikatini 
+andre nickatini 
+dre dog 
+andre nickatina 
+abba 
+bookmaker 
+abba 
+becoming jane 
+greys anatomy 
+greys anatomy season 2 
+abba 
+abba 
+abba 
+greys anatomy season 2 
+bones sason 3 
+bones season 3 
+bones season 3 
+bones season 3 
+bones season 3 
+ghost whisperer 
+avatar 
+avatar 307 
+abba 
+abba 
+abba 
+abba 
+sony vaio pcg-6h1m 
+eighteen lust 
+sony vaio drivers 
+cuby en blizzard 
+harry muskee 
+pcg-6h1m 
+pcg-6h1m recover 
+gay 
+blondie 
+search files, youtube and liveleak 
+blondie 
+blondie 
+q65 
+chris rea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+facialabuse 
+clit 
+chris rea 
+tina tunner 
+tina tuner 
+porcupine tree 
+inventor 
+inventor 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+abba 
+golden earing 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+golden earing 
+saw 4 
+eros tazenda 
+eros 
+domo mia 
+negramaro 
+csi ita 
+csi las vegas 7x ita 
+csi las vegas 7x ita 
+ita smallville 6x 
+pro evolution soccer 2008 
+saint 
+silvia saint 
+ps2 ita pro evolution soccer 2008 
+divx ita 
+owen hart 
+froukje de both 
+superpeer 
+wwf 
+froukje de both 
+exes and ohs 
+morandi 
+sassari 
+carbonia 
+cagliari 
+sardegna 
+james brown 
+jenna 
+falcon 
+porn full dvd 
+russian 
+drunk 
+desiree 
+zwartboek 
+deep inside 
+sex 
+dvd taboo 
+polish porn 
+polish sex 
+swedish sex 
+fucking 
+redtube 
+spy 
+de beatles 
+rolling stones 
+rolling stones 
+rolling stones 
+nude 
+divx ita 
+ll coll j 
+ll coll j 
+jennifer lopez 
+avril lavigne 
+el orfanato 
+mana 
+the cats 
+mana 
+cats 
+laura pausini 
+orfanato 
+cats 
+laura pausini 
+cats 
+laura pausini 
+laura pausini 
+laura pausini 
+abba 
+abba 
+luv 
+laura pausini 
+laura pausini 
+laura pausini 
+kaly malou 
+kaly malou 
+katy malou 
+katy malu 
+inventor 
+madonna 
+inventor 
+30 days,of night 2007 
+aarts 
+aarts 
+football manager 2008[multi 9] 
+football manager 2008 [multi 9] 
+football manager 2008 
+adobe 
+moana pozzi 
+autodesk inventor 
+football manager 2008 [multi-9] 
+football manager 2008 
+search all files 
+search all files 
+resident evil 3 
+resident evil 3 ita 
+resident evil 3 
+the simpsons 
+sugababes 
+umbrella 
+vomit 
+diamond 
+hitzone 43 
+hitzone 41 
+hitzone 41 
+tomtom 
+mozart 
+britten 
+hitzone 41 
+teen 
+wii 
+wii mario 
+3 allegri ragazzi morti 
+3 allegri ragazzi morti 
+brian regan 
+wii mario 
+stevie ann 
+stevie ann 
+stevie ann 
+closer to the heart 
+closer to the heart ann 
+closer to the heart 
+damien rice 
+diana krall 
+damien rice 
+john legend 
+avid 
+avid xpress 
+michael clayton 
+search files, youtube and liveleak 
+marielle 
+madonna 
+jan keizer 
+erik huizebosch 
+erik huizebosch 
+erik huizebosch 
+erik huizebosch 
+juanes la vida 
+zodiac 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tomtom6 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+all videos 
+best videos 
+best videos 
+best videos 
+transformers 
+best videos 
+best videos 
+best videos 
+best videos 
+transformers 720p 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+zodiac 
+search files, youtube and liveleak 
+720p 
+transformers 720p 
+007 james bond ita 
+007 james bond 
+007 james bond 
+007 james bond 
+transformers 720p 
+007 vendetta privata 
+007 vendetta privata 
+007 vendetta privata 
+007 vendetta privata 
+007 vendetta privata 
+vendetta privata 
+mission impossible 
+mission impossible ita 
+mission impossible ita 
+mission impossible ita 
+mission impossible 
+bebe lilly 
+bebe lilly 
+software ita 
+transformers g1 
+adobe photoshop 
+adobe photoshop 
+donovan 
+donovan 
+donovan 
+jan smid 
+jan smid 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jan smid 
+prison break 
+jan smid 
+jan smid 
+maria sharapova 
+jan smid 
+jan smid 
+idols 
+monica bellucci 
+rod de nijs 
+monica 
+sandra bullock 
+monica bellucci 
+rob de nijs 
+maria sharapova 
+sex 
+maria sharapova nude 
+maria sharapova nude 
+monica bellucci 
+monica bellucci 
+monica bellucci 
+maria sharapova' 
+maria sharapova 
+3 dsexvilla 
+3d sexvilla 
+hazes 
+eric steckel 
+eric steckel 
+eric steckel 
+eric clapton old love 
+cp 
+nuggets 
+cbt 
+cbt 
+behind 
+ef tale 
+cbt 
+grey's 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+santana 
+lennon vs 2pac 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+youtube 
+search files, youtube and liveleak 
+kameleon 
+kameleon 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+youtube 
+kruistocht 
+divx full metal jaket 
+divx full metal jaket 
+kruistocht 
+bourne 
+dvd 
+dvd 
+2007 
+2007 
+xx 
+rip 
+clemson maryland 
+paris 
+polaco tiraera 
+polaco 
+polaco 
+polaco 
+don omar 
+good luck chuck 
+good luck chuck 
+vida guerra 
+vida guerra 
+vida guerra 
+ho voglia di te 
+modà 
+pbs 
+north korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fun 
+the bourn ultimatum 
+die hard 
+die hard ita 
+the bourn ultimatum ita 
+residen evil ita 
+spiderman 3 dvd ita 
+spiderman 3 dvd ita 
+spiderman 3 dvd ita 
+dire straits 
+exe 
+nikki anderson 
+federica guttuso 
+guttuso 
+tribler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+atb-in love with the dj 
+umbrella 
+wiedźmin 
+witcher 
+heartbreak 
+jericho 
+jericho 1 
+linux 
+search files, youtube and liveleak 
+heroes 1 
+heroes so1 
+heroes s01 
+search files, youtube and liveleak 
+agepe 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+em 
+axo 
+rus 
+search files 
+search video files 
+search fims 
+search films 
+search films xxx 
+search manga 
+search manga/hentai 
+search anime 
+search embrace of the vampire 
+search files, youtube and liveleak 
+search all 
+search all movies 
+search paris hilton 
+search sextapes 
+search alyssa milano 
+search star wars 
+grey's anatomy 
+saxon 
+bondage 
+saxon 
+saxon 
+bondage spanked 
+bondage 
+liveleak 
+mr brooks 
+jenna jameson 
+porntube 
+tom grant 
+tom grant 
+tom grant 
+porntube 
+porntube 
+sting 
+porntube 
+porntube 
+porntube 
+xxx 
+eva cassidy 
+garage band 
+sarenna lee 
+sarenna lee 
+sarenna lee 
+sarenna lee 
+julia ann 
+julia ann 
+julia ann 
+julia ann 
+julia ann 
+ava lustra 
+ava lustra 
+ava lustra 
+ava lustra 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+sting 
+bill evans 
+julia ann 
+4 non blond 
+4 non blond 
+4 non blonds 
+4 non blonds 
+pthc 
+gilmore girls 
+gilmore girls - season 4 episode 16 
+girls 
+yo 
+10yo 
+nablot 
+smoke 
+incest 
+peachez 
+natasha 
+estonia 
+young 
+peachez 
+prison break 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+campeones hacia el mundial 
+abby 
+endig crono trigger 
+preteen 
+junge 
+omanko 
+ashampoo 
+sammy 
+vst 
+nuendo 
+720p 
+ps2 ita 
+isreal kamakawiwo`ole 
+prostreet 
+fifa 2008 
+ps2 
+romero 
+ps2 
+over the rainbow 
+jimy hendrix 
+zoo 
+animal sex 
+bakers dozen 
+mp3 
+alektra blue 
+carmella bing 
+pov 
+slut 
+irisch 
+mp3 
+the shadows 
+facial 
+katsumi 
+flowing tide 
+nashville 
+i am easy 
+nashville, i am easy 
+nashville, i am easy 
+carradine, nashville, i am easy 
+carradine, nashville, i am easy 
+carradine, nashville, i am easy 
+nashville, i am easy 
+nashville, i am easy 
+the book of secrets 
+the book of secrets 
+the book of secrets 
+the book of secrets 
+genesis, supper's ready 
+the book of secrets 
+the beatles 
+lesbian 
+jenna jameson 
+crossroads of the celts 
+wild mountain thyme 
+the visit 
+belladonna 
+taryn thomas 
+the mask and mirror 
+service animals 
+fantastic celtic harp dreams 
+rough guide 
+strap attack 
+parallel dreams 
+strap attack 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tomb raider anniversary 
+tomb raider anniversary saves 
+trani 
+simona ventura 
+nanni moretti 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+ich + ich 
+nickelback 
+maria mena 
+plain white t`s 
+avril lavigne 
+sex 
+tomtom per cellulare 
+tomtom per cellulare 
+tomtom 
+caro diario 
+e2 
+eros ramazzotti 2007 
+sugababes 
+colbie caillat 
+slumber party 
+ocean's 11 
+maria mena 
+plain white t`s 
+plain white t`s 
+plain white 
+blow me sandwich 
+animal sex 
+pron 
+weezer 
+weezer 
+weezer 
+weezer 
+weezer 
+weezer 
+weezer 
+weeze 
+weezer 
+weezer 
+weezer 
+weathus 
+blink 182 
+blink 182 
+blink 182 
+blink 182 
+blink 182 
+blink 182 
+blink182 
+blink182 
+blink182 
+blink182 
+blink182 
+blink 182 
+blink 182 
+enema of the state 
+the cut 
+the cut love 
+love 
+sonic temple 
+weathus 
+the weathus 
+the wheatus 
+the wheatus 
+wheatus 
+goo goo dolls 
+goo goo dolls january friend 
+goo goo dolls 
+hard 
+film hard 
+film hard 
+film porno 
+scary movie 4 ita 
+ratatoille 
+simson 
+gta san andreas 
+gimpl 
+boerenkapel 
+gimpl 
+office 
+medicopter 
+ps2 game 
+nou toe maar 
+barletta 
+search files, youtube and liveleak 
+ramsey kitchen 
+ruvo di puglia 
+nico 
+dido 
+net ff anders 
+bari 
+feest kapel 
+stargate atlantis 04 
+feestkapel 
+noutoemaar 
+stargate atlantis 04 5 
+dvs 
+dvs katwijk 
+captain 
+captain general 
+concordia leiden 
+exelcior delft 
+frank boeyen groep 
+concour sciedam 
+frank boeien 
+concours sciedam 
+love letter 
+party 
+amateur 
+couple 
+sex tape 
+april 
+linux 
+nokia 
+nokia 770 
+nokia 800 
+maemo 
+mathland 
+disney magic 
+disney cartoon 
+hidden 
+security 
+system of a down 
+paradijs 
+system of a down steal this album 
+golden ratio 
+"unsquare dance" 
+"unsqueare dance" 
+"brubeck" 
+security cam 
+security cam 
+ajax 
+freeware 
+linux 
+tribler 
+the oc 
+cute 
+security cam 
+amateur 
+girlfriend 
+1408 
+1408 
+1408 
+search files, youtube and liveleak 
+saw 4 
+saw 3 
+groupen 
+group 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cocaina 
+cocaina 
+zombi 
+la caccia 
+la caccia 
+pc 
+missione parigi 
+sluts 
+pc 
+missione parigi 
+missione parigi 
+missione parigi 
+missione parigi 
+ratatouille 
+cemento armato 
+cemento armato 
+cemento armato 
+ratatouille 
+io non sono quì 
+in questo mondo libero 
+distretto di polizia 7 
+distretto di polizia 7 
+dutchman 
+dutchman 
+dutchman 
+quel treno per juma 
+quel treno per yuma 
+not going out 
+ugly betty 
+osx 
+suburban shootout 
+voyeur 
+hidden 
+"donna summer" 
+"donna summer" 
+asian 
+gallactica 3 season 
+college 
+erotic 
+chinese 
+porno 
+baccara yes sir 
+baccara yes sir 
+mickey mouse 
+sesamstraat 
+ernst en bobbie 
+deep purple 
+katie 
+route66 
+vista 
+sigur 
+hostel2 
+burzum 
+tibet 
+himalaya 
+himalaya 
+himalaya 
+voetbal 
+nepal mustang 
+nepal 
+tibet 
+tibet 
+kaylani 
+leanni 
+asian 
+squirt 
+new trolls 
+search files, youtube and liveleak 
+eyes wide shut 
+erotic 
+clit 
+stay alive 
+spiderman 
+stay alive dvd ita 
+eyes wide 
+chained 
+chained 
+chained 
+chained 
+sexy busty teen 
+chained heat 
+sexy busty teen 
+busty teen 
+2061 
+2061 
+2061 
+cubase 
+cubase 
+busty teen hardcore 
+leopard 
+maria de filippi 
+maria de filippi 
+maria de filippi 
+barlettai 
+barletta 
+barletta 
+barletta 
+virgin 
+first time 
+virgin anal 
+speculum 
+gyno 
+search files, youtube and liveleak 
+trondheim 
+cytherea flower 
+cytherea 
+painful 
+korn 
+breast 
+dream theatre 
+dream theatre once in a livetime 
+sim city rush hour 
+dream theatre 
+sim city rush hour 
+hoop dreams 
+dream theatre 
+dream theatre 
+dream theatre 
+rush hour 
+rush hour 
+rush hour 
+rush hour 
+sim city 
+sim city 
+lost 
+beatles 
+teen 
+beatles 
+painful 
+virgin 
+korn 
+farkas 
+the ninth gate 
+eng227-compaq 
+edwars 
+edward 
+scissor 
+shy love 
+call of duty 
+call of duty pc 
+pc game spongebob diner dash 
+ann poll 
+pc game spongebob diner dash 
+pc game spongebob diner dash 
+spongebob diner dash 
+brianna banks 
+spongebob diner dash 
+spongebob diner dash 
+trackmania 
+trackmania 
+trackmania 
+tomtom6 
+trackmania 
+trackmania 
+trackmania 
+trackmania 
+tomtom6 
+dvd-ita 
+dvx 
+dvd-ita 
+jewel foolish games 
+nero7 
+futurama 
+stay alive 
+stay 
+stay alive 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+big fish 
+arthur pieter 
+nazarian arthur 
+adolfo ny salsa 
+pthc 
+kienhuis 
+jan boers 
+boris titulaer 
+boris keep in touch 
+film 
+boris things 
+cat stevens 
+cat stevens 
+boris 
+boris 
+boris 
+boris 
+cat stevens 
+search files, youtube and liveleak 
+turkse meid 
+turkse meid 
+kasbergen 
+turkse chick 
+search files, youtube and liveleak 
+prison break 2x14 ita 
+the messengers 
+norton 
+norton ghost 
+sim city 4 
+party 
+kimberly holland 
+paris hilton 
+party hardcore 
+sim city 4 
+turistas ita 
+horror ita 
+the ring 
+the ring 3 
+invasion 
+smallville 
+smallville 
+madonna 
+acdc 
+ac/dc 
+acdc 
+acdc 
+acdc 
+youtube acdc 
+youtube acdc 
+youtube acdc 
+youtube acdc 
+big fish 
+death proof 
+invasion ita 
+after dark 
+divx ita 
+corel draw 
+die ermordung von jesse 
+import-export 
+import-export 
+28 weeks later 
+28 weeks later ger 
+compilation 
+compilation 
+compilation 
+the messengers ger 
+24 season 
+anime 
+accross the universe 
+across the universe 
+oblivion 
+spears 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fugawi 
+fugawi 
+big fish 
+search files, youtube and liveleak 
+animal sex 
+big fish 
+nick 
+zoey 101 
+zoey 101 
+animal with animal sex 
+paris hilton 
+12 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+14 yr sex 
+15 yr sex 
+16 yr sex 
+17 yr sex 
+17 years old sex 
+10 years old sex 
+11 years old sex 
+12 years old sex 
+13 years old sex 
+divx italia 
+divx ita 
+search files, youtube and liveleak 
+corel draw 
+reason 
+salsa 
+salsa 
+drunken sex orgy 
+reegeton 
+fun 
+gauge 
+jogos moratis 
+jogos mortais 
+jogos mortais 4 
+gavioes da fiel 1976 
+gavioes da fiel 
+gavioes da fiel 
+music 
+the kingdom 
+resident evil 
+hindi movie 
+hindi movie 
+sextet 
+heroes s02e05 
+tivo 
+seria 
+heroes s02e05 
+heroes.s02e05 
+technotronic 
+i cesaroni 
+rebeldes 
+español en nueva york 
+search files, youtube and liveleak 
+the cure 
+video 
+totti 
+search files, youtube and liveleak 
+.mkv 
+mkv 
+pompini 
+sesso 
+paprica 
+paprica 
+hentai 
+pompini 
+pompini 
+breakfast at tiffany 
+sesso 
+sesso 
+moana 
+moana pozzi 
+ard 
+block party 
+the cureculture club 
+culture club 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+lily thai 
+anal 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+hentai 
+scopate 
+scopate 
+reegeton 
+pompini 
+camasntra 
+camasntra 
+reegeton 
+reegeton 
+ratat 
+merenge 
+merenge 
+salsa 
+rock 
+dady yanky 
+dady yanky 
+don omar 
+don omar 
+winsi y yandel 
+american gangster 
+we own the night 
+michael clayton 
+3:10 
+dragon wars 
+squirt 
+filipino 
+filipina 
+mexican 
+latina 
+tengo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kiss kiss chris brown 
+good life 
+the office season 1 
+good life 
+baby bash cyclone 
+crank that 
+search files, youtube and liveleak 
+asian 
+alicia keys 
+desi 
+desi 
+black lesbian 
+black lesbian 
+black lesbian 
+black lesbian 
+ebony lesbian 
+lesbian 
+ebony 
+black 
+leopard 
+chearleader 
+chearleader 
+chearleader 
+the guardian 
+search files, youtube and liveleak 
+superman 
+titanic 
+transformer 
+èï½²æ\88\92 
+èï½² æ\88\92 
+tube 
\8f�¢é\87\91å\88\9a 
+prisonbreaker 
+prisonbreak 
+lost 
+èï½² 
+èï½² 
+stock 
+sex 
+sex 
+avatar 09 
+avatar 10 
+chingo bling 
+perreo 
+mark knopfler 
+bbw 
+la cubanita 
+labyrinth 
+labyrinth piece of time 
+la cubanita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jamie cullum 
+cars divx ita 
+blacksonblondes 
+whiskey in the jar 
+defrag 
+windows xp professional 
+micheal clayton 
+linux.for.dumimes 
+linux.for.dummies 
+linux.for.dummies 
+bowie 
+the rasmus 
+reason 
+tiesto 
+till sky falls down 
+tiesto 
+fm 08 
+endgame 
+selen millenium 
+selen 
+beppe grillo 
+seduced 
+selen 
+selen 
+selen 
+selen 
+selen 
+selen 
+selen 
+antonella del lago 
+antonella del lago 
+883 
+my friends hot mom 
+umberto tozzi 
+my friends hot mom 
+my friends hot mom 
+[divx ita] pathfinder 
+russian rape 
+windows xp professional 
+ring tones 
+windows xp professional 
+final cut 
+fadobe 
+[divx ita] italiano 
+blond 
+sister 
+nl 
+teacher sex 2007 
+teach me sex 2007 
+teacher sex 2007 
+teacher sex 2006 
+teacher 2007 
+sex teacher 2007 
+sexteacher 2007 
+andrea bocelli 
+crash 
+air crash 
+[divx ita] film 
+[divx ita] coeleone 
+[divx ita] corleone 
+young milfs 
+young milfs 
+milfs 
+[divx ita] 300 
+porno 
+diskeeper 
+dahlya grey 
+andrew blake 
+pc games 
+pc games ita 
+teen 
+teen anal 
+hot friends 
+sisters hot friends 
+my sisters hot friends 
+sisters hot friends 
+anal virgin 
+naughty america 
+lucy becker 
+krystal steal 
+
+latvia 
+search files, youtube and liveleak 
+teen 
+anal teen 
+search files, youtube and liveleak 
+diskeeper 
+teen 
+julie strain 
+misty mundae 
+julie silver 
+anal 
+anal virgin 
+lolita 
+misty mundae 
+julie strain 
+penthouse 
+penthouse 
+penthouse 
+stolen tape 
+pet 
+amiga 
+amigaos 
+windows xp professional 
+aiab 
+aiab emulator 
+amiga forever 
+dutch 
+amiga forever 
+amiga forever 
+amiga forever 
+aiab 
+driver magic 
+omnipage 
+omnipage 
+omnipage 
+winuae 
+nhl 
+amiga emulator 
+tickling 
+elizabeth ita 
+games 
+call of duty 4 
+need for speed pro street 
+modern warfare 
+smokin on hay 
+hay 
+matrix 
+salma hayek 
+salma hayek.avi 
+salma hayek 
+booty talk 
+jada fire 
+ilse delange 
+ferrari 
+dr phil 
+big tits 
+search files, youtube and liveleak 
+kjjklyuil 
+key gen nero 8 
+preteen 
+pthc 
+nickelback far away 
+nickelback 
+search files, youtube and liveleak 
+anal 
+pthc 
+pthc 
+lee sarenna 
+lee sarenna 
+pthc 
+limp bizkit 
+linux game 
+pthc 
+gigi d'alessio 
+porn 
+soldier of fortune 
+xcom 
+rgdgdfg 
+íỲ르ëà¸\98 
+korean slut 
+korean sex 
+a la cart csi 
+windows xp professional 
+darryn brown 
+simpsons 
+antivirus 
+big fish 
+search files, youtube and liveleak 
+my name is earl 
+my name is earl ita 
+my name is earl 
+orgasm 
+sybian 
+camera caffè 
+orgasm 
+babyj 
+babyshivid 
+io sono leggenda 
+pthc 
+michael clayton 
+preteen 
+stay alive 
+mimi 
+mariah carey 
+level 42 
+californication 
+californication 
+a chi mi dice 
+a chi mi dice 
+a chi mi dice 
+a chi mi dice 
+windows xp professional 
+windows xp professional 
+psp 
+metalica 
+metalica 
+metalica 
+metalica 
+metalica 
+metalica 
+simon and garafunkel 
+simon 
+fdhfgdghdgh 
+simon 
+simon 
+simon 
+simon and garfunkel 
+pompino 
+pompino 
+blowjob 
+porn 
+bush 
+baby bash 
+wait for you 
+wait for you yamine 
+wait for you yamin 
+no one alicia 
+flo rida 
+flo rida low 
+until the end of time 
+justin timberlake 
+justin timberlake beyonce 
+justin timberlake until 
+like this 
+mims 
+like this 
+hey baby ashanti 
+gay porno 
+hypnotized 
+gay porno 
+gay porno 
+gay 
+shawty is a ten 
+walk it out 
+the office season 1 
+walk it out 
+dj unk 
+big poppa 
+nitorious big 
+blaque 
+poop 
+meow 
+britney spears 
+flledwood mac 
+fleedwood mac 
+fleedwood mac 
+deep purple 
+matlab 
+storage 
+desi 
+desi 
+desi 
+adobe 
+discography 
+mon amour 
+shrek terzo 
+shrek terzo 
+peter vries 
+family guy 
+prison break 
+storage 
+ciara 
+mac os x 
+lee sarenna 
+jenna jameson 
+jenna jameson 
+vba 
+hybrid 
+horror 
+clips 
+virus 
+ppc 
+nokia 
+excel tools 
+clips 
+clips 
+excel 
+50 cent 
+computeridee 
+computer idee 
+50 cent 
+computer 
+idee 
+adobe 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+timberland 
+timberland 
+timberland 
+timberland 
+timberland 
+timberland 
+timbaland 
+timbaland 
+timbaland 
+timbaland 
+timberland 
+timberland 
+timberland 
+timberland 
+tommy lee 
+tommy lee 
+tommy lee 
+tommy lee 
+convoy 
+games 
+games 
+games pc 
+games pc 
+1704 
+habitacion 1704 
+habitacion 1408 
+habitacion 1408 
+habitacion 1408 
+1408 
+zimmer1408 
+zimmer 1408 
+tiger woods 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+chills and fever 
+kiss prince 
+kiss tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+beatles 
+yobs 
+bbc 
+dicograohy 
+dicography 
+golden ratio 
+3:10 
+osx86 
+osx86 10.5 
+osx86 
+leopard 
+rap 
+osx86 
+tengo 
+crysis 
+the wire 
+porno 
+amateur 
+publicinvasion 
+you say party we say die! 
+bloc party 
+modest mouse 
+teddybears 
+sugarettes 
+the sugarettes 
+menomena 
+menomena 
+john motta 
+josh hawkins 
+cazzo 
+ratatouille 
+ratatoille 
+ratatoille 
+ratatoille 
+porno 
+guns of navarone 
+greethings from athena 
+firmin 
+die hard 
+die hard 
+die hard 
+58 minuti per morire 
+58 minuti per morire 
+die hard 2 
+molto incinta 
+mortal combat 
+dottor house 
+porno 
+xxx 
+dragon ball 
+sailor monn 
+sailor mon 
+sailor moon 
+cartoon 
+.jpg 
+ragazza 
+paris hilton 
+adult 
+molto incinta 
+adult 
+dragon 
+bruce lee 
+ragazza 
+girl 
+dog 
+horse 
+horse 
+hause 
+hause 
+home 
+taxi 
+taxxi 
+sailor moon 
+ranma 
+dragon ball 
+city hunter 
+top gear 
+ncis 
+ncis 5 
+csi 8 
+csi 6 
+bones 3 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+thailiand 
+tiesto 
+coldplay 
+danny krivit 
+joe claussell 
+ian pooley 
+tiesto 
+dennis fantina 
+jameson megapack 
+jameson 
+monstertitten 
+monster tits 
+monster boobs 
+riesentitten 
+tits 
+busty 
+search files, youtube and liveleak 
+ufo 
+ufo afterlight 
+windows xp 64 nl 
+vsp 
+microsoft vista 
+films 
+films ita 
+salieri 
+pecorina 
+grazie zia 
+mortal kombat 
+mortal kombat ita 
+mortal kombat 
+mortal kombat 
+elizabeth 
+directx 10 di crysis 
+divx 
+marie antoniette 
+marie 
+marie antoinette 
+divx ita 
+saw iv 
+saw iv ita 
+gigi 
+corel 
+becali 
+dvd2one2 
+dvd2one2 
+dvd2one 
+becali 
+golden ratio 
+lost season 4 
+dvdfab 
+dvdfab 
+old boy 
+sex scandals 
+mms 
+dps 
+delhi public school 
+delhi public school 
+mms 
+dvdfabù 
+dvdfab 
+leopard osx 
+search files, youtube and liveleak 
+ramazzotti 
+ramazzotti 
+adobe 
+adobe premiere elements 
+adobe premiere elements it 
+gigi d'alessio 
+gigi 
+d'alessio 
+avatar 10 
+my name is earl 
+heroes 
+search files, youtube and liveleak 
+heroes s2 e5 
+heroes s2 e5 
+heroes s2 e5 
+* 
+* 
+wwe 
+heroes s2 e5 
+heroes s2 e5 
+heroes s2 e5 
+heroes s2e5 
+heroes 
+jericho 
+nikita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+copying beethoven 
+the painted veil 
+sex 
+sex 
+sex 
+sex horse 
+sex horse 
+sex horse 
+sex horse 
+sex 
+sex 
+knights and merchants 
+elizabeth 
+knights and merchants 
+knights and merchants 
+elizabeth the gold age 
+karaoke 
+karaoke 
+karaoke 
+gta 
+adobe premiere elements 3 it 
+saint 
+molto incinta 
+ratatoville 
+negroamaro la finestra 
+negroamaro la finestra 
+negroamaro la finestra 
+negroamaro la finestra 
+negroamaro la finestra 
+julia ann 
+julia ann 
+julia ann 
+vivid pictures 
+quebase 
+the beatles 
+robot 
+mr.bean 
+mr.bean 
+crocodile 
+merkel 
+vista 
+300 
+cake 
+bee movie 
+beatles 
+faithless 
+august rush 
+27 dresses 
+27 dresses 
+27 dresses 
+27 dresses 
+nrc next 
+nrc 
+bros push 
+bros push 
+bros 
+search files, youtube and liveleak 
+wow 
+erlang 
+vista 
+the invasion 
+trible 
+tribler 
+waxing 
+wax 
+dutch 
+nl sub 
+nl sub 
+chemistry 
+chemistry 
+chemistry 
+chemistry 
+nl 
+chemistry 
+windows xp 
+windows xp german 
+feet 
+longhorn 
+pregnant 
+the reaping 
+swingger 
+swinggers 
+swing 
+vista rtm 
+swinger 
+vista rtm 
+waitress 
+maid 
+outlands instances 
+windows millenium 
+windows mediacenter german 
+office xp deutsch 
+office xp german 
+windows xp 2003 german 
+windows xp 2003 
+search files, youtube and liveleak 
+sex 
+windows 2000 german 
+antony santos 
+linux 
+mother 
+daughter 
+tribler 
+mvcd german 
+eva 
+eva 
+2007 
+transformers german 
+linux 
+office xp german 
+deutsch 
+harry potter deutsch 
+search files, youtube and liveleak 
+gigi finizio 
+deutsch 
+daniele stefani 
+hoerbuch 
+nudism 
+naturism 
+pedo 
+lolita 
+nude 
+jordi savall 
+hoerspiel 
+blowjob 
+tindo 
+uninstall tool 2.2.1 
+virus 
+virus 2008 
+wallpaper 
+firewall 
+cavemen 
+browser 
+ho voglia di te 
+c# 3.0 nutshell 
+explorer 
+c# 3.0 nutshell 
+c# 3.0 nutshell 
+c# 3.0 nutshell 
+c# 3.0 nutshell 
+secure 
+windows german 
+evan almighty 
+big ones 
+secure 
+max secure 
+music and lyrics 
+security 
+windows 98 
+windows aio german 
+windows aio 
+windows ultimate 
+windows xp ultimate 
+windows xp 2008 
+windows 2008 
+aio 
+microsoft 2000 
+microsoft 2001 
+microsoft 2002 
+microsoft 2003 
+window server german 
+windows server german 
+windows server deutsch 
+santana 
+windows server 2008 
+il dolce e amaro 
+windows 2003 
+dolce e amaro 
+windows lite 
+wow 
+windows xp sp3 
+crazy xp 
+crystal xp 
+microsoft 2004 
+windows xp bill gates 
+windows xp 2006 
+windows xp premium 
+windows xp 2007 
+windows xp 2008 
+windows longhorn 
+search files, youtube and liveleak 
+vista home basic 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dr. j 
+islam 
+.iso 
+windows iso 
+pro basketball 
+wwe - cyber sunday 
+eva cassidy 
+lost 
+perdidos 
+lost 
+perdidos 
+lost 
+windows power 
+xp power 
+u2 
+u2 
+xp experience 
+microsoft 2008 
+mac os 
+mac os windows 
+mac os theme xp 
+islam 
+scrubd 
+scrubs 
+leopard 
+330xd 
+330xd 
+ircommoncom 
+mocom 
+vista 
+leopard 
+leopard 
+leopard 
+leopard 
+leopard 
+ibm 
+thinkfree office 
+versiontracker 
+versiontracker 
+versiontrackerpro 
+nero 
+nero 
+vista 
+http://www.mininova.org/get/898328 
+world inferno friendship society 
+golden ratio 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+family guy 
+two and a half men 
+two and a half men 
+red green show 
+kacey 
+amy 
+amy shine 
+amy shine 
+tom jones 
+tom jones 
+search files, youtube and liveleak 
+millennium season 3 
+millennium s3 
+millennium s3 
+millennium s3 
+millennium s3 
+millennium s3 
+robot chiken 
+robot chicken 
+search files, youtube and liveleak 
+la leggenda 
+search files, youtube and liveleak 
+inconvenient truth 
+pursuit 
+unlock 
+games 
+dj tiesto 
+ibiza sex 
+ibiza sex 
+taxxi 
+taxi 
+taxxi 
+fat 
+fat 
+i need love 
+emmanuel 
+salieri 
+mario salieri 
+jessica 
+divx 
+games 
+games pc 
+top 20 
+ceasar 
+ceasar 
+power dvd 
+stone 
+basic 
+ufo 
+ufo afther 
+ufo after 
+power dvd 
+power dvd pc 
+prison.break 
+prison.break 
+prison.break s3 
+prison.break 
+prison.break 
+prison.break 
+prison.break 
+open season 
+letters from iwo jima 
+prison.break 
+jamaican 
+jamaican porn 
+search files, youtube and liveleak 
+porn 
+coochie 
+prison.break 
+sex 
+sex 
+pr0n 
+the eagles 
+pr0n 
+prison.break s3 
+suite cs3 
+suite cs3 
+
+
+? 
+bonzo dog 
+bonzo dog 
+search file 
+search files 
+
+
+
+
+teen 
+teen 
+teen 
+natasha 
+chip taylor 
+chip taylor 
+chip taylor 
+ann wilson 
+ann wilson 
+ann wilson 
+googlebox 
+heroes 
+search files, youtube and liveleak 
+
+ramazzotti 
+teen 
+melua 
+liefde 
+blof 
+amatoriale 
+ramazzotti e2 
+.avi 
+avi 
+leopard 
+alex jones 
+waking life 
+level 42 
+iso buster 
+acronis 
+blade runner 
+treu image nl 
+backup 
+arab 
+el greco 
+unlock cell 
+blackberry 
+discogarphy 
+discography 
+sex 
+xxx 
+pichunter 
+pichunter 
+amateurs 
+nas live 
+λεβενÏÃ\8e·ÏÂ\83 
+λεÏï¾\8eµÎ½ÏÃ\8e·ÏÂ\83 
+λεβενÏÃ\8e·ÏÂ\83 
+λεβενÏÃ\8e·ÏÂ\83 
+commedie 
+commedie de filppo 
+commedie de filppo dvd 
+horse sex 
+avrilm lavigne 
+linux 
+avrilm lavigne 
+avril lavigne 
+ghost 
+ghost dutch 
+dutch 
+unit 
+negramaro 
+spooks 
+tazenda 
+search files, youtube and liveleak 
+movie 
+heroes s02ep06 
+renault k4m 
+k4m 
+halo 2 
+gem boy 
+roxio media creator 
+roxio easy media creator 
+roxio easy media creator 10 
+gothic 3 
+antivirgear 3.8 
+antivirgear 3.8 
+antivirgear 3.8 
+the sims 2 pets 
+michael jakson 
+michael jackson 
+michael jackson 
+dragon moon 
+crazy german 
+dragon xxx 
+ugly betty 
+heroes 
+ass workship 
+gothic 3 
+pets 
+acdc 
+olivia mojica 
+renault 
+marija zabjelo 
+logan 
+dr. house 
+burnout 
+ray davies 
+prison break 
+need for speed 
+xp 
+logan 
+need for speed pro street 
+wow scolded 
+adobe 
+xxx 
+lucio dalla 
+prsin break 
+prison break 
+embrace of the vampire 
+k4m 
+justin timberlake 
+justin timberlake 
+justin timberlake 
+roxio 
+golden heart 
+golden heart 
+new york 
+new york 
+cake 
+faithless 
+foo fighters 
+richard cheese 
+porn 
+prisons breake 
+carny effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+top 100 
+heroes 
+xxx 
+lio 
+lion 
+xxx 
+licht verstandelijk gehandicapten 
+renault 
+licht verstandelijk beperkten 
+teens 
+mentally retarded 
+lolitas 
+conspiracy 
+gta vice city 
+soundtrack vice city 
+vice city 
+soundtrack 
+eva cassidy 
+heroes s02 
+heroes 1 
+heroes s01 
+heroes.s02e01 
+heroes.s02e0 
+heroes.s02e0 
+heroes.s02e0 
+heroes s02 xor 
+momoi 
+ned. 
+ned 
+ondert 
+horro 
+bure 
+neighb 
+voy 
+strip 
+invasion 
+eros 
+vasco la compagnia 
+the simpsons 
+cedega 
+search files, youtube and liveleak 
+jericho 
+kamelot 
+sonata arctica 
+metallica 
+du hast 
+captivity 
+realplayer plus 
+realplayer 
+k4m 
+realplayerplus 
+shakespeare 
+kitesnow 
+ass 
+moglie 
+moglie 
+j 
+kamelot 
+march of mephisto 
+at the end of paradise 
+edge of paradise 
+marija zabjelo 
+marija zabjelo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bleach 145 
+sonata arctica 
+rammstein 
+pink floyd 
+saw 4 
+30 days of night 
+nanni moretti 
+nanni moretti 
+taxy driver 
+american gangsta 
+30 days of night 
+ich wil 
+ich will 
+ich wil 
+dean martin 
+tribute 
+de andrè 
+de gregori 
+de andrè 
+fabrizio de andrè 
+de gregori 
+rino gaetano 
+de andrè 
+vecchioni 
+ita 
+italiano 
+porn 
+moana 
+moana 
+ligabue 
+vecchioni 
+franco battiato 
+roberto vecchioni 
+finardi 
+finardi 
+finardi 
+hendrix 
+the doors 
+power flower 
+roberto cecchioni 
+roberto vecchioni 
+guccini 
+adobe premiere 
+adobe 
+sims 2 patch 
+sims2 patch 
+sims 2 patch 
+roberto vecchioni 
+sebastian 
+rise against 
+cakifornication 
+californication 
+rise against 
+fifa manager 08 cracks 
+naruto 
+rise against 
+rise against 
+tropa de elite 
+aatje 
+arab 
+east is east 
+pirates of the carribean at worlds end 
+pirates of the carribean at worlds end 
+test 
+ita 
+california ita 
+. 
+.exe 
+access97 
+cars 
+ita manga 
+ita house 
+ita dr.house 
+ita dr. house 
+lush life 
+jenna jameson 
+ita 
+ita divx 
+ita divx 
+ita divx 
+bresek 
+breserk 
+breserk ita 
+breserk ita divx 
+ita berserk 
+spiderman 3 
+spiderman 3 pcdvd 
+spiderman 3 pcdvd 
+pc dvd 
+christy canyon 
+christycanyon 
+christy_canyon 
+christy 
+christy canyon 
+christy canyon 
+k4m 
+logan 
+renault clio 
+renault clio 
+renault clio 
+cracks 
+wp 
+driver 
+ring tones 
+renault clio 
+foot soldier 
+divx 2007 ita 
+xxx 
+xxx 
+foto 
+winx 
+winx 
+winx ita 
+culo 
+culo 
+salieri 
+search files, youtube and liveleak 
+die hard 
+s 
+sms 
+sms 
+mango 
+mango 
+sms sotto mentite spoglie 
+mango 
+porno 
+window media player ita 11 
+window media player 11 
+dragon 
+hypnotize 
+karol 
+papa woitila 
+papa woitila 
+karol woitila 
+karol woitila 
+ppa karol woitila 
+fucili 
+fellini 
+papa kerol woitila 
+papa kerol woitila 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bondage 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hacked 
+hacked wireless network 
+hacking wireless network for dummies 
+onion 
+tst 
+test 
+onion 
+.avi 
+sassari 
+dejavu ita 
+dejavu dvd ita 
+hair ita divx 
+jesus christ superstar ita divx 
+jesus ita divx 
+number 23 dvd ita 
+jesus ita 
+manga ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+italian 
+prison brake 
+prison brake 
+prison brake 
+mojica 
+prison brake 
+nelly furtado 
+tazenda 
+tazenda loneros 
+tazenda loneros domo mia 
+tazenda loneros domo mia 
+blue line 
+thin blue line 
+xbox 
+marley 
+bob marley 
+bob marley 
+bob dylan 
+p 
+search files, youtube and liveleak 
+bob dylan 
+bob marley 
+bourne 
+catania 
+martian child 
+avast 
+taxus 
+punto croce 
+punto croce 
+punto croce 
+punto croce 
+punto croce 
+trucchi punto croce 
+trucchi ricamo 
+punto croce perfetto 
+disegni punto croce 
+pooh 
+sisoft 
+sisoft 
+tokio hotel milano 
+playboy 
+playboy november 2007 
+prison brake 
+prison break 
+blood rayne 
+blood rayne 
+blood rayne 
+ryandan 
+prison.break 
+prison.break 
+top gear 
+saw 4 
+marley 
+kate nash 
+mature 
+hey you 
+hey you 
+hey you 
+amateur 
+ratatouille 
+paola turci 
+ratatouille ita 
+ratatouille ita 
+ratatouille ita 
+ratatouille 
+ratatouille 
+ratatouille ita 
+ratatouille 
+jennifer lopez 
+top gear 
+stockholm 
+je 't aime 
+je t aime 
+je t aime 
+je t aime 
+je t aime 
+je t aime 
+je t' aime 
+je 't aime 
+je 't aime 
+je 't aime 
+french 
+french dvd 
+cramberries 
+cramberries 
+cramberries 
+cramberries 
+cramberries mp3 
+zombies mp3 
+zombies mp3 
+zombies mp3 
+mp3 
+u2 
+2061 
+die hard 
+die hard 
+die hard ita 
+die hard ita 
+allmusic 
+all music 
+fergi big girls dont cry 
+om shanti om 
+dhan dhana dhan goal 
+pes 2008 
+moto gp 2007 
+die hard 3 ita 
+football manager 2008 
+softperfect 
+weeds 
+weeds s01e01 
+flashget 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+yandel 
+monsoon 
+monsoon 
+monsoon 
+monsoon 
+tokio hotel 
+la terza madre 
+la terza madre 
+the bourne ultimatum 
+the bourne ultimatum ita 
+il caso thomas crawford 
+xxx 
+michael clayton 
+michael clayton ita 
+tideland il mondo capovolto 
+kamael.rar 
+naruto folge 1 part 1/1 
+naruto folge.1 part 1/1 
+naruto episode 1 part 1/1 
+naruto episode 1 part 1/1 
+naruto episode 1 part 1/1 
+naruto episode 1 part 1/1 deutsch 
+naruto episode 1 part 1/1 deutsch 
+naruto folge 1 part 1/1 deutsch 
+naruto folge 1 part 1/1 deutsch 
+naruto folge 1 
+kamasutra 
+kamasutra 
+pro tools 
+kamasutra 
+nana 37 
+nana 37 
+nana 37 
+nana 37 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 
+nana 36 
+nana ita 
+nana ita 
+high school 
+nana 
+nana 
+nana 
+nana 
+spy 
+aria 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the office 
+the practice 
+ludicris 
+ludacris 
+ludacris 
+2pac 
+search files, youtube and liveleak 
+divx ita 
+camera cafe ita 
+camera cafe ita 
+pozzetto ita 
+essential 
+essential 
+guide 
+one u2 
+pes 2008 
+pes 2008 xbox 360 
+colin mcrea 
+wrc 
+xbox 360 
+xbox360 
+xbox360 
+dogma ita 
+dogma 
+hot fuzz 
+horse 
+pthc 
+heroes s02e01 
+heroes s02e02 
+heroes s02e01 
+heroes s02e03 
+little lupe 
+search files, youtube and liveleak 
+80 
+ferry korsten 
+tiesto 
+fucking 
+fuckin horses 
+guns n'roses 
+little lupe 
+animal sex 
+pthc moscow 
+hussyfan 
+movies 
+south park 
+kruistocht 
+avril lavigne 
+laura fuck 
+teen fuck 
+wii 
+lost 
+lost 
+movie 
+linux 
+alexandra oneil 
+alexandra oneil 
+critisize 
+bad ass 
+alexander oneal 
+horoor movies 
+horror movies 
+alexander o'neal 
+beatles 
+las vegas 
+montecito 
+reddwarf 
+search files, youtube and liveleak 
+tpain 
+comics 
+desparate 
+zusje 
+avril lavigne 
+avril lavigne 
+harry potter 
+lost 
+run, fat boy, run 
+run fat boy run 
+pj 
+girls gone wild 
+ratatoulle 
+disney 
+sex 
+globo 
+blonde 
+carmen electra 
+hannah montanna 
+hannah montana 
+pictures 
+the who 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+csi.s08e05 
+csi 8 
+csi 8 
+autocad 2007 
+csi 8 
+csi 8 
+csi 8 
+mystery jets 
+josh hawkins 
+josh hawkins 
+il capo dei capi 
+search files, youtube and liveleak 
+avast 
+windowsxp 
+windows xp 
+50cent 
+sex tape 
+eagles 
+ita 
+pregnant 
+lact 
+search files, youtube and liveleak 
+denise richards 
+os windows xp 
+windows vista 
+piss 
+naruto 
+football manager 2008 
+cubase 
+asia 
+southpark 11 
+southpark 1111 
+gaber shampoo 
+gaber shampo 
+king kong 
+gaber morale 
+dodge charger 
+dodge charger r/t 
+toyota 
+toyota 
+land cruiser 
+mtv 2008 
+mtv 2008 
+mtv 2007 
+hit 2007 
+anime ita 
+hagigawa 
+hagigawa 
+asian 
+asian 
+xxx 
+xxx 
+xxx 
+noa 
+noa i dont now 
+noa i don't now 
+noa i don't now 
+noa i don't now 
+noa 
+noa 
+noa 
+noa 
+don't now 
+i don't now 
+noa 
+cubase 
+cubase 
+windows 98 
+windows 2000 professional 
+kate nash 
+u2 
+miracle drug 
+pro evalutio soccer 
+pro evalutio 
+navision 
+navision 
+navision 
+navision 
+pro evalutio 
+pro evalutio 
+tildes birojs 2005 
+tildes birojs 2005 
+tildes birojs 2005 
+microsoft office 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+winzip 
+winrar 
+voyeur 
+american gangster 
+wic 
+juelz santana-from me to u 
+season1 
+hentai 
+tildes birojs 
+tildes birojs.lv 
+juelz santana 
+nylonic 
+the wire 
+cubase 
+emiry 
+felicity 
+tenn 
+tenen 
+teen 
+nudist 
+reno 911 
+jamie cullum 
+bossa nova 
+songbook 
+songbook 
+songbook 
+jazz guitar 
+jazz guitar 
+jazzguitar 
+jazzguitar 
+jazz 
+trade 2007 
+trade 
+northern soul 
+soul 
+mowtown 
+heroes s02e03 
+heroes s02e03 mkv 
+northern soul 
+ratatouille 
+la famiglia robinson 
+heroes s02e05 mkv 
+heroes s02e06 mkv 
+heroes s02e07 mkv 
+heroes s02e05 mkv 
+50cent 
+50cent 
+50cent 
+rihanna 
+nudist 
+danilewicz 
+akon 
+naturist 
+nudist 
+nudist photo 
+nudist photo 
+nudist photo 
+nudist photo 
+xxx photo 
+xxx 
+vid 
+xx 
+xx 
+fr 
+britney spears 
+iv rp reaktywacja 
+jars of clay 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+police 
+jars of clay 
+search all files 
+tildes birojs.lv 
+microsoft 
+level 42 
+pulp fiction ita 
+ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+heroes 
+heroes.s02e01 
+heroes s02e01 
+pro evolution 
+spanking 
+abba 
+spanked 
+chelentano 
+tchelentano 
+celentano 
+spanking 
+xbox360pes 2008 
+pes 2008 xbox360 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+metallica 
+torrent tracker 
+tracker 
+armageddon 
+turks fruit 
+nerderlandse film 
+first time 
+search files, youtube and liveleak 
+xxx 
+perl 
+search files, youtube and liveleak 
+dvd shrink 
+perl 
+victoria 
+huge 
+microsoft 
+shrek 3 nl 
+shrek 3 
+shrek 
+kurt angle 
+firewall 
+book 
+phrack 
+tuneup 
+roberto 
+wigan casino 
+xxx 
+sex 
+toto cutugno 
+james blunt 
+sex 
+ubutu 
+ubuntu 
+search files, youtube and liveleak 
+madonna 
+discostars80 
+discostars80 
+discostars80 
+rock 80 
+disco 80 
+erotic 
+erotic 
+erotic 
+comidy 
+comidy 
+army of darkness 
+south park 
+amos milburn 
+wwe 
+spiel 
+control 
+hilton 
+easyart 
+sexy 
+search files, youtube and liveleak 
+bbc 
+michael palin 
+bubble girls 
+aria giovanni 
+shoko goto 
+sora aoi 
+hentai 
+new age 
+sora aoi 
+shoko goto 
+marina matsushima 
+g spot express 
+saori yamamoto 
+karunesh 
+la terza madre 
+la terza madre ita 
+adobe 
+a come andromeda 
+la terza madre ita 
+kapitein rob 
+kapitein rob 
+kapitein rob 
+candice night 
+candice night 
+atonement 
+k3 en de kattenprins 
+timboektoe 
+il capo dei capi 
+fleedwood mac 
+tit 
+bones 
+german 
+abbywinters 
+puntate prison break seconda serie 
+diskeeper 
+prison break seconda serie 
+deutsch 
+musik 
+subway 
+abby winters 
+bond 
+blue 
+ubuntu 
+paul 
+girlycast 
+iwork 
+28 
+heroes 
+heroes 5 
+spiegel 
+stardust 
+air sexy boy 
+air sexy boy mp3 
+dutch 
+mobile 
+search files, youtube and liveleak 
+fonky family 
+search files, youtube and liveleak 
+anatomy 
+anatomy hunted 
+anatomy s04 
+anatomy s04 e05 
+heroes s01 
+heroes s01 e04 
+die hard 4 
+susteren 
+budweiser 
+budweiser toad 
+nero for linux 
+nba 
+niki nova 
+adult 
+bang bus 
+train wreck 
+ita 
+dark night of the scarecrow 
+battle star galactica 
+memory 
+memory 
+audio books 
+self help 
+japanese 20 
+monster cock 
+shrek axxo 
+divx for mac 
+divx for mac 
+asia 
+melita 
+lolit 
+lol 
+xxx 
+xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+evan 
+surfs 
+dragon 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+eros ramazoti 
+eros ramazot 
+ramazot 
+harry potter 
+real butts 
+dirty latina 
+latina maid 
+dirty latina 
+tom 
+hilda 
+yuma 
+incest 
+incest 
+incest 
+daa converter 
+daa 
+converter 
+uif 
+kim possible 
+gay 
+hig scool 
+hig scool musical 
+hig scool musical 
+hig scool musical 
+hig scool musical 
+hig scool musical 
+hig 
+high school 
+high school musical 
+wigan casino 
+emmanuel 
+caniba 
+torture 
+peelers 
+dropkick murphy 
+the mahones 
+the mahones 
+web 
+irish 
+weeds 
+brotherhood 
+brotherhood s02e03 
+the mahones 
+the mahones 
+the mahones 
+the mahones 
+the mahones 
+larkin 
+larkin brigade 
+larkin brigade 
+flogging molly 
+flogging molly 
+floggin molly 
+anal 
+troia 
+tits 
+diktrom 
+irish 
+the guardian 
+the guardian 
+canibaal 
+peelers 
+ped 
+hentai 
+leopard 
+search files, youtube and liveleak 
+maximo park 
+ratatuille 
+pinkfloid 
+bruce 
+myrna goossen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+berlusconi 
+team fortress 
+wat zien ik 
+avs4you 
+avs4you 
+avs4you 
+video clips musica 
+linda de mol 
+search files, youtube and liveleak 
+zorán 
+vetar 
+silvia millecam 
+vuc vetar 
+samardic 
+surda 
+surda 
+surda 
+avs4you 
+avs4you 
+avs4you 
+avs4you 
+avs4you 
+avs4you 
+verbaan 
+mecano 
+gorginaverbaan 
+g.verbaan 
+buurvrouw 
+nassirya 
+nassiryia 
+nassiryia 
+avs4 
+avs4 
+avs4 keygen 
+a chi mi dice 
+blue 
+paul panzer 
+backstreet boys 
+trance voice 
+trance voice 
+trance voice 
+trance voice 
+vigilante 8 
+anna tatangelo 
+hentai 
+tatangelo anna 
+tatangelo anna 
+il padrino 
+virtual cd 
+nero 8 
+anime 
+winzip 
+virtual 
+nero 
+nero 8 
+country 
+sherlock holmes arsene lupin 
+sherlock holmes 
+vistaxp theme 
+moke 
+search files, youtube and liveleak 
+country 
+avatar 10 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+axxo 
+galactica 
+czdab 
+therbathia 
+bridge to terabithia 
+ultimatum 
+.mp3 
+mp3 
+avi 
+italian 
+galactica 
+bourne 
+bourne italian 
+avi 
+hot 
+mp3 
+italian 
+jericho 
+vista 
+windows 
+juiced 
+pc 
+pc 
+japanese 
+dark night 
+steamroller 
+best of 
+superfriends 
+justice league 
+chitsujo 
+nihon 
+cfnm 
+petter p 
+petter album p 
+petter albums 
+petter albums 
+petter albums 
+petter 
+arcade fire 
+movie 
+petter 
+mp3 
+mp3 musik 
+mp3 albums 
+mp3 albums 
+mp3 albums 
+mp3 
+mp3 petter 
+mp3 petter 
+mp3 petter 
+mp3 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+porn 
+porn 
+nihon 
+chisai 
+chinpo 
+chitsujo 
+musik 
+moodyz 
+cocoa 
+tomtom navigator 
+cocoa's room 
+naked mile 
+cocozella 
+small penis 
+small dick 
+small penis humiliation 
+tiny dick 
+tiny pecker 
+beirut 
+la terza madre 
+la terza madre 
+penis humiliation 
+nihon 
+cicciolina 
+nihon penis 
+cicciolina 
+nude male contest 
+cicciolina 
+body painting 
+resident evil 
+la terza madre 
+brigitta bulgari 
+pillow book 
+vomit 
+jonn holmes 
+jonn holmes 
+jonn holmes 
+un impresa da dio 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+stardus 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+rocco sifredi 
+rocco sifreddi 
+rocco sifreddi 
+prison break 
+prison break fr 
+need for speed 
+girls 
+rio de janeiro 
+need for speed 
+need for speed 
+radiohead 
+comptia 
+bdsm 
+navigator 
+stylexp 
+the unit 
+sunshine 
+bourne ultimatum 
+pthc boy 
+pthc boy 
+pthc boy 
+boy pedo 
+unit 
+playboy 
+ocean's 13 
+ocean's thirteen 
+ocean's 13 
+talk to me 
+talk to me movie 
+american gangster 
+f.e.a.r 
+fear 
+stardust 
+mistress 
+bdsm 
+grateful dead 
+iron man 
+game mp3 
+wiked game 
+isac game 
+welcome jungle 
+2008 
+movie 
+welcome jungle mp3 
+business 
+http://tracker.zerotracker.com:2710/announce 
+leopard 
+timboektoe 
+aquarium 
+charmane 
+charmane 
+aquarium 
+fakebook 
+transformers 
+osx 
+waris 
+raaz 
+v10 
+bdsm 
+bdsm 
+hindi 
+bdsm 
+fallen 
+men 
+www.sumotorrents 
+gay 
+www.sumotorrents.com 
+www.sumotorrents.com 
+mitti wajan mardi 
+guys gone wild 
+hkhd 
+leopard 
+hindi movies 
+the craft 
+indian movies 
+x-men 
+bhool bhuliyaa 
+indian 
+tan lines 
+tan lines 
+tan lines movie 
+the seeker the dark is rising 
+punjabi 
+porn 
+gay porn 
+the seeker the dark is rising 
+tomtom benelux 
+mistress 
+porn 
+cum 
+the craft 
+locoroco 
+tunstall 
+high school musical 
+video converter 
+trojan remover 
+alcohol 120 
+spyware doctor 
+search files, youtube and liveleak 
+psv 
+anatomy s04 e06 
+brave one 
+prison.break 
+will&grace 
+the bourne ultimatum 
+the bourne ultimatum ita 
+will&grace s1 
+atb 
+bean 
+bean 
+bean 
+mrs. bean 
+comedy 
+sean cody 
+atc 
+bruce 
+bruce 
+bruce 
+denzal 
+denzal 
+denzal 
+dooh 
+aha 
+bruce 
+david 
+bruce springsteen 
+bruce springsteen 
+bruce springsteen 
+bruce springsteen 
+gung ho 
+bruce springsteen 
+atb 
+at 
+bruce springsteen 
+live 
+gung ho movie 
+atb 
+gung ho 1986 
+gung ho michael keaton 
+music 
+atb 
+you tube 
+you tube 
+you tube 
+you tube 
+you tube 
+you tube 
+you tube 
+potter 
+nes roms 
+heroes 
+nes roms 
+mr. brooks 
+mr books 
+ps2 
+mr brooks 
+talk to me 
+tgtsoft 
+flyboys 
+biffy clyro 
+biffy clyro 
+naughty office 
+stream tv 
+tv 
+search files, youtube and liveleak 
+stream tv 
+stream 
+clayton ita 
+super tv 
+stream tv 
+a 
+.mp3 
+bobbe melle 
+bobbe malle 
+piossasco 
+jenna 
+aria giovanni 
+vasco rossi 
+stardust 
+ita 
+bourne 
+bourne ultimatum ita 
+star dust 
+pro street 
+everest 
+crystal 
+rossz pc játékok 
+search files, youtube and liveleak 
+windows media center 
+windows media center 
+windows media center 
+windows media center 
+pdf 
+maya 
+maya tutorial 
+nature 
+lake 
+media center 
+media center 
+pc booster 
+nick cave 
+aeonflux 
+jazz guitar 
+comic 
+solsuite 
+solsuite 
+solsuite 
+hitman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+comic xxx 
+gianni morandi 
+criminal minds 3 
+criminal minds 3 
+criminal minds 3 
+stylexp 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vista transformation pack 
+zelig circus 2007 
+zelig circus 2007 
+zelig circus 2007 
+britney 
+final cut studio 2 
+windowblinds 
+hörspiel 
+teen 
+lisa sparx 
+lisa sparxxx 
+vincenzo salemme 
+dilli dalli 
+vincenzo salemme 
+vincenzo 
+vincenzo salemme 
+mango 
+eminem 
+fabri fibra 
+ramazzotti 
+sotto mentite spoglie 
+gigi d'alessio 
+alessio 
+sotto mentite spoglie 
+napoletana 
+high school musical 2 
+high school musical 
+avril lavigne 
+eminem 
+dragon ball 
+die hard 4 
+antikiller2 rusù 
+antikiller2 rusù 
+antikiller2 rus 
+concour schiedam 
+concours schiedam 
+exelcios delft 
+exelcior delft 
+exelcior delft 
+showkorps exelcior delft 
+door vriendschap sterk 
+happy feet 
+dvs 
+dvs katwijk 
+adest 
+mars muziek 
+polka's 
+trumpet party 2007 
+trumpetparty 2007 
+handy 
+mobile 
+mobile 
+mobile 
+mobile 
+search files, youtube and liveleak 
+seal 
+enemy territory quake wars 
+10cc 
+heroes 
+search files, youtube and liveleak 
+asian schoolgirls 
+harry potter 
+star wars 
+krieg der sterne 
+debbie 
+chinese 
+thai 
+hussyfan 
+chikan 
+chikan 
+ebony 
+wonderwoman 
+fandox 
+bald 
+the who 
+supertramp 
+pink floyd 
+clockwork orange 
+jurrasic park 
+porno italiani 
+porno italiani 
+porno italiani 
+porno italiani 
+porno italiani 
+porno 
+tom tom 
+fight club 
+wonderwoman 
+star 
+completo 
+search files, youtube and liveleak 
+eye candy 
+24 season 
+alien skin 
+alien 
+cinderella 
+latv 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+latv* 
+latviski 
+boy 
+boy 
+http://www.youtube.com/watch?v=gdzn7k5rzlq&eurl=http://musicclip.web-log.nl/mijn_weblog/videoclips_n/index.html 
+http://www.youtube.com/watch?v=gdzn7k5rzlq&eurl=http://musicclip.web-log.nl/mijn_weblog/videoclips_n/index.html 
+http://www.youtube.com/watch?v=gdzn7k5rzlq&eurl=http://musicclip.web-log.nl/mijn_weblog/videoclips_n/index.html 
+rfid 
+search files, youtube and liveleak 
+youtube 
+youtube 
+youtube 
+search files, youtube and liveleak 
+youtube blip.tv 
+nightwish - amaranth 
+search files, youtube and liveleak 
+ufo 
+ufo 
+stargate atlantis 
+youtube 
+blip.tv 
+youtube 
+youtube 
+bombalurina 
+massage 
+polski 
+thai porn 
+traveler 
+search files, youtube and liveleak 
+adobe photoshop cs3 
+search files, youtube and liveleak 
+axxo 
+www.blip.tv 
+pj 
+french 
+autocad 
+microstation 
+bentley 
+bentley microstation 
+galatsin 
+russian porn 
+trade 2007 
+discografia gigi d'alessio 
+weeds french 
+french 
+dmx 
+dmx 
+french 
+french 
+hebrew 
+mom 
+mom 
+mom 
+mom 
+mom son 
+mom boy 
+teen boy 
+bleisch 
+moshav 
+beatles revolver 
+beatles 
+seeed 
+french 
+collection 
+pink floyd 
+jethro tull 
+ska 
+bombalurina 
+media center 
+russian dvdrip 
+russian dvdrip 
+godzilla 
+godzilla 
+godzilla 
+gta liberty city 
+udelej se 
+media center 
+high school musical 
+rus 
+media center 
+gangsters 
+american gangsters 
+stalker 
+the bourne supremacy 
+the bourne identity 3 
+the bourne 3 
+the bourne 3 ita 
+the bourne 3 ita 
+the bourne 3 
+funeral party 
+funeral party 
+funeral party 
+windows media center 
+tera copy pro crack 
+tera copy pro 
+tera copy pro 
+modern talking 
+silent hill dvd 
+windows media center 
+kiss 
+shandi 
+windows media center 
+la terza madre ita 
+la terza madre ita 
+film porno 
+film porno 
+film porno 
+justice 
+hard tech 
+hardtech 
+una settimana da dio 
+una settimana da dio 
+un giorno da dio 
+funeral party 
+hardtech 
+hardtech 
+electro 
+una impresa da dio 
+un'impresa da dio 
+una impresa da dio 
+funeral party 
+un impresa da dio 
+corel 
+nl subs 
+nl subs 
+cico 
+chico 
+italian 
+greek 
+pc game 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+pc game 
+300 
+morther 
+mother 
+mrs 
+vasco rossi 
+aunt 
+renegade 
+ratatuille 
+renegade pc 
+soldier of fortune 
+soldier of fortune pc 
+mom 
+vista ultimate 64 
+avg anti virus 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+harry potter 
+mother 
+aunt 
+andre hazes 
+son 
+george micheal 
+pau de eeuw 
+paul de eeuw 
+mom son 
+paul de leeuw 
+mom son 
+mom son 
+incest 
+javelin 
+german inzest 
+grey's anatomy 
+grey's anatomy ita 
+grignani 
+german 
+divx ita 
+county 
+country 
+csi 
+livelak 
+mom 
+amatoriale 
+car 
+pc game 
+glorious 
+alles is liefde 
+mortyr pc 
+mortyr 
+pc 
+soldier of fortune 
+soldier of fortune pc 
+24 season 4 
+call of duty 
+call of duty pc 
+search files, youtube and liveleak 
+call of duty 3 pc 
+uber soldier 
+leonie van veen 
+nl subs 
+"norton 360" 
+"norton 360" 
+norton 
+das konklave 
+go west 
+go west 
+go west 
+grazie 
+gianna nannini 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+orgies 
+el pecado 
+el pecado 
+esorcista 
+esorcista 
+asia argento 
+zucchero 
+castellano 
+castellano 
+espanol 
+la terza madre 
+piratas 
+la terza madre 
+amor 
+gaucho 
+gaucho 
+folclore 
+tango 
+trade 
+cars 
+suom 
+cars 
+osx 
+crysis 
+leopard 
+amatoriale 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+amatoriale 
+amatoriale ita 
+sesso amatoriale ita 
+rocco selen 
+hard pissing 
+rush 
+magic iso 
+search files, youtube and liveleak 
+petites annonces 
+goa 
+mature 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lesbian 
+sci-fi 
+salieri 
+roccaforte 
+salieri 
+salieri 
+spb pocket plus 
+porno 
+bitdefender 
+kissing 
+bioshock 
+mature 
+pet sounds 
+abbey road 
+rush hour 3 
+oceans 13 
+milk 
+shell 
+psp ware 
+search files, youtube and liveleak 
+xxx 
+nude 
+media manager pro for psp 
+surfs up 
+surfs up dutch 
+surfs up 
+heroes s01e01 
+osx 
+search files, youtube and liveleak 
+children of bodom 
+children of bodom 
+sono come tu mi vuoi 
+irene grandi 
+videoart 
+seinfeld 
+bee 
+war 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stardust 
+rush hour 
+3:10 
+the assassination 
+simpsons 
+resident 
+transformers 
+deep purple 
+deep purple 
+glamazon 
+school 
+home 
+strapon 
+school 
+strapon 
+femdom 
+glamazon 
+femdom 
+lift 
+carry 
+glamazon 
+lift 
+femdom 
+bunny 
+thai 
+pron twins 
+porn twins 
+search files, youtube and liveleak 
+denise 
+denise 
+animal 
+country 
+rar password cracker 
+thinstall 
+cheerleader 
+database 
+cheerleader 
+britny spears 
+britney spears 
+machine fuck 
+forced sex 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jazz 
+smooth 
+level 
+miller 
+wooten 
+lennon 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+corel 
+adobe 
+ezgenerator 
+jazz 
+jazz 
+search files, youtube and liveleak 
+harry potter 
+harry potter dutch 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game 
+harry potter game 
+harry potter nld 
+harry potter nld 
+harry potter nld 
+harry potter nld 
+harry potter nld 
+battiato 
+ita 
+eve 
+kim kard 
+pam anderson 
+mase 
+t.i. 
+architettura 
+casa 
+architettura 
+ok computer 
+dragon ball z all episodes 
+dragon ball z 
+orgie 
+xxx 
+bisex 
+handynummer 
+adresse 
+privat 
+langeoog 
+puff 
+reeperbahn 
+kiez 
+rotlicht 
+pervers 
+kontakt 
+bordell 
+megasex 
+massenfick 
+prono 
+porno 
+mega xxx 
+kungfu 
+search all files 
+groupsex 
+teen 
+architettura 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+wu zu 
+architettura 
+architettura 
+architettura 
+architettura 
+matematica 
+film 
+ingegneria 
+legge 109 
+legge 109 
+dublado 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+porno 
+warden 
+ilsa 
+klb 
+gal costa 
+musica 
+search files, youtube and liveleak 
+japan 
+data warehouse 
+data warehouse 
+apple 
+data warehouse 
+business object 
+calsical 
+calssical 
+calssical 
+x japan 
+x japan 
+x japan 
+x japan 
+jessica alba 
+jessica alba 
+jessica biel 
+vicky 
+vicky 
+hussyfan 
+kingpass 
+kingpass 
+carmen electra 
+shakira 
+alizze 
+die hard ita 
+hidalgo 
+hidalgo 
+ratatouille 
+ratatouille ita 
+wallpapers 
+bondage 
+search files, youtube and liveleak 
+glamazon 
+glamazon 
+glamazon 
+glamazon 
+home 
+school 
+strapon school 
+bunny 
+glamazon 
+searcgh 
+tribler 
+tribler 
+test 
+tribler 
+i the legend 
+search files, youtube and liveleak 
+legend 
+country 
+dexter 
+tribler 
+convert daa 
+kingdom hearts ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+puscifer 
+ho voglia di te 
+katie melua 
+liefde 
+v is for vagina 
+pino palladino 
+tribler 
+amiga 
+tribler 
+ggg 
+superman 
+tribler 
+tutorial 
+search files, youtube and liveleak 
+maria bethanea 
+maria bethanea 
+ggg 
+guns in roses 
+ggg 
+ggg 
+ggg 
+maria rita 
+elton jhon 
+katie mailua 
+katie melua 
+tribler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+siffredi 
+tribler 
+search files, youtube and liveleak 
+jessi 
+tribler 
+led zeppelin 
+led zeppelin 
+biel 
+alba 
+rar passord cracker 4.12 keygen 
+rar passord cracker 4.12 keygen 
+rar password cracker 4.12 keygen 
+rar password cracker 4.12 
+brian adams 
+rar password cracker keygen 
+rar password cracker keygen 
+rar password cracker keygen 
+rar password cracker keygen 
+rar password cracker 
+teen 
+led zeppelin 
+antonio albanese 
+antonio albanese 
+ceto 
+laqualunque 
+run fat boy run 
+9nv9ydyfodq 
+alba 
+age of empire 
+age of empire iii mac os 
+age of empire iii mac 
+age of empire iii mac 
+search files, youtube and liveleak 
+8 1/2 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+age of empire iii 
+age of empire iii dmg 
+k3 
+superbad 
+britnay spares album 
+britney spears album 
+3.10 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sexy love 
+high school musical 
+ahead nero 
+christine lavine 
+christine lavin 
+bhool bhulaiya 
+superbad 
+google tech talks 
+perl 
+search files, youtube and liveleak 
+beal 
+jessica 
+italia 
+ita 
+simply 3d 
+oops 
+webcam girl 
+bunny 
+bunny 
+bunny 
+glamazon 
+lift 
+lift 
+lift 
+bunny 
+glamazon 
+software 
+programs 
+program 
+picture it 
+picture 
+bunny glamazon 
+skalls 
+skall 
+naked girls 
+porn 
+naked chicks 
+naked chick 
+naked 
+amfest 
+bunny 
+lift and carry 
+samen met andre 
+samen met andre 
+samen met andre 
+collective soul's "reunion." 
+collective soul 
+collective soul 
+heroes 7 
+glamazon 
+pokerbot 
+atlantis 
+atlantis - der versunkene kontinent 
+atlantis - der versunkene kontinent 
+bunny glamazon 
+lift and carry 
+lift and carry 
+lucy lee 
+lucy lee 
+mazinger 
+mazinger español 
+nydahl 
+legend 
+nydahl 
+acordeon 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+mixed wrestling 
+pedos 
+pedos 
+pedos 
+pedos 
+pedo 
+pedos 
+anal 
+rossi 
+rihanna umbrella 
+beionce 
+beyonce 
+moana 
+pozzi 
+pozzi 
+pedos 
+pedofilia 
+pedofilia 
+pedoporno 
+xxx 
+anal 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cum 
+pedo 
+pedos 
+babes 
+thai 
+thailand 
+thailand 
+china 
+thai 
+thai 
+thai creampie 
+yes 
+thailand 
+thailand 
+thailand 
+thailand 
+thailand 
+thai 16 
+xp ita 
+vasta ita 
+vista ita 
+pedos babes 
+babes 12 
+anal 
+anal cum 
+anal 
+battisti 
+eva enghen 
+eva engen 
+eva enger 
+science 
+fiction 
+japanese 
+español 
+español 
+chinese 
+thai 
+amazon 
+amazon amanda 
+tribler 
+amanda 
+blithe 
+cuba 
+537 cuba 
+titanic 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cuba 
+537 cuba 
+amazon 
+galactica 
+dragon ball z episodes 
+usher 
+lion king 
+kim holland 
+amazon lift 
+animal sex 
+goddess 
+amazon woman 
+shoulder lift 
+caryy 
+carry 
+bollywood nude 
+monica carry 
+monika lift 
+bollywood naked 
+bollywood naked 
+bollywood naked 
+bollywood nude 
+bollywood nude 
+bollywood nude 
+bollywood nude 
+hollywood nude 
+monica lift 
+hollywood nude 
+lifting 
+spongebob 
+spongebob 
+soundgarden 
+disney 
+heroes.s02 
+heroes 
+descent 
+ponyplay 
+grudge 
+hidalgo 
+nomadi 
+gruselkabinett 
+freischütz 
+superbad 
+superbad 
+ponyplay 
+nomadi 
+schwarze auge 
+dsa 
+dsa 
+auge 
+dsa 
+hustler 
+playboy 
+hustler 
+powerpoint 
+larry brent 
+dämonenkiller 
+textur 
+porn 
+rickshaw 
+human rickshaw 
+porn 
+sylvia saint 
+pony play 
+rihanna 
+vista 
+ponygirl 
+convert daa 
+convert daa 
+owk 
+zone alarm 
+ponyboy 
+zone alarm ita 
+zone alarm 
+plan 9 from outerspace 
+office ita 
+money 
+money ita 
+you'll be in my heart 
+south park 
+you'll be in my heart usher 
+pearl harbor - dvd 
+usher 
+fast and furious 
+[xxx]russian_student_orgy_full_movie-smkd.wmv 
+[xxx]russian_student 
+[xxx]russian_student 
+russian student 
+gina 
+tits 
+teeny 
+españolwilliam pitt 
+soccer 
+william pitt 
+goal 
+voetbal 
+tv 
+goal 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+h6 
+hazes 
+samen 
+surf up 
+search all files 
+search all files 
+. 
+doc 
+ccr 
+grind 
+weeds s03 
+weeds s03 fr 
+dutch 
+horror 
+neighbour 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+avira antivirus security premium 
+avira antivirus security premium 
+avira antivirus security premium 
+rambo 
+five 
+sims 2 
+sims 2 castaway 
+dark angel 
+alkohol 120 
+alkohol 
+alcohol 
+video converter to psp 
+mark king 
+pc game ita 
+osborne 
+martin gore 
+martin gore 
+felch 
+anal 
+geobrain 
+geobrain 
+cumfart 
+geobrain 
+bologna 
+liefde 
+invasion 
+lecciso 
+antonio giuliani 
+linux 
+ponygirl 
+search files, youtube and liveleak 
+race 
+lil wayne 
+lil flip 
+tip drill 
+tip drill 
+clit 
+teen girl 
+badass 
+ponygboy 
+ponyboy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+elo.digital.office 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pinacle 
+ponygirl 
+pinnacle 
+heartbeat 
+sex 
+porn 
+3 bats live 
+office 
+3 bats live 
+blond 
+maastricht 
+bunny glamazon 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+whitney houston ultimate collection 
+tribler 
+big 
+amfest02_chunk_1 
+sumo 
+woman sumo 
+sumo girl 
+osx 
+lady sumo 
+mixed sumo 
+femdom 
+search files, youtube and liveleak 
+xxx 
+xuxa 
+porno 
+boy 
+naruto yaoi 
+yaoi 
+dragonball gay 
+gay rape 
+bach 
+ligabue 
+batman 
+batman 
+batman 
+the bourne ultimatum 
+the bourne ultimatum 
+porno 
+porno 
+sesso 
+pompino 
+the bourne ultimatum 
+64 bits 
+nl subs 
+daa 
+uif 
+search files, youtube and liveleak 
+poweriso 
+heroes 
+heroes e07 
+bizut 
+强奸 
+强奸 
+强奸 
+强奸 
+强奸 
+强奸 
+sex 
+simpsons 
+simpsons 
+simpsons 
+simpsons 
+simpsons 
+echo and the bunnymen 
+echo and the bunnymen 
+echo and the bunnymen 
+om shanti om 
+milly vanilly 
+milly vanilly 
+more greatest hits 
+acdc 
+coral draw 
+corel draw 
+loins of punjab movie 
+rape 
+girls 
+skidrow 
+skid row 
+skid row thickskin 
+skid row thick skin 
+xara 3d 
+mission 
+imposseble 
+impossible 
+max pezzali "io ci sarò" 
+300 
+dragonball budokai 
+ps" dragonball budokai 
+ps2 dragonball budokai 
+mj 
+sevp 
+scm bradd 
+straigtcollegemen bradd 
+straigtcollegemen 
+straightcollegemen 
+straight college men 
+college men 
+hot men 
+david bravo 
+gay 
+sybian 
+ratatuille 
+ac-dc 
+paint shop 
+andré 
+hazes 
+illustrator 
+britney 
+beyonce 
+new york 
+lunikoff 
+enkelz 
+onkelz 
+nordfront 
+onkelz 
+the phantom of the opera 
+lunikoff 
+landser 
+kategorie 
+vollkontact 
+vollkontact 
+vollkontact 
+kc 
+vivere o morire 
+rape 
+pateta 
+f8-rc3-live kde 
+f8 
+f8 kde 
+westlife 
+star wars 
+the simpsons 
+westlife 
+westlife 
+draw & paint 
+ms paint 
+m4b 
+bond 
+dex 
+hypno 
+lightwave 3d 
+station pro 
+imagine 
+dream 3d 
+dreams 3d 
+true space 
+type styler 
+amorphium 
+darw & paint 
+draw & paint 
+3blow 
+3blow 
+myth 
+zen 
+zen 
+search files, youtube and liveleak 
+ken 
+guerriero 
+spiderman 
+ita ebook 
+ita e-book 
+ita e-book 
+ita e-book 
+ita ebook 
+ita 
+selen 
+mp3 
+light wave 
+xvid italian 
+la terza madre 
+italian 
+spinrite 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+conquistando o impossivel motivação pessoal 
+conquistando o impossivel motivação pessoal 
+conquistando o impossivel motivação pessoal 
+conquistando o impossivel motivação pessoal 
+motivacional 
+motivacional 
+videos 
+rocco 
+conquistando o impossivel 
+conquistando o impossivel 
+conquistando o impossivel 
+video motivação 
+video motivação 
+access 
+accessfatture 
+fatture 
+fattura 
+corso access 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+autocad 
+rocco 
+tribler 
+winrar 
+zecchino d'oro 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+games 
+games iso 
+rocco 
+rocco 
+rocco 
+rocco 
+ganja 
+dj tomekk 
+nirvana 
+vasco 
+need for speed carbon iso 
+xvid italian 2007 
+la terza madre 2007 silent 
+la terza madre 2007 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+search files, youtube and liveleak 
+hape kerkeling 
+hape kerkeling 
+hape kerkeling 
+hape kerkeling 
+hape kerkeling 
+xvid italin 2007 
+.mp4 
+.mpg4 
+.mpg4 
+movie lat 
+britney 
+britney 
+italian 
+horror italian 
+horror 
+italian 
+search files, youtube and liveleak 
+saw 4 
+pink floyd 
+saw 4 
+misoux 
+metallica 
+misoux 
+fuck 
+enya 
+enrico ruggeri 
+xvid iyalian 
+xvid italian 
+zero assoluto 
+zero assoluto 
+3d max 
+xvid italian 
+massimo di cataldo 
+enrico ruggeri 
+renato zero 
+enrico ruggeri 
+genesis 
+giorgia 
+sexy 
+cartoon sexy 
+cartoon 
+search files, youtube and liveleak 
+sailor moon 
+enrico ruggeri 
+xxx
+teen 
+vista 
+mac 
+sony ericsson w300i 
+sony ericsson w300 
+w300 
+corel 
+jennifer lopez 
+ita divx 
+teen 
+house 
+dr house 
+doctor house 
+red dwarf 
+pixar 
+king 
+singers 
+gregoriaans 
+ouverture 
+playboy 
+bhool bhulaiya 2 
+bridget 
+antonacci 
+bridget maasland 
+katja 
+dvd 
+pamela 
+dragonball 
+dvd ita dragonball 
+ita dragonball 
+dragonball 
+amatoriale 
+carl cox 
+boys 
+girls 
+boys 
+xvid la terza madre 
+bondage 
+bondage 
+ramazzotti 
+film 
+aladin 
+i7 e l8 
+sex 
+preteen 
+porno 
+sidnee 
+sidnee 
+pausini 
+adriano celentano 
+adriano celentano 
+adriano celentano 
+marco masini 
+porno 
+girlfriend 
+girlfriend 
+girlfriend 
+girlfriend 
+girlfriend 
+superman 
+foletti 
+foletti 
+pinocchio 
+renegade 
+celentano 
+amateur 
+tracy lords 
+porno 
+sound barrier 
+beo 
+schwarze sonne 
+wife 
+celine dion 
+beowulf 
+prison break fr 
+ableton live 
+mature 
+sound barrier 
+sound barrier 
+spank 
+spank 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+adult 
+spanking 
+pc game 
+pes 2088 
+pes 2008 
+sfondi tv 
+carmen di pietro 
+vercelli 
+pc game pes 2008 
+dire staits 
+paris hilton 
+a walk to remember 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+a walk to remember/ movie download 
+angel 4x02 
+a walk to remember/ movie 
+angel 
+a walk to remember 
+trans 
+folder lock 
+amazon blithe 
+blithe 
+blithe 
+amazon girl 
+amazon women 
+amazon 
+prison brake 
+prison brake 
+prison brakee 
+prison brake 
+prison brake 
+prison brake 
+prison break 
+eros ramazzotti 
+eros ramazzotti etazzenda 
+tazzenda 
+eros ramazzoti 
+eros ramazzotti 
+tazzenda 
+tazenda 
+eros 2007 
+italian 2007 
+domo mia 
+justin timberlake 
+justin timberlake mp3 
+tribler 
+tribler 
+test 
+elephant 
+elephant 
+ubuntu 
+test 
+splinter[2006]dvdrip 
+splinter*dvdrip 
+splinter 
+amazon susan 
+hairspray 
+justin timberlake mp3 
+justin timberlake mp3 
+justin timberlake mp3 
+justin timberlake 
+apogolize 
+apogolize 
+reno.911 
+reno911 
+reno 911 
+reno 911 
+goddess 
+tribler 
+test 
+shemale 
+nude 
+saw 
+naked 
+borati 
+borrati 
+borroti 
+borroti 
+boroti 
+boroti 
+mixed wrestling 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+goddess 
+abba 
+abba gold greatest 
+corel draw 
+the last legion 
+daniel odonnell 
+daniel o,donnell 
+daniel o,donnell 
+uitzending gemist 
+uitzending gemist 
+uitzending gemist 
+project catwalk 
+charmed 
+charmed 
+charmed 
+uitzending gemist 
+uitzending gemist 
+crissy moran 
+crissy moran 
+crissy moran 
+crissy moran 
+maximum 
+peter gabriel 
+stargate atlantis 
+bourne 
+autodesk 
+ets 
+yes 
+lost season 3 
+lost season 4 
+lost season 3 
+babysitter 
+symbian 
+chuck 
+chuck so1eo6 
+chuck so1e06 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+vulcao etna 
+vulcao etna 
+etna 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+super 
+mobile 7 
+taxi 4 
+720p 
+xpress pro 
+xpress pro avid 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+google video 
+rascal flatts 
+rascalflatts 
+rascal flatts 
+life 
+stargate atlantis 
+search files, youtube and liveleak 
+dragon ball 
+life 
+dragon ball 
+swimming 
+pirates 
+starcraft brood war 
+young 
+dolly buster 
+schwanger 
+transformers 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+superbad 
+girls gone wild 
+accepted 
+hape kerkeling 
+mixed wrestling 
+geki 
+den-o 
+search files, youtube and liveleak 
+concept draw 
+grappling 
+grapplinggirls 
+amazon 
+launch 2007 
+the invasion 
+reign over me 
+pizza 
+ncis 
+csi 
+csi 8 
+tiziano ferro 
+tiziano ferro 
+a chi mi dice 
+no tears anymore 
+cascada 
+chikan 
+chinese 
+asian 
+hongkong 
+hong kong 
+eboney 
+ebony 
+spiderman 
+spiderman 
+sponge bob 
+fr 
+french 
+celentano 
+sesso 
+french 
+tribler 
+tribler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+* 
+gay 
+italian 
+doors 
+doors 
+rolf zuckowski 
+den-o 
+eri clepton 
+clepton 
+nomadi 
+office 
+office dutch 
+french 
+bdsm 
+slave 
+slavegirl 
+fetish 
+porno 
+tribler 
+tribler 
+tribler 
+delft 
+delft 
+delft 
+peer 
+peer 
+ratatouille pl 
+ps2 
+tribler 
+teen 
+anal 
+snl 
+buonanotte italia 
+buonanotte italia 
+sex 
+madonna 
+madonna 
+chris brown 
+blowjob 
+alles is liefde 
+amv 
+cnn 
+bdsm 
+house 
+house. 
+window vista italiano 
+window vista ultimate italiano 
+window vista ultimate ita 
+les experts pc 
+les experts pc game 
+csi pc game 
+paki 
+heroes season 2 
+paki 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+prisonbrake 
+prison brake 
+prison brake 
+prison brake 
+prison brake 
+prison brake 
+prison break 
+shalwar 
+python 
+python 
+avatar 
+hijab 
+dutch 
+bumba 
+vale tudo 
+30 days of night 
+veil 
+free fight 
+bellydance 
+settlers 
+turkish bellydance 
+didem 
+new 
+paki 
+avatar 
+free fight 
+free fight 
+free fight 
+criss 
+latino muscle men 
+russian 
+. 
+games 
+ulead 8 
+ulead 8 pro 
+ulead pro 8 
+sato 
+ulead pro 8 
+ulead pro 
+ulead pro 
+ulead 
+ita 
+heroes 
+valentijn 
+pquadro 
+la terza 
+sexy back 
+58 minuti per morire 
+58 minuti per morire 
+30 days 
+helsing 
+helsinjk 
+helsink 
+helsing 
+van helsing 
+van helsing 
+the unit 
+hoeren 
+galactica 
+enterprice 
+enterprise 
+nine inch nails 
+dresden dolls 
+dresden dolls 
+placebo meds 
+placebo 
+placebo 
+dresden dolls 
+backstabber 
+head like a hole 
+head like a hole 
+head like a hole 
+head like a hole 
+head like a hole 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+the cure 
+if nly tnight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+nine inch nails 
+nine inch nails 
+nine inch nails 
+nine inch nails 
+lullaby 
+etabs 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+etabs 
+sap2000 
+suspiria 
+inferno 
+la tigre e la neve 
+dresden dolls 
+dresden dolls 
+dune 
+dresden dolls 
+the smashing pumpkins 
+the smashing pumpkins 
+the smashing pumpkins 
+the smashing pumpkins 
+adriano celentano 
+godlike 
+godlike 
+godlike 
+godlike 
+godlike 
+godlike 
+godlike 
+kmfdm 
+paz 
+paz! 
+paz 
+la guerra dei anto 
+la guerra dei anto 
+je t'aime paris 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+roberto benigni 
+roberto benigni 
+roberto benigni 
+roberto benigni 
+roberto benigni 
+arctic monkeys 
+nami 
+hentai 
+hentai 
+infdels 
+hentai 
+infdels 
+infdels 
+arctic monkeys 
+guano apes 
+lesbians 
+older 
+milf 
+mothers id like 
+mothers 
+oral 
+cunnilinguis 
+cunnilingus 
+devil prada 
+take that 
+van morrison 
+k3 
+fedex 
+biexp 
+*.* 
+* 
+* 
+solo 
+fusca 
+milf hunter 
+harry potter and the sorcerers stone 
+masturbation 
+kissing 
+kissing 
+kissing 
+tongue 
+tongue 
+tongue 
+feet 
+legs 
+feet 
+amateur 
+search all files 
+bang bus 
+vmware 
+matuer 
+matuer 
+matuer 
+mature 
+milfs 
+sex 
+nipples 
+tits 
+xxx 
+sex 
+rubbing 
+fucking 
+over 40 
+women over 40 
+sex over 40 
+saphos 
+sapphos 
+sappho 
+kissing 
+dmx 
+dmx 
+dmx 
+dmx 
+dmx 
+dmx 
+dmx 
+porn 
+lesbians fisting 
+diehard 
+diehard 
+sex 
+starrcraft 2 
+starcraft 2 
+starcraft 
+smallville.s07e07 
+i am legend 
+stardust 
+dog fuck 
+ratatouille 
+mattafix big cuty live 
+mattafix big city live 
+x gonna give it 
+x gonna give it to ya 
+search files, youtube and liveleak 
+shrek 
+ps2 shrek 
+mattafix big city life 
+untamed love 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+oxygen 
+heroes season 1 
+* 
+auteurs 
+home movies gril 
+home movies grill 
+blade house 
+klysma 
+zucchero 
+enema 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fifa manager 08 
+fifa manager 08 ita 
+pc ita fifa manager 08 
+fifa manager 08 
+natale acasa dj 
+natale a casa deejay.avi 
+la terza madre 
+film 2007 
+la terza madre 
+la terza madre 
+asian sluts 
+asian sluts 
+little drummer 
+little drummer 
+drummer 
+ll cool j 
+ll cool j 
+ll cool j 
+dr dre 
+dvd 
+lightroom 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+natale a casa deejay.avi 
+sawariya 
+ricky gervais 
+comic 
+salsa 
+salsa 
+hoogvliet 
+focus 
+wende snijders 
+diskeeper 
+stealth 
+timers 
+ãżã¤ã\83\9eãĵãÅ\9f 
+timers bootleg 
+timers boot 
+gay 
+timers 
+brazzers 
+druksexorgy 
+partyhardcore 
+audio recording 
+recording audio 
+camstudio 
+solution 
+camel 
+aika 
+timers 
+emerson lake and palmer 
+la terza madre 
+la terza madre 
+la terza madre 
+girls out west 
+girls out west 
+pregnant 
+nipple 
+piss 
+sekt 
+girls out westishotmyself 
+ishotmyself 
+ishotmyself 
+profondo rosso 
+csi 
+profondo rosso 
+without a trace 
+without a trace 
+csi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+brutal rape 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rick wakeman 
+brian wilson 
+brian wilson 
+brian wilson 
+brian wilson 
+brian wilson 
+brian wilson 
+brian wilson 
+richard hawley 
+van kooten en de bie 
+richard hawley 
+traci lords 
+heroes 
+papito 
+sailor moon 
+moana 
+windows vista 
+sailor moon il film 
+xxx 
+teens 
+goddess severa 
+leza 
+search all files 
+search all files 
+cinthia 
+tomtom 
+cinthia mulherao 
+dvd 
+dvd 
+ciblythe 
+blythe 
+blythe 
+amazon blythe 
+kayak 
+amazon astrid 
+amazon astrid 
+roger waters 
+metallica 
+metallica 
+stardust 
+astrid 
+svejk 
+dailly motion 
+tango 
+koremans 
+xxx 
+gandalf 
+cum 
+amazon taanna's 
+lucinda 
+goddess severa 
+cum 
+hardcore 
+oral 
+a tribute to r d burman 
+severa 
+mallu 
+adult 
+mario salieri 
+ita divx 
+tango argentino 
+dracula 
+pics xxx 
+xxx 
+piggyback 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode# 
+supertramp 
+nach 7 tagen - ausgeflittert 
+the bourne ultimatum 
+the bourne ultimatum de 
+the bourne ultimatum ger 
+daily 
+blythe 
+amedeo minghi 
+don omar 
+pcgame ita 
+film xxx 
+film xxx 
+reeves 
+matrix 
+matrix ita 
+amateurs 
+divx 
+divx ita 2007 
+divx ita 2007 
+divx ita 
+layo bushwacka 
+divx ita 
+divx ita 
+divx ita 
+divx ita 2007 
+the invisible ita 
+the invisible 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible 
+acdc 
+dvdrip ita 
+ac-dc 
+dvdrip ita 2007 
+led zeppelin 
+allman brothers band 
+allman brothers ba 
+allman brothers ba 
+allman brothers ba 
+golden earring 
+golden earring 
+golden earring 
+golden earring 
+golden earring 
+golden earring 
+led zeppelin 
+vanilla fudge 
+rolling stone 
+anal 
+don omar 
+don omar 
+ita 
+don omar 
+don omar 
+don omar 
+multilanguage 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+movies 
+dutch movies 
+dutchsubtitel movies 
+film 
+music 
+amateur porn 
+spiderman ned. 
+spiderman 
+tom tom go 
+don omar 
+divx ita 
+danny tenaglia 
+m2o 
+m2o 
+mp3 
+biagio antonacci 
+antonacci 
+antonacci 
+antonacci 
+samsa 
+goku 
+naruto 
+divx 
+divx ita 
+spaanse les arjan 
+mp3 
+nikki tyler 
+vivid 
+wicked 
+hazelwood 
+don omar 
+nino d'angelo 
+nino d'angelo 
+nino d'angelo 
+nino d'angelo 
+nino d'angelo 
+hoerbuch 
+audiobook 
+film 
+film azione 
+film 
+film xxx 
+film xxx 
+film xxx 
+film hostel part 2 
+film azione 
+alessandro siani 
+latina 
+latina 
+mortal kombat 
+film mortal kombat 
+30.dats.of.night.2007 
+triple x 
+30.days.of.night.2007 
+film porno 
+film porno 
+film porno 
+film porno 
+musica house 
+musica house 
+musica house 
+musica house 
+jethro 
+shemale 
+private 
+lost 17 
+boobs 
+breast 
+student 
+naked 
+naked 
+gay 
+penis 
+kali 
+tony hawk 
+princess kali 
+mistress 
+kamelot 
+groped 
+oakley hall 
+molested 
+ride 
+wushu 
+shoulder carry 
+downblouse 
+shoulder lift 
+nero 
+david török 
+search files, youtube and liveleak 
+seks 
+atomic drop 
+body slam 
+scoop slam 
+fireman's carry 
+markus heilmann 
+mafia 
+jordan 
+fireman's carry 
+bloodmatch 
+slam 
+blade 
+wrestling slam 
+bokito 
+bokito 
+bokito 
+bokito 
+bokito 
+bokito 
+funny movies 
+search files, youtube and liveleak 
+ass rape 
+ass rape 
+livorno 
+blondes anal 
+blondes 
+ita 
+disco inferno 
+50 cent 
+ita 
+search files, youtube andaika liveleak 
+aika 
+rosetta stone 
+rosetta stone 
+smallville 
+pantyhoseline 
+pantyhose 
+pantyhosetales 
+pantyhosescreen 
+pantyhose screen 
+panties 
+gold 
+gold fact 
+linzie mckenzie 
+linzie dawn 
+linzie dawn nckenzie 
+linzie dawn nckenzie 
+rape 
+superbad 
+fucking 
+fucking young girl 
+star wars 
+axis&allies 
+axis allies 
+tropico 
+sim earth 
+shogun 
+the last dragon 
+plaisir de nuire 
+tous au paradis 
+search files, youtube and liveleak 
+ita 
+ita 
+ita 
+ita 
+chelsey charms 
+chelsey charms 
+weihnacht 
+c++ 
+c+ 
+american gangster 
+kamasutra 
+lions for lambs 
+weirdsville 
+into the wild 
+age of empires 3 
+age of empires 3 
+age of empires 3 
+bug 
+cadez 
+blonde 
+blonde 
+blonde 
+tune up utilities 
+ita 
+ita 
+ita 
+ita 
+http://wiki.ubuntuusers.de/downloads/gutsy_gibbon/statisch?action=attachfile&do=get&target=ubuntu-7.10-dvd-i386.iso.torrent 
+http://wiki.ubuntuusers.de/downloads/gutsy_gibbon/statisch?action=attachfile&do=get&target=ubuntu-7.10-dvd-i386.iso.torrent 
+http://wiki.ubuntuusers.de/downloads/gutsy_gibbon/statisch?action=attachfile&do=get&target=ubuntu-7.10-dvd-i386.iso.torrent 
+casino royale 
+texas tornados 
+texmex 
+country 
+latin 
+flaco 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+quuen - somebody to love 
+queen - somebody to love 
+queen - somebody to love 
+ccc inc 
+toontje lager 
+preteen 
+the story 
+hogtied 
+nudists 
+the story 
+the story of o 
+whipped ass 
+bondage 
+nudist 
+aika 
+dvdclone 
+pc.football.manager.2007.italian.torrent 
+dvd clone 
+charms 
+chrystal star 
+dvdclone 
+busty dusty 
+facial 
+gianna michaels 
+busty dusty 
+lazio 
+ronaldinho.a.day.in.the.life.of 
+anime 
+ronaldinho 
+casino royale 
+gianni dany 
+spybot 
+spy bot 
+spy bot 
+playboy 
+inconveniant truth 
+inconvenient truth 
+premiere pro cs3 
+dvd*avi 
+dvdavi 
+ramones 
+calinforication dvd 
+calinfornication dvd 
+calinfornication 
+calinfornication 
+calinfornication 
+calinfornication 
+calinfornication 
+calinforn 
+calin 
+cali 
+californication 
+softwares 
+ashampoo antispyware 
+ashampoo antispyware 
+yu gi oh games pc 
+yu gi oh 
+shrek 3 
+shrek 3 
+google earthe 
+google earth 
+google earth pro 
+animal 
+bukkake 
+bukk 
+shrek 3 moves 
+japan av 
+hong kong 
+hk tv 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+celentano 
+celentano 
+celentano 
+windows2000 
+moana pozzi 
+moves 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+film hardcore 
+moana pozzi 
+moana pozzi 
+film hardcore 
+windows 2000 
+film hardcore 
+moana pozzi 
+cicciolina 
+film hardcore 
+film hardcore 
+film porno 
+rocco siffredi 
+movus 
+simpsons movie 
+rootkit 
+rootkit 
+duft punk 
+punk 
+om shanti om 
+around the world 
+transformers the movie 
+death at a funeral 
+sawariya 
+search files, youtube and liveleak 
+led zeppelin 
+harry potter und * 
+harry potter und der orden des phönix 
+harry potter und der orden des phönix 
+harry potter und der orden des phönix 
+harry potter und der orden des phönix 
+games 
+harry potter und der orden des phönix 
+die hard 4 
+cumshot 
+facial 
+die hard 4 
+amatoriale 
+underworl 
+vista 
+amatoriale 
+vanessa incontrada 
+resident evil 
+adobe 
+resident evil apocalipse 
+resident evil apocalisse 
+search files, youtube and liveleak 
+mature 
+clone dvd 
+dragon ball z 
+ps2 dragon ball z 
+anti virus 
+back stage 
+back stage 
+paris hilton 
+300 
+prision breack 
+capon 
+nipple 
+omnipcx 
+resident evil 
+pc games need for speed pro street 
+ratatui 
+13 fantasmas 
+die hard 4 
+neox 
+spiderman 
+google 
+bbc 
+spiderman 
+spiderman 
+netfront 
+school of rock 
+pocoyo 
+el sindrome de ulises 
+pocke pc 
+bratz 
+hombre cubo 
+winx 
+search files, youtube and liveleak 
+jovanotti 
+maroon 5 
+deva ghan 
+avatar 
+dave gahan 
+hourglass 
+nomadi 
+bisex 
+cruyf 
+schiedam 
+search files, youtube and liveleak 
+windows 
+office 2007 
+24 
+visual basic 2007 
+visual basic 
+opera 
+blade 
+beverwaard 
+nero 
+blender 
+ulises 
+300 
+avril lavigne 
+wii 
+kaspersky 
+rolling stones 
+flood 
+the simpsons the movie 
+led zeppelin 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+asia sex 
+dvd ita 
+dog fuck 
+funny clip 
+funny clip 
+funny clip 
+funny clip 
+search files, youtube and liveleak 
+modem spy 
+strike 
+search files, youtube and liveleak 
+grey's 
+mom 
+atonement 
+game 
+pc game 
+pc shooter game 
+pc fps game 
+pc 
+search files, youtube and liveleak 
+ken follett mondo senza fine 
+ken follett 
+follett 
+search files, youtube and liveleak 
+mika 
+mika 
+mika 
+timbaland 
+modern talking 
+moddern talking 
+talking 
+modern 
+need for speed 
+vanoza 
+shemal fuck 
+kerez 
+gay fuck 
+gabriella 
+bella 
+rocco 
+adult gay 
+pyramid 
+ever after 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+bublè 
+film 
+visual basic 
+mac leopard 
+ik herb schijt 
+ik heb schijt 
+ik heb schijt 
+ik heb schijt 
+ik heb schijt 
+ac 97 
+ac 97 
+ac 97 
+ac 97 
+realtek 
+realtek 
+realtek 
+ac'97 
+ac'97 
+gta 
+amy winehouse 
+fifa 
+pes 2008 
+mappe tt 
+mappe tomtom 
+xp live 
+xp lite 
+respectable 
+premiere 
+linkin park 
+fort minor 
+power dvd 
+big fish 
+اÙÃ\98²Ù�Â\86 Ùุ­Ùุ¯ 
+vomit 
+diamond 
+mina 
+adriano celentano 
+dolly 
+atb 
+video 
+rihanna 
+acronis 
+doom 3 
+jlo 
+tmf 
+pussycat 
+call of duty 
+call of duty ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+call of duty multilanguage 
+battlefield 
+madonna 
+call of duty 
+rugby 
+madden 
+madden 
+rugby 
+ray charles 
+summertime 
+summertime 
+summertime 
+ray charles 
+die ard vivere o morire 
+crack fifa 08 
+shemal 
+amici miei 
+amici miei 
+die hard 
+ligabue 
+sex 
+×Â\94×æ\87\81ר×Â\90×Â\9c×Â\99×Â\9d 
+dragon wars 
+cfnm 
+film 
+albano carrisi 
+shrek 3 
+shrek 
+sailor moon 
+i love you 
+ramsay 
+iron chef 
+kitchen 
+search files, youtube and liveleak 
+porno 
+cellulare 
+sexy 
+funny 
+american ninja 
+demon tools 
+donne di mafia 
+tosca d'aquino 
+mafia 
+osx86 
+nine inch nails 
+nine inch nails 
+spamalot 
+doctor who 
+wireless 
+tungsten t3 software 
+bdsm german attac 
+vicente celestino 
+porta aberta 
+enquanto o sol brilhar 
+enquanto o sol brilhar 
+enquanto o sol brilhar 
+facinação 
+facinação 
+facinação 
+facinação 
+champagne 
+champagne 
+champagne 
+carruagem de fogo 
+groove armada goodbye country 
+groove armada 
+carruagem de fogo 
+carruagem de fogo 
+ave maria 
+ave maria 
+hentai 
+star wars 
+jay brannan 
+sybian 
+tied 
+gyno 
+gyno 
+yikes 
+dormroom 
+vibrator 
+die hard 
+gay 
+stargate 
+17 and 
+matrix 
+doctor who season 3 
+matrix 1080p 
+doctor who season 3 dvd 
+×Â\9eש×Â\94 ×¤×¨×¥ 
+mumbai salsa 
+osx 
+gloryhole 
+glory hole 
+criminal minds 
+whore 
+bdsm 
+bdsm 
+bdsm 
+forced 
+wende 
+davilex 
+davilex software 
+davilex software 
+software 
+the who 
+album 
+criminal minds 3 
+drupal 
+witchblade 
+hentai 
+ashok mumbai 
+chemical brothers 
+zonealarm® internet security suite 
+zonealarm internet security suite 
+[bakakozou_-_conclave]witchblade 
+[bakakozou_-_conclave]witchblade 
+foxy lady 
+search files, youtube and liveleak 
+source 
+jimi blue 
+i´m loving 
+i´m loving 
+freestyle mix 
+avi video tools 
+rockabilly 
+hot coffee 
+amazing grace 
+anal 
+anal 
+pixar 
+haendel rinaldo 
+kurumi 
+haendel concerti grossi 
+vivaldi 
+pasolini 
+porno 
+calcio 
+calcio 
+calcio 
+mare 
+relient k 
+dildo 
+taylor rain 
+porpora 
+dr house 
+dr housedr house md 
+dr house md 
+spiderman 2 
+spiderman 2 ita 
+kurumi 
+house 
+spiderman 2 
+search files, youtube and liveleak 
+supertramp 
+mikayla 
+search files, youtube and liveleak 
+mikayla latin 
+mikayla 
+lichelle marie 
+codec 
+doraemon 
+dame chocolate 
+juanes, me enamoras 
+mumbai salsa 
+peillo 
+enrique 
+enrique naranjo 
+angela 
+hernandez 
+hernandez enrique 
+brazt 
+bratz 
+search files, youtube and liveleak 
+la terza madre 
+barbie la peliculas 
+barbie la pelicula la reina de los animaless 
+barbie la pelicula la reina de los animales 
+barbie la pelicula la reina de los animales 
+barbie la pelicula la reina 
+annie cruz 
+lily thai 
+a letto con le quarantenni 
+jonn holmes 
+milly d'abraccio 
+bocche di comesse 
+selen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+asus 
+asus eee 
+zooskool 
+beast 
+dog 
+pig 
+snake 
+machined 
+wired 
+wired 
+machined 
+bondage 
+witchblade 1-24 eng softsub mkv complete 
+witchblade witchblade 1-24 eng softsub mkv complete 
+witchblade 1-24 eng softsub mkv complete 
+witchblade 1-24 eng softsub 
+eng softsub 
+telugu 
+telugu youtube 
+musoc 
+music 
+music 
+music gryan adams 
+music bryan adams 
+anime 
+dove sei gigi d'alessio 
+monros 
+timberland 
+timberland 
+timberland 
+sternwander germa 
+sternwanderer german 
+sternwander german 
+sternwanderer german 
+gigi d'alessio 
+sternwanderer german 
+prison break 08 
+anal 
+hot mom 
+rafaello 
+friends 
+miragio 
+d'angelo 
+nino d'angelo 
+monros 
+monros 
+nino d'angelo 
+nino d'angelo 
+happy movie 
+telugu movie 
+titanic 
+avatar 
+settlers 
+nlsub 
+nl sub 
+finizio 
+planet terror 
+wwe 
+wwe smackdown 
+wwe ecw 
+finiziojessica massaro 
+jessica massaro 
+jessica massaro 
+jessica 
+rosario miragio 
+rosario miragio 
+nino d'angelo 
+rocco sinfredi 
+rocco sinfredi 
+psv live 
+rocco sinfredi 
+logic pro pc 
+logic pro pc 
+unit 
+shemale 
+logic pc 
+dj ötzi 
+mika 
+elektor 
+dj ötzi 
+dj ötzi 
+foxy 
+dj ötzi 
+mika 
+search files, youtube and liveleak 
+video 
+anastacia 
+alessio 
+alessio 
+video 
+pupo 
+pupo 
+pupo 
+transformers german 
+hombre cubo y pocoyo 
+hombre cubo y el perro 
+dp 
+hombre cubo 
+hombre cubo 
+sex 
+carton 
+carton 
+hombre cubo episodio2 
+girsll 
+girsll 
+winx 
+aquatica 3d 
+nino d'angelo 
+search files, youtube and liveleak 
+pompino 
+pompino 
+pompino 
+pompini 
+pompini 
+pompini 
+pompini 
+milan campione 
+marachesh express 
+endless 
+dj orzt 
+dj otzi 
+dj otzi 
+dj otzi 
+gigi d'agostino 
+paki 
+cicciolina 
+pacific assault ita pc 
+mika 
+stand up for the champian 
+standup 
+orlowski 
+luxor 3 
+stand up for 
+kuschelrock 
+stand up for the champions 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rule the world - radio edit 
+rule the world 
+burqa 
+rule the world 
+take that rule the world 
+nino d'angelo 
+nino d'angelo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+powerpoint 
+search files, youtube and liveleak 
+publisher 
+publisher 
+iran 
+nieuwsbrief 
+sex 
+history 
+statistiek 
+statistica 
+mathematica 
+statistiek 
+veil 
+dokkum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+publisher 
+guitar 
+statistica 
+statistica.exe 
+statistica.zip 
+statistica 
+spss 
+posthumus 
+spss 
+titanic 
+publisher 
+mario merola 
+publisher 2007 
+portal pc 
+gigi d'agostino 
+american gangster 
+lift veil 
+lifting veil 
+irai 
+irani 
+30 days of night nl 
+saw 4 nl 
+anchorman 
+andre hazes 
+andre hazes 
+hijab 
+fantascienza 
+waitress 
+hereos 
+giant 
+giantess 
+kunst der fuge 
+imax 
+search files, youtube and liveleak 
+imax 
+foxy lady 
+justin timberlake 
+justin timberlake 
+justin timberlake 
+search files, youtube and liveleak 
+internet download 
+internet download accelerator 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+premiere pro cs3 
+big dick 
+freaks of cock 
+ava devine 
+teacher 
+extreme cum shot 
+premiere pro cs3 
+premiere pro cs3 
+beyoncé - b'day 
+isabeli fontana 
+nicky 
+nicky 
+nickymodel 
+nickymodel 
+monica bellucci 
+titanic 
+isabeli fontana 
+titanic 
+aduld 
+porno 
+adult 
+luchino visconti 
+beyoncé b'day 
+beyonce day 
+night 
+noodweer 
+noodweer 
+adriano celentano 
+adriano celentano 
+biagio antonacci 
+beppe grillo 
+elvis presley 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+blonde 
+search files, youtube and liveleak 
+nature 
+cars 
+stardust 
+hotmom 
+pepsi 
+hot mom 
+mentos 
+private 
+pepsi 
+hilarius 
+cola 
+search files, youtube and liveleak 
+los nocheros 
+pavaroti 
+pavaroti 
+pavaroti 
+search files, youtube and liveleak 
+van halen 
+search files, youtube and liveleak 
+xp pirate 
+xp pirate 
+franz ferdinand 
+franz ferdinand 
+franz ferdinand 
+franz ferdinand 
+xp pirated 
+xp 
+zetsubou 
+ã°ãÄ´ã³ãÄ°ãŹã³ 
+ã°ãÄ´ã³ãÄ°ãŹã³ 
+gurren 
+clint eastwood 
+911 
+911 terror 
+pak fa 
+civ 4 
+civ beyond the sword 
+elizabeth 
+papa roach 
+papa roach 
+fome 
+fome 
+area 51 
+soares 
+soares 
+jo soares 
+lula 
+lula 
+me enamora 
+xp home 
+me enamora 
+me enamora 
+el polaco 
+vista 
+wanda nara 
+bananero 
+wanda nara 
+beast 
+dog 
+american french 
+eva 
+gang 
+gang 
+italian 
+moulin 
+moulin 
+moulin 
+tabu 
+tabu 
+tabu 
+tabu 
+70ties porn 
+toon boom 
+toon boom 
+shaun of the dead 
+http://endeavor.isat.com.br/ 
+http://endeavor.isat.com.br/ 
+xp sp2 
+deus ex 
+wii 
+http://endeavor.isat.com.br/ 
+exploited black teens 
+blender 
+i think i love my wife 
+exploited black teens 
+airwolf 
+a-team 
+the beatles 
+simon and garfunkel 
+roling stones 
+rolling stones 
+chris rea 
+pink floid 
+pinc floid 
+pink floid 
+pink floyd 
+mumbai salsa 
+sawariya 
+fantascienza 
+friday night lights 
+odyssee 
+heroes season one 
+heroes season 1 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+ubuntu 
+ubuntu 
+ubuntu 
+ubuntu 
+michelle duits 
+poze 
+pictures 
+pictures 
+pictures 
+sex 
+break.com 
+gay 
+shrek trzeci 
+huge nipple 
+huge 
+clit 
+squirt 
+spanish 
+jack johnson 
+jack johnson 
+taboo 
+orge 
+shrek 
+stephen king 
+the stand 
+shrek_pl 
+ps2.iso 
+ps2 
+ps2 def 
+def jam ps2 
+def jam 
+teletubbies 
+def jam 
+def jam 
+my 
+my mom 
+sanct 
+vmware 
+friend 
+rocco 
+rocco 
+rocco 
+nanny 
+nanny 
+search files, youtube and liveleak 
+nanny 
+fuck 
+ficken 
+bitte fick 
+bitte fick 
+bitte fick 
+fick 
+fick 
+gay 
+nude boys 
+latex 
+ps2 bios 
+young boys 
+buddha 
+boys 
+travaglio 
+nude boy png 
+nude boy 
+sex boys 
+dandini 
+marta 
+fica 
+porn 
+search files, youtube and liveleak 
+tracy lords 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+any dvd 
+gay hardcore 
+pink floyd 
+tracylords 
+culo 
+tracylords 
+tracylords 
+tracylords 
+tracylords 
+tracy lords 
+tracy lords 
+tracy lords 
+chubby 
+teen 
+tracy lords 
+tracy lords 
+happy hardcore 
+mental theo 
+charly lownoise 
+cascada 
+mental theo 
+vista ultimate 
+cascada 
+detective conan capitulo 1 
+detective conan capitulo 21 
+detective conan capitulo 50 
+detective conan la pelicula 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bananero 
+animal 
+spanish 
+stay alive 
+vmware windows 
+the ring 
+vmware windows 
+vmware windows 
+nino rota 
+into the wild 
+de/vision 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+de/vision 
+de/vision 
+nino rota 
+rocco siffredi 
+moana pozzi 
+spain 
+piratas caribe 
+shy love 
+spy 
+mu online 
+esnf 
+shrek 3 
+eine schrecklich nette familie 
+alexis may 
+por ti 
+te quiero 
+si alguna vez 
+angel 
+naruto 
+into the wildamerican gangster 
+american gangster 
+austin lounge lizards 
+tastes like chicken 
+drunk 
+luciano pavarotti 
+adriano celentano 
+adriano celentano 
+gesù di zeffirelli 
+gesù di zeffirelli 
+gesù di zeffirelli 
+gesù di zeffirelli 
+gesù di zeffirelli 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+search files, youtube and liveleak 
+dvdrip ita 2007 
+quin 
+dvdrip ita 2007 
+dvdrip ita 2007 
+vintage cheese 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+formula tre 
+vintage cheese 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+search files, youtube and liveleak 
+dvdrip ita 2007 
+dvdrip ita 2007 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+blunt 
+adult 
+jeroen van der boom 
+jeroen van der boom jij bent zo 
+jij bent zo 
+jij bent zo 
+jij bent zo 
+jeroen van der boom 
+search files, youtube and liveleak 
+jij bent zo 
+with every hartbeat 
+1973 
+james blunt 
+mp3 
+techno 
+lunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+magix foto su cd e dvd 
+blunt 
+james blunt 
+james blunt 
+t david guetta ft. cozi 
+david guetta 
+film 
+david guetta 
+blunt 
+robyn 
+simpson il film 
+simpson il film italiano 
+simpson 
+xxx 
+m 
+a 
+b 
+techno 
+t 
+dvdrip ita 2007 
+i 
+a 
+a 
+magix 
+magix foto 
+rwerg 
+umbilical brothers 
+a 
+eminem 
+e 
+eminem 
+mina 
+encarta 2008 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+foxy lady 
+http://youtube.com/watch?v=ljmnao2pnrq 
+dream theater 
+oblivion 
+oblivion 
+sims 2 
+aliens 
+spanish 
+peratas del caribe 
+piratas del caribe 
+spanish 
+mina 
+reptilians 
+zodiac 
+porno 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+66 
+spss ebook 
+spss ebook 
+spss 
+v 12 
+newsletter 
+publisher 2007 
+publisher 
+nokia 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+history 
+bach 
+bush 
+hansma 
+hansma 
+ireland 
+dockinga 
+thie 
+man in trees 
+alles is liefde 
+little lupe anal 
+anal 
+thai 
+anal slut 
+wagner 
+ebony slut 
+ebony 
+wagner 
+wagner 
+wagner 
+ebony porn 
+asian porn 
+pthc 
+sarah young 
+xxx 
+50 cent 
+fist fuck 
+paris 
+goa 
+xxx mpg 
+thai xxx 
+thai 
+mpg 
+avi 
+rocco mpg 
+anal mpg 
+filipina mpg 
+r@ygold mpg 
+bitch mpg 
+anal avi 
+anal avi 
+anal avi 
+august rush 
+august rush 
+invasion 
+bon jovi 
+nokia 
+scooby do 
+scooby do 
+smallville 
+family guy 
+nino d'angelo 
+scooby do 
+il gladiatore 
+il gladiatore 
+il gladiatore 
+movies 
+movies 
+new movies 
+videos 
+bangbus 
+fred clause 
+fred claus 
+frbeowolf 
+beowolf 
+bewolf 
+slipknot 
+beowulf 
+the christmas 
+somebody help me 
+regina spektre 
+awake 
+awake 2007 
+seymour 
+somebody help 
+rattatoulle 
+ratatouille 
+ratatouille hd 
+720p 
+no jacket required 
+no jacket required 
+phil collins 
+no jacket required 
+phillip 
+phil 
+philipp 
+bangkok 
+jap 
+indian 
+phil collins 
+thai 
+falling down duran 
+falling down duran 
+duran falling down 
+thai 
+thai 
+fantasy 
+pearl 
+daywatch 
+alien vs predator 
+quickeys 
+quickeys 
+vendetta 
+linux 
+game 
+shrek 
+shrek 
+python 
+php 
+violation of 
+violation of mirage 
+violation of brianna 
+violation of brianna 
+violation of brianna 
+annie body 
+"annie body" 
+colbert' 
+colbert 
+brianna banks 
+annie body 
+all pissed off 
+annie body 
+annie body 
+"anastasia x" 
+"mr. horsecock" 
+squirting 
+texas twins 
+"texas twins" 
+philippine 
+frans bauer 
+boom 
+pda 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+pocket 
+dj zany 
+homw 
+divx ita 2007 
+divx ita 2007 
+ita 2007 
+fixkes 
+happy feet 
+acdcsee 9 
+acdcsee 9 
+acdcsee 9 
+acdcsee 9 
+acdcsee 
+acdcsee 
+rata 
+shrek 
+buschido 
+g unit 
+50 cent g unit 
+g unit 
+gunit 
+buschido 
+5o cent 
+50 cent 
+buschido 
+buschido 
+50 cent 
+fast and furious 
+ita 2007 
+ita 2007 
+ita 2007 
+the doors 
+ita 2007 
+ita 2007 
+ita 2007 
+ita 2007 
+ita 
+mp3 
+italian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+italian 
+italian 
+winx 
+thalia 
+thalia 
+thalia 
+thalia 
+thalia 
+mango 
+mango 
+winx la pelicula 
+amedeo minghi 
+amedeo minghi 
+amedeo minghi 
+amedeo minghi 
+renato zero 
+renato zero 
+anime 
+smallville 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville 
+smallville ita 
+charade 
+smallville ita 
+smallville ita 
+ita 2007 
+italian 2007 
+call of duty 2 mac 
+dvdrip italian 2007 
+dvdrip italian 2007 
+dvdrip italian 2007 
+s03e08 
+s03e09 
+illegal 
+sternwanderer 
+dvdrip 2007 italian 
+dvdrip 2007 italian 
+dvdrip 2007 italian 
+divx 2007 italian 
+divx 2007 italian 
+divx 2007 italian 
+divx 2007 italian 
+divx 2007 italian 
+divx 2007 italian 
+divx ita 2007 
+ita 2007 
+avi dvdrip ita 
+the.invasion 
+avi dvdrip ita 2007 
+avi dvdrip ita 2007 
+avi divx ita 2007 
+avi divx ita 2007 
+erwin 
+avi divx ita 
+avi divx ita 
+avi 2007 ita 
+lexmond 
+dvdrip avi 2007 ita 
+dvdrip avi 2007 ita 
+dvdrip avi 2007 ita 
+dvdrip avi 2007 ita 
+kill bill 2 
+dvdrip avi 2007 ita 
+heroes seconda serie 
+acdsee 
+rose 
+edith piaf 
+vie en rose 
+piaf 
+statistiek 
+dvd ita 
+csi 
+settlers 
+ceasar 
+caesar 
+settlers 
+caesar 
+rose 
+sposs 
+spss 
+spss 15 
+cartoon 
+child 
+csi las vegas 
+csi las vegas ita 
+csi las vegas ita 
+csi las vegas 
+winrar 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+csi hard evidence 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+nora johnes 
+nora jo 
+heroes 8 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+viejo camaradas 
+linkin park 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+brasileirinhas 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vivi fernandes 
+porno
+de lama,s verboden te lachen 
+porno 
+de lama,s verboden te lachen 
+scheila carvalho 
+sexy 
+doors 
+scheila carvalho 
+moody bleus 
+moody bleus 
+you tube 
+carla peres 
+funk 
+sexo no salão 
+sexo no salão 
+brasileirinhas 
+search files, youtube and liveleak 
+inimigos da hp 
+pooh 
+linux 
+rihanna 
+rihanna 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anime 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+craigslist 
+space miami 
+david guetta 
+eye of the tiger 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+final countdown 
+eye tiger 
+* 
+beatles 
+beatles 
+beatles 
+miami nikki 
+paul simon 
+beatles 
+dean martin 
+red carpet massacre 
+duran duran 
+queensryche 
+seal 
+as i am 
+as i am 
+steve morelli 
+steve morelli 
+steve morelli 
+steve morelli 
+steve morelli 
+maria bellucci 
+selen 
+canzoni nel tempo 
+tori amos 
+the new deal 
+the new deal 
+the new deal 
+quicktime pro windows 
+photoshop 
+adobe cs3 
+adobe cs3 master 
+heroes 
+heroes 08 
+datenbank 
+tribler 
+fachinformatiker 
+große 
+big 
+blonde 
+the counterfeiters 
+phil collins 
+no jacket required 
+não é o fim 
+não é o fim 
+não é o fim novo som 
+novo som 
+phil collins 
+green day 
+rise of atlantis 
+nfs most wanted 
+nba live 2008 
+green day 
+novo som 
+nfs underground 
+novo som 
+novo som 
+lesbicas 
+lesbicas 
+little lupe 
+fratellis 
+lesbian 
+novo som 
+novo som 
+novo som 
+novo som 
+novo som 
+novo som 
+novo som 
+green day 
+green day 
+green day 
+brasil 
+my chemical romance 
+lesbian 
+novo som 
+juliana paes 
+akon 
+smallville 
+house 
+aquecimento global 
+war at home 
+war at home 
+smallville 
+aquecimento global e o nosso futuro 
+pool 
+nfs most wanted 
+paris hilton 
+britney sears 
+akon 
+britney sears 
+britney spears 
+rhino 
+sexo 
+porno 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jessie jane 
+tracy lords 
+tracy lords 
+tracy lords 
+tracy lords 
+porn 
+ratatouille 
+slayer 
+justin slayer 
+die hard 
+ncis 5 
+300 
+ratatouille 
+csi 4 
+csi 6 
+die hard 
+csi 8 
+big 
+black 
+bones 3 
+mandingo 
+lex steele 
+steele 
+naruto 
+lupin 
+ligabue 
+film 
+anal 
+tomtom 
+tomtom ita 
+vasco 
+ass 
+video 
+wealth 
+wealth 
+wealth 
+wealth 
+wealth 
+wealth 
+wealth 
+wealth 
+wealth 
+crack 
+simpson 
+simpson 
+sex 
+gay 
+anal 
+monique 
+ita 
+gay 
+delerium 
+gay 
+song 
+warefare 
+warefare 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+andrew sisters 
+andrew sisters 
+das wiegenlied vom tod 
+hot 
+new device 
+dialogys 
+dialogys 
+gossip girl 
+new device 
+new device 
+new device 
+device 
+mika 
+the valet 
+divx ita 
+divx ita 2006 
+divx ita 2007 
+divx ita 2007 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+divx ita 2007 
+divx ita 2007 
+divx ita 
+2007 ita 
+2007 
+2007 
+2007 
+2007 
+crysis 
+crysis 
+folk music 
+itzone 42 
+hitzone 42 
+nero photoshow 
+ibiza 
+clipshow 
+ibiza 
+ibiza 
+ibiza 
+machete 
+machete 
+machete 
+the invisible 
+the invisible 
+memoriesontv 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita avi 
+spiderman 3 ita avi 
+avi spiderman 3 ita 
+spiderman 3 
+la leggenda di beowulf 
+la leggenda di beowulf 
+nero photo 
+piano 
+hot fuzz 
+heroes season 1 
+firefly 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+l.i.e 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+video 
+×Â\9eש×Â\94 ×¤×¨×¥ 
+hairspray 
+×Â\94×æ\87\81ר×Â\90×Â\9c×Â\99×Â\9d 
+search files, youtube and liveleak 
+lolitas 
+heroes.s02e08 
+portal 
+xxx 
+orange box 
+xxx 
+xxx 
+the mist 
+cum in mouth 
+karaoke 
+karaoke vietnam 
+viet karaokes 
+spanking 
+demjen 
+spanking 
+film dvd ita 
+kks 
+kks 
+kks 
+kks 
+warum rappst du 
+smoky 
+smoky 
+search files, youtube and liveleak 
+married with children 
+blof liefs uit londen 
+blof 
+p.uj 
+bach 
+bach 
+p.j 
+pj 
+mp4 
+mp4 
+mp4 
+greek 
+ferrari 
+centos 
+scheri mouvi 
+alan jackson 
+skery mouvi 
+full album 
+full album 
+full album 
+foxy lady 
+john legend 
+john legend 
+john legend 
+john legend 
+john legend 
+fergie big girls don't cry\ 
+fergie big girls don't cry 
+fergie big girls don't cry 
+life house 
+doctor who 
+doctor who 
+doctor who 
+the fall 
+"the fall" 
+sean kingston 
+"the fall" 
+zwartboek 
+stardust 
+zwartboek 
+super 
+fergie 
+search files, youtube and liveleak 
+star wars 
+office 
+shark 
+search files, youtube and liveleak 
+star wars 
+star gate 
+perverse 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+two 
+spooks 
+american 
+witch doctor 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+skeri mouvi 
+skeri mouvi 
+skery mouvi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+buddha 
+buddha 
+buddha 
+grey's anatomy 4x07 
+chaves 
+chaves 
+chaves 
+greys anatomy 4x07 
+naruto 
+skery mouvi tradus in romaneste 
+skery mouvi in romaneste 
+skery mouvi tradus 
+skery mouvi cu traducere 
+skery mouvi cu traducere 
+skery mouvi 
+hip hop 
+corrs 
+ferdi tayfur 
+trt 
+orhan hakalmaz 
+musa eroglu 
+vivid 
+hairspray 
+gangbang 
+ronaldo 
+galatasaray 
+prison 
+prison break s01 
+porno 
+revelation 
+doors 
+advert 
+seventeen 
+bob dylan 
+guthrie 
+joan baez 
+katyÅÂ\84 
+nohavica 
+de gregori 
+napoleon 
+whores of warcraft 
+allan 
+italian 
+spanish 
+jenna jameson 
+search files, youtube and liveleak 
+sexo 
+camera escondida 
+camera escondida 
+camera escondida 
+you tube 
+you tube 
+csi 8 
+anime 
+animal 
+warcraft 
+warcraft 
+warcraft 
+whorecraft 
+vivaldi 
+tits 
+asshole 
+asshole lesbian 
+lesbian 
+licking ass 
+lick ass 
+pissing 
+piss 
+peeing 
+pee 
+squirts 
+squirt 
+horse 
+apple 
+apple 
+apple 
+apple 
+samurai champloo 
+horse 
+ass shake 
+ass shaking 
+booty 
+booty 
+feet 
+foot fetish 
+licking 
+animal 
+rim job 
+asshole 
+gears of war 
+cum swap 
+fingers 
+fingering 
+wet pussy 
+search files, youtube and liveleak 
+anime 
+leach anime 
+wet 
+bleach anime 
+youtube 
+anime 
+æ¥ï¿½®ï¿½¯ 
+anime 
+hentai 
+tits 
+youtube 
+horse 
+zoo 
+dog 
+any dvd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+virtual dj 
+win98.mdk 
+*.mdk 
+sexy 
+search files, youtube and liveleak 
+games 
+muziek 
+muziek 
+sleeping with the enemy 
+babysitter 
+family guy 
+sopranos 
+software 
+porno 
+porno 
+rolling stones 
+rolling stones 
+rolling stones 
+rolling stones 
+bee gees 
+ncis 
+ncis 5 
+disco 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+irish 
+irishgeorge micheal 
+enya 
+die inselfeger 
+die inselfeger 
+mathias reim 
+mathias reim 
+rihanna 
+jamiroquai 
+sting 
+rataouille 
+30 days of night 
+das geheimnis der geisterinsel 
+operation: kingdom 
+operation:kingdom 
+operation:kingdom 
+operation kingdom 
+windows 
+hack 
+avg 
+keygen 
+power2go 
+hacking 
+donna summer 
+tune up 
+linux 
+cartoons 
+cartoons 
+cartoons 
+simply red 
+search files, youtube and liveleak 
+the cats 
+the cats 
+jan smit 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+a rush of blood to the head 
+a rush of blood to the head 
+a rush of blood to the head 
+dialogys 
+dialogys 
+dialogys 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+screwballs 
+windows vista ita 
+manga 
+sex 
+teens 
+next door 
+crysis 
+animalfarm dogs 
+animalsex dogs 
+animalsex dogs +horses 
+animalsex dogs +horses 
+animals 
+search files, youtube and liveleak 
+neyo 
+ne-yo in my own words 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+chris brown 
+ne-yo 
+bobobo 
+bobobo ,en español 
+tarzoon 
+marco borsato 
+louis de funes 
+marco borsato 
+ne-yo 
+avelgem 
+bamako 
+pregnant wife 
+couple 
+horse 
+craig david 
+usher 
+pro evolution soccer 
+fifa 2008 
+gerard vermeersch 
+bambi 
+number 23 
+beowulf 
+number 23 
+search files, youtube and liveleak 
+fantastic four rise of the silver surfer dutch 
+search files, youtube and liveleak 
+sexo 
+sexy 
+porno 
+incesto 
+tabooincesto 
+taboo 
+poweriso 
+poweriso 
+poweriso 
+sinatra 
+taboo 
+search files, youtube and liveleak 
+winmx italiano 
+moggel 
+moggel 
+moggel 
+moggel 
+moggel 
+search files, youtube and liveleak 
+saves the day 
+thursday 
+daft punk - stronger 
+daft punk 
+daft punk 
+gorillaz 
+daft punk 
+dj jean 
+disco 
+musikvideos 
+search files, youtube and liveleak 
+american gangster 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+omd 
+iron chef 
+simon and art garfunkel 
+iron chef 
+iron chef 
+iron chef 
+iron chef 
+iron chef 
+city select 
+city select 
+city select 
+left behind 
+alicia keys 
+elisa 
+elisa 
+mina 
+mina 
+kingdom come 
+glorious appearing 
+way of the master 
+dutch 
+yahel 
+yahel 
+yahel 
+search files, youtube and liveleak 
+xxx 
+yahel 
+yahel 
+sex 
+sex 
+trance 
+house 
+cosma 
+sex 
+ש×Â\9c×Â\9e×Â\94 ×�צ×Â\99 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+haitam 
+ש×Â\9c×Â\9e×Â\94 ×�צ×Â\99 
+tiesto 
+yahel 
+yahel 
+trance 
+yahel 
+yahel 
+sex 
+sex 
+rare 
+search files, youtube and liveleak 
+audiobook 
+science fiction 
+fedora 
+visual studio 
+hacking 
+****** 
+pamela 
+celebs 
+sandra 
+jenna 
+soft 
+soft 
+sex 
+jenna 
+jenna jam 
+xx 
+resident evil 
+italo disco 
+italo disco 
+italo disco 
+italo disco 
+nirvana 
+tits 
+radiohead 
+umbrella 
+marie digby 
+joy division 
+kayne west 
+closer 
+radiohead 
+radiohead 
+kayne 
+linkin park 
+search files, youtube and liveleak 
+radiohead 
+jay-z 
+black eyed peas 
+p.j. harvery 
+p.j. harvey 
+kayne west 
+lucinda williams 
+rhodesian 
+justin timberlake 
+white stripes 
+the killers 
+when you were young 
+the killers 
+film 
+horror 
+teen 
+girls 
+girls 
+weezer 
+70s 
+britney 
+britney 
+britney 
+britney official 
+the full codec 
+nero 
+magix 
+urban legend 
+magix 
+modest mouse 
+stargate atlantis 4 
+csi 8 
+csi 4 
+csi 4e08 
+gay wrestling 
+sean cody 
+bgeast 
+bg east 
+bg 
+can-am 
+wrestling 
+ita 
+ita 
+muziek 
+muziek 
+muziek 
+80s 
+50s 
+70s 
+codec 
+mepg4 
+mepg 4 
+mpeg4 
+mpeg 4 
+pinnacle 
+magix 
+search files, youtube and liveleak 
+"corbin fisher 
+"corbin fisher" 
+corbin fisher 
+gay 
+economics 
+veritas 
+veritas 
+need speed pro street 
+nuance 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+crysis 
+sawariya 
+codec 
+call of duty modern 
+e are the night 
+pinnacle 
+gatto 
+pesaro 
+search files, youtube and liveleak 
+catania 
+emine 
+bifor 
+search files, youtube and liveleak 
+pillman 
+sorella 
+porca 
+spiderman 3 
+eden mor 
+bozena 
+sesso 
+boobs 
+www.vuze.com/syndication/browse/azhot/all/movie/x/x/x/_/_/x/x/feed.xml 
+phone business 
+phone_business 
+www.legaltorrents.com/rss.xml 
+www.legaltorrents.com/rss.xml 
+www.legaltorrents.com/ 
+hetgesprek 
+lesbian 
+www.vuze.com 
+www.vuze.com 
+bbw 
+blowjob 
+search files, youtube and liveleak 
+http://thepiratebay.org/tor/3780716/in_a_lonely_place_(1950) 
+http://thepiratebay.org/ 
+movies 
+movies 
+xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+japonesas 
+japonesas 
+japonesas 
+japonesas 
+chinas 
+chinas 
+humillacion 
+humillacion 
+japanese 
+japanese humiliation 
+resident evil apocalipse 
+resident evil 
+verve 
+sting 
+sting 
+50 to 1 
+disgorge 
+julies jordan 
+zero tollerance 
+verve 
+zero tollerance 
+50 to 1 
+cum 
+csi 8 
+csi 8 
+csi 8 
+strawberry 
+chmivcal 
+chemical 
+chemical brothers 
+traci 
+traci lords 
+final cut studio 
+kate bush 
+kate bush 
+kate bush 
+kate bush 
+kate bush 
+final cut 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+final cut 
+aplications 
+aplications 
+kwai 
+bridge on the river quai 
+bridge on the river kwai 
+palin 
+zbrush 
+plugins 
+acdsee 
+massada 
+bryce 
+bryce 
+bryce 
+zbrush 
+magiciso 
+arumbai+massada 
+3d 
+massada 
+caligari\ 
+caligari 
+amapi 
+youp 
+ebook 
+audiobook 
+scarlatti 
+a 
+wende 
+bach 
+nero 
+nero keygen 
+peitudas 
+peitos 
+acdc 
+avril 
+rock 
+rock 
+os intocaveis 
+os intocaveis 
+os intocaveis 
+kees van kooten] 
+kees van kooten 
+peitos 
+peitos 
+acdc 
+bmw 
+gold 
+various 
+ever 
+sexyback 
+search files, youtube and liveleak 
+sex 
+oma 
+foxy lady 
+best 
+justin timberlake 
+mabonna 
+mr 18 
+world big cock 
+big cock 
+ligabue 
+reggea 
+reggae 
+e65 
+old 
+jimi blue 
+us5 
+prince & me 
+bush 
+bochherini 
+mozart 
+bjork 
+bjork 
+bjork 
+the beatles 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+resident 
+webmail hack 
+avg 
+apocalyptica 
+epica 
+club 
+dave matthews 
+firewall 
+spyware 
+adolescent 
+apocalyptica 
+jula 
+jula 
+jula 
+american gangster ger 
+deep purple 
+aperture 
+eye tiger 
+aperture 
+parachute 
+superman 
+flood 
+parchute 
+parachute 
+night skies 
+chemical 
+chemical brothers 
+chemical brothers we are the night 
+videos 
+videos 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+masturbation 
+mpg 
+julia hayes 
+met-art 
+throat 
+house 
+club 
+katie 
+katie 
+katie 
+katie 
+tchtonic 
+techtonic 
+dance 
+katie 
+marianne 
+katie 
+handyman 
+katie 
+blowjob 
+stargate 
+youtube 
+avond boudewijn de groot 
+guus meeuwis live 
+eros ramazzotti 
+fuoco nel fuoco 
+eros cher 
+eminem 
+marilyn my love 
+my name is earl season 2 
+my name is earl season 2 
+my name is earl season 2 
+search files, youtube and liveleak 
+my name is earl 
+search files, youtube and liveleak 
+paris 
+madagascar 
+squirt 
+zen 
+santana 
+crash test dummies 
+weezer 
+flight of the chonchords 
+flight of the choncords 
+depeche mode 
+e2 
+aikido 
+suyl 
+love 
+transformers 
+blow job 
+banned 
+call girl 
+call girl 
+call girl 
+call girl 
+my friends hot mom 
+tool 
+sole 
+anticon 
+massive attack 
+massive attack 
+massive attack 
+porn 
+madonna 
+muscle 
+gay 
+gay 
+gay 
+resident 
+299 
+german 
+slim shady 
+slim shady lp 
+eminem 
+the slim shady lp 
+source 
+markus 
+eminem show explict 
+stream 
+eminem show(explicit version) 
+stream 
+stream 
+rock 
+visual 
+poltergeist 
+tv 
+webtv 
+php 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+gigi soundtrack 
+britney spears 
+judy garland 
+pink 
+pink-delirium 
+judy garland 
+poltergeist 
+poltergeist 
+poltergeist 
+poltergeist 
+poltergeist 
+poltergeist 
+oddworld 
+the slim shady lp 
+the slim shady lp 
+the slim shady lp 
+the slim shady lp 
+eminem 
+science 
+xml 
+science 
+animal 
+poltergeist 
+poltergeist 
+poltergeist 
+poltergeist 
+movies 
+the poltergeist movie 
+family guy 
+search files, youtube and liveleak 
+dirty mary crazy larry 
+dirty mary crazy larry torrent 
+dirty mary crazy larry torrent 
+dirty mary crazy larry avi 
+dirty mary crazy larry avi 
+hohlbein 
+search all files 
+mr and mrs 
+moby 
+meat beat manifesto 
+diversant 
+search files, youtube and liveleak 
+tomtom 
+tomtom go 
+lord of ring 
+the lord of ring 
+fat boy slim 
+the lord of ring 
+dejohnette 
+johnette 
+nikelback 
+nickel 
+jordan capri 
+jordan capri 
+anne rice 
+dvdrip italian 2007 
+anne rice 
+anne rice 
+dvdrip italian 2007 
+creep 
+ann rice 
+titties 
+live at the playboy mansion 
+red 
+soul heaven 
+kenny dope & karizma 
+diana krall 
+kenny dope 
+bob sinclair 
+baantjes 
+baantjer 
+baantjer 
+baantjer 
+baant 
+baan 
+baan 
+baan 
+baantjer 
+baantjer 
+country western 
+country western 
+country 
+amateur 
+celestine 
+studio 11 
+celestin 
+celestin 
+om shanti om 
+font twister 
+font twister 
+neil young 
+fonttwister 
+kenny dope 
+kenny dope & karizma 
+studio 10 
+pinnacle 
+kamelot winter 
+kamelot 2/11 
+ich will 
+du hast 
+kamelot 
+kamelot soul society 
+killing in the name of 
+der dünne mann 
+xx 
+xxx 
+xxx thai 
+wishbone ash 
+greatful dead 
+greatful dead 
+king kong 
+30 days of night 
+drawn together season1 
+drawn together season 1 
+drawn together season 
+drawn together 
+ich will 
+killing in the name of 
+godzilla 
+broadcast 
+u2 
+u2 single 
+country 
+search files, youtube and liveleak 
+reggie bush 
+ricky willaims 
+downloads 
+adobe creative 
+adobe creative 
+adobe creative 
+adobe creative suite 
+godtube 
+goldfilmmaker 
+indesign 
+solidgoldfilmmaker 
+j 
+dinner youtube 
+dinner 
+kamasutra 
+key gen 
+latex 
+search files, youtube and liveleak 
+silent service game pc 
+submarine pc 
+silent hunter pcgame 
+hentai 
+latex 
+silent hunter 
+candice night 
+streaming 
+streaming 
+apocalyptica 
+striptease 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+candice night 
+masturbate 
+masturbatedj 
+dj 
+mastruberen 
+mastubation 
+masturbation 
+erotiek 
+vingeren 
+acrobat 8 
+v-max 
+v-max 08 
+v-max 2008 
+thai 
+scorpions 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xvid 
+xvid ita 
+xvid ita 
+divx 
+tina arena 
+anal 
+shemale 
+f1 
+search files, youtube and liveleak 
+sanne 
+sanne 
+earth and fire 
+ladyboy 
+shemale 
+xvid ita 
+ruby is the one 
+uriah heep 
+taxi 
+bisex 
+oceans 
+amimals 
+house of the rising sun 
+sanne wallis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+il pacificatore 
+il pacificatore 
+il pacificatore 
+il pacificatore 
+beppe grillo 
+surf 
+snowboard 
+surf 
+alan parsons live 
+fred claus 
+alan parsons 
+il capo dei capi 
+il capo dei capi 4 
+night skies 
+beowulf 
+beowulf 
+feet 
+cum eater 
+hitman 
+fetish 
+borat the movie 
+borat the movie 
+borat the movie 
+borat the movie 
+borat the movie 
+dreamwotks 
+dreamworks 
+vista 
+rats 
+zucchero 
+zucchero 
+zucchero 
+vista x64 
+he-man 
+youtube 
+beatbox 
+games 
+debate 
+debate rom 
+debate mit 
+debate 
+dylan 
+rhianna 
+jethro tull 
+jethro tull 
+11th hour 
+green day 
+30 days of night 
+11th hour 
+11th hour 
+11th hour 
+11th hour 
+11th hour 
+11th hour 
+hottest 
+jab we 
+30 days of nightgreen day 
+green day 
+main agar 
+om shanti 
+om shanti 
+naslite 
+wa 
+dark crusader 
+dark crusade 
+summer cumings 
+wireless 
+wireless 
+wireless 
+wireless 
+racquel darrian 
+300 castellano 
+night gallery 
+outer limits 
+gta 
+windows vista 
+windows vista programs 
+driver detective key 
+driver detective 
+rough sex 
+shreck 
+shrek 
+prank 
+prank jap 
+rape 
+2007 comedy 
+gears of war 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stargate atlantis s04 
+burn notice s01 
+lost s03 
+lost s04 
+ault 
+adult 
+prank 
+animal 
+mc dett 
+back to the jungle 
+sidewinder raw vol 3 
+jungle 
+dj sy 
+drum and base 
+drum and base arena 
+metallica 
+achmed the dead terrrorist 
+achmed the dead terrrorist 
+call of duty 4 
+brian adams 
+subsonica 
+teens 
+lavoura arcaica 
+lavoura arcaica 
+shrek3 
+halleluja jeff 
+il cane pompiere 
+il cane pompiere divx 
+il cane pompiere divx 
+il cane pompiere divx 
+il cane pompiere divx 
+gigi d'alessio 
+life of reilly 
+gigi d'alessio 
+life of reilly 
+celentno 
+celentano 
+mauro nardi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gianni morandi 
+gianni morandi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+curvead air 
+gianni morandi 
+curvead air 
+curvead air 
+genesis 
+gianni morandi 
+nardi 
+santa esmeralda 
+santa esmeralda 
+santa esmeralda 
+santa esmeralda 
+santa esmeralda 
+sex 
+search files, youtube and liveleak 
+billboard 
+pink floid 
+teen 
+teen 
+sex 
+beatles 
+rolling stones 
+genesis 
+heroes season1+2 
+staus quo 
+status quo 
+girls aloud 
+live 
+vista x64 
+jenna jameson 
+fuck 
+erotic 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+films 
+films 
+films 
+avanters 
+films nl 
+films nl 
+fotos 
+playing with 
+vista x64 
+software nl 
+software 
+gma 
+bleach 
+bleach 4 
+bleach episode 4 
+bleach episodes 
+bleach 
+bleach4 
+anime 
+"duck dodgers" 
+"riders in the sky" 
+the incredibles 
+xxx japan 
+xxx japan 
+xxx japan 
+xxx japan 
+japan 
+fun with dick and jane 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porno 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+b eatles 
+search files, youtube and liveleak 
+hadley 
+spandau 
+spandau 
+spandau 
+duran 
+kylie minogue 
+rolling stones 
+doors 
+search files, youtube and liveleak 
+pogues 
+doors 
+tony hadley 
+ligabue 
+ligabue 
+rem 
+duran duran 
+spandau 
+springsteen 
+tony hadley 
+heroes s2e8 fr 
+heroes s2e8 fr 
+heroes s2e8 fr 
+heroes fr 
+ligabue 
+u2 
+arcadia 
+red carpet 
+amateurs 
+pavarotti 
+craig david 
+francesco renga 
+a ha 
+a-ha 
+promise land 
+gary kemp 
+hadley 
+celentano 
+ligabue 
+ligabue 
+burial: "near dark" 
+status quo 
+elvis presley 
+musica 
+riina 
+il capo dei capi 4 
+il capo dei capi 4 
+il capo dei capi 4 
+elvis presley 
+elvis presley 
+il capo dei capi 1 
+il capo dei capi 1 
+il capo dei capi 
+il capo dei capi 
+vervelen 
+ernie vervelen 
+tele tubbies 
+tubbies 
+winny the po 
+winny the 
+rockabilly 
+rockabilly 
+beowulf 
+american gansters 
+cod4 
+vervelen 
+hentai 
+immortal 
+immoral 
+sisters 
+night nurse 
+bangkok 
+pattaya 
+pataya 
+couples 
+indian 
+foxy lady 
+nude 
+euman 
+voyeur 
+locker 
+loker 
+room 
+massage 
+hookers 
+hentai 
+300 
+jazz 
+orlowski 
+kruistocht 
+orlowski 
+phil 
+spijkerbroek 
+spijkerbroek 
+kamelion 
+kameleon 
+kameleon 
+kameleon 
+windows xp 
+windows vista 
+discovery channel 
+beatles 
+litfiba 
+hustle 
+devon michaels 
+nikki benz 
+rolling stones 
+search files, youtube and liveleak 
+beatles 
+pink floid 
+pynk floyd 
+pogues 
+nomadi 
+the exorcist 
+mein neuer freund 
+helgate 
+123 
+helraiser 
+lost s03e10 ita 
+hellraiser 
+lost s03e10 
+nat king cole 
+evil death 
+evil dead 
+brian adams 
+lost ita 
+lost 3 ita 
+doors 
+toto 
+toto 
+articolo 31 
+styx 
+styx 
+styx 
+styx 
+articolo 31 
+articolo 31 
+articolo 31 
+elton jon 
+kansas 
+queen 
+lesbo 
+incinta 
+mignotta 
+mignotta 
+mignotta 
+troia 
+troia 
+troia 
+troia 
+troia 
+puttana 
+xxxdragon 
+manga 
+cod4 
+peter druker 
+rory gallagher 
+brian adams 
+brian adams 
+led zeppelin 
+u2 
+doors 
+cute goth chick painful anal 
+cute goth chick anal 
+led zeppelin 
+wrox 
+led zeppelin 
+moody blues 
+van der graf generator 
+pinback 
+hockey 
+ufo 
+procol harum 
+atomic rooster 
+rolling stones 
+deep purple 
+curvead air 
+barry ryan 
+sting 
+bunzo 
+steeleye span 
+blackmore's night 
+yes 
+king crimson 
+search files, youtube and liveleak 
+vanilla fudge 
+gentle giant 
+apocalyptica 
+credence clearvater revival 
+credence clearvater revival 
+credence clearvater revival 
+credence clearvater revival 
+credence clearvater revival 
+moana pozzi 
+gay 
+voorburg 
+heavy metal 
+search files, youtube and liveleak 
+hairsprai 
+hairspray 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+remove good4 
+remove good4 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gay 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+voorburg 
+voorburg 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+metal gear solid 
+search files, youtube and liveleak 
+madonna 
+madonna 
+madonna 
+madonna 
+peppino 
+peppino 
+donovan 
+ita 
+bdsm 
+voorburg 
+animal 
+queen 
+sturm der liebe 
+beowulf 
+shocking blue 
+ufo 
+ita 
+ita 
+ita 
+deep purple 
+taiji 
+taichi 
+tai chi 
+animal sex 
+michael porter 
+vba 
+film 
+nudist 
+girl fucking her dog 
+girl fucking her dog 
+fucking her dog 
+animals sex 
+mardi grass 
+scent of a woman 
+chinpo 
+phili 
+elektor 
+elektor 
+red hot 
+elektor 
+red hot 
+audioslave 
+ita 
+fight club 
+animalssex dog 
+animals sex dog 
+realplayer 
+lost 1x13 
+indochine 
+dog porn 
+lost 
+lost 1°stagione 
+lost 1°stagione 
+lost 1°stagione 
+dog fuck sex 
+lost 1°stagione 13°puntata 
+lost 1°stagione 
+borat the movie 
+eros 
+kayak 
+tinto brass 
+orlowski 
+sport 
+enya 
+sport channel 
+sport 
+renato zero 
+renato zero 
+renato zero 
+laura pausini 
+hercules 
+hercules 
+fred claus 
+hitzone the best of 2007 
+hitzone the best of 2007 
+hitzone the best of 2007 
+hitzone 
+audio 
+foxy lady 
+casino royal 
+francesco totti 
+francesco totti 
+francesco totti 
+francesco totti 
+francesco totti 
+simon and garfunkel 
+la leggenda di beowulf 
+matrimonio alle bahamas 
+abba 
+hollands 
+nederlands 
+ita 
+anime 
+2007 
+2007 
+2007 ita 
+2007 ita 
+2007 ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+pink floyd 
+italian 
+italian 
+italian 
+ita 
+album 
+laura pausini 
+laura pausini 
+laura pausini 
+let me think about it 
+lei 
+david amo 
+davida amo 
+david 
+amateur 
+david vendetta 
+thalia 
+thalia 
+thalia 
+thalia 
+raggae 
+raggae 
+fuck her dog 
+fuck her dog 
+animals sex 
+aria squuirts 
+survivor eye tiger 
+eye tiger 
+fifa 08 
+fifa 08 wii 
+fifa 08 wii 
+amazon warrior 
+rape mpg 
+rape mpg 
+rape mpeg 
+mario e sonic wii 
+hentai 
+amazon warrioe 
+amazon warrior 
+rape mpeg 
+rape mpg 
+amazon 
+female warrior 
+slave nurses 
+"slave nurses" 
+bang bros 
+bang bros 
+monsters of the cock 
+tsuyoi 
+chiisai 
+japan 
+small 
+dick 
+small dick 
+funny 
+skimpy 
+public 
+tiny 
+weenie 
+kuschelrock 
+animals sex 
+animals sex 
+autocad lt 
+autocad lt 
+autocad lt 
+lion cloth 
+lioncloth 
+elektor 
+eastpak 
+bag on fire 
+eastpak 
+eastpak 
+eastpak 
+eastpak 
+eastpak 
+eastpak 
+rugtas 
+rugtzak 
+rugzak 
+eastpak 
+search files, youtube and liveleak 
+schooltas 
+miusic was my 
+jon miles miusic 
+jon miles miusic 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+underground 
+jon miles la sacrada familia 
+psx 
+superbad axxo 
+anime 
+kruistocht 
+harem 
+osx86 
+mickey 
+mickey 
+osx 
+caligula 
+panico na tv 
+panico na tv 
+amazon britta 
+amazons 
+amazons 
+manhunters 
+manhunters jessica drake 
+jessica drake 
+boob bangers 
+banger bros 
+bangerbros 
+banger bros 
+search files, youtube and liveleak 
+windows 
+raemonn 
+fantasy 
+ÑÄ\90½Ð¼Ðº 
+tnmk 
+ray 
+search files, youtube and liveleak 
+american gangster 
+bi sex 
+animal 
+cavalo 
+horse 
+scarface 
+gay 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke 
+pedo 
+15 
+teens 
+28 days 
+pthc 
+hidden cam 
+white stripes 
+anal 
+rush hour 3 
+microsoft office 2003 
+four-hour 
+four-hour work week 
+live daftpunk 
+live daft punk 
+banger bros 
+bangerbros 
+kardashian 
+pizza blow 
+master of lighting 
+tesla master of lighting 
+die legende von beowulf 
+weeds 
+songs 
+songs 
+trace 
+winrar password 
+pakistan 
+1st base 
+corno 
+corninho 
+corninho 
+corninho 
+corninho 
+corninho 
+criminal minds 3 
+marido 
+casais 
+orgia 
+suruba 
+swinger 
+1941hollywood movies 
+old hollywood movies 
+travesti 
+shemale 
+bi sex 
+big 
+ccna osi 
+dog 
+ccna osi 
+gay 
+brasil 
+brazil 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+deep purple 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the bridges at toko-ri 
+jovem guarda 
+gay, boy 
+gay 
+dbz 
+dbz 
+search files, youtube and liveleak 
+aerofly 
+aerofly 
+andre hazes 
+aerofly pro 
+aerofly pro 
+hardcore 
+charly lownois 
+aerofly pro 
+realflight 
+aeroflight 
+aeroflight 
+aeroflight 
+aeroflight 
+aeroflight 
+aeroflight 
+rape 
+garfield 
+18 
+18 
+underworld 
+jump 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hitman 
+aeroflight 
+realflight 
+underworld pl 
+underworld 
+asian 
+wii 
+pedo 
+spiderman pl 
+hidalgo subtitles 
+hidalgo subtitles 
+korkociÄÂ\85
+hidalgo subtitles 
+hidalgo sub 
+hidalgo sub 
+dane od boga 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+brazil 
+whore 
+dane od boga 
+gothic 3 pl 
+gothic 3 pl 
+pl 
+games 
+beowulf 
+stargate atlantis pl 
+pl 
+dj pool 
+search files, youtube and liveleak 
+bbc 
+threads 
+private gay 
+private 
+gay 
+bi sex 
+tranny 
+smallville 
+pussy 
+seventeen 
+masturbate 
+finger 
+she male 
+http://fr.youtube.com/watch?v=fysmli570k8 
+http://fr.youtube.com/watch?v=fysmli570k8 
+http://fr.youtube.com/watch?v=fysmli570k8 
+project runway 
+http://fr.youtube.com/watch?v=9khzwsytnde 
+sandwich 
+squirt 
+squirt 
+search files, youtube and liveleak 
+porn 
+alicia keyes 
+alicia keyes 
+alicia keyes 
+no one 
+cant tell me nothing 
+cant tell me nothing 
+can tell me nothing 
+master of orion 3 
+"master of orion" 3 
+ls 
+crossover 
+homeworld 
+cedega 
+codega 
+seal 
+cedega 
+mark medlock dieter bohlen 
+seal 
+david deangelo 
+bonte koe 
+de scheepsjongens bonte koe 
+bonte koe 
+bonte koe 
+crossover 
+beowulf 
+mystery method 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+film 
+search files, youtube and liveleak 
+sex 
+visio 
+microsoft 2007 
+microsoft 2007 
+database 
+php 
+search files, youtube and liveleak 
+clayton 
+office for mac 
+office for mac 
+lions lambs 
+sybian 
+beowulf 
+adult 
+enchanted 
+ita 
+cartoonsmart 
+leben 
+jima 
+vmware 
+alex jones 
+question mark 
+that's the question 
+tribler 
+fido 
+zwartboek 
+les 
+vmware keygen 
+partition magic 
+partition magic 8 serial 
+partition magic 8 serial 
+stream 
+search files, youtube and liveleak 
+greek 
+hopw i met your mother 
+how i met your mother 
+david guetta 
+benny benassi 
+dj selection 
+captivity 
+captivity movie 
+halo 1 download 
+halo combat evolved 
+captivity 2007 
+captivity 2007 dvd rip 
+doctor who 
+blake seven 
+blake 7 
+blakes 7 
+blakes 
+blakes 
+dexter 
+sean paul 
+heroes 9 
+search files, youtube and liveleak 
+heroes 9 
+toulouse 
+toulouse 
+joni mitchell 
+anime 
+tech 
+geek 
+chuck 
+heroes 9 
+http://www.craveonline.com/videos/nsfw/00005963/gangster_sesame_street.html 
+porno 
+reggiani 
+family guy 
+family guy 
+teens 
+graupner taxie 
+taxie 3 
+john legend 
+marco borsato 
+marco borsato 
+brianna blaze 
+brianna 
+beast fucker 
+animal sex 
+fifa 2008 
+animal sex 
+music 
+animal sex 
+teens for cash 
+music 
+rc airplane 
+real madrid 
+miguel angel munoz 
+miguel angel munoz 
+cubase 4 
+fabio dimi 
+fabiodimi 
+miguel torres 
+contaminato 
+picotto 
+"vanessa hudgens" 
+resident evil 4 iso 
+heathcliff and marmaduke 
+picotto 
+picotto 
+half life 
+salieri 
+salieri 
+mario salieri 
+search files, youtube and liveleak 
+spice girls 
+divx ita 
+2007 italian divdrip 
+2007 italian dvdrip 
+2007 italian dvdrip 
+the invisible 
+the invisible italian 
+2006 italian 
+spiderman 3 italian 
+all 
+albums italian 
+albums 
+single albums 
+albums single 
+2007 
+the simpsons 
+beastiality 
+hymen 
+masturbate 
+roberta kelly 
+lopez 
+gang 
+gloria 
+alexia 
+rino gaetano 
+sfroos 
+genesis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+najib amhali 
+harry potter 
+amsterdam 
+salieri 
+liberty marian 
+liberty marian 
+marian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+amore in chat 
+madonna 
+spyware doctor 
+madonna 
+dutch 
+south park s11e07 
+xxx 
+dutch 
+monkey island 
+sit 
+south park s11e06 
+kiss 
+kiss 
+rock 
+axe 
+http://www.torrent.to/torrent/upload/xxx/398/215398/889-www.torrent.to...lacey%5b1%5d.duvalle.-.pounded.in.the.office.by.a.big.white.cock.torrent 
+mark thomas 
+mark thomas 
+mark thomas 
+mark thomas 
+justin timberlake 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stargate 
+stargate 
+ita stargate 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+genesis 
+mama 
+roma provincia 
+roma provincia 
+roma provincia torino capitale 
+roma provincia torino capitale 
+roma provincia torino capitale 
+roma provincia torino capitale 
+hey you 
+pink floyd 
+pink floyd, hey you 
+pink floyd hey you 
+pink floyd 
+simpson 
+mama 
+video 
+gay 
+bi sex 
+paris hilton 
+nod 
+rc 
+rc airplane 
+search files, youtube and liveleak 
+prison.break 
+prison.break 
+prison.break 
+dirty 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cubase 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+black eyed peas 
+avril 
+spooks 
+justin timberlake 
+search files, youtube and liveleak 
+kylie minogue 
+universal business adapter 
+gilmore 
+rape 
+search files, youtube and liveleak 
+ramazzotti 
+resident evil 
+alien vs predoator 
+avp 
+laundia 
+raand 
+randi 
+bitch 
+kylie minogue sex 
+kylie minogue sex 
+lindsay lohan sex 
+avril 
+sexy 
+sex 
+need for speed pro street 
+justin timberlake 
+flstudio 
+pamela 
+flstudio downloads 
+flstudio downloads 
+ita 
+italian ld 
+spanish 
+intocht elst 2007 
+epc 
+elster 
+goutier 
+goûtier 
+aarsman 
+ponyclub 
+sinterklaas 
+harry potter 3 
+harry potter 3 
+search files, youtube and liveleak 
+teen 
+mccarthy 
+lesbian 
+acdc 
+kokeshi 
+lesbian 
+pedo 
+hitman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+de limburgse zusjes 
+abba 
+abba 
+asian 
+osx86 
+jas 
+jas osx86 
+reign 
+missy 
+deathnote 
+onmitsu 
+search files, youtube and liveleak 
+avril lavigne 
+yui 
+funny 
+funny 
+search files, youtube and liveleak 
+westlife 
+. 
+ninfeta 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rap 
+dmx 
+jordan capri 
+scarface 
+scarface 
+scarface 
+scarface 
+disco 
+photoshop 
+donna summers 
+falcon studios 
+xxx privat 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+lolita 
+xxx lolita 
+xxx lolita 
+xxx preeteens 
+xxx preeteens 
+xxx preeteens 
+xxx preeteens 
+xxx preeteens 
+xxx preeteens 
+xxx preeteens 
+xxx preeteens 
+xxx preeteens 
+preeteens 
+preteens 
+pedo 
+masha 
+acdc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+axxo 
+search all files 
+masha 
+masha 
+ship sim 
+prison break e09 
+heroes 
+anal 
+over hill 
+over hill 
+over hill 
+over hill 
+over hill 
+over hill 
+over hill 
+robin 
+film 
+cabaret 
+cabaret 
+cabaret 
+cabaret 
+cabaret 
+milano palermo il ritorno 
+film 2007 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rammstein 
+rammstein 
+borat 
+bb king 
+[rus] 
+[rus] 
+[rus] 
+[rus] 
+russian 
+nasha russia 
+kamedi klab 
+comedy klab 
+bombalurina 
+pulp common people 
+sim city 2000 rush hour 
+leonard cohen 
+simcity 2000 rush hour 
+simcity 2000 rush hour 
+sim city rush hour 
+simcity rush hour 
+simcity rush hour 
+sim city 4 
+animal sex 
+interpol 
+legalizacja xp 
+legalizacja xp 
+prodigy 
+black and white 2 
+mp3 
+albums full 
+albums full 
+sex 
+animal sex 
+taggart 
+realflight 
+realflight 
+fms 
+taggart 
+taggart 
+sex 
+padre de familia 
+pop 
+muchachada nui 
+black and white 2 
+chuck deely 
+chuck deely 
+chuck deely 
+chuck deely 
+chuck deely 
+chuck deely 
+black and white 2 
+black and white 2 
+fucking 
+russian 
+design 
+discovery 
+discovery 
+maya 
+maya tutorial 
+maya 
+digital tutors 
+digital tutors maya 
+digital tutors maya 
+digital tutors maya 
+digital tutors maya 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+russian teen 
+devil may cry 
+shark 
+disney 
+search files, youtube and liveleak 
+colombia 
+japanese 
+drama 
+japanese 
+utada 
+shrek 
+shrek ita 
+shrek ita 
+anti-smoking tar 
+smoking 
+no country for old men 
+bruce almighty 
+hana yori dango 
+resident evil 
+teen 
+erotic witch project 
+nihon 
+cat fight 
+ecchi 
+pee 
+stripper 
+pee 
+caribbean 
+brazilian 
+in plain view 
+ghost rider 
+call girl cat fight 
+ipanema 
+girls from ipanema 
+la professoressa di lingue 
+la professoressa di lingue 
+puttane 
+puttane 
+sex 
+greek 
+lavezzi 
+stefanidou 
+l 
+g 
+lavezzi 
+livaniou 
+napoli 
+napoli 
+sex 
+dragonball 
+brazilian models 
+brazilian girls 
+brazilian carnival 
+captivity 
+juiced 2 
+games 
+games ps2 
+black girls 
+games ps2 ita 
+games ps2 
+games ps2 
+games ps2 
+games ps2 
+napoli 
+lavezzi 
+calcio 
+cakcio 
+calcio 
+c 
+tutte le puntate del capo dei capi 
+il capo dei capi 
+il capo dei capi 
+capo dei capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+dragonball gt ita 
+dragonball gt 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt 
+dragonball gt 
+dragonball 
+dragonball 
+amy whinehouse 
+amy winehouse 
+president bush 
+bush 
+hannah 
+pagodinho 
+zeca 
+teen 
+hannah 
+barney 
+call duty 
+julia paes 
+far cry 
+far cry 2 
+juliana paes 
+mujra 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ten commandments 
+search all files 
+search all files 
+ten commandments 
+nude 
+zrinka 
+tall julia 
+tall zrinka 
+amazon 
+goddess 
+party 
+party 
+amazon 
+amazon kitty 
+mikayla miles 
+wrestling 
+wrestling 
+wrestling 
+wrestling 
+wrestling 
+wrestling 
+wrestling 
+wrestling 
+mud 
+wrestling 
+wrestling 
+wrestling 
+queen adrena 
+simpsons 
+bunny's jap 
+dido 
+naruto 
+avira 
+naruto shippuuden 35 
+enya 
+fm wrestling part 4 
+avira premium security suite 
+bbc david attenborough 
+bbc 
+tall kat 
+nathalie 
+nathalie kelley 
+strapon 
+skimpy 
+velvet 
+fracture 
+arrestmovies 
+femdom police 
+mistress francesca 
+jogos mortais 
+arrest movies 
+arrestmovies 
+jogos mortais 4 
+jogos mortais 
+search files, youtube and liveleak 
+2007 
+teen 
+depeche mode 
+flash 
+flash slideshow 
+cabaret 
+barely 
+boston 
+i simpson 
+divx 
+divx ita 
+dvd 
+compagnie pericolose 
+2007 
+divx ita 
+divx ita 
+divx ita 
+divx ita 
+divx ita 
+jlo 
+lopez 
+dog 
+country 
+country 
+tornado 
+jogos 
+search files, youtube and liveleak 
+beyond the enemy lines 
+be dazzled 
+pink floyd 
+eagles 
+tarzan and the lost city 
+tarzan and the lost city 
+como ama una mujer 
+como ama una mujer 
+lopez 
+hawkwind 
+latin 
+search files, youtube and liveleak 
+beowulf 
+search files, youtube and liveleak 
+dvd ita 
+denzel washington 
+denzel washington 
+kevin kosner 
+donne di mafia 
+dvd ita - donne di mafia 
+dvx ita - donne di mafia 
+dvx ita - donne di mafia 
+afte effects cs3 
+premiere cs3 
+premiere cs3 
+ninja 
+american ninja 
+julia paes 
+filme 
+search files, youtube and liveleak 
+xxx 
+codecs for wmv 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jogos mortais 
+teachers 
+young teachers 
+sex teachers 
+blink 
+blink182 
+substitute teacher 
+search files, youtube and liveleak 
+lost 
+vivid 
+osx 
+weeds s03 
+elvis presley 
+elvis presley 
+elvis presley 
+primus 
+dire straits 
+dire straits 
+londoncafe 
+friso 
+luca jurgen 
+elvis presley 
+elvis presley 
+renoceros sex 
+giraffe sex 
+nijlhorse sex 
+rhenoceros sex 
+shemale sex 
+misty mundae 
+misty mundae 
+misty mundae 
+misty mundae 
+julie strain 
+lina romay 
+jess franco 
+erin brown 
+teen 
+anal teen 
+fifa 2008 pc 
+eve who's that girl 
+vampire 
+zombie 
+horror 
+cannibal 
+witch 
+gtk 
+reverse 
+exploit 
+windows 
+shellcode 
+virus 
+ghost rider 
+ghost rider ita 
+ita 
+duty 
+hitzone 2007 
+hitzone 2007 
+hitzone 2007 
+ita 
+ita 
+ita 
+ita resident 
+ita resident 
+ita resident 
+italian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+grey's anatomy 
+men in trees 
+men in trees 
+brothers and sisters 
+gillore girls 
+gillore girls 
+gilmore girls 
+scheepsjongens 
+bontekoe 
+bontekoe 
+bontekoe 
+scheepsjongens van bontekoe 
+scheepsjongens van bontekoe 
+ita 2007 
+ita 
+ita 
+ita 
+ita 
+ita 
+ita 
+playstation 
+ita 
+xxx 
+take that beauitful world tour 20 th novemebr 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that beautiful world tour 
+take that 
+take that 
+pro street 
+takethat 
+takethat 
+takethat 
+pro street 
+yamyam15 
+zelda 
+adobe 
+adobe mac 
+u2 
+anal 
+first anal 
+american gangsters 
+pro rugby 
+final cut 
+you tube 
+google video 
+search files, youtube and liveleak 
+oblivion 
+search files, youtube and liveleak 
+vista 
+oblivion 
+oblivion 
+savariya 
+divx ita 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+madonna 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+anouk 
+oblivion 
+oblivion 
+oblivion sn 
+oblivion sn 
+tanto va la gatta al lardo 
+adobe cs premiun mac 
+oblivion sn 
+adobe mac 
+sn 
+lime wire pro for mac 
+photoshop cs3 mac 
+adobe cs3 mac 
+lime wire pro for mac 
+lime wire pro for mac 
+lime wire pro for mac 
+lime wire pro for mac 
+sn 
+sn 
+sn key 
+key sn 
+renato zero 
+keygen+key+sn 
+keygen+key+sn 
+keygen+key+sn 
+keygen+key+sn 
+walt disney 
+keygen+key+sn 
+boondocks 6 
+keygen key sn 
+keygen key sn 
+keygen key sn 
+pink floyd a saucer full of secrets 
+mr. magorium's wonder emporium 
+search files, youtube and liveleak 
+cardigans 
+nirvana 
+cardigans 
+toast titanium 
+to my boy 
+fat boy slim 
+lea di leo 
+lea di leo 
+jesse jane 
+kira kener 
+samson en fred 
+search files, youtube and liveleak 
+amature 
+arie en bastiaan 
+janossi 
+search files, youtube and liveleak 
+amature 
+nikelback 
+nickelback 
+nickelback 
+nickelback rockstar 
+nickelback 
+firefly 
+alicia kays bono 
+alicia kaeys bono 
+alicia keys bono 
+uk garage 
+ms dynamite 
+the hives 
+grey's anatomy 
+the smiths 
+ghost whisperer sub ita 
+ghost whisperer 
+black rebel 
+cardigans 
+janossi 
+janossi 
+peter licht 
+latremenoi mou geitones 
+power iso 
+rio reisser 
+rio reisser 
+rio reisser 
+cardigans 
+peter 
+a 
+b 
+c 
+d 
+e 
+f 
+g 
+h 
+i 
+j 
+k 
+l 
+l 
+m 
+n 
+o 
+ü 
+p 
+q 
+r 
+game 
+s 
+t 
+u 
+para pente 
+high school musical 
+mac ox 
+mac ox 
+mac ox 
+mac ox 
+mac ox 
+mac ox 
+mac ox 
+mac ox 
+mac ox 
+sp3 
+xp sp3 
+amature 
+mac ox 
+teen 
+mac ox 
+mac ox 
+mac osx 
+teen 
+teen 
+mac osx 
+mac osx 
+mac osx 
+nice 
+gay 
+almodovar 
+gay 
+stagger 
+stagger 
+boy 
+boy gay 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+search files, youtube and liveleak 
+hellboy 
+search files, youtube and liveleak 
+hellboy 
+bilitis 
+white 
+white temptation 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+raped 
+anal 
+american gangster 
+ernst en bobbie 
+ernst en bobbie 
+ernst en bobbie 
+emergency 4 
+heroes 
+i don't wanna be a murderer 
+50-50 
+50-50 
+50-50 
+search files, youtube and liveleak 
+scat sex 
+scat 
+50-50 
+50-50 
+veggie tales 
+bible 
+gina wild 
+illuminati 
+audio books 
+audiobooks 
+audiobooks 
+appleseed 
+appleseed 2004 
+appleseed 2004 
+appleseed 2004 
+house of flying daggers 
+hero 
+hero jet li 
+dvix 
+endgame 
+italian 
+honey bee king2 
+trex 450 
+trex 450 
+divx italian 
+divx ita 
+ita 
+anything but mine 
+dinosaurs 
+sex 
+pro street 
+portal 
+postal 
+scrubs season 7 episode 1 
+scrubs season 7 episode 1 
+naruto 
+ttc 
+ttc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ttc 
+la terza madre 
+ruby 
+ruby 
+matrimonio alle bahamas 
+jonn holmes 
+jon holmes 
+jon holmes 
+italia 
+roberta 
+michelle 
+sesso 
+sex 
+young girls 
+young teen girls 
+teens 
+minogue 
+ebay song 
+cd70 
+jessica alba 
+jenna jameson 
+sex 
+sex 
+saint 
+goldfrapp 
+crov 
+noah's arc 
+noah's arc season one 
+drawn together 
+drawn together 314 
+drawn together s3e14 
+firefly 
+naruto 
+hilton 
+anubis 
+vista 
+anubis 
+jump top 100 
+teen 
+search files, youtube and liveleak 
+loli 
+all 
+daddy 
+sister 
+sist 
+cous 
+cousin 
+coussin 
+ali 
+german 
+all 
+cartoon 
+3d 
+all 
+report 
+report 
+report 
+first 
+girl 
+allµ 
+all 
+media player nl 
+media player 
+mcafee 
+the chronic 
+the cronic 
+cronic 
+dr.dre 
+deathrow 
+deathrow 
+deathrow 
+deathrow records 
+house of the rising sun 
+the cnronic 
+the chronic 
+july morning 
+child in time 
+blackout 29 
+blackout 
+these tunes 
+the l word 
+sex 
+maxizems 
+maxiems 
+pier 
+maxiems 
+dance tunes 
+sport 
+bigtit 
+bigtits 
+earth and fire 
+maxiems 
+maxiems 
+maxiems 
+usenext 
+big cock 
+intervention 
+call of duty 
+song of the south 
+porn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+shania twain 
+search files, youtube and liveleak 
+frontpage 2007 
+christmas 
+office 2003 
+office 2007 
+vivaldi 
+search files, youtube and liveleak 
+prison break season 3 mpg 
+prison break season 3 mpg 
+prison break season 3 mpg 
+prison break season 3 mpg 
+prison break 
+xxx 
+tunisia 
+led zeppelin 
+south park 10 
+south park 
+natural city 
+djumbo 
+djumbo 
+djumbo 
+djumbo 
+djumbo 
+28 
+28 kinderen 
+kinderen voor kinderen 
+ministry of sound 2008 
+anouk 
+sunshine 
+clubland 12 
+lover 
+erotiek 
+kamasutra 
+cd70 
+dubbel d 
+kamasutra fuck fest 2 
+neuken 
+vingeren 
+wet 
+klit 
+the nits 
+top billboard 
+dance 
+dj jean 
+dj- jean 
+dj 
+crysis 
+xp sp2 
+deus ex 
+dopeman 
+human sex 
+amateur sex 
+aikido 
+amateur porno 
+electro 
+faces of death 
+faces of death japanese torture 
+faces of death chinese electro torture 
+faces of death japanese electro torture 
+japanese electro torture 
+chinese electro torture 
+electro torture 
+lolita 
+jan smit 
+boondocks 6 
+spiderman3 
+worms 
+spiderman3 ita 
+worms 
+spiderman3 
+worms portugues 
+saturno contro 
+saturno contro dvd 
+rem 
+enema 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+spiderman3 
+spiderman3 
+dutch 
+hentai 
+nederlands 
+crack for need for speed pro street 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nenas 
+nenas 
+nenas 
+nenas 
+system mechanic 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+** 
+system mechanic 
+system mechanic 
+riki martin 
+search files, youtube and liveleak 
+stage 6 
+carmon electra 
+carmon electra 
+carmon electra 
+crysis 
+join hands 
+michael clayton 
+blackhawk down 
+black hawk down 
+cs3 kaygen 
+mac antivirus 
+antivirus mac 
+antivirus 
+sophos mac 
+gay 
+itunes 
+jennefer lopez 
+thugi thugi 
+bbc banhunt 
+manhunt 
+manhunt 
+bbc survivors manhunt 
+bernadette 
+equations 
+equations 
+linear equations 
+linear equations 
+linear equations 
+ficken 
+i am legend 
+i am legend nl 
+. nl 
+nl subs 
+office 07 
+r5 
+pc game 
+mom 
+antivirus 
+strip 
+phtoto shop 
+photo shop 
+office 07 
+mother 
+inzest 
+incest 
+mx3 
+mrs 
+hot mom 
+mutter 
+granny 
+. 
+. 
+. 
+nl subs 
+srt 
+nl subs 
+moeder 
+zoon 
+aunt 
+granny 
+grandmother 
+young 
+lesson 
+mariach 
+mariatchi 
+mariachi 
+mariachi 
+mariachi 
+mexico 
+latin 
+county 
+county 
+county 
+mariachi 
+mariachi 
+mariachi 
+mom 
+country 
+aikido 
+ann wilson 
+nfs pro street maps 
+nfs pro street car 
+nfs pro street car tuner 
+spss 
+sarah brightman 
+top of the pop 
+fifa 2008 
+fifa 2008 for windows 
+fifa 2008.iso 
+fifa 2008 
+manon thomas 
+manon thomas 
+manon thomas 
+manon thomas 
+need for speed pro street 
+bioshock 
+eminem 
+gospel 
+girls 
+girls 
+gheto gospel 
+delifianneio parthenagogeio 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+il capo dei capi 
+sburro fisso 
+bamba 
+one piece 96 
+julia alexandratou 
+julia alexandratou 
+julia alexandratou 
+julia alexandratou 
+julia alexandratou 
+alexandratou 
+britney spears 
+search files, youtube and liveleak 
+nomadi 
+one peiece 96 
+lost passage 
+photoshop 
+eastern promises 
+roxio 
+one peiece 96 
+one peiece 96 
+sunhouse 
+dog 
+benders 
+jenni lee 
+jenna haze 
+roxio 
+elton john 
+elton john 
+elton john duets 
+elton john duets 
+elton john duets 
+duets 
+dutes elton john 
+dutes elton john 
+elton john 
+grey's anatomy forever young 
+huge tits 
+huge tits 
+88 minutes 
+big tits 
+big tits 
+big tits 
+city navigator 
+psyclon nine 
+*.mpg 
+*.* 
+video editing 
+porcupine tree 
+a prova di morte 
+a prova di morte 
+nudes a poppin 
+hitman 
+vangelis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+warld trade center 
+thai 
+chinese 
+bangkok 
+threesome 
+outdoor 
+fkk 
+nude 
+rape 
+hooker 
+russian 
+katie meluha 
+seventeen 
+beowulf 
+jeff dunham 
+search files, youtube and liveleak 
+elisabeth 
+13 
+12 
+14 
+15 
+14 
+16 
+deutsch 
+pre teen 
+tesseract 
+*.* 
+redhead 
+culcha candela 
+culcha candela 
+morandi 
+monty phyton 
+kraaykamp 
+kitaro 
+stomu yamasta 
+sinatra tahamata 
+pipo 
+ratatouille 
+rube goldberg billiards! 
+peru 
+zodiac 
+kayak 
+ekseption 
+asian 
+trace 
+sister7 
+patrice pike 
+neil young 
+fredro starr 
+darjeeling 
+il capo dei capi 
+win xp 
+opensuse 10.3 
+doom 3 
+fear 
+rihanna 
+rihanna 
+eredivisie 
+rihanna 6 video 
+eredivisie live 
+live 
+live voetbal 
+beyonce 
+rihanna sos 
+kerry katona 
+kerry katona 
+rihanna 
+kerry katona sex 
+simpson 
+beyonce 
+weather 
+jenna jameson 
+windows xp home 
+xxx 
+xxx 
+xxx 
+xxx 
+search files, youtube and liveleak 
+beowulf 
+angeln 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+audio 
+audio 
+ghost reacon 
+ghost reacon 
+ghost reacon 
+bionic woman 
+why did i get married 
+magic iso 
+arina 
+loli 
+loli 
+lolita 
+arina 
+ls magazine 
+ls land 
+r@ygold 
+pthc 
+ptsc 
+preteen 
+dvdr 
+house of sand and fog 
+dvdr pal 
+weihnacht 
+loli 
+pthc 
+arina 
+13yo 
+12yo 
+12yo 
+11yo 
+14yo 
+10yo 
+need for speed 
+le ragazze del coyote ugly 
+xxx 
+die hard 4 - vivere o morire 
+porno 
+search files, youtube and liveleak 
+films 
+sex 
+nero 
+djavan 
+search files, youtube and liveleak 
+djavan - delirio dos mortais 
+phil collins 
+phil collins - i cannot believe is true 
+phil collins 
+mariah carey 
+fabio junior 
+lenine 
+pedro mariano 
+filmati 
+filmati 
+phil collins 
+djavan 
+phil collins 
+dragonball 
+distraction 
+spiderman 3 
+zucchero 
+dire 
+lightroom 
+lightroom 
+mr been 
+paris hilton 
+1900 
+paris hilton 
+brian setzer 
+distraction 
+lightroom 
+ron paul 
+weeds 
+mr been 
+red 
+swordfish 
+pantera 
+tribes: vengeance 
+tribes: vengeance 
+tribes: vengeance 
+tribes: vengeance 
+search files, youtube and liveleak 
+1701 a.d. 
+hitman 
+company of heroes 
+britney spears gimme more 
+jenna jameson 
+britney spears gimme more 
+jenna jameson 
+shrek 3 
+shrek 3 -avi 
+shrek 3 not avi 
+ita shrek 3 
+shrek 3 
+shrek 3 
+dvd5-ita 
+smash my pc 
+dvd5 ita 
+smash my pc 
+yahoo 
+yahoo 
+higurashi kai 
+higurashi kai 01 
+schooldays 
\9c°ç\90�¸ 
\9c°ç\90�¸ã\80\8001 
+æ�äºï¿½Â\88 
+ãŹã³ã\83\80ã  
+ï½Ä\8f½Ã¯½Â\86 
+ear training 
+python 
+dolly buster 
+search files, youtube and liveleak 
+latina 
+bangbros 
+selena spice 
+latina 
+latina hq 
+latina high quality 
+latina high quality 
+latina high quality 
+high quality 
+high quality hq 
+hq 
+bangbros 
+bangbus 
+latina 
+xxx 
+lolo ferrari 
+busen 
+quickbooks 
+sextape 
+hitman 
+microsoft games 
+microsoft games 
+games 
+action games 
+action games 
+action games 
+photoshop 
+adobe flash 
+ricky koole 
+bionic 
+bionic woman 
+search files, youtube and liveleak 
+"shadows of war" 
+shadows of war 
+motionbuilder 
+motionbuilder 
+csi 8 
+csi 6 
+csi 4 
+criminal minds 3 
+tom tom 
+tomtom 
+bones 3 
+tomtom win 
+tomtom laptop 
+tomtom 
+my guide 
+my guide 
+my guide 
+myguide 
+myguide 
+rigas sargi 
+rigas sargi 
+map italia 
+triiphlon 
+triiphlon 
+triphlon 
+tribler 
+tribler ita 
+tribler 
+shemale 
+search files, youtube and liveleak 
+resident eveil 
+urusei 
+ninja 
+ninja scroll 
+caligula 
+bogart 
+humphry bogart 
+tokyo joe 
+bogart 
+rossella 
+james bond 
+godzilla 
+monkey island 
+avatar 
+saving grace 
+immediate music 
+dead or alive 
+ars arcana 
+takeshii 
+dein ex,mein albtraum,mvcd 
+dein ex mein albtraum,mvcd 
+solange du hier bist 
+solange du hier bist,mvcd 
+force majeure 
+american gangster 
+humphry bogart 
+razor 
+u2 
+rigas sargi 
+hitmen 
+pc games pinball 
+pc games 
+xxx 
+search files, youtube and liveleak 
+die siedler 
+rescue dawn 
+hatebreed 
+eros 
+hatebreed 
+teens goin wild 17 xxx dvdrip 2007 
+you tube 
+you tube manon thomas 
+you tube manon thomas 
+djumbo 
+porno 
+little brunette fuck 
+interraccial porno 
+porno 
+brunette fuck 
+massive attack 
+roms nintendo ds 
+nintendo ds 
+nintendo ds 
+nintendo ds roms 
+nintendo ds roms e 
+nintendo ds roms (e) 
+blues brothers 
+dvd blues brothers 
+porno paris hilton 
+jessica may 
+jessika may porno 
+ligabue 
+porno 
+al rangone 
+private spice 
+titty bianchi 
+titti bianchi 
+psilvia saint 
+silvia saint 
+silvia saint porno 
+titti bianchi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+company of heroes 
+dragon ball af 
+u2 
+playboy 
+rigas sargi 
+vdeo 
+hellgate london 
+tom tom 
+tom tom mappe italia 
+tom tom maps 
+tom tom maps italy 
+tom tom maps 
+harry potter 
+harry potter 
+mobile 
+handy 
+il gladiatore 
+manuale d'amore 2 
+mobile 
+manuale d'amore 2 
+mobile 
+hellgate london 
+empire earth 3 
+the witcher 
+supreme commander 
+supreme commander 
+supreme commander 
+silverfall 
+zapper 
+online tv 
+machtlos 
+machtlos 
+machtlos 
+machtlos 
+veen oosterhuis 
+oosterhuis 
+von löwen und lämmern 
+beowulf 
+beowulf ger 
+la leon 
+la leon 
+la leon 
+la leon 
+la leon 
+machtlos 
+machtlos 
+across the universe 
+across the universe ger 
+across the universe 
+camel 
+dropkick murphy 
+der dritte mann 
+simpson 
+simpson 
+canalis 
+hilton 
+hilton 
+windows xp 
+third man 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+1804 
+office 
+1408 
+stevie wonder 
+abba 
+tilde 
+crack 
+tildes birojs 
+crack explorer 
+tildes birojs 
+tildes birojs 
+tildes birojs 
+tildes birojs 
+gladiator 
+baby 
+girl 
+jessica alba 
+jessica alba 
+saw 4 portugues 
+gladiator 
+gladiator 
+ita 
+kavel57 
+kavel57 
+kavel57 
+kavel57 
+jodi mitchel 
+jodi mitchell 
+jodi mitchell 
+christy moore 
+christy moore 
+christy moore 
+ita lost 
+xuxa 
+tori amos 
+abba 
+tori amos 
+tori amos 
+hbo 
+tori amos 
+dfhdghsgfjfg 
+tori amos rainbow 
+kont 
+chris 
+greg lake 
+tori amos rainbow 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search all files 
+search all files 
+search all files 
+zodiac 
+tori amos 
+counting crows 
+dora the explorer 
+diego the explorer 
+diego the explorer 
+search files, youtube and liveleak 
+ratouille 
+kaspersky 
+ratatouille 
+eset smart security 
+dora the explorer 
+man at work 
+fibra 
+panda 
+alarm zone 
+before sunrise 
+michael bubble 
+dora the explorer+diego 
+dora the explorer 
+twelve monkeys 
+sahuleka 
+massada 
+lekcja 3 
+video clip erasmus 
+video clip 50 cent 
+jon miles 
+video clip hantr 
+seal 
+mary winsor 
+bellydance 
+danza del ventre 
+belly 
+belly dance 
+lessons 
+ventre 
+belly 
+pilates 
+pianoman 
+pearl jam 
+billy joel 
+andria 
+andria 
+andria 
+andria 
+andria 
+andria 
+barletta 
+barletta 
+elton john 
+halloween 
+van helsing 
+normaal 
+need for speed pro street xbox 360 
+normaal 
+halloween 
+fetish 
+femdom 
+femdom 
+mistress 
+kavel57 
+beatles 
+child in time 
+kavel57 
+kavel57 
+matrix 
+american gangster 
+universum 
+hush 
+strapon 
+deep purple 
+roy paciù 
+roy paci 
+roy paci toda joia toda 
+roy paci toda joia toda 
+roy paci toda joia toda 
+exception 
+roy paci 
+bubble 
+exseption 
+exeption 
+the fifth of beethoven 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+piss 
+exception 
+u2 
+search files, youtube and liveleak 
+hazes 
+aabba 
+aabba 
+sony ericsson video converter 1.0 
+sony ericsson video converter 1.0 
+aabba 
+ccredo 
+search files, youtube and liveleak 
+xander b 
+xander de 
+oblivion 
+english dictionary 
+dictionary 
+house 
+no cd key oblivion 
+black sabbat 
+no cd key oblivion 
+cd key oblivion 
+cd key oblivion 
+oblivion 
+english-latvian 
+latvian 
+latvian-english 
+latvian-english 
+femdom 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+femdom 
+femdom 
+femdom 
+femdom 
+alice in chains 
+cartoon 
+alice in chains 
+beatles 
+alice in chains 
+rage against the maschine 
+rage against the machine 
+rage against the machine 
+dropkick murphy 
+dropkick murphys 
+femdom 
+rowan heze 
+rowan heze 
+rowan heze 
+rowan heze 
+latvian 
+u2 
+u2 
+u2 
+u2 
+windows vista 
+tildes birojs 
+salieri 
+u2 
+flogging molly 
+flogging molly 
+u2 
+u2 
+blof 
+blof 
+blof 
+porno 
+dictionary 
+dictionary 
+dictionary 
+firpo 
+tango 
+free english 
+english 
+eurotalk 
+eurotalk 
+rigas sargi 
+oxford 
+cnn 
+rigas sargi 
+rigas sargi 
+rigas sargi 
+keygen 
+pokemon 
+ash leaving pikachu 
+ash leaving pikachu 
+zlatan 
+pkmn 
+pokemon 
+mewtwo vs giovanni 
+pokemon 
+search files, youtube and liveleak 
+matrix 
+mewtwo 
+overture 
+hitman 
+big 
+anal 
+computer cz 
+after effect 
+photo shop for mac 
+friends bloopers 
+friends bloopers 
+friends bloopers 
+space 1999 
+photo shop plugin 
+pokemon the movie 
+dmx 
+queen 
+pokemon 
+i'll be there for you 
+mistress 
+mistress 
+pokemon 
+pokemon 
+pokemon 
+pokemon 
+pokemon 
+mewtwo 
+queen 
+friends 
+friends bloopers 
+friends serie 
+mario salieri 
+europe countdown 
+teresa orlowski 
+cumparsita 
+xzibit 
+ken lorela 
+matrix 
+la terza madre 
+300 
+ghost reacon 
+ghost reacon 
+osvaldo pugliese 
+la cumparsita 
+all 
+amalia rodriguez 
+london 
+englsh-latvian 
+english-latvin 
+english-latvin 
+english-latvin 
+english-latvin 
+search files, youtube and liveleak 
+english 
+cnn 
+antivirus 
+pervers 
+microsoft word 
+gladoator 
+gladiator 
+matrix 
+abba 
+abba 
+abba 
+vivaldi 
+vivaldi 
+mozart 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+manon thomas 
+mr been 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+van halen 
+call of duty 
+search files, youtube and liveleak 
+delphi 
+deep purple 
+deep purple 
+deep purple 
+risk 
+la terza madre 
+arm7 
+arm7 
+delphi 
+il nascondiglio 
+acdc 
+gufi 
+il nascondiglio 
+il nascondiglio 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pupi avati 
+pupi avati 
+pupi avati 
+pupi avati 
+anal 
+delphi 
+delphi 7 
+delphi 7 update 
+d programming 
+vista 
+upskirt 
+bisexsual 
+crowded house 
+spirerman3 
+search files, youtube and liveleak 
+battlestar 
+battlestar 
+waar is het paard van sinterklaas 
+de scheepsjongens van bontekoe 
+de scheepsjongens van bontekoe 
+de scheepsjongens van 
+sil de 
+tatinic 
+sinterklaas 
+young teen 
+gladiator 
+young teen lesbions 
+young teen lesbi 
+young asians lesbian 
+young asians lesbian 
+asians lesbian 
+asians lesbian 
+gladiator 
+search files, youtube and liveleak 
+anima porn 
+anima 
+cold cases 
+hentia 
+cold case 
+cartoon 
+gladiator 
+sex 
+saw2 
+porno 
+cartoon 
+xxx 
+porno 
+xxx 
+xxx 
+xxx 
+xxx 
+nudist 
+bikini 
+bikini 
+pthc 
+chinese 
+chinese 
+star dust 
+lost 
+syx 
+syx action sports 
+law and order 
+axxo 
+planet terror pal 
+planet terror pal 
+planet terror pal 
+planet terror 
+death proof 
+beck 
+youngest 
+teens 
+math 
+eddie murphy 
+teens 
+teens 
+teens 
+teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+rigas sargi 
+rigas sargi 
+rigas sargi 
+young teens 
+young teens 
+teens 
+shemale 
+shemale 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+*.* 
+porn 
+mars 
+pissing 
+sexparty 
+pee 
+amy whinehouse 
+amy winehouse 
+moana pozzi 
+luca damiano 
+photomagico 
+dieskau 
+schuberts lieder 
+brunette 
+search files, youtube and liveleak 
+stephen king 
+the stand 
+hot 
+pron 
+porn 
+no country for old men 
+linkin park 
+cubase 
+shaved 
+shaved 
+shaving 
+thong 
+string 
+keng leiden 
+sneaker 
+shoes 
+feet 
+feet 
+tickling 
+tickling 
+tickling 
+tickling 
+kunst en genoegen leiden 
+tickel 
+tickeld 
+tickling 
+tickeld 
+andy pink 
+andy pink 
+ilse delange 
+ilse delange 
+kunst en genoegen leiden 
+ilse delange 
+microsoft word 
+string 
+ilse delange 
+wii rockband 
+spanked 
+spanking 
+sex 
+eva 
+lost 
+prison break 3 
+prison break 3 ita 
+office 
+diablo 
+diablo 
+rayman 
+nina senicar 
+canalis 
+canalis 
+arcuri 
+nuda 
+nude 
+nude 
+hayden panettiere 
+pulp fiction 
+milf 
+eva angelina 
+milfs 
+milf 
+big boob 
+bryce 
+supreme commander 
+saw 4 
+can touch it 
+cant 
+amgas 
+resident evil 
+resident.evil.extinction.2007 
+resident.evil.extinction 
+resident.evil. 
+resident.evil 
+residentevil 
+sex 
+neil young 
+yemen 
+stalin 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+high scholl musical 2 
+high scholl musical 2 
+high scholl musical 2 
+high scholl musical 2 
+juiced 2 ps2 
+juiced 2 ps2 
+juiced 2 ps2 
+futurama bender 
+regan anthony 
+nero 8 
+zecchino 
+porno 
+film 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the dubliners 
+room 1408 
+the dubliners 
+flogging molly 
+intervista col vampiro 
+angel 
+tera patrick 
+jenna jameson 
+nl subs 
+release therepy 
+ludacris 
+manchurian candidate 
+ludacris 
+kaspersky 
+antivirus 
+panda 
+irish 
+hilton 
+shrek terzo 
+shrek terzoita 
+bound 
+kidnapped 
+kidnapped 
+search files, youtube and liveleak 
+forced] 
+forced 
+bondage 
+movie 
+delphi 
+spartiti chitarra classica 
+delphi 
+lo spacca cuori 
+i vichinghi 
+il cappotto latuada 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+number 23 ita 
+number 23 divx 
+sonia eyes 
+search files, youtube and liveleak 
+eagels 
+wind surf 
+la terza madre 
+wind surf 
+music all 
+giochi 
+blood rayne 
+cats 
+wind surf 
+wind surf 
+wind surf 
+fleedwood mac 
+bto 
+spiderman 3 ita 
+alpinismo 
+alpinismo 
+superman returns ita 
+pirati dei caraibi ai confini del mondo 
+dancing the night away 
+dancing the night away 
+dancing the night away 
+dancing the night away 
+dancing the night away 
+dancing 
+dancing the night 
+rescue dawn 
+knocked up nl sub 
+knocked up nl sub 
+knocked up nl sub 
+knocked up nl sub 
+knocked up nl sub 
+knocked up 
+knocked up 
+arm7 
+john denver 
+jannacci 
+jannacci 
+kate holmes 
+holmes 
+i gufi 
+saw 
+horror 
+jools siouxsie 
+prison break 
+jools 
+jools 
+jools 
+silvia 
+siouxsie 
+kinderen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+movie 
+movie dutch subs 
+movie dutch subs 
+movie dutch subs 
+movie dutch subs 
+nude 
+movie dutch subs 
+movie dutch subs 
+hddvd 
+movie dutch 
+movie dutch 
+gaston en leo 
+taxi driver 
+schindler's list 
+husker du 
+pregnant 
+pink floyd 
+heroes 
+ashwin navin 
+ashwin navin 
+osx 
+osx10.4 
+osx 10.4 
+osx 10.4 
+osx 10.4 
+osx 
+mac tiger 
+nfl 
+sexy girl 
+boxing 
+boxing girls 
+bears 
+private gladiator 
+atmega 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stanford 
+stanford 
+stanford 
+yes 
+daily show 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gay 
+nakednews 
+japan 
+pro evolution soccer 
+elisa 
+vasco 
+vasco 
+vasco 
+vasco rossi 
+vasco rossi 
+.mp3 
+gianni morandi 
+codegear borland 
+borland 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+teenmodel 
+teenmodel 
+delphi 
+delphi 
+teenmodel 
+teenmodels 
+teenmodels 
+ella teenmodel 
+gta 
+genuine advantage crack 
+van morrison 
+jazz 
+gossip 
+ella teenmodel 
+van allen 
+van hallen 
+van hallen 
+doors 
+cry 
+session 
+meet 
+models 
+pamela 
+troia 
+whore 
+ass 
+assassin creed for pc 
+assassin creed 
+mp3 
+adobe all in one 
+adobe all in one 
+ashampoo all in one 
+davilex 
+davilex 
+school 
+alien 
+scooter 
+scooter 
+school 
+anna kurnikov 
+search files, youtube and liveleak 
+delphi2005 
+delphi 
+poweriso 3.8 
+poweriso 3.8 
+camel sex 
+delphi 
+rene froger 
+pump op the bass 
+pump up the bass 
+nope is dope 
+nijlpaard sex 
+shemal sex 
+dildo sex 
+music 
+ncis 5 
+tony takitani 
+tony takitani 
+tony takitani 
+tony takitani 
+trubble 
+truble 
+clear 
+fire 
+giochi pc 
+david bowie 
+trade 
+trade 
+queen's of the stone age 
+queen's of the stoneage 
+queen's 
+queen 
+slayer 
+metallica 
+drupal 
+*.mp3 
+negramaro 
+pulp 
+earlier 
+head 
+tortas 
+wendy 
+king singers 
+zarautz 
+james galway 
+macabre 
+vocal 
+a capella 
+montezuma 
+top5 
+search all files 
+26.11.2007 
+26.11.2007 
+top 5 
+globe 2006 
+top 10 
+ricky rubio 
+olimpiade patinador 
+olimpiadas patinaje 
+olimpiade patinaggio 
+lazy town 
+top 5 
+nudes 
+milf 
+history channel 
+yo yo ma brazil 
+sex 
+tragedy 
+gossip 
+sonic youth 
+sonic 
+soul 
+bling 
+blues 
+gary moore 
+more 
+bio 
+windows vista nrg 
+emuolator 
+emu 
+windows vista 
+emu 
+emu 
+bling fling trick 
+emu 
+sex 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+wolfetones 
+wolfetones 
+the wolfe tones 
+tool 
+zodiac 
+earth 
+zwarte piet 
+sinterklaasliedjes 
+schindler's list 
+sinterklaas 
+bling fling trick 
+windows vista 
+game 
+sexy games 
+sexy game 
+xxx game 
+xxx game 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ita 
+xxx game 
+avg 7.5 ita 
+sonic youth 
+avg 
+anime ita 
+i griffin ita 
+heroes 
+heroes ita 
+phil collins 
+search files, youtube and liveleak 
+selen 
+giochi psx2 
+giochi psx2 
+giochi psx2 
+giochi psx2 
+top 10 
+giochi 
+giochi 
+giochi 
+dvd 
+film ita 
+film ita completi 
+champions 
+film ita dvd 
+messi gol 
+messi maradonna 
+divx ita 
+la terza madre 
+un matrimonio alle bahamas 
+rihanna 
+matrimonio alle bahamas 
+la terza madre 
+encatada 
+film ita 
+valeria marini 
+jenna 
+maria 
+maria 
+sap 
+search files, youtube and liveleak 
+hyderabad 
+hyderabad sex 
+hyderabad party 
+trace 
+win trace 
+roxy music 
+mistress 
+domina 
+trampling 
+femdom 
+female 
+female piss 
+golden shower 
+film ita 
+film ita 
+film ita 
+film ita 
+film ita 
+film ita 
+film ita 
+dragon 
+dragonball 
+adamo 
+adamo 
+antivirus 
+high heels 
+high heels 
+stiletto 
+stiletto sex 
+sexy heels 
+sexy heels 
+per un pugno di dollari 
+sum 41 
+sum 41 
+vasco 
+*.cdt 
+lesbian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porn 
+ero 
+la leggenda del lago 
+non son degno di te 
+oblivion 
+per un pugno di dollari 
+per un pugno di dollari 
+per un pugno di dollari 
+oblivion 
+nicola di bari 
+dieci comandamenti 
+dieci comandamenti 
+la guerra dei ross 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 
+spederman 
+spederman 
+spederman 
+spederman 
+oblivion 
+oblivion 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+nel sole 
+massimo di cataldo 
+massimo di cataldo 
+massimo di cataldo 
+giacomo rondinella 
+giacomo rondinella 
+toto 
+toto 
+toto 
+toto 
+toto 
+gianni morandi 
+gianni morandi 
+gianni morandi 
+gianni morandi 
+*.avi 
+spanish 
+film ita 
+will smith 
+control freak 
+will smith 
+control freak 
+control freak 
+control freak 
+control freak 
+ready steady go 
+film 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+film 
+scrubs 
+scrubs season 5 episode 21 
+scrubs s5 e21 
+scrubs 5x21 
+bowie 
+bowie 
+divx ita 2007 
+dj lory 
+film kim ki duk 
+film francis ford coppola 
+film francis ford coppola 
+asian 
+film ita 
+zombie nacion 
+mozart 
+zombie nacion 
+a 
+red zone 
+red zone 
+red zone 
+red zone 
+moko 
+film 
+hentai 
+adam proll – hummel 
+adam proll – hummel 
+hummel 
+hummel 
+afroman 
+fica 
+hentai 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+metallica 
+canzoni italiane 
+canzoni italiane 
+dvdrip italian 2007 
+red zone 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+mamma mp3 
+mamma mp3 
+mamma mp3 
+ufo 
+girl of next door 
+costantin 
+benny jnn 
+naruto 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+flauto 
+flauto 
+jeepers creepers 
+fisarmonica 
+fisarmonica 
+mini 
+erotic 
+sexy 
+l esorcista 
+l'esorcista 
+nude 
+l esorcista film 
+film drammatico 
+vertigo 
+vertigo alfred hitchcock 
+film 
+film 
+film ita 
+naked 
+film 
+search files, youtube and liveleak 
+la terra madre 
+televisione 
+televisione 
+televisione 
+search files, youtube and liveleak 
+a guide to the penis for men and women 
+programmi utili 
+programmi 
+antivirus 
+antivirus +crak 
+antivirus +crak 
+commedia 
+commedia 
+commedia 
+animali 
+animali 
+sex 
+sex 
+mp3 
+massimo ranieri 
+albano 
+flauto pan 
+flauto pan 
+tutto 
+mp3 ita 
+mp3 
+italia 
+italia 
+film wester 
+film wester 
+film western 
+film western 
+g-data 
+gdata 
+tinto brass 
+chavez 
+brigitta bulgari 
+chavez 
+eva henger 
+kaspersky 
+gdata 
+antiviruskit 
+avk 
+f-secure 
+jeepers creepers 
+asian 
+search files, youtube and liveleak 
+os intocaveis dublado 
+tropa de elite 
+orgia 
+witches 
+witches 
+witches 
+witches 
+witches 
+witches 
+witches 
+witches 
+resident evil 
+orgy 
+kat 
+cradle of filth 
+cradle of filth 
+cradle of filth 
+avp2 
+avp2 
+avp2 
+enchanted 
+er medici in prima linea 
+endgame 
+star trek 
+house hunters 
+hgtv 
+leopard 
+foot 
+feet 
+nipples 
+legs 
+thigh 
+thighs 
+thighs 
+tongues 
+kissing 
+kissing 
+mature 
+over 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+video porno 
+video porno 
+video porno 
+j neto 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx files 
+xxx 
+xxx 
+xxx 
+teens 
+teens 
+teens 
+preteens 
+adult 
+schwester 
+schwester 
+schwester 
+sister 
+porn 
+teens 
+teens 
+teens porn 
+preteens 
+adult 
+porn teens 
+teen sex 
+fuking video 
+www.yahoo.com 
+teens picture gallery 
+adult teen movies 
+300 
+rigas sargi 
+the mist 
+i am legend 
+love my way 
+template monster 
+guta 
+recordnow 
+nero 
+van 
+hindi 
+hindi 
+hindi 
+hindi 
+hindi 
+hindi 
+english 
+english 
+english 
+english 
+methini 
+pat 
+jam session 
+session 
+playboy 
+jazz 
+xp sp2 
+kittie 
+kittie 
+planet earth 
+kombi 
+kombi 
+bergkamp 
+search files, youtube and liveleak 
+moggl 
+playboy 
+u2 
+akira 
+akira 
+u2 
+u2 
+akira 
+ghost in the shell 
+paris 
+.mov 
+groningen 
+shrek3.pl 
+shrektrzeci 
+shrek 
+mp3 
+ratatuj 
+search files, youtube and liveleak 
+ratatouille 
+tioz 
+semantic web 
+ratatuj 
+bdsm 
+ratatopuille 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+carmen luvana 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+matthijs kuhl 
+matthijs kuhl 
+matthijs kuhl 
+matthijs kuhl 
+kuhl 
+kuhl 
+rolfes 
+acoustic guitar 
+blackened 
+blackened.mp3 
+mature 
+shol 
+s6 
+superbad 
+pan's labyrinth 
+
+search files, youtube and liveleak 
+nursery rhymes 
+bonzo dog 
+star trek 
+chavez 
+touhou 
+stuffit standard 
+stuffit compress 
+zip 
+compress files 
+nadine jansen 
+big breasts 
+r. kelly 
+r. kelly 
+r. kelly 
+transformers 
+reggae 
+the nun 
+the saint 
+huge tits 
+huge tits 
+huge tits 
+50 cent curtis 
+osx 
+the starlite desperation 
+entourage season 4 
+pornoraphy 
+pornography 
+sex 
+
+
+
+pornagraphy 
+porngraphy 
+porngraphy 
+
+soliloquists of sound 
+search files, youtube and liveleak 
+top gear 
+simpsons 
+bourne ultimatum 
+bourne ultimatum 
+resident evil extinction 
+medal of honour airborne 
+handjob 
+handjob 
+the kingdom 
+buena vista 
+g3 
+steve vai 
+die bedrohung 
+red hot chili peppers 
+red hot chili peppers 
+
+k3 
+2063 
+house german 
+scrubs german 
+eva angelina 
+blowjob 
+need for speed 
+windows vista 
+bangbus 
+divx 
+amateur 
+cum 
+divx 
+search files, youtube and liveleak 
+over the top 
+fuck vag fuck anus 
+sinbad 
+south park 
+mongrain 
+mongrain 
+friday 
+ozzy 
+bioshock 
+barbara 
+fun 
+citty 
+kitty 
+stalingrad 
+colonial home 
+colonial house 
+three days grace 
+three days grace 
+
+gay 
+relax 
+star trek 
+star trek voyarger 
+star trek voyager 
+gay 
+holly day 
+ron paul 
+alex jones 
+alex jones 9-4-07 
+brandi starr 
+hilary scott 
+soliloquists 
+taylor rain 
+yellow scab 
+yellow scab 
+the wives 
+jessica simpson 
+the wives 
+edward scissorhands 
+danny elfman 
+poison 
+metallica 
+the last of the mohicans 
+
+poison 
+metallica 
+metallica 
+metallica 
+
+300 
+gaber 
+
+rosetta 
+entourage 
+entourage 4 
+forced 
+lolita 
+top gear 
+ita 
+the three musketeers 
+pc 
+dusty rhodes 
+ita 
+carbon 
+dusty rhodes westminster 
+xxx 
+trieste 
+boobs 
+porn 
+bergamas 
+nordio 
+justin warfield 
+ween 
+lacucaracha 
+sammi 
+gay 
+little bri 
+little britain 
+janey do 
+high heels 
+search files, youtube and liveleak 
+humair 
+guys and dolls 
+desenho chaves 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the office 
+jenna 
+dita 
+dita 
+dita 
+youtube 
+youtube 
+xxx 
+hairypotter 
+high school musical 2 
+hannah montana 
+cures' 
+cures 
+the cure 
+311 
+greek 
+greek.s 
+greek 5 
+greek 6 
+greek 7 
+greek 8 
+greek 9 
+greek 10 
+greek 11 
+battlestar gallactica 
+battlestar galactica 
+game 
+tabula rasa 
+famaly guy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rob thomas 
+rob thomas santana 
+rob thomas 
+daily show 
+bob dylan 
+weired al 
+weird al 
+rubberfunk 
+dead parrot 
+rescue me s0e4e13 
+rescue me s0e4e13 
+rescue me s0e4e13 
+rescue me s0e4e13 
+my first sex teacher 
+my first sex teacher 
+avi 
+divx 
+movies 
+simpons dvdscr 
+li-ann pop the cherry 
+li-ann pop the cherry 
+nero 
+bioshock 
+ftv 
+bioshock 
+fifa 
+tuneup advisor 
+tune-up advisor 
+advisor 
+regcure 
+search files, youtube and liveleak 
+euro sex 
+
+divx ita 
+divx 
+divx ita 
+grillo 
+florin salem 
+it 
+ita 
+lost 
+lost it 
+simpsons 
+futurama 
+rob thomas 
+intervideo windvd 8 
+cradle of filth 
+ijzeren reus 
+
+juno 
+juno 
+pro7 
+ärzte 
+mac 
+osx 86 
+ned1 
+ned3 
+lama's 
+lama's 
+lama's 
+ebony 
+
+nina 
+nina mercedez 
+david gilmour 
+search files, youtube and liveleak 
+private 
+xxx 
+die ärzte 
+hans zimmer 
+zimmer 
+robot chicken 
+axxo 
+pthc 
+
+pthc 
+pedo 
+creampie 
+hentai 
+preteen 
+webcam 
+young 
+lsm 
+ls magazine 
+lsm 
+axxo 
+sandra 
+sandra teen 
+vcl 
+vclvlc 
+vlc 
+axxofiles, youtube and liveleak 
+elvis 
+joe cocker 
+morecambe wise 
+twenty four 
+twenty four season 6 
+twenty four season 6 
+bicep 
+bicep 
+bicep 
+bon jovi 
+bicep 
+make a memory 
+kelly clarkson 
+feist 
+isomagic 
+alcohol 
+teenpinkvideo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+krav maga 
+wing chun 
+muscle cam 
+remind me 
+muscle cam 
+chris cam 
+mr roboto 
+hung huge 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+chapelle oprah 
+chapelle oprah interview 
+chapelle oprah 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hung huge 
+the brave one 
+war 
+ufc 
+sqirt 
+squirt 
+porn 
+porn 
+business 
+conspiracy 
+conspiracy 
+conspiracy 
+
+a 
+a 
+wing chun 
+300 
+die 
+heavenly 
+perquisite 
+kraftwerk 
+kraftwerk 
+underworld 
+underworld 
+underworld 
+underworld 
+underworld 
+underworld 
+kraftwerk 
+underworld 
+tangerine dream 
+minimal music 
+oceans thiteen 
+oceans thirteen 
+fist 
+vlc media player 
+chrictina branco 
+christina branco 
+christina branco blof 
+lameirinhas 
+lameirinhas 
+lameirinhas 
+britney spears 
+dead parrot 
+angelina jolie 
+dead parrot 
+paris hilton 
+search files, youtube and liveleak 
+plyboy 
+cristina aguilera 
+a4proxy 
+proxyway 
+ncis 
+dead parrot 
+hidemyip 
+hide ip 
+house 
+smallville 
+smallville 
+ghostsurf 
+heroes 
+sex video 
+dead parrot 
+lesbian 
+shana 
\83ã¤ã\83\86 
+proxyway 
+bitcomet 
+economic concepts 
+economic concepts 
+economic concepts 
+search files, youtube and liveleak 
+streettivo 
+streettivo 
+cricket 
+cricket 
+cricket 
+cricket 
+pakistan 
+house 
+ita 
+italy 
+discografia 
+finley 
+ministry of sound 
+sound 
+sound 
+sound 
+prison break 
+ebook 
+dexter 
+simpsons movie 
+take that 
+take 
+dexter 
+the queen 
+gympl 
+gina wild 
+gina wild 
+the wind that shakes 
+naked 
+office 
+naughty 
+4400 
+mathematica 
+prison break 
+prison break season 3 
+silvia saint 
+mathematica 
+mathematica 
+pornstar 
+pornstar 
+dover 
+kitzeln 
+car 
+spaceship 
+media player 
+science fiction 
+weeds season 3 
+weeds 
+kelly clarkson 
+mets 
+incest 
+taboo 
+taboo 
+sex 
+
+
+taboo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fun 
+fun 
+fun 
+lustiges 
+lustiges 
+the 11th hour 
+search files, youtube and liveleak 
+dead parrot 
+dead parrot 
+dead parrot 
+proxyway 
+nero 8 
+dead parrot 
+search files, youtube and liveleak 
+wildhearts 
+nero 8 
+police 
+kean 
+captain sensible 
+pil 
+public image limited 
+24 
+die 
+24 
+24 3 
+idiocracy 
+puppet mastaz 
+southpark 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mediadefender 
+dexter 
+test 
+mediadefender 
+sensual 
+prank 
+prank 
+mediadefender 
+
+mediadefender 
+mediadefender 
+mediadefender 
+axxo 
+the one 
+"the one" 
+the one jet li 
+primary colors 
+hudgens 
+hudgens 
+hindi 
+twenty four 
+24 s06e09 
+24 s06e10 
+24 s06e11 
+24 s06e13 
+24 s06e22 
+24 s06e23 
+as 101 de almada 
+sa leao 
+southpark 
+orn 
+porn 
+asian 
+asian 
+teen 
+korn 
+korn 
+korn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+a 
+woodcock 
+woodcock 
+woodcock 
+woodcock 
+balls of fury 
+504 boyz i can tell 
+upskirt 
+wetting 
+panties 
+panties 
+panty 
+peeing 
+upskirt 
+armageddon empires 
+squirt 
+wet pant 
+wet 
+bobbi 
+
+peep 
+fantasy wars 
+fantasy wars 
+
+private 
+dead parrot 
+celeb 
+dead parrot 
+underage 
+marion raven 
+dead parrot 
+http://www.youtube.com/watch?v=bn8ofpc_aq8 
+nixon resignation 
+nixon resignation 
+
+mediadefender 
+mediadefender 
+ladytron 
+tits 
+tits 
+softcore 
+woman 
+women 
+amateur 
+wu tang 
+rza 
+vai 
+fucking 
+funn 
+simpsons 
+poo 
+daria 
+daria 
+daria 
+daria 
+katie fey 
+superman 
+swinger 
+katie fey 
+katie fey 
+wächter der nacht 
+fucked 
+nochnoi dozor 
+the bourne ultimatum 
+teens 
+teens nudity 
+william orbit 
+ozric tentacles 
+transformers 
+osx86 
+
+chinese teens 
+eureka 
+lacuna coil 
+sucks 
+adult sins 
+the bourne ultimatum 
+sucks cock 
+search files, youtube and liveleak 
+paris hilton 
+ivanovic 
+gay 
+gay 
+
+wu tang 
+blue states 
+
+orgy 
+search files, youtube and liveleak 
+two 
+two 
+two 
+brand new 
+bioshock 
+kreatief 
+blowjob 
+sex 
+brand new 
+ricj 
+rock 
+rock your body 
+alternative 
+scrubs 
+scrubs 
+tiesto 2007 
+american snuff movie 
+the bourne ultimatum 
+i wish 
+rock your body 
+wantens 
+tone loc wild thing 
+reckless 
+bodywork 
+scrubs 
+david lee roth 
+scrubs 4 
+filmes portugueses 
+patio das cantigas 
+take on me 
+jamiroquai 
+jamiroquai little l 
+need for speed carbon 
+jenna 
+tom 
+anal 
+naruto 
+naruto 
+ubuntu 
+eddie izzard 
+footbag 
+stargate 
+leave britney alone 
+adobe photoshop 
+stickam 
+cam girls 
+linux 
+prey 
+prey 
+yiff 
+the beatles i need you 
+adobe 
+bleach 
+finale 
+finale 2008 
+one peice 
+one piece 
+text editor 
+tycoon 
+text editor 
+resident evil 
+ditor 
+text 
+beryl 
+
+
+blade runner 
+adobe 
+transformers 
+
+phish 
+kiddy porn 
+r@ygold 
+amateur 
+indian 
+indian 
+indian 
+indian 
+desi 
+desi 
+indian 
+desi 
+desi 
+heroes 
+phish 
+phish 
+
+phish 
+olivia monijca 
+desi 
+lesbian 
+olivia mojica 
+amateur 
+enemy territory 
+indian 
+buerets 
+arab 
+arab 
+knoppix 
+lela 
+dude where's my car 
+24 
+vanessa hudgens 
+death 
+don't tase me bro 
+tintin 
+tintin dvd 
+search files, youtube and liveleak 
+
+deadparrot 
+dead parrot 
+deadparrot 
+dead parrot 
+dead parrot 
+harry 
+dead parrot 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+java 
+thinking in java 
+pavarotti 
+thinking in java 
+search files, youtube and liveleak 
+traviata 
+traviata pavarotti 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+frangoulis 
+28 settimane dopo 
+28 settimane dopo 
+28 settimane dopo 
+28 settimane dopo 
+28 settimane dopo 
+search files, youtube and liveleak 
+28 settimane dopo 
+fall out boy 
+one piece vegapunk 
+handjob 
+handjob 
+handjob 
+handjob 
+grave digger 
+overkill 
+gary moore 
+eva angelina 
+weeds 
+lingerie 
+aurora snow 
+leandro leonardo 
+akon 
+karma police 
+rabbit in your headlights 
+daily show 
+colbert report 
+upskirt 
+abba 
+dawson miller 
+abba 
+abba 
+abba 
+abba 
+abba 
+abba 
+upskirt 
+simpsons movie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anal 
+rock 
+rock 
+metal 
+metal 
+pop 
+pop 
+pop 
+pop 
+olodum 
+upskirt 
+ivete sangalo 
+olodum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+muscle llatinos boy 
+atalanta 
+muscle latin boy 
+muscle boy 
+upskirt 
+muscle boy video 
+muscle boy video 
+310 to yuma 
+oasis 
+oasis 
+dvd 
+film 
+film 
+film 
+film 
+adobe cs3 ita 
+simple life 
+tomtom 
+linear programing 
+h.264 
+survivor 
+fetisch 
+fetisch 
+funny 
+dragon ball 
+linkin park 
+search files, youtube and liveleak 
+milf 
+italian 
+linkin park 
+dragon ball 
+dragon ball 
+dragon ball 
+gackt 
+gackt utaban 
+trailer 
+pantyhose 
+the clarks 
+stardust 
+unreal for maxc 
+unreal for max 
+unreal for mac 
+unreal for mac 
+unreal tournament 
+unreal tournament apple 
+unreal tournament mac 
+unreal tournament 
+far cry 
+madonna 
+britney spears 
+voetbal 
+search files, youtube and liveleak 
+pdf 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+prey 
+psp metal gear solid 
+unreal tournament apple 
+unreal tournament 
+goku 
+lsbian 
+doom 3 
+search files, youtube and liveleak 
+doom 3 mac 
+lesbian 
+apple doom 3 
+goku 
+goku 
+apple unreal 
+eminem 
+counter strike 
+search files, youtube and liveleak 
+counter strike os x 
+unreal tournament os x 
+unreal tournament for apple 
+unreal tournament for mac 
+umbrella rihanna 
+masturbation 
+rihanna 
+as 
+a 
+briana banks 
+banks 
+webcam 
+webcam 
+norton 
+norton 
+norton 
+norton 
+norton 
+http://www.freerapidaccount.com/free/?r=1057 
+http://www.freerapidaccount.com/free/?r=1057 
+http://www.freerapidaccount.com/free/?r=1057 
+fanny lu 
+fanny lu 
+me enamora - juanes 
+me enamora 
+juanes 
+juanes 
+stone age 
+search files, youtube and liveleak 
+pjk 
+microsoft windows vista 
+pjk 
+pjk rbv 
+horses 
+horses 
+pjk 
+pjk 
+pjk 
+vista 
+incest 
+incest 
+armin 
+armin the great escape 
+ps2 
+search files, youtube and liveleak 
+das streben nach glück 
+eagles of death metal 
+
+
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cream 
+avatar 
+avatar 
+avatar 
+mediadefender 
+search files, youtube and liveleak 
+serch dragon ball 
+serch dragon ball 
+aniston 
+courtney cox 
+http://videolog.uol.com.br/video?205085 
+naruto dublado 
+puffy 
+naruto dublado 
+puffies 
+voab 
+dragon ball 
+naaktstrand 
+videos legais.tv 
+videos legais.tv 
+neger 
+negerlul 
+penis 
+filmes dublados 
+filmes dublados 
+filmes dublados 
+filmes 
+big breasts 
+small breasts 
+rush 
+radiohead 
+fanny lu 
+van halen 
+coheed and cambria no world for tomorrow 
+no world for tomorrow 
+coheed and cambria 
+coheed and cambria running free 
+shemale 
+mother superior 
+coheed mother superior 
+rangers v stuttgart 
+coheed mother superior 
+rangers v stuttgart 
+lt 
+rus 
+[lt] 
+ita 
+boeijen 
+kite 
+manowar 
+meat loaf 
+boeijen 
+boeijen 
+pino daniele 
+leather boots 
+.net 
+.net 
+.net 
+
+evening gloves 
+
+
+azores 
+azores 
+azores 
+azores 
+azores 
+angra 
+ps2 
+dvd 
+dvd 
+dvd 
+blanzing 
+terceira 
+i 
+pico 
+pico 
+genesis 
+britney 
+heroes 
+heroes.s01e3 
+heroes.s01 
+heroes 
+azores 
+abby 
+lover 
+piss 
+compilation 
+nude 
+nudist 
+fkk 
+azores 
+scat 
+xxx 
+britney 
+britney 
+puke 
+elephants dream 
+modul 8 
+bleach 
+onepiece 
+one piece 
+one piece 
+one piece 324 
+one piece 
+l word 
+the l word 
+core rhythm 
+musical 
+shane carmen 
+shane carmen 
+l word 
+you tube 
+you tube 
+search files, youtube and liveleak 
+a 
+oral 
+big 
+big 
+big 
+the l word 
+big 
+big 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+avril lavigne 
+the l word italiano 
+the l word 
+l word 
+christina aguilera 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+l word 
+madonna 
+tiziano ferro 
+madonna 
+audioslave 
+xutos 
+twain 
+madonna 
+janet jackson 
+twain 
+bob 
+divx 
+bob 
+l word, youtube and liveleak 
+, youtube and liveleak 
+hairspray 
+rugby 
+eva mendes 
+photoshop 
+shabal 
+panda antivirus 
+virtumonde 
+gmail 
+preeteens 
+lost 04x00 
+lost 
+lost season 4 
+thursday 
+fist 
+faust 
+emma 
+chopin 
+macgamut 
+handicapt 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mandarin 
+downtown 
+downtown matsumoto 
+foo 
+big oppa 
+matsumoto 
+spiderman 3 
+matsumoto comedy 
+bourne 
+search files, youtube and liveleak 
+foo fighters 
+suse 
+search files, youtube and liveleak 
+korea 
+unreal tournament 
+unreal tournament 
+unreal tournament 
+unreal tournament 
+fifa 08 
+korea 
+domino 
+thai 
+hardcore 
+java 
+10 years after 
+10 years after 
+10 years after 
+10 years after 
+10 years after 
+10 years after 
+10 years after 
+10 years after 
+10 years after 
+safety 
+safety 
+young teen 
+search files, youtube and liveleak 
+porn 
+californication 
+weeds 
+weeds 
+jenna 
+jameson 
+blowjob 
+samantha 
+def leppard 
+lee aaron 
+thunder dirty love 
+charlie 
+darrian 
+red blue 
+ozzy 
+dexter 
+lavoie 
+halo 
+hazzard 
+lynda carter 
+baywatch 
+heroes season 
+hee haw 
+mandrell 
+alba 
+derek jeter 
+boy edgar 
+yankees 
+beyonce 
+shania 
+mcphee 
+kiss 
+lingerie 
+lynch mob river of love 
+charlie 
+charlie blowjob 
+football 
+playboy 
+gigi d'alessio 
+porn 
+naughty america 
+dutch 
+dutch tvseries 
+bundesliga 
+public tv 
+mtv 
+public tv 
+public tv 
+kbs 
+britney 
+britney 
+spanish 
+search files, youtube and liveleak 
+pc iso 
+kween 
+jeeg 
+autobahn 
+dhl 
+oktoberfest 
+dildos 
+dildos 
+dildos 
+dildos 
+dildos 
+dildos 
+nero 
+xxx 
+xxx 
+akon 
+mulatos 
+xxx 
+ganzta zoli 
+szakasz hun 
+szakasz hun 
+hun 
+monsterfotzen 
+akon 
+sperma 
+ggg 
+bed boys ii 
+far cry 2 
+far cry 2 game 
+search files, youtube and liveleak 
+ficken 
+cubase 
+tenniphil 
+xxxtorrent 
+xxxtorrents 
+test 
+korea 
+korea 
+korea 
+spiderman 
+spiderman 
+panda antivirus 2008 
+wrc 
+xxx 
+panda antivirus 2007 
+mature 
+koea 
+soccer 
+korea 
+search files, youtube and liveleak 
+guitarsynth 
+guitarsynth 
+guitar synth 
+guitar synth 
+these are the facts 
+korea 
+korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+freundin 
+teen 
+acdsee 
+vs.php 
+noci 
+search all files 
+vs.php 
+vs.php 
+lomu 
+korea 
+php 
+vb.net 
+vb.net 
+vb.net 
+vb.net 
+vb.net 
+net 
+rape 
+vs.php 
+grafica 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+webserver 
+jolie 
+jolie 
+batman 
+salsa 
+merenghe 
+merengue 
+jolie 
+moto gp 2007 ps2 
+tomtom 
+fwyh 
+black 
+avi 
+bioshock 
+nas 
+nas my way 
+one mic 
+nas one mic 
+vasco 
+zucchero 
+zucchero 
+zucchero 
+ebony 
+american choppers 
+mothers 
+mothers 
+50 cent 
+harry 
+rugby 
+milf 
+naruto 
+war 
+dalai lama 
+stephen colbert 
+dalla 
+luccio dalla 
+psp 
+psp 
+wwe 
+weeds 
+medal of honor 
+search files, youtube and liveleak 
+divx ita 
+bikini 
+16 year 
+wallpaper 
+pink floyd 
+rape 
+surf 
+film 
+nude 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porn 
+anime 
+hentai 
+razor1991 
+razor1911 
+anime 
+heroes 
+heroess01 
+heroess01 
+heroes.s01 
+prison break 
+galactica 
+viv thomas 
+invasion 
+prison break 
+johnnie to election 
+hide 
+hard video 
+cricket 
+movie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+trinspotting 
+search files, youtube and liveleak 
+kank 
+hum tum 
+pierino 
+hum tum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+movie 
+pierino 
+lomu 
+lomu 
+pierino in divx 
+divx ita 
+close combat 
+divx ita 
+divx ita 
+das streben nach glück 
+ita film 
+movie 
+movie 
+scopata 
+red hot chili peppers 
+red hot chili peppers 
+red hot chili peppers 
+search files, youtube and liveleak 
+debbie does dallas 
+amateur 
+amateur 
+amatoriale 
+amatoriale 
+thai vcd 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+spiaggia 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+amteur 
+foo fighters 
+amateur 
+foo fighter 
+britney spears 
+topa 
+avatar 
+hypnotic 
+german television 
+luci rosse 
+nude 
+search files, youtube and liveleak 
+avi to dvd 
+dvd 
+watch 
+nuda 
+korea 
+search all files 
+search all files 
+search all files 
+عÙุ± Ø§Ø¯Ù�¨ 
+england samoa rugby 
+kctvjeju 
+starwars 1 
+proxy way 
+anonimity 
+proxy 
+film porno 
+amy winehouse 
+jannes 
+69 
+jannes 
+sex 
+jannes 
+seredova 
+heroes 
+jannes 
+porno 
+proxyway 
+search files, youtube and liveleak 
+korea 
+korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+korea 
+dildo 
+heroes 
+insertion 
+divx ita 
+macromedia 
+
+eureka 
+latex 
+pelican 
+shrek 3 
+briana 
+latex balloon 
+mature 
+blow jobs 
+mature 
+mature 
+milf 
+nero8 
+nero8 software 
+nero8 software 
+divx 
+divx ita 
+the it crowd 
+the.it.crowd 
+selen 
+selen 
+eva henger 
+eva henger 
+pirati dei caraibi 
+eva henger 
+pirati dei caraibi 
+tribler 
+tribler 
+the.it.crowd s02e01 
+the.it.crowd s02e01 
+eva henger 
+henger 
+erlang 
+sandoval 
+sandoval 
+arturo sandoval 
+arturo sandoval 
+search files, youtube and liveleak 
+radio recorders 
+radio recorders 
+psp 
+fantozzi 
+007 
+fantastici 4 
+fantastici 4 
+fantastici 4 
+pozzeto 
+pozzetto 
+the it crowd 
+renato pozzetto 
+winrar 
+the it crowd 
+win 4 
+win german 
+winrar 
+winrar 4 
+german 
+house german 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+sadomaso 
+search files, youtube and liveleak 
+osama bin laden 
+bondage 
+leo can dive 
+tnt jackson 
+halo 
+tnt jackson 
+andre van duin 
+blowjob 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+corset 
+unreal germa 
+unreal german 
+unreal 
+inflatable 
+unreal 
+inflatable latex 
+1408 
+guitrasynth 
+guitarsynth 
+guitarsynth 
+boots 
+fetish 
+mina 
+m irc 
+mirc 
+du super controler 
+airbag test 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sony mhc 5900 
+brandweer 
+sony mhc5900 
+sony mhc5900 
+brandweer 
+brandweer 
+search files, youtube and liveleak 
+love 
+walker brothers 
+brandweer 
+dion 
+brandweer 
+.rar 
+marta sui tubi 
+marta sui tubi 
+route 66 
+sampletank 
+sampletank 
+sampletank 
+brandbestrijding 
+search files, youtube and liveleak 
+sampletank 
+sampletank 
+frankie goes to hollywood 
+sezen aksu 
+rolling stones 
+50 cent 
+war 2007 
+phalanx 
+phalanx 
+phalanx test 
+phalanx test 
+top 40 
+kardashian 
+hilton 
+claymore 
+anime 
+linux 
+heroes 
+search files, youtube and liveleak 
+weeds 
+lexington steele 
+breakfast 
+breakfast at tiffany's 
+transformer 
+toto 
+totò 
+trance 
+trance 
+trance 
+pamela 
+sm 
+bdsm 
+search files, youtube and liveleak 
+ina deter 
+cutting edge 
+cutting edge 
+moterhead 
+motorhead 
+aman 
+atlantis 
+aman 
+aman 
+atlantis 
+stargate 
+stargate atlantis 
+acronis 
+search files, youtube and liveleak 
+acronis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+norton 
+britney spears 
+inzest 
+inzest 
+inzest 
+gay 
+the way i are 
+super controller 
+house german 
+scubs 
+scrubs 
+stargate 
+candyman 
+star trek german 
+candyman 
+candyman 
+no te pido la luna 
+justin timberlake 
+sexy back 
+sexy back 
+sexy back 
+spice girls 
+chakaron 
+i see girls 
+gwen stefani 
+search files, youtube and liveleak 
+borsato 
+borsato 
+avril lavigne 
+pump it 
+belinda 
+superbad 
+search files, youtube and liveleak 
+ggg 
+utorrent 
+utorrent 
+xxx 
+anal 
+deutsch 
+anal 
+prison break 
+prison break 
+suck 
+xp 
+vista 
+heroes 
+hot 
+crysis 
+linda thompson 
+whisky bob 
+kierra kiki 
+whisky bob 
+chopin 
+knocked up 
+jesse james 
+jesse james robert ford 
+fuck 
+planet terror 
+kim holland 
+chopin 
+fisting 
+bonn 
+dennis deyoung 
+nutten 
+huren bonn 
+huren 
+straßemstricj 
+straßemstrich 
+straßenstrich 
+telefonsex 
+lolits 
+lolita 
+kindfrau 
+zierlich 
+hot strip 
+morocco 
+shakira 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+bjsex 
+the mars volta 
+ratatouille 
+spiderman3 
+spiderman 3 
+axxo 
+electric masada 
+german 
+xxx 
+search files, youtube and liveleak 
+nuance 
+half life 
+battlestar galactica 
+private 
+divx-ita 
+maradona 
+search files, youtube and liveleak 
+pirates 
+serie a 
+inter 
+udinese 
+virtual clone drive 
+football 
+slysoft 
+soccer 
+arsenal 
+pirati 
+search files, youtube and liveleak 
+keith urban 
+keith urban 
+keith urban 
+private video 
+keith urban 
+leann rimes 
+brad paisley 
+cs3 
+alan jacksom 
+alam jackson 
+alan jackson 
+alan jackson 
+alan jackson 
+ndesign 
+indesign 
+dance 
+dance 
+dance 
+private 
+
+bbw 
+search files, youtube and liveleak 
+nfl 
+gay porn 
+steelers vs fortyniners 
+hard eight 
+310 to yuma 
+310 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+310 to yuma 
+balls of fury 
+metalocalypse 
+search files, youtube and liveleak 
+rubber soul 
+ebony 
+ebony 
+bang bros 
+toast 
+toast crack 
+pthc 
+pedo 
+kacey 
+illegal teen 
+kecey 
+kacey 
+ableton 
+balls of fury 
+balls of fury 
+
+
+divx 
+divx 
+divx 
+divx 
+terrohrtom 
+bibi blocksberg 
+footjob 
+the maitrix 
+the maitrix 
+microsoft office 2007 
+zscudetto 2007 
+scudetto 2007 
+scudetto 2007 
+scudetto 
+scudetto 
+scudetto 
+scudetto2007 
+scudetto 2007 
+search files, youtube and liveleak 
+simpson 
+simpson 
+simpson 
+casablanca 
+casablanca 
+casablanca 
+porno 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porno 
+amatoriali 
+ramazzotti 
+miguel bose 
+search files, youtube and liveleak 
+o'riordan 
+sigmund freud 
+sigmund freud 
+sigmund freud 
+shakira 
+microsoft front page 
+microsoft front page 
+microsoft front page 2007 
+microsoft front page 2007 
+microsoft front page 2007 
+microsoft front page 2007 
+jennifer lopez 
+jennifer lopez 
+microsoft front page 2007 
+harry potter order of the pheonix 
+search files, youtube and liveleak 
+nicola cancedda 
+schrek 3 
+vasco rossi 
+ligabue 
+elisa 
+bawdy boys 
+sevp 
+dragon wars 
+mr woodcock 
+axxo 
+hed kandi 
+sevp 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ratatouille 
+ratatouille german 
+ratatouille 
+harry potter german 
+harry potter und der orden des phönix 
+s.e.v.p. 
+search files, youtube and liveleak 
+indiscreet passions 
+grand prix 
+krezip pinkpop 
+filme 
+krezip pinkpop 
+krezip pinkpop 
+pinkpop krezip 
+ubuntu 
+ugly betty 
+film 
+muzika 
+muzika 
+indiscreet passions 
+indiscreet passions 
+gay 
+gay 
+anal 
+dragon ball z 
+search files, youtube and liveleak 
+indian beauties 
+indian beauties 
+pregnant 
+r@ygold 
+93dvdrip 
+smart 
+high school musical 2 
+united dvdrip 
+love 
+naked 
+porn 
+south park 
+shroud of turin 
+rally 
+rally 
+rally 
+rally 
+sherk 
+sherk 3 
+sherk 3 
+matrix 
+ocean 12 
+braiana banks 
+jena jameson 
+superbad 
+superbad 
+superbad 
+superbad 
+superbad 
+blue sky mining 
+asstrafic 
+linux 
+porn 
+search files, youtube and liveleak 
+movies 
+electrons 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+formula 1 
+search files, youtube and liveleak 
+moto 
+baglioni 
+games 
+high school musical 2 axxo 
+eros ramazzotti 
+villeneuve 
+duri a morire 
+giochi 
+the best 
+gp 2007 
+gp f1 1978 
+the best 
+lara 
+lara 
+gp f1 1978 
+gp f1 1978 
+smith 
+gp f1 1978 
+gp f1 1978 
+smith 
+noiva 
+smith 
+smith 
+smith 
+smith 
+veritas backup 
+veritas backup 
+x 
+eva 
+bare minerals 
+ass 
+md formulations 
+search files, youtube and liveleak 
+the practice ita 
+mina 
+anal lick 
+high school musical 2 
+in tribunale con lynn 
+lomu 
+mina 
+basketball 
+prendilo 
+ajax 
+az-ajax 
+pane e tulipani 
+terminator 
+itunes 
+veritas backup 
+shemale 
+shemale 
+anal 
+house german 
+german 
+chaienn 
+pregnant 
+masturbate 
+i now pronounce you chuck and larry 
+jessica biel 
+david 
+
+jan smit 
+mika 
+queen 
+heroes 
+teacherq 
+teachers 
+resident evil extinction 
+eureka 
+pompini 
+female 
+eureka episode 1 
+eureka 
+eureka s1 
+kyla cole 
+heroes 
+search files, youtube and liveleak 
+rape 
+indian rape 
+indian rape 
+indian rape 
+indian rape 
+indian rape 
+little rape 
+little rape 
+rape 
+games 
+splinter cell 
+splinte 
+splinte 
+psv 
+splinter cell 
+anal 
+sex 
+sex 
+sex 
+dragon ball z film 
+adult 
+battlefield bad company 
+battlefield bad company 
+stargate atlantis 
+lektor 
+lektor 2007 
+muzyka 2007 
+techno 2007 
+jackyll&hyde 
+akell hyde 
+potter 
+hip hop 
+blakes 7 
+blakes 7 
+survivor 
+doggy 
+doggy style 
+doggy style 
+kill 
+smith 
+ger 
+war 
+dr who 
+pussy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mars 
+mars 
+mars volta 
+zecchino doro 
+gypsy 
+balkan 
+balkan 
+doctor who 
+doctor who invasion 
+eureka 
+eureka 
+eureka 
+dragonball z il legendario super sayan 
+dragonball z il legendario super sayan 
+dragonball z il legendario super sayan 
+dragonball z il film 
+rambo 
+i guerrieri della notte 
+i guerrieri della notte 
+i guerrieri della notte 
+i guerrieri della notte 
+i guerrieri della notte 
+search files, youtube and liveleak 
+mindquarry 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rings 
+naomi 
+lineage 
+openbsd 
+rammstein 
+rammstein 
+tera patrick 
+it crowd 
+rambo 
+rambo 
+nannini 
+rambo 
+titanic 
+titanic 
+i guerrieri della notte 
+i guerrieri della notte 
+titanic italiano 
+titanic italiano 
+titanic italiano 
+titanic italiano 
+doctor who invasion 5 
+world trade ceer 
+world trade center italiano 
+ebook 
+dragon wars 
+doctor who invasion 6 
+rtl 
+ebook 
+krecik 
+krecik 
+krecik 
+krecik 
+krecik 
+krecik zegarmistrzem 
+krecik zegarmistrzem 
+krecik zegarmistrzem 
+krecik zegarmistrzem 
+chuck and larry 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+dvd ita 
+dvd ita 2007 
+dvx ita 2007 
+mauro nardi 
+world of warcraft the burning crusade 
+de wandeling 
+crash 
+paul de leeuw 
+hed kandi 
+hed kandi 
+borat ita 
+deja vu ita 
+un'impresa da dio 
+friends 
+fairy tale 
+my life 
+my life fairy tale 
+harry potter order of the pheonix 
+divx 
+hitman reborn 
+harry potter order of the pheonix dvd 
+harry potter order of the pheonix dvd 
+divx ita 
+teen 
+kkk 
+harry potter order of the pheonix 
+step up 
+manowar 
+shrek 3 
+pc game ita 
+manowar gods made metal 
+yes close to the edge 
+yes close to the edge 
+yes close to the edge 
+yes close to the edge 
+yes close to the edge 
+moody blues 
+samim 
+egy makulátlan elme örök ragyogása 
+zene 
+musik 
+zene 
+mostra geração 
+mika 
+sesso 
+search files, youtube and liveleak 
+mana 
+adobe 
+adobe 
+top 20 
+nds 
+nintendo ds 
+nintendo ds 
+nl top 20 
+nes 
+juiced 2 nds 
+cobra 
+week 38 
+week38 
+speedtv 
+speedtv 
+juiced 2 nds 
+eric clapton 
+gay 
+goth 
+mistress 
+divx 
+divx ita 
+insertion 
+bizarre 
+guys gone wild 
+dildo 
+search files, youtube and liveleak 
+casa 
+can't turn you on 
+can't turn you loose 
+can't turn you loose 
+can't turn you loose 
+can't turn you on 
+can't turn you on 
+can't turn you loose 
+can't turn you loose 
+casa 
+casa 
+amore 
+timberlake 
+let it go keyshia cole 
+arturo gatti 
+best buds get tagged 
+corbin fisher 
+gay 
+st. vincent 
+gay 
+sex 
+bang bros 
+bang bros 
+search files, youtube and liveleak 
+metal 
+legal 
+karaoke 
+search files, youtube and liveleak 
+it crowd 
+airbag granny 
+sicko 
+rape 
+pirates of the caribbean 
+htid 
+softimage 
+3d studio max 
+linux xp 
+bang bros 
+cameltoe 
+mature 
+sexe 
+search files, youtube and liveleak 
+german 
+nhl 
+pittsburgh penguins 
+live tv 
+search files, youtube and liveleak 
+porno 
+sex 
+lord of the rings 
+haskell 
+telefon tel aviv 
+prison break 
+gay 
+wheel of fortune 
+wheel of fortune 
+bioshock 
+korea 
+chearleader 
+xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ben hur 
+ben hur 
+editorial 
+computer editorial 
+how to build a computer editorial 
+how to build a computer 
+how to build a computer 
+polloi 
+house 
+motorola 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+russian 
+mature 
+ps2 
+ps2 moto gp 
+moto gp 
+ps2 
+ps2 ita 
+ps2 it 
+guitar hero ps2 
+harry potter 
+fantastici quattro e silver surfer ita 
+fantastici quattro e silver surfer 
+transformers it 
+fantastici quattro it 
+fantastici quattro 
+notte 
+fantastic four 
+fantastic four it 
+harry potter it 
+harry potter ita 
+harry potter ita 
+xxx asia 
+xxx cina 
+xxx tokyo 
+asian 
+search files, youtube and liveleak 
+asian xxx 
+russian xxx 
+russian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+schooltv 
+schooltv brandpreventie 
+schooltv 
+schooltv-brandpreventie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anita meijer 
+schooltv-brandpreventie 
+schooltv-brandpreventie 
+schooltv-brandpreventie 
+schooltv-brand 
+eric clapton 
+schooltv 
+search files, youtube and liveleak 
+premonition 
+premonition eng 
+asian 
+asian teen 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+883 
+883 
+883 
+883 
+ps2 
+harry potter ita 
+spiderman 
+spiderman 
+spiderman 
+torture 
+bdsm 
+k3 
+k3 
+plain white 
+hard-fi 
+suburban nights 
+suburban nights 
+hard fi 
+goo goo dolls 
+iris 
+sex 
+sleepers 
+brad pitt 
+carmen electra 
+angeline jolie 
+hard fi 
+goo goo dolls 
+boris kovac 
+dom lomp 
+opposites 
+prince 
+fiction plane 
+bruce willis 
+die hard 
+die hard 
+die hard 
+die hard 4 
+die hard 4 
+angeline jolie 
+angeline jolie 
+angeline jolie 
+angelina jolie 
+elle mcpherson 
+stolen tape 
+homemade videos 
+tatort 
+tatort 
+tatort 
+doggie style 
+madonna 
+madonna 
+madonna 
+madonna 
+madonna 
+syllart 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+street 
+street 
+fracture 
+fracture movie 
+rush hour 3 
+fatman 
+fatman 
+blue 
+street 
+street line 
+street lan 
+fatman scoop 
+fatman scop 
+porn 
+porn 
+porn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+prison break 
+prison break ita 
+chiara 
+linux 
+sara 
+windows 
+bootcd 
+hirens 
+bourne 
+heroes season 1 episode 19 
+heroes episode 19 
+search files, youtube and liveleak 
+yuo tube 
+juve 
+harry potter philosephers stone 
+when the mountains tremb 
+when the mountains tremble 
+house german 
+shrak 3 
+shrek 3 
+dutch sub 
+german 
+gloria 
+win-agenda 1.0 
+vrt 
+dutch sub 
+food 
+appletv 
+it crowd 
+bon jovi 
+u2 
+shemale 
+sean paul 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+big titts 
+big titts 
+titts fuck 
+fuck 
+ass 
+ass 
+jenna 
+kingdom under fire 
+kingdom under fire 
+kingdom under fire xbox 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gay sex video 
+gay sex video 
+manciano 
+search files, youtube and liveleak 
+doggy stile 
+doggy 
+ace ventura 
+drumtools performance designer 
+anita meijer 
+anita meijer 
+margie ball 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+andress 
+creedence clearwater 
+streisand 
+avinu 
+streisand 
+divx ita 
+i simpson 
+i simpson il film 
+i simpson il film ita 
+superbad 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+croatia 
+mika 
+albums 
+albums mika 
+albums justin timbelake 
+justin timbelake 
+justin timbelake 
+justin timbelake 
+search files, youtube and liveleak 
+300 
+nemeswis 4 
+nemeswis 4 
+ronaldo 
+ronaldo 
+ronaldo 
+ronaldo 
+games 
+games 
+sex 
+ganes 
+black christmas ita 
+black christmas 
+vasco rossi 
+ass 
+vasco rossi 
+divx ita 
+soundtrack 
+pearl harbor 
+pearl harbor 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+desperado 
+desperado dvx 
+desperado dvix 
+desperado ita 
+desperado ita 
+dvd ita 
+heroes 
+heroes 2x 
+cycling 
+blowjob 
+veronica zemanova 
+veronica zemanova 
+veronica zemanova 
+porno 
+sex 
+xxx 
+dvd ita 
+veronica zemanova 
+xxx 
+xxx 
+xxx 
+xxx 
+petra verkaik 
+dvd ita 
+mad world 
+metallica 
+keba 
+hazes 
+hazes 
+hazes 
+hazes 
+hazes 
+hazes 
+l'ultima legione 
+crtani 
+crtani xxx 
+divx 
+xxx 
+divx ita 
+the science of sleep 
+dragana 
+divx ita 
+divx ita 
+dragana 
+dragana 
+film ita 
+it crowd 
+dvd ita 
+the great world of sound 
+closing escrow 
+self medicated 
+kill bill 
+refactor 
+refactor 
+spss 
+spss 
+spss 
+spss 
+divx ita 
+dvd ita 2007 
+gigi d'agostino 
+sean paul 
+kelis 
+dvd ita 
+bones ita 
+dvd ita 
+jimmy reed 
+can't turn you loose 
+csi las vegas 7 
+divx ita 
+divx ita 2007 
+divx film ita 2007 
+shrek 3 
+superbad 
+shrek 3 film 
+lesbian 
+lesbian ebony 
+the simpson movie 
+the simpson movie ita 
+porno 
+the simpson ita 
+film 
+film ita 
+i simpson il film 
+sex 
+stories 
+erotic 
+film ita 
+il sette e l'otto 
+ficarra e picone 
+japanese girl doggie style 
+ficarra e picone 
+japanese girl doggie style 
+japanese girl doggie style 
+japanese girl doggie style 
+il sette e l'otto 
+foot 
+oceans thirteen 
+oceans thirteen 
+oceans thirteen ita 
+divx ita 
+film ale e franz 
+kari wuhrer 
+kari wuhrer 
+kari wuhrer 
+kari wuhrer 
+kari wuhrer 
+dutch 
+doctor 
+infermiere 
+kari wuhrer 
+pornostar 
+pornstar 
+movie ita 2007 
+movie ita 2007 
+lesbian foot 
+movie 2007 
+film 2007 
+lesbian 
+ed2k://|file|[divx%20ita]%20porky%20college%202%202007%20by%20minyc%20dj.avi|1408829714|6923a49c1d90a5e4677ba62e6394a2a0|/ 
+film ita 
+parky college 
+porky college 
+porky college film 
+porky college film 
+lesbo 
+dvd ita 
+fred durst 
+film ita 
+xxx 
+youporn 
+porno 
+sex 
+nl subt 
+pop 
+toastee 
+search files, youtube and liveleak 
+halo 3 
+incubus 
+sexe 
+teen 
+skirt dance 
+deadwood hun 
+deadwood s2 
+teen 
+simon and garfunkel 
+classical music 
+flipsasa 
+mozart 
+beethoven 
+paul simon 
+lightroom 
+simon and garfunkel 
+bee gees 
+phantom of the opera 
+les miserable 
+chicken shoot 
+jonathan livingston seagull 
+die hard 4 
+sm 
+les miserables 
+phantom of the opera 
+shrek 3 
+shrek 3 ita 
+shrek ita 
+dvd ita 
+simon and garfunkel 
+simon and garfunkel 
+simon and garfunkel 
+simon and garfunkel 
+dvd il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+ficarra e picone 
+ficarra e picone 
+ale e franz 
+dvd-ita 
+dvd-ita 
+planet terror ita 
+games 
+divx ita 
+planet terror ita 
+porn 
+pak girls 
+divx 
+il dolce e l'amaro 
+games 
+il bacio che aspettavo 
+il bacio che aspettavo 
+manuale d'amore 2 
+album 
+manuale d'amore 
+manuale d'amore 
+manuale d'amore 2 
+kiaa 
+kiss 
+manuale d'amore 2 
+night of the living homeless 
+manuale d'amore 2 
+c# 
+south park 
+southpark 
+hollywood sex 
+america 
+americas army 
+linux 
+ubuntu 
+open office 
+doctor who 
+photoshop cs3 
+resident evil 
+iben hjejle 
+blonde 
+maria bello sex 
+cartao 
+cartao de visita 
+dvd ita 
+maria bello 
+ellen barkin 
+dvd ita 
+ita 
+search files, youtube and liveleak 
+sexcams 
+sexcams 
+ita 2007 
+xvid ita 
+the secretary 
+sexcams 
+sexcams 
+sexcams 
+kissing on thte mouth 
+sexcams 
+sexcams 
+cartao de visita corel 
+kissing on the mouth 
+cartao no corel 
+laura linney 
+tania russof 
+ccorel 
+mika 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+severance ita 
+vi dichiaro marito e marito 
+navigon 
+premonition ita 
+ratatouille 
+ita 
+ratatouille it 
+nomadi 
+it crowd 
+search files, youtube and liveleak 
+laura 
+cazzi 
+hard 
+clasical 
+vista 
+search files, youtube and liveleak 
+twin peak 
+twin peax 
+twin 
+twinpix 
+twinpeax 
+privat 
+vista 64 
+vista 64 
+vista 64 
+vista 64 
+twin 
+twin peaks 
+pes 
+search files, youtube and liveleak 
+xxx 
+pro evolution soccer 
+tanita tikaram 
+el polaco 
+red hat 
+mi pc 
+red hat linux 
+webcam 
+instrumenals 
+instrumentals 
+teen 
+beos 
+3gp 
+arina 
+matrix os 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+sarah brigthman 
+kelly joyce 
+game pc ita 
+game pc ita 
+game pc 2007 
+game pc 
+photoshop cs3 
+game 2007 
+game pc 
+tits 
+game pc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleakfilmes 
+game pc 
+pes 7 
+pes 
+pes 
+pc game 
+pc game ita 
+pc game trailer 
+game trailer 
+alvaro vitali 
+lesbo 
+simpson 
+simpson ita 
+ita 
+shrek 3 
+shrek 3 ita 
+shrek ita 
+turkish porn 
+turkish porn 
+skinny 
+u2 
+skinny 
+big clit 
+search files, youtube and liveleak 
+sonu nigam 
+spiderman 3 
+rammstein 
+rammstein 
+rammstein 
+search files, youtube and liveleak 
+blues brothers 
+electric six 
+electric six 
+wagner 
+wagner 
+wagner 
+wagner 
+wagner 
+porn 
+lina ramon 
+lena ramon 
+lena ramon 
+lena ramon 
+lena ramon 
+lena ramon 
+lena ramon 
+lena ramon 
+battles atlas 
+battles atlas 
+ferrari 
+ho'oponopono 
+ratatouille 
+http://br.youtube.com/watch?v=mijqhp--od0 
+ass 
+dancing 
+booty meat 
+booty shake 
+shaking 
+ass 
+ass shake 
+lapdance 
+blowjob 
+booty meat 
+booty meat 
+fruit loops 
+tropa de elite 
+lolita 
+teen 
+search files, youtube and liveleak 
+indra 
+soccer 
+playboy 
+bourne 
+soccer 
+bourne 
+bangbros 
+pussy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kylee reese 
+asian 
+mlb 
+search files, youtube and liveleak 
+xxx movie 
+halloween 1978 
+halloween 1978 
+chuck 
+chuck -pronounce 
+heroes -s01 
+japan girls 
+japan girls xxx 
+pthc 
+loose change 
+sarah 
+heroes 
+heroes s02 
+tango 
+silk 
+spears 
+css 
+php 
+php 
+porn 
+raven riley 
+private xxx 
+animal 
+zoo sex 
+zoo sex 
+sex farm 
+evergreen 
+evergreen 
+luba 
+andy william 
+boulevard 
+search files, youtube and liveleak 
+ligabue 
+tiny 
+labia 
+scarface 
+biagio antonaccci 
+biagio antonacci 
+gigi finizio 
+lucio dalla 
+finizio 
+foto 
+video porno 
+eva henger 
+ls model 
+models 
+hegre 
+colonne sonore 
+freedom 
+monella 
+biancaneve sotto i nani 
+biancaneve sotto i nani film porno 
+biancaneve sotto i nani film porno 
+pinocchi porno 
+pinocchio porno 
+film porno 
+sex 
+scarface ita 
+biancaneve film porno 
+sesso in classe 
+professoresse nude 
+the simpson 
+the simpson italiano 
+porno 
+eva henger 
+film porno 
+film porno 
+candy store 
+candy dulfer 
+love is gone 
+gemelli diversi 
+napoli 
+gemelli diversi 
+yoga 
+simpson film 
+ramazzotti 
+tutto 
+оÑÃ\90¾Ð±Ð¾Ðµ Ð¼Ð½ÐµÐ½Ð¸Ðµ 
+minority report 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
\8b¼ã® 
+rihanna 
+fmp 
+fullmetal 
+search files, youtube and liveleak 
+fullmetal 
\83\99ãÄ\9eãÅ¥ãÄ\9eãÅ» 
+çÄ\84修正 
+office 
+çÄ\84修正 
+電マ 
+電マ 
+i simpson 
+i simpson il film 
+distretto 
+distretto 
+distretto 
+distretto 
+distretto 
+distretto 
+distretto di polizia 6 
+see dick run 
+fun with dick e jane 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ghost raides 
+rocky 
+giochi playstation 2 
+playstation 2 
+distretto di polizia 6 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tre metri sopra al cielo 
+playstation 2 giochi 
+fonogramma 
+anniÅÂ\86a vanniÅÎ\94Â\81 
+cultura moderna 
+cultura moderna 
+cultura moderna 
+cultura moderna gioco 
+cultura moderna gioco 
+cultura moderna gioco 
+gran turismo 
+moto gp ps2 
+spider man ps2 
+shrek 3 
+spider man ps2 
+jazz 
+metal gear ps2 
+thbled 
+the bled 
+new amsterdams 
+new amsterdams 
+search all files 
+new amsterdams 
+new amsterdams 
+new amsterdams 
+new amsterdams 
+pdf 
+ubuntu 
+kde4 
+music 
+bourne 
+chopin 
+chopin 
+classic 
+classic 
+classic 
+classic 
+classic 
+css 
+leg 
+photography 
+
+cock 
+cream 
+girlsoutwest 
+seventeen 
+seventeen 
+seventeen 
+boobs 
+boobs 
+mediadef 
+mediadefender 
+public 
+ratatouille 
+mediadefender 
+public 
+defender 
+kaze no stigma 
+darker than black 
+tokyo majin 7 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+"family tree maker 2008 
+"family tree maker 2008" 
+"familytree maker 2008" 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+chopin 
+http://www.liveleak.com/view?i=c66_1176846833 
+batman 
+dr steel 
+run fat boy run 
+run fat boy run 
+"family tree maker 2008" 
+"family tree maker 2008" 
+family tree maker 2008 
+sosft 
+sisoft 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+sex 
+accordion 
+defender 
+ryan adams 
+the others 
+chessbase 
+zadornov 
+search files, youtube and liveleak 
+acronis migrate 
+teen 
+883 
+883 
+karaoke 
+karaoke brasil 
+883 
+karaoke brasil 
+search files, youtube and liveleak 
+prison break 
+brotherhood 
+search files, youtube and liveleak 
+californication 
+counter strike 
+counter strike source 
+dutch 
+l'ultima legione dvd 
+dvx ita 
+u2 
+mp3 
+xvid 
+xvid ita 
+xvid de luxe 
+italian subbed 
+zwartboek 
+high school musical 
+high school musical ita 
+xvid imc 
+tettone 
+tettone 
+tettone 
+dutch 
+movie 
+dutch movie 
+no reservations 
+britney 
+soft erotic 
+soft porn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+soft porn 
+xvid ita 
+xvid 
+xvid 
+xvid 
+fantasmes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kurdistan 
+bar hopping hotties 2 
+bar hopping hotties 
+kurdistan 
+kama sutra 
+eruh 
+emmanuelle 
+objects of her desire 
+confessions of a groupie 
+confessions of a groupie 
+naked encounters 
+sin city diaries 
+chuck 
+fantasmes 
+my first female lover 
+bound cargo 
+playboy 
+oral 
+teenage cavegirl 
+teenage cavegirl 
+amateurs 
+search files, youtube and liveleak 
+movies 
+borat 
+spiderman 3 
+indian sex 
+spongebob 
+miss italia 
+vanessa 
+death note 
+torrentsmd.com 
+torrentsmd.com 
+ac dc 
+tokio hotel 
+simpson 
+simpson ita 
+paid in full 
+paid in full 
+paid in full 
+paid in full 
+nas illmatic 
+toots 
+thielemans 
+death note 
+mike 
+search files, youtube and liveleak 
+ocean 
+hot fuzz 
+hot fuzz ita 
+anita blonde 
+youtube 
+sylvia saint 
+timbaland 
+hot fuzz ita 
+sybian 
+cracked 
+emule 
+deutsch 
+search files, youtube and liveleak 
+[anime-ita] 
+quim barreiros 
+quim barreiros 
+original flavor 
+original flavor 
+roling stones 
+feat boy slim 
+fat boy slim 
+fat boy slim 
+fat boy slim 
+fat boy slim 
+dragon ball z 
+dragonball z 
+dragonball z 
+dragonball z 
+search files, youtube and liveleak 
+pink floyd 
+pink floyd 
+pink floyd 
+pink floyd 
+pink floyd 
+depeche mode 
+depeche mode 
+depache mode 
+depache mode 
+depache mode 
+depache mode 
+depache mode 
+depache mode 
+lamas 
+lamas 
+lamas 
+rhcp 
+dragon wars 
+thai 
+soilwork 
+feet 
+feet 
+feet lick 
+wrestle 
+oil 
+feet lick 
+orgasm 
+teen 
+tokyo majin 7 
+halo 
+halo 
+luca giurato 
+loretta goggi 
+jaguar 
+navtech 
+terminator 
+shrek terzo walt disney 
+shrek terzo walt disney 
+shrek terzo 
+shrek terzo films 
+dragonball 
+fr 
+pirates 
+pirates of silicone 
+dragonball gt 
+search files, youtube and liveleak 
+the brave one 
+terror 
+720p 
+720p king kong 
+king kong 
+king kong xvid 
+720p 
+hdtv 
+jade 
+jade 
+1080p 
+search files, youtube and liveleak 
+dragon baal 
+1080i 
+pantyhose 
+omas 
+mature 
+lessen to love 
+level 2 
+granny 
+playstation 2 
+fifa 2008 
+bmx playstation 2 
+playstation 2 
+i listen to love 
+search files, youtube and liveleak 
+mtv 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+oma 
+cnn 
+tomb raider anniversary 
+tomb raider anniversary ps2 
+bach 
+spider man 3 ps2 
+king lir 
+tomb raider anniversary ps2 
+zero assoluto 
+dragon ball z ps2 
+dragon ball z psp 
+giochi psp 
+giochi psp 
+psp 
+ps2 
+navteq 
+brazzers 
+bush 
+grannis 
+grannies 
+grandma 
+grandmother 
+grandmother 
+grandmother 
+vieilles 
+the police 
+search files, qcad 
+search files, ubunt 
+the police 
+sting 
+sting 
+ubuntu cube 
+ubuntu 
+gary moore 
+ubuntu plugin 
+q cad 
+dream theater 
+ubuntu 
+search files, youtube and liveleak 
+worried ray 
+pam tommy 
+incest 
+crank that 
+t-pain 
+heroes 
+heroes s02 
+prison break 
+prison break s03 
+prison break s03e02 
+heroes s02 
+heroes s02 
+mom 
+mature.nl 
+mature 
+old 
+grandmother 
+grandma 
+grandmas 
+lusty grandmas 
+lusty 
+mature woman 
+alt 
+gilmour 
+fortzen 
+vieille 
+femme mure 
+femme 
+femme 
+mure 
+gong 
+hand 
+alicia keys 
+carrie underwood 
+paula de anada 
+paula deanada 
+paula deanda 
+search files, youtube and liveleak 
+rascal flats 
+rascal flats 
+rascal flats 
+rascal flat 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+50 cent 
+good luck chuck 
+the game plan 
+dragon wars 
+office 2007 
+walking tall 2 
+walking tall 2 
+wrong turn 2 
+drivergenius 
+photoshooting 
+soccer 
+best of fashion tv 
+sex 
+女åÄ\92 
+女åÄ\92 
+sex 
+victoriassecret 
+natacha peyre photoshooting 
+natacha peyre 
+fotoshooting 
+kader loth 
+fotoshooting 
+prison break 
+prison break hr 
+voz propia 
+inxs 
+inxs 
+rock 
+zz top 
+zz top 
+sex 
+angelina jolie 
+pamela 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ghulam ali 
+ghulam ali 
+ghulam ali 
+ghulam ali download 
+ghulam ali download 
+fener 
+planet earth 
+blue planet 
+arab 
+arab 
+game 
+windows vista ultimate 
+blue planet 
+adult 
+kuwait 
+shemale 
+asian 
+teen 
+kuwait 
+arab 
+Ø´Ù�ู�Â\84 
+عرب 
+heros 
+heros s02e01 
+muft.tv 
+ghulam ali 
+ghulam ali 
+heros 
+heros s02e01 
+heros s02e01 
+heros s02e01 
+family guy 
+house season 4 
+house alone 
+house s4ep1 
+divx 
+divx 
+divx 
+divx 
+divx 
+japan xxx 
+sound choice karaoke 
+karaoke brasil 
+ahmadinejad 
+bartoli 
+sod 
+selen 
+japan girl xxx 
+japan girl 
+japan teen 
+japan 
+russian incest 
+porno 
+insegnanti 
+insegnanti xxx 
+asian xxx 
+asian teen 
+xxx 
+simpson 
+simpson ita 
+sex asian 
+zoosex 
+japanese teenn 
+japanese teen 
+serenity 
+serenity ita 
+serenity ita 
+openid 
+hot asian 
+hot asian xxx 
+asian xxx 
+maria - the album 
+maria - the album 
+maria 
+bartoli 
+zetsubou raw 
+shrek 
+pirates 
+search files, youtube and liveleak 
+wam 
+wet and messy 
+piss 
+pee 
+simpson 
+wam 
+selen 
+trasformer 
+sompson 
+i simpson 
+i simpson ita 
+corso di ballo 
+zetsubou 
+(divx) corso di ballo - lezioni di salsa portoricana - lezione di base.mpg 
+(divx) corso di ballo - lezioni di salsa portoricana - lezione di base.mpg 
+(divx) corso di ballo - lezioni di salsa portoricana - lezione di base.mpg 
+i simpson 
+moana 
+rizzo 
+caraibi 
+gessica 
+300 
+novita
+*.* 
+dvd 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+dvd 
+dvd ita 
+pirati dei caraibi 
+dvd ita 
+dvd ita 
+dvd ita 
+dvd ita 
+friends 
+tra 
+friends ita 
+nansy 
+nansy 
+madonna 
+sparagna 
+sparagna 
+friends ita 
+friends ita 
+divx ita 
+divx ita friends 
+dvd ita 
+foo 
+sex 
+heroes 
+search files, youtube and liveleak 
+xbox 
+dreamcast 
+xbox 
+divx ita 
+divx ita 
+divx ita 
+prison break 
+tango 
+legal 
+legaltorrents 
+legaltorrents 
+bbc ansi 
+bbc 
+bbc ansi 
+ansi 
+ansi bbc 
+bbc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+divx 
+divx 
+matia bazar grido 
+aqua teen hiung 
+aqua teen hiun 
+aqua teen hiun 
+aqua teen hiun 
+aqua teen hunger force 
+clubfanatix 
+mac os 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+compiz 
+wolfram 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vmware 
+mature 
+madonna 
+mature 
+mature 
+top gear 
+hack 
+malmsteen 
+malmsteen 
+yngwie 
+malmsteen 
+creative cow 
+cmos password recovery tools 4.0 
+eragon 
+german 
+xxx 
+seinfeld 
+cum 
+girls 
+seinfeld 
+girls 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sigur ros 
+sigur ros 
+erik mongrain 
+harry potter 
+rush hour 3 
+george strait 
+seinfeld 
+dragonball z 
+seinfeld 
+adobe 
+slayer 
+crazy 
+search files, youtube and liveleak 
+prison break 
+prison break.s03 
+crazy 
+prisonbreak.s03 
+prisonbreak 
+prison.break.s03 
+prison.break.s03 
+prison.break.s0 
+prison.break.s 
+prison.break. 
+prison.break 
+prison break s03 
+prison break s03 
+prison break s0302 
+prison break s03 02 
+orgy 
+lesbian orgy 
+dragonball z 
+chevelle 
+chevelle 
+medieval 2 total war 
+medieval 2 total war ita 
+prison break s03 02 
+prison break s03 
+prison break s03 
+lesbian orgy 
+hentai 
+teens 
+phill collins 
+genesis 
+phil collins 
+lesbian 
+longman 
+xxx 
+simpson 
+lara pausini dove l'aria è polvere 
+laura pausini dove l'aria è polvere 
+zwartboek the movie 
+private 
+private 
+chevelle 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+greatest opera show on earth 
+greatest opera show on earth 
+greatest opera show on earth 
+si ministre 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+paard 
+sinterklaas 
+sinterklaas 
+usa maps 
+usa maps 
+usa maps 
+usa maps 
+usa map 
+us map 
+us map 
+acrobat 8 professional 
+larell hardy 
+laurell hardy 
+laurel hardy 
+die hard 
+die hard 
+laurel hardy 
+laurel hardy 
+potter phoenix 
+resident evil 
+sims 2 
+beatles 
+ratatouille 
+inter sampdoria 
+inter sampdoria live 
+inter sampdoria live 
+foo 
+sugarbabes 
+grönemayer 
+grönemeyer 
+grönemeyer 
+720p 
+laurel hardy 
+tango 
+300 
+dj tiesto 
+dvx ita 
+daddy yankee 
+daddy yankee 
+riverdance 
+riverdance 
+amateur 
+pharrell 
+ti 
+search files, youtube and liveleak 
+allo allo 
+allo allo ep3 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+capleton 
+strumpfhose 
+strumpfhose 
+capleton 
+bareback 
+fantastici 4 
+gay speedo 
+speedo 
+slingblade 
+lanois 
+cash 
+the war 
+fantastic four 
+vista 
+cum guzzlers 
+gay bareback 
+search files, youtube and liveleak 
+most evil 
+most evil 
+brotherhood 
+most evil 
+gay bare 
+naruto shippuden ita 
+naruto ita 
+gay spiderman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+axxo 
+american history x 
+house 
+placebo 
+grindhouse 
+planet terror 
+axxo 
+mastadon 
+ubuntu 
+the brave one 
+the brave one 
+claymore 
+search files, youtube and liveleak 
+cisco 
+dirty rich 
+dirty sexy money 
+die hard 4 
+teen 
+steeleye span 
+steeleye span 
+vista 
+hitzone 43 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 
+joan of arcadia 
+joan of arcadia 
+joan of arcadia 
+joan of arcadia 
+lost 
+shine mitchell 
+friday night lights 
+groningen 
+transformer 
+star trek 
+walt disney 
+tanja savic 
+tanja savic 
+tanja savic 
+lenny bruce 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+
+search files, youtube and liveleak 
+python 
\8f�¢é\87\91å\88\9a 
+www.orkut.com 
+www.orkut.com 
+www.orkut.com 
+alicia keys 
+turnstall 
+kd 
+thurn 
+mom and son 
+walt disney 
+kurnstall 
+kt turnstall 
+bioshock 
+meatloaf 
+vinyl 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+naruto 
+britney spears 
+borland 
+segreto sahara 
+porno 
+porno 
+elisa 
+hd 
+atlantis hd 
+stargate hd 
+vivaldi 
+atlantis hd 
+hd 
+segreto sahara 
+manuela arcuri 
+vivaldi 
+electric six 
+wheels of steel 
+wheels of steel 
+pc game 
+csi 
+csi 
+csi 
+c.s.i 
+settlers game 
+porno 
+spiderman 3 
+gta 
+moto gp 
+i soprano 
+porno 
+ac/dc 
+sex 
+settlers the game 
+hentai 
+hentai 
+maura 
+hentai 
+hentai 
+hentai school 
+robin trower 
+hentai rape 
+immoral sisters 
+immoral sisters 
+hentai 
+hentai rape sex 
+hentai rape 
+hentai sex 
+rape hentai 
+schoolgirl hentai 
+hentai movie 
+immoral sisters 
+schoolgirl sex 
+schoolgirl sex 
+sex 
+hentai 
+the bourne ultimatum 
+the office 
+the bourne ultimatum 
+bangles 
+harry potter 
+hot 
+busty 
+sigur ros 
+search files, youtube and liveleak 
+demonoid 
+anal 
+grillo 
+bukkake 
+dummies 
+torrent 
+bittorrent 
+faccial 
+faccial 
+facial 
+bittorrent 
+xxx 
+dummies 
+xxx 
+xxx 
+facial 
+facial 
+facial 
+facial 
+torrent 
+torrent 
\8f�¢é\87\91å\88\9a 
\8f�¢é\87\91å\88\9a 
+24 
+24 
+24s06e17 
+24s06e17 
+28 weeks later 
+24 
+24 
+24 
+24 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+mr woodcock 
+shoop da woop 
+weeds 
+weeds season 1 
+mac os x 
+harry potter mac 
+harry.potter mac 
+harry potter 
+http://br.youtube.com/watch?v=-jmgjdesjwy 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/-jmgjdesjwy"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/-jmgjdesjwy" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+bionic woman 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/-jmgjdesjwy"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/-jmgjdesjwy" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+http://br.youtube.com/watch?v=-jmgjdesjwy 
+styx 
+billy squier 
+complete discography 
+greatest hits 
+bourn 
+bionic 
+bionic 
+transformers 
+jericho 
+teen 
+asian 
+elton john 
+santana 
+coldplay 
+coldplay 
+bleach 
+mysql 
+coldplay 
+elton 
+wicked 
+burma 
+vivid 
+vivid 
+inernet 7 
+bittorrent 
+dummies 
+bittorrent 
+kvcd 
+run fat boy 
+matrix 
+prova 
+girl 
+prova 
+aa 
+ab 
+free energy 
+asian 
+mp4 
+.mp4 
+.mp4 
+fossett 
+leave britney alone 
+japanese 
+a-ha 
+tit 
+tits 
+asian 
+latina 
+maxed out 
+algebra 
+algebra 
+course 
+course 
+1984 
+pokerface 
+tits 
+fix 
+asdlfkjs 
+omario 
+bob singlar 
+mina 
+mia martina 
+porno 
+musica 
+ipaq 
+you kill me 
+you kill me 
+elephants 
+knocked up 
+the 11th hour 
+fataal 
+the 11th hour 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fossett 
+fossett 
+legal torrents 
+america's next top 
+van der graaf 
+iceclimbing 
+kiwi flight 
+fossett 
+pes 2008 
+fossett 
+anderson 
+eurosong 
+gay 
+gay 
+fantastic four 
+luba hegre 
+tina hegre 
+neverwinter 
+tina 
+virtual 
+bionic woman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mediadefender 
+24 
+lexx 
+prison break 
+24 
+morrissey 
+smiths 
+ayo 
+dj sy 
+my bum 
+mylene farmer 
+mylene farmer 
+cellfactor 
+cellfactor 
+mylene farmer 
+mylene farmer 
+hold you 
+sublisoft 
+dylan 
+conan 
+nova 
+atb hold you 
+bill moyers 
+atb hold you 
+cum 
+atb hold you 
+xxx 
+history of disbelief 
+tony rice 
+atb hold you 
+atb hold you 
+atb hold you 
+atb hold you 
+atb hold you 
+atb hold you 
+dylan 
+hold you 
+dylan 
+atb 
+jenna jameson 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hold you 
+robin trower 
+robin trower 
+linkin park 
+hold you 
+hold you 
+hold you 
+spanish 
+hold you atb 
+hold you atb 
+hold you atb 
+spanish 
+linkin park 
+you tube 
+atb 
+simpson 
+simpson 
+mom 
+l 
+a 
+a 
+bionic woman 
+lily 
+pontiac gto 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+van halen 
+search files, youtube and liveleak 
+mr. brooks 
+mr. brooks 
+eastern promises 
+search files, youtube and liveleak 
+lilly thai 
+dragonball z 
+dragonball z 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball 
+dragonball z 
+gto 
+metalocalypse 
+tony moser 
+indesign 
+indesign 
+smartdraw 
+kraftwerk 
+milton ericksson 
+milton erickson 
+hypnos 
+search files, youtube and liveleak 
+milton 
+diner spinner 
+the 11th hour 
+half life 
+joe rogan 
+de curtis 
+medioevo 
+tanja savic 
+tanja savic 
+tanja savic 
+ita 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+film 
+powerthirst 
+powerthirst 
+guns'n roses 
+d'alessio 
+aerosmith 
+ramazzotti 
+grignani 
+war 
+grignani 
+grignani 
+gianluca grignani 
+venditti 
+music maker 
+ramazzotti 
+powerthirst 
+heather 
+tigirl 
+tgirl 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+acdc 
+acdc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gianluca grignani 
+search files, youtube and liveleak 
+xxx 
+gianluca grignani 
+gianluca grignani 
+gianluca grignani 
+3gp hard 
+3gp xxx 
+gianluca grignani 
+vasco 
+gianni celeste 
+gianni togni 
+2007 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+dvd rip ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tom tom 
+acrobat 
+ad-aware 
+kenner 
+kinzie kenner 
+photo edit 
+photoshop 
+rape bride 
+rape 
+rape bride 
+bestman bride 
+bride 
+incest 
+incest drug 
+incest rape 
+incest 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rape 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dolly parten 
+celine dion 
+search files, youtube and liveleak 
+sandra und andres 
+rape 
+totò 
+veritas backup 
+rape 
+rape 
+rape 
+jennifer lopez 
+britney rears 
+britney spears 
+search files, youtube and liveleak 
+policy 
+policy 
+the policy 
+de filippo 
+borat 
+eureka 
+age of empire 
+bionic woman 
+age of empire 
+search files, youtube and liveleak 
+le settimana 
+a109 
+search files, youtube and liveleak 
+troisi 
+asian 
+tia tanaka 
+blowjob 
+asian blowjob 
+blowjob 
+jap 
+jap 
+eragon 
+jap 
+jap 
+rape 
+public 
+eragon 
+sex 
+sex 
+sex 
+porn 
+amateur 
+flag 
+gils 
+girls 
+girls 
+girls 
+girls 
+sheen and rockets 
+sheena and rockets 
+sheena 
+hentai 
+sex tape 
+sims 2 
+photoshop 
+lucky star 
+lucky star 
+ratatouille 
+adobe cs3 
+zetsubou 
+lucky star 
+ultraman mebius 
+house 
+search files, youtube and liveleak 
+vivaldi 
+purcell 
+russian girls 
+rocco 
+pick up artist 
+lucky star 
+lucky star raw 
+zetsubou 
+zetsubou sensei 
+debian 
+search files, youtube and liveleak 
+debian etch 
+debian iso 
+powerpc linux 
+stalin 
+silvia 
+tt5 
+silvia 
+star club 
+rebecca 
+anarchy 
+anarchy 
+blowjob 
+burst city 
+teacher 
+russia 
+silvia saint 
+hellsing 
+hellsing 
+hellsing raw 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+squirt 
+little april 
+little april 
+teen 
+wet pussy 
+vmware 
+knoppix 
+pc88 
+ayayo 
+ayayo 
+negima 
+negima 
+luciano pavarotti 
+nerima 
+resident evil 
+chechen 
+wet pussy 
+jessica rizzoi 
+jessica rizzoi 
+jessica rizzo 
+jessica rizzo 
+jessica rizzo 
+porno 
+helvis 
+elvis presley. 
+elvis presley 
+bolin 
+bolin 
+linux 
+free energy 
+austin kincaid 
+resident evil 
+codegear 
+killing 
+spears 
+aha 
+dance hall days 
+linux 
+titans 
+bolin 
+underage 
+iphone 
+teen 
+teen cum 
+cubase 
+freaks of nature 
+freaks of nature 
+teen cum 
+freaks of nature 
+freaks of nature 
+freaks of nature 
+freaks of nature 
+bolin 
+monster cock 
+moray 
+moray valve 
+moray valve 
+freaks of nature 
+madonna 
+madonna 
+search files, youtube and liveleak 
+south park 
+mickey mouse 
+search files, youtube and liveleak 
+stargate sg1 
+stargate sg1 
+stargate sg1 
+stargate sg1 
+black cock 
+avast 4.7 professional 
+avast 4.7 professional 
+avast 4 
+eastern promises 
+desi 
+leviation 
+search files, youtube and liveleak 
+
+animal 
+ana ivanovic 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+emily 
+emily 
+emily18 
+tesla coil 
+induction coil 
+dream theater 
+rainbow 
+kicking coil 
+hookup wire tesla 
+eros ramazzotti 
+bangbros 
+dvd rip 
+del amitri 
+madonna 
+madonna 
+erasure 
+erasure 
+erasure 
+erasure 
+joni mitchell 
+erasure 
+è¨ï¿½Â\97æ\88\91å\97\8e 
+james bond 
+james bond 
+james bond 
+james bond 
+piratebay 
+1408 
+ubuntu 
+jenna 
+prison break 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+marini 
+falchi 
+topless 
+parietti 
+scarlett 
+search files, youtube and liveleak 
+napoli 
+adobe 
+japan 
+syperman 
+matlab 2007a 
+matlab 2007a 
+xxx movies 
+jacky cheung 
+high school musical 
+füchse 
+absolute beginner 
+stranglehold 
+search files, youtube and liveleak 
+nadine 
+ad-aware 
+avg 
+scarlett 
+ellen 
+ellen 
+brass 
+tinto brass 
+grace 
+tirol 
+tirol 
+britney 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pc game 
+pc game 
+tiziano ferro 
+xxx 
+dunstan 
+fifa 
+fifa psp 
+psp fifa 08 
+moric 
+as marcianas 
+as marcianas 
+as marcianas 
+zeze di camargo e luciano 
+niplpe 
+nipple 
+heroes ita 
+heroes ita 
+angel 
+angel perverse 
+perverse 
+ashlynn 
+handjob 
+tiesto 
+almodovar 
+ita 
+a94ebd82a188beeac84272cc6e44762f5b3e7292 
+http://inferno.demonoid.com:3389/announce 
+indesign cs2 mac 
+search files, youtube and liveleak 
+crash test 
+chienese crash test 
+medio 
+underage 
+adobe indesign 
+adobe indisign 
+clapton 
+300 
+om 
+om shanti 
+chal 
+mountains of makkah 
+trailers 
+trailers 
+patner 
+war 
+lula 
+battle 
+tycoon 
+medieval 
+age of 
+vista 
+paganini 
+paganini 
+300 
+wagner 
+zuppy 
+. 
+ita 
+333 
+battle 
+battle 
+makkah 
+ling 
+king 
+corso 
+corso 
+corso 
+desi 
+sail 
+ubuntu 
+gta cops and gangsters 
+300 
+wi-fi 
+gta 
+wi 
+print 
+rise 
+300 
+knight 
+lula 
+napoletana 
+trans 
+grey 
+battle 
+star wars 
+ita 
+al ataque 
+transformers 
+miss potter 
+transformers 
+gatos que hablan 
+blazing angels 2 
+blazing angels 2 
+tits 
+jan smit 
+f16 
+into the wild 
+foo fighters 
+death on the baltic 
+foo fighters 
+placebo 
+scarlett 
+simpsons 
+resident evil 
+scarlett 
+resident evil german 
+resident evil 3 
+email 
+get high 
+scarlett 
+männer wie wir 
+容祖兒 
+proxyway 
+avg antispyware 
+fanfare 
+fanfare 
+haanstra 
+madonna 
+novarip 
+vibrazioni 
+vibrazioni 
+madonna 
+blazing angels 2 
+collin mcrea 
+collin mcrea dirt 
+colin mc rea dirt 
+dtm race driver 3 
+neutron 
+big trouble 
+michael jackson janet 
+come and get it 
+2pac 
+2pac.mp3 
+changes.mp3 
+changes.mp3 
+blazing angels 2 
+transformes 
+transformers 
+oceans 13 
+halo3 
+halo 3 
+games 
+test 
+flyboys 
+flyboys 
+flyboys 
+deutsch spanisch kurs 
+deutsch spanisch 
+spanisch 
+501 verbs 
+501 
+vista 
+spanish verbs 
+monstertruck 
+fitness 
+nederland in beweging 
+travler 
+traveler 
+501 spanish verbs 
+fitness eurosport 
+legia 
+winrar 
+hamilton 
+heroes 
+trans 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xpunlimited 
+eastern promise 
+no results 
+notpresentonanysystemfile 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666
666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch1111111111111111111111111111111112222222222222222222222222222222222222222333333333333333333333333333333333333333344444444444444444444444444444555555555555555555555555555666666666666666666666666666
66666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666 
+alive 
+xxx 
+indian movies 
+big black 
+big black 
+kean 
+katja kean 
+beethoven gilels 
+katja kean 
+young 
+hamilton 
+david hamilton 
+parker posey 
+1000 
+gay boy 
+adobe 
+milf 
+adobe 
+adobe indesing 
+adobe indising 
+dreamweaver cs3 
+dreamweaver cs3 
+adobe 
+esc 1990 
+terminator 
+terminator 
+blond 
+open source 
+finger 
+masturbate 
+esc 1989 
+database 
+adobe 
+slutty hot 
+adobe indesing 
+adobe indesing 
+adobe indesing 
+slutty 
+black 
+desperate 
+eurowings 
+adobe cs3 
+redacted 
+opgezwolle 
+scrubs 
+test 
+paprika 
+scrubs 
+scrubs 
+scrubs 
+scrubs scrubs 
+scrubs scrubs 
+doors 
+animal sex 
+horse 
+shemale 
+green 
+horse dick 
+wife 
+hairy 
+hairy 
+search files, youtube and liveleak 
+terry tate 
+search all files 
+terry tate 
+terry tate 
+little children 
+kusturika 
+mr brooks 
+click 
+click 
+click 
+non siamo soli 
+ihra drag racing 
+fados carlos saura 
+preg 
+preggo 
+preggo 
+search files, youtube and liveleak 
+kanye west 
+nelly furtado 
+nelly furtado 
+juno 
+little miss sunshine 
+justim timberland 
+justim timberland 
+justim timberland 
+justim timberland 
+pimsleur 
+woodwork 
+melayu deli 
+bob baumeister 
+search files, youtube and liveleak 
+ju 
+armands 
+armands 
+armands 
+ju 
+ju 
+armands 
+armands 
+plaquinha magica 
+calling 
+plaquinha magica 
+the caling 
+the calling 
+the calling 
+the calling 
+maksim 
+heleem 
+heleem 
+filmes 
+maksim 
+dj pagal 
+dj pagal 
+maksim 
+filmes 
+maksim 
+search files, youtube and liveleak 
+sex 
+maksim 
+maksim 
+deja.vu 
+deja 
+james blunt 
+nove settimane e mezzo 
+divx 
+search files, youtube and liveleak 
+guitar 
+il campione 
+il campione 
+il campione 
+guitar lessons 
+guitar lessons 
+guitar lessons 
+die siedler vi 
+il campione divx 
+divx il campione 
+isso pro 
+fifa 
+isspro 
+isspro evolution 
+gina 
+pro evolution 
+santana 
+die siedler vi 
+divx 
+divx ita 
+house 
+linux 
+sopranos 
+hoi 
+faceculture 
+deja 
+faceculture jannes 
+faceculture 
+public nudity 
+jannes+faceculture 
+last legion 
+* 
+gay massage 
+gay massage 
+gay massage 
+massage 
+gay 
+naturist family 
+nudist naturist family 
+nudist family 
+nude family 
+naked family 
+gay dad 
+nude family 
+nudist family 
+nudist family 
+naturist nudist family 
+menino 
+interracial 
+prison break s03e03 
+prison break s03e04 
+search files, youtube and liveleak 
+jazz 
+bind 9 
+bind9 
+o'reilly 
+corel paintshop pro 
+norton internet security 
+vetorizar corel 
+renato zero 
+renato zero 
+.avi 
+big 
+take that 
+vetorizar corel 12 
+vetorizar 
+heros ita 
+heros i 
+heros 
+heros ita 
+robbie 
+james blunt 
+high school musical 2 
+james blunt 1973 
+lafee 
+pink floyd 
+corel 12 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+corel 12 vetor 
+vetor 
+video 200 
+video 2000 
+les 
+ls 
+heros ita 
+visual 
+visual studio 
+britney 
+nude cel 
+harry potter 
+one three hill 
+snd-webcam.monitor.3.48 
+webcam.monitor.3.48 
+webcam.monitor.3.48 
+webcam 
+webcam 
+lindsay lohan 
+lohan 
+lohan 
+3js 
+ilahi 
+3j's 
+het nieuws 
+itil 
+midnight club 2 
+midnight club 2 
+midnight club 2 
+smallville 
+itil 
+itil 
+age of empires iii: the asian dynasties 
+naked news 
+smallville 
+smallville full 
+amara slide shows builder 
+topless 
+amara slide shows builder 
+amara slide shows builder 
+ich und ich 
+mango 
+mature 
+mature porn 
+mature porn 
+mature porn 
+porn 
+search files, youtube and liveleak 
+lesbian 
+lesbian 
+naked news 
+topless 
+paris hilton 
+search files, youtube and liveleak 
+porno 
+roff 
+roff 
+nero 
+ubuntu 
+desperate hosewive 
+desperate housewive 
+desperate housewive s3 fr 
+aqu 
+desperate housewive s3 
+hits 
+naruto fr 
+naruto 
+narut 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+linux xp 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+linux 
+video 
+video xxx 
+video xxx 
+7.10 final 
+7.10 final 
+xxx 
+7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+xxx 
+gusty 7.10 
+parol pou ri 
+parol pou 
+parole pou 
+morne a l'eau 
+deja 
+guadeloupe 
+search files, youtube and liveleak 
+lv 
+black 
+wga 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search all files 
+search all files 
+300 
+the simposon ita 
+search files, youtube and liveleak 
+salsa 
+madonna 
+die hard 4 
+ratatouille 
+ratatouille german 
+binic woman 
+bionic woman 
+paris 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+journeyman 
+pirates 
+pirates worlds 
+magik 
+jaguar 
+milf 
+new 
+nokia 
+converter 
+ulead 
+jim carrey 
+jim carrey 
+u-lead 
+porn 
+carrey 
+[2007] 
+popcap 
+nl 
+discography 
+heroes 2x01 
+heroes 2x01 
+prison break 3x01 
+bourne 
+search files, youtube and liveleak 
+xxx 
+xxx 
+fat women 
+fat womens 
+fat 
+fat 
+fat 
+room 1408 
+ana reverte 
+dvd5 
+dvd5 ita 
+ita dvd5 
+nude 
+prison break 
+search files, youtube and liveleak 
+ben amin 
+ben amin 
+ben amin 
+sherine 
+anna vissi 
+sam asadi 
+lilian haddad 
+katia abbas 
+may matta 
+warwa 
+najwa sultan 
+najwa 
+soleil alegria 
+soleil alegria 
+alegria 
+chevel 
+mazicana 
+boys 
+oscar athie 
+oscar athie 
+oscar athie 
+pasteles verde 
+los pasteles verde 
+los iracundos 
+sex 
+jourey 
+jourey 
+santo & yohnny 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+medieval 
+paulina rubio 
+paulina rubio 
+paulina rubio 
+thalia 
+rihanna 
+rihanna 
+rihanna 
+rihanna 
+rihanna 
+cumshot 
+cumshot 
+cumshot 
+stargate atlantis 
+stargate atlantis ita 
+stargate atlantis ita 
+stargate atlantis ita 
+stargate atlantis 
+ghost 
+fantasmi 
+fantascienza 
+ub40 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+suriname 
+ita 
+lamu 
+sport 
+live 
+pompino 
+tv 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+suriname 
+suriname 
+suriname 
+ita 
+search files, youtube and liveleak 
+live sport 
+live tv 
+pornography 
+dutch subs 
+dennis p 
+hand job 
+chuck und larry 
+pornography 
+porno 
+ani di franco 
+ani di franco 
+spellbound 
+spellboundance 
+mauro astolfi 
+suriname 
+tori amos 
+tori amos 
+tribler 4.1.4 
+tribler 
+tribler 
+tribler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+superbad deutsch 
+superbad german 
+superbad 
+zero 
+superbad ger 
+ascensore per l'inferno 
+mauro astolfi 
+mauro astolfi 
+dr house 4x 
+aap 
+aap 
+aap 
+aap 
+die 
+die 
+die 
+video erotici 
+erotismo 
+die 
+hustler 
+xxx 
+pthc 
+preteen 
+kinderficker 
+kinderficker 
+könig der wellen 
+hero 
+heroes 
+hindi 
+ugly betty 
+johnny gaddar 
+johnny gaddar 
+sex 
+die 
+antiarp4.3.1 
+die 
+antiarp4.3.1 
+antiarp 
+anna karina 
+hallo jelle 
+green wing 
+poetry 
+spoken words 
+spoken word 
+audio book 
+couple 
+hardcore 
+black 
+group 
+group 
+transfoermer 
+transformers 
+ger 
+group 
+simpsons 
+kinderliedjes 
+kerst 
+la ink 
+linux 
+police 
+la ink 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+la ink 
+la sposa cadavere 
+die 
+die 
+la sposa cadavere 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+dashboard 
+dashboard 
+bellucci 
+kiss 
+kiss 
+kiss 
+kiss 
+kiss 
+kiss 
+kiss 
+search files, youtube and liveleak 
+all 
+kiss 
+pinnacle studio 
+tiener 
+teen 
+cum 
+southpark 
+southpark 
+girlfriend 
+girlfriend 
+ex 
+live leak 
+coelers 
+horror 2007 divx 
+wraak 
+truhutje 
+tietjes 
+boobs 
+lotte 
+tietjes 
+wraak 
+divx horror 2007 
+voyeur 
+divx horror 
+divx ita horror 
+latex 
+latex 
+search files, youtube and liveleak 
+smackdown vs raw 2008 
+porno 
+svr 2008 
+alles is liefde 
+lucha libre 
+lucha libre 
+lucha libre 
+lucha libre 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+latex 
+latex 
+smackdown vs raw 2008 
+dvd 
+close combat 
+kiss 
+rubber 
+him 
+spanking 
+kiss 
+simpsons 
+wwe 
+wwe smackdown vs raw 2007 
+wwe smackdown vs raw 2008 
+amateur 
+ku 
+kut 
+slikken 
+sperm 
+teen 
+wwe smackdown here comes the pain 
+amateur xxx 
+die 
+lost 
+transformer 
+transformer 
+lost 
+lost s04 
+madona 
+fetish 
+lyx 
+lost s04 
+adobe indisign 
+game 
+game 
+tombraider 
+commando 
+dreamwreaver cs3 
+dreamwreaver 
+dreamwreaver 
+adobe 
+serj tankian 
+to feggaraki 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stardust 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+daron malakian 
+serj tankian 
+stardust 
+mika 
+mika 
+akon 
+serj tankian 
+die 
+to feggaraki 
+to feggaraki 
+californication 
+to feggaraki 
+halo 2 
+halo 2 
+greece 
+desparate housewives 
+transformer 
+school 
+mango 
+to feggaraki 
+mango 
+mango africa 
+to feggaraki 
+tiziano ferro 
+adaware 
+nino d angelo 
+harry potter 
+senza giacca e cravatta 
+autodesk autocad 
+autodesk autocad lt 
+gemelli diversi 
+insieme 
+dj francesco 
+o'reilly 
+o'reilly 
+o'reilly 
+o'reilly 
+salta 
+gigi d alessio 
+bob seger 
+dove sei 
+gigi finizio 
+bob seger 
+bob seger 
+wwe 
+ninananna 
+ni na nanna 
+alabama 
+alabama 
+alabama 
+alabama 
+office 2003 br 
+battlestar galactica 
+ancilia 
+cat 
+cable guy 
+aero flux 
+surf up 
+ww2 
+sex4idiots 
+jordenvandijk 
+the crow 
+hot action cofee 
+counter strike 
+scarface 
+here comes the pain 
+jackass the game 
+smackdown just bring it 
+smackdown vs raw pc 
+fever for the flava 
+stuntman ignition 
+stuntman ignition game 
+erot 
+here comes the pain game 
+smackdown vs raw 2008 game 
+pro evolution soccer 2008 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+she male 
+rise and fall 
+pro evolution soccer 2008 game 
+hi dear delaila 
+hi dear delaila 
+sex 
+dear delaila 
+education 
+dear 
+dear 
+dear 
+dear 
+dear mr president 
+dear mr president 
+once 
+fetish 
+once john carney 
+dear delaila 
+dear delilah 
+dear delilah 
+jane austen 
+once john carney 
+once john carney 
+there delilah 
+hey there delilah 
+hey dear delilah 
+scooter 
+special d 
+old harrys game 
+cum drinking 
+clips 
+soriety 
+soriety 
+hazing 
+hazing 
+paris hilton 
+little children 
+eyes wide shot 
+nod 32 ita 
+search files, youtube and liveleak 
+sean kingston 
+sean kingston 
+sean kingston 
+ub40 
+50cent 
+50cent 
+50cent 
+50cent 
+50cent 
+search files, youtube and liveleak 
+meeuwis 
+fogerty 
+sting 
+sting 
+sting 
+sting 
+sting 
+sting 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tull 
+tull 
+tull 
+tull 
+vicky love 
+vicky love 
+vicky love 
+windows 
+eminem 
+a 
+incubus i miss you 
+sting 
+dog 
+search files, youtube and liveleak 
+cumshot 
+zara whites 
+the unit 
+nod 32 
+britney 
+alex murdoch 
+orange sky 
+dalla 
+lisa 
+lisa 
+elisa 
+elisa 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+negramaro 
+negramaro 
+sig oppa 
+big oppa 
+dj 
+him 
+fear and loathing in las vegas 
+discography 
+fear and loathing in las vegas spanish 
+fear and loathing in las vegas espanol 
+fear and loathing in las vegas 
+the red violon 
+the red violin 
+the red violin 
+the red violin 
+the red violin 
+search files, youtube and liveleak 
+le violon rouge 
+dirty dancing 2 
+windows 
+kama sutra: a tale of love 
+the motorcycle diaries 
+search all files 
+him 
+the motorcycle diaries 
+the life is beautiful 
+v for vendetta 
+spy game 
+pontello 
+faust 
+dildos 
+candy 
+casablanca 
+bonn 
+pressing catch 
+cidade de deus 
+xbox 
+taxi driver 
+big fish 
+pedofilia 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+amores perros 
+memoirs of a geisha 
+xxx 
+osx 
+la cage aux folles 
+witch hunter 
+aps.net 
+asp.net 
+asp.net 
+malua 
+elvis 
+elvis 
+kinky 
+opensuse 10.3 
+frans bouwer 
+adobe 
+adobe bridge keygen 
+adobe bridge 
+adobe bridge 
+adobe bridge 
+tv west 
+ass collector 
+search files, youtube and liveleak 
+tv west 
+yes-r 
+yes-r 
+aziz el berkani 
+search files, youtube and liveleak 
+sean kingston 
+pokertracker 
+latex 
+cable guy 
+battlestar galactica 
+windows 2003 
+ecstasy rising 
+bianca buchamp 
+windows 2003 r2 
+ancilia tilia 
+lolita 
+lost s04 
+sex 
+russian 
+lolita 
+gigi finizio 
+metallica 
+gigi 
+dire straits 
+dire straits 
+ass 
+fist 
+pre-teen 
+preteen 
+childsex 
+teensex 
+interacial 
+interracial 
+interracial 
+fall of troy 
+surf's up: i re delle onde 
+shakespeare 
+nude 
+dfx 
+gay wrestling 
+gay sex wrestling 
+gay sex wrestgay sexling 
+gay sex 
+gay sex 
+muscle wrestling 
+booty 
+muscle wrestling 
+muscle wrestling 
+transformers 
+muscle wrestling 
+gay hump 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+southpark 
+southpark 11 
+la ink 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+pornografia infaltiles 
+24 hours 
+24 hours 
+adamo 
+adamo 
+francais 
+fr 
+babyface 
+slightly stoopid 
+xxx 
+xxx asian 
+dvd fab platinum 
+xxx asian teen 
+xxxjapanteen 
+xxx japan teen 
+russian incest 
+nina harley 
+nina hartley 
+sensual 
+russian 
+russian xxx 
+russian incest 
+asian teen 
+nicky 
+zett 
+gurren 
+search files, youtube and liveleak 
+weezer 
+winrar 
+coitus 
+sexy 
+pthc 
+incest 
+r@ygold 
+interracial sex 
+interracial 
+caramel 
+nokia 6630 
+metallica 
+pthc 
+kinder 
+kinderporn 
+svay pak 
+cambodia 
+passwordlist 
+lolita 
+war 
+mpg 
+californiacation 
+rail simulator 
+emma shapplin 
+learning 
+learning 
+rush hour 
+barbie 
+ita i soliti ignoti 
+i soliti ignoti 
+learning 
+lara fabian 
+lara fabian 
+learning 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+il gladiatore 
+web 
+johnny 
+how i met your mother 
+how i met your mother s3e1 
+how i met your mother 301 
+how i met your mother 301 
+how i met your mother 
+music 
+wintersun 
+penn & teller 
+
+johnny mneumonic 
+white girl dancing 
+search files, youtube and liveleak 
+esx 
+search files, youtube and liveleak 
+tbs 
+tbs 
+faceculture jannes 
+diamonds are forever 
+james bond 
+ps2 
+search files, youtube and liveleak 
+porno 
+porno 
+britney 
+dutch 
+fantastic four 
+windows 
+ocean 13 
+ubuntu 
+kaspersky 
+autoccad 
+autocad 
+50 volte il primo bacio 
+colonna sonora 50 volte il primo bacio 
+50 volte il primo bacio colonna sonora 
+50 volte il primo bacio colonna sonora 
+i colori dell'anima 
+i colori dell'anima 
+50 volte il primo bacio 
+50 volte il primo bacio 
+selen 
+selen 
+selen 
+selen 
+rocco 
+pooh 
+pooh in concerto 
+bokito 
+il trionfo dell'amore 
+manuale d'amore 
+ls 
+windows 
+dj 
+dalla 
+video angels 
+james bond jr 
+dragon ball 
+dragon ball digitally remastered 
+dragon ball remastered 
+dragon ball remastered 
+dragon ball remastered 
+james bond 
+never say never again 
+psp 
+james bond .cbr 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+.cbr 
+ita 
+cbr 
+bond cbr 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cbr 
+007 cbr 
+spy cbr 
+xbox360 
+free running 
+free running 
+free running 
+free running 
+xbox360 
+never say never again 
+big heat 
+big booty white girls 
+never say never again 
+warcraft 
+big booty white girls 
+diamonds are forever 
+the kingdom 
+creampie 
+breaking benjamin 
+das fünfte element 
+diva 
+diva song 
+diva 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bronskbeat 
+bronskibeat 
+bronski 
+search files, youtube and liveleak 
+zz top 
+tvu:\\53060 
+beata dubasova 
+karmen 
+dance 
+pavarotti 
+aloha shooby-doo 
+hight school musical 2 
+hight school musical 2 
+hight school musical 2 
+madonna 
+dalla 
+pluto 
+norton360 
+norton360 
+norton 360 
+jpg 
+porno 
+xxx beast 
+beastiality 
+beastiality 
+beastiality 
+beastiality 
+incest 
+achi 
+a chi 
+xxx horse 
+sidera 
+sidera 
+ridera 
+diavolo biondo 
+diavolo biondo 
+diavolo biondo 
+spiderman 
+axxo 
+tits 
+dite a laura che l amo 
+dite a laura che l amo 
+boobs 
+di te a laura che l amo 
+dirte laura che l amo 
+laura che l amo 
+franco 1 franco4 
+ho scrito tamo sulla sabia 
+ho scrito tamo sulla sabia 
+ho scritto tamo sulla sabia 
+ho scritto tamo sulla sabbia 
+ho scritto t amo sulla sabbia 
+tanta voglia di lei 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gino d angelo 
+nino d angelo 
+senza giacca e cravatta 
+i ricordi dell cuore 
+i ricordi dell cuore 
+ricordi dell cuore 
+amedeo minghi 
+i ricordi del cuore 
+ricordi del cuore 
+clowns und helden 
+und helden 
+ndw 
+neue deutsche welle 
+hackers 
+silicon valley 
+mindy 
+mega mindy 
+mega mindy 
+k3 
+rata 
+rata nl 
+rata dutch 
+junior song 
+das modul 
+osx 10.5 
+mac os x 10.5 
+mac os x 
+xbox 
+hitzone 
+norton 
+caligula 
+caligula 
+caligola 
+the silence of the lambs 
+prison break 
+prison break 1 
+prison break 1 episode18 
+prison break 1 episode17 
+prison break 1 
+prison break 1 
+prison break season 1 
+prison break flight 
+crocodile dundee 
+prison break go 
+prison break 21 
+crocodile dundee 
+g-unit 
+g-unit 
+g-unit 
+the office 
+asia 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lesbo 
+tenacious d pick 
+tenacious d pick 
+tenacious d pick 
+sex 
+g-unit 
+tusenee 
+anita blonde 
+porno 
+sex 
+spiderman 3 
+kitchen nightmares 
+mythbusters 
+gpethes erben 
+goethes erben 
+goethes erben 
+asia 
+heatha hunter 
+heather hunter 
+teen 
+resident evil: extinction 
+könige der wellen 
+wächter des tages 
+planet terror 
+resident evil: extinction 
+mai dire candid 
+search files, youtube and liveleak 
+fifa 
+porno 
+vacanta mare 
+uitzendinggemist.nl 
+uitzendinggemist.nl 
+beastiality 
+vacanta mare 
+windows xp 
+nero 7.10 
+nero 
+search files, youtube and liveleak 
+private 
+trivial pursuit 
+search files, youtube and liveleak 
+you tube 
+the kingdom nl 
+the kingdom 
+sims 
+wedding 
+wedding date 
+wedding date 
+wedding date 
+koksijde 
+koksijde 
+sims 
+prison break 
+nepali 
+il buio nell'anima 
+300 
+300 ita 
+hed kandhi 
+bourne ultimatum 
+switch 
+dance 
+hits 
+ebony feet 
+freeloader 
+mika 
+regi 
+xxx 
+moskovskiy gigolo 
+moskovskiy gigolo 
+regi 
+dance 2007 
+muschi 
+muschi 
+fotze 
+sex 
+xbox 
+xbox fifa 
+fifa 2007 
+fifa 2007 xbox 
+desperate 3 
+desperate 3 stagione completa 
+tool 
+desperate housewives 3 stagione completa 
+desperate housewives 3 stagione 
+desperate housewives 3 
+desperate housewives 3 ita 
+tool 
+desperate housewives 3 ita 
+desperate housewives 3 
+amatuer 
+amateur 
+lesbian 
+desperate housewives 3 ita 
+desperate 3 ita 
+sandra 
+desperate pioggia 
+korn 
+beatles 
+beatles 
+mega mindy 
+shrek 
+sports 
+ps3 
+playstation 
+naughty 
+paris letterman
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+tenacious d pick 
+letterman paris 
+pick of destiny 
+nero 
+frans bauer 
+andre hazes 
+eminen 
+eminen 
+ita 
+ita 
+search files, youtube and liveleak 
+forro 
+sex 
+mpg 
+dmg 
+andre hazes 
+andre hazes 
+andre hazes 
+andre hazes 
+andre hazes 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+muziek 
+software 
+software 
+audio 
+search files, youtube and liveleak 
+films 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+http://myspacetv.com/index.cfm?fuseaction=vids.individual&videoid=7208936 
+visio 
+teijema 
+fedex 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/niua9kozgra"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/niua9kozgra" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+boy 
+boy nude 
+boy nude 
+boy nude 
+young boy 
+this boy's life 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+video 
+spanish 
+gay porn 
+autos locos 
+8 
+shemale fuck guy 
+pedo boy 
+surfs up 
+dawn patrol 
+game plan 
+wax korea 
+wax 
+jesse james 
+system of a down 
+system of a down 
+wax 
+wax korea 
+wax korean 
+radio head 
+papa roach 
+papa roach 
+ratatouille 
+indonesia sex 
+1200 
+til lies do us part 
+search files, youtube and liveleak 
+bearforce1 
+southpark 
+tarzan x 
+seether 
+last legion 
+scientist 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bjork 
+canucks 
+search files, youtube and liveleaklost 
+lost 
+lost so3e17 
+search files, youtube and liveleak 
+ac/dc 
+the office 
+the office the injury 
+http://torrents.thepiratebay.org/3725005/top_1000_pop_hits_of_the_80s_(4.32gb)__.3725005.tpb.torrent 
+search files, youtube and liveleak 
+dexter 
+denis chamber 
+threes company 
+the prisoner 
+search files, youtube and liveleak 
+sexy 
+teens 
+wil ferell 
+sexy 
+barbara dennerlein 
+pablo neruda 
+poesie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hungarian 
+linkinpark 
+linkin park 
+uverworld 
+uverworld 
+uverworld 
+teen 
+stanlio 
+fantasmi 
+stanlio 
+stanlio 
+teen in school 
+snowboard 
+naruto 
+naruto 
+naruto folge 1 
+ich liebe dich 
+cage fighting fury 
+midi 
+ps2 
+asian 
+last legion 
+the brave one 
+the brave one dvdrip 
+last legion 
+rape 
+saw dvdrip 
+lilly the world 
+search files, youtube and liveleak 
+netball 
+sisster 
+sister 
+povia 
+superman return 
+simson 
+simson il film 
+zucchero 
+henti 
+hentai 
+gynslip 
+gymslip 
+forced th 
+forced tg 
+forced 
+femdom 
+swollow 
+swollow 
+swallow 
+nappy 
+ebony feet 
+big feet girl 
+diaper 
+diaper 
+long feet girl 
+transformed 
+pool change 
+pool 
+pool girl 
+pigtails 
+panties 
+panties 
+shopping gitl 
+shopping girl 
+shopping gitl 
+shopping girl 
+shopping girl 
+ptff 
+skirt pink 
+melua 
+feet cartoon 
+feet cartoon 
+anime feet xxx 
+kiss feet anime 
+http://www.fulldls.com/download-others-31710-test_drive_unlimited_pal_multi5_ps2dvdkudos.torrent 
+http://www.fulldls.com/download-others-31710-test_drive_unlimited_pal_multi5_ps2dvdkudos.torrent 
+chet baker 
+dvds 
+snuffx 
+snuffx xxx 
+vagina torture 
+mfx feet 
+xxx extreme 
+femdom 
+ptff 
+child feet 
+feet mature 
+feet soles 
+you are the music in me 
+mikayla feet 
+search files, youtube and liveleak 
+teacher 
+hot mom 
+search files, youtube and liveleak 
+padova 
+search files, youtube and liveleak 
+sex 
+squrting 
+squrting xxx 
+squrti xxx 
+squirt xxx 
+squirt xxx you tube 
+pumping pussy 
+pump pussy 
+mac & bumble 
+mac & bumble 
+mcdonalds 
+yanni 
+jesse cook 
+axxo 
+dvdrip 
+dvdrip 2007 
+vasco rossi 
+surf'up 
+joaquin sabina 
+k3 
+scrubs 
+search files, youtube and liveleak 
+eminen 
+eminen 
+eminen 
+eminen 
+aerosmith 
+babyshambles 
+eminen 
+forro 
+pis 
+flamengo 
+search files, youtube and liveleak 
+linkin park 
+live in texas 
+album live in texas 
+album live in texas 
+album live in texas 
+album live in texas linkin park 
+live in texas linkin park 
+linkin park 
+linkin park 
+linkin park 
+opengl 
+opengl tutorial 
+timbaland, timberlake, 50 cent 
+timbaland 
+timberland 
+timberlake 
+spears gimme 
+spears gimme gimme 
+javascript:deliverjwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny');search files, youtube and liveleak 
+jwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny') 
+jwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny') 
+pussycat 
+spacehog 
+pussycat dolls 
+tatu 
+sex 
+apple dvd studio 
+osx 
+apple 
+fish 
+german 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+soleil alegria 
+* 
+* 
+* 
+a 
+b 
+icon 
+icon 
+icon 
+icon 
+icon 
+crash moto 
+crash moto 
+precious 
+etheridge 
+latin 
+billy ocean ultimate collection 
+resturant empire 
+empire 
+batman 
+tarzan 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+* 
+* 
+moana 
+drunk 
+sex 
+night cam 
+night cam 
+spy cam 
+spacehog 
+windows vista 
+american gang 
+hits 2007 
+versuz 
+bacardi 
+new 
+cad 
+corrupção 
+corrupção 
+mix 
+steel 
+steel 
+steel 
+highschool 
+rounders 
+frankie goes 
+veronica 
+maksim 
+radio veronica 
+maksim 
+maksim 
+maksim 
+radio caroline 
+resident evil 
+nero 
+chess 
+dirt 
+symbian 
+crash 
+ghostrider 
+stunt 
+command 
+swat 
+disney 
+fatherland 
+lightness 
+disney 
+llead 
+ulead 
+code pack 
+upskirt 
+sister 
+naughty america 
+milf 
+granny 
+mature 
+older 
+search files, youtube and liveleak 
+mother 
+in law 
+spycam 
+sister 
+toilet 
+ringtone 
+realtone 
+handyman 
+battle 
+mature 
+tomtom 
+mature 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+francuski 
+regsupreme 
+swingers 
+swinger 
+hip hop hood 
+15 
+vlc 
+mature 
+mature 
+sports 
+incest 
+incest 
+sharon stone 
+sharon stone 
+autocad 
+barry white 
+winxp black 
+black xp 
+nlisa ann 
+lisa ann 
+noriko sakai 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+feet 
+ecdh 
+elliptic 
+elliptic curve cryptology 
+microsoft office 2003 czech 
+asian shemale 
+search all files 
+prive 
+pink 
+voyeur 
+search files, youtube and liveleak 
+change room 
+lockeroom 
+hellman 
+lockeroom 
+changingroom 
+showeroom 
+showe 
+shower 
+change room 
+change room 
+change room 
+hidden movies 
+hidden movies 
+handjobs 
+handjobs 
+handjobs 
+the kingdom 
+handjobs 
+handjobs 
+the brave one 
+browulf 
+the dark knight 
+30 days of night 
+dragon wars 
+imagine me & you 
+hitman 
+the invasion 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+we're getting nowhere 
+imagine me & you 
+the carter 3 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+childsex 
+animalsex 
+grote lul 
+selfsuck 
+penus 
+pissen 
+kind 
+boys 
+pink handjobs 
+fucking 
+imagine me & you 
+fucking machines 
+device bondage 
+sex submission 
+hogties 
+hogtied 
+imagine me & you 
+jumpstyle 
+search files, youtube and liveleak 
+hogties 
+hogtied 
+hogtied 
+fucking machine 
+vuze 
+feed 
+uninettuno 
+pippo 
+linux 
+heroes 
+fantozzi 
+lupin 
+sybian 
+blue chart 
+supermoto 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+foto 
+a 
+a 
+a 
+a 
+die hard 4 
+divx 
+divx 
+jetzt oder nie jodie 
+jetzt oder nie jodie 
+jetzt oder nie 
+encarta 
+ms office languages 
+ms office languages 
+ms office 
+lost 
+u2 
+ms.office.language 
+romanzo criminale 
+ms.office.language.pack.italian 
+ms.office 
+ms office 
+ts 
+mirc 
+shemale 
+asian 
+tiesto 
+tiesto 
+å°Ã¦¾¤å\9c\93 
+tiesto 
+radiohead 
+oops 
+pedo 
+tomtom 
+pedo 
+tottenham 
+mature black 
+mature black 
+mature 
+celeb 
+appleby 
+portugal 
+skate 
+davideo 
+vivid 
+lolo 
+emily18 
+de andrè 
+picasa 
+de andre 
+windows xp 
+windows vista 
+criminal minds 
+anorexia 
+i'm a boy anorexic 
+anorexic 
+boy anorexic 
+when a man loves a woman 
+reason 
+dying to dance 
+gba rom ita 
+gba rom 
+i'm a boy anorexic 
+radio hauraki 
+pirate radio 
+nina harthly
+minotaur 
+nina harthly 
+nina harthly 
+korn 
+deutsch 
+squ 
+surf's up: i re delle onde 
+spiderman 
+annie lenox 
+annie lennox 
+hd dvd 
+annie lennox 
+pic 
+ferrari 
+720p 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sport 
+hd dvd movies 
+wrestling 
+catfight 
+hd dvd 
+catfight 
+batman begins hd 
+slipknot 
+casting 
+darering 
+journey 
+poker 
+sexgame 
+assparade 
+sexgame 
+mixed fight 
+assparade 
+dww 
+fkk 
+rare silk 
+casting 
+ballet 
+nipple slip 
+erotic 
+greg lake 
+spong 
+james gang 
+barbara dennerlein 
+five finger 
+five finger 
+five finger 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+speed up my pc 
+search files, youtube and liveleak 
+shrek 
+black ps2 
+search files, youtube and liveleak 
+atreyu 
+atreyu 
+kill bill ru 
+as i lay dying 
+flybar 
+lord of the rings 
+shrek 
+kurasawa 
+clubland extreme 
+clubland 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kagemusha 
+shichinin 
+samurai 
+samurai 
+the heartbreak kid 
+tool 
+avatar 
+tool 
+xbox360 
+xbox360 halo 3 
+tool music 
+snowboard 
+xbox360 halo 3 
+xbox360 halo 3 
+prison break season 3 
+robot chicken 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+robot chicken season 3 
+boondock 
+the boondock 
+boondocks cartoon 
+franky tunes 
+kamps 
+family guy 
+search files, youtube and liveleak 
+pink floyd 
+password list 
+junior songfestival 2007 
+rapidshare 
+torrent 2.0 
+roisin murphy 
+airport tycoon 
+happy feet jumpstyle 
+force 
+the kingdom 
+over there 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search all files 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+renato zero 
+u2 
+trample 
+face 
+santana 
+trampling 
+trampling 
+trampling 
+trampling 
+trampling 
+trampling 
+trampling 
+trampling 
+trampling 
+search files, youtube and liveleak 
+bon jovi 
+visual basic 6 
+visual basic 6 
+visual basic 6 
+vb6 
+vb6 
+heroes 11 ita 
+heroes 11 ita 
+sports 
+pro evo 
+pro evo 08 
+pro evo 08 
+pro evo 08 
+porno hentai 
+airport tycoon 3 
+airport tycoon 3 
+pee 
+pis 
+666 
+pis 
+ggg 
+thompson 
+thompson 
+xp 
+pee 
+ggg 
+dvd ita 2007 
+heroes 
+heroes ita 
+mauro nardi 
+lucio battisti 
+alles 
+dvx ita 
+harry potter ita 
+herbalife 
+codegear 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+adult 
+omnipage 
+roboform 
+autocad 2008 
+roboform 
+stardust memories 
+* 
+annie hall 
+annie hall 
+microstation 
+microstation 
+dragon dictate ita 
+autocad 2008 
+winrar 
+nero 8 crack 
+piss 
+piss 
+hwk 
+hwk 
+ufs 
+prison break 
+surf's up ita 
+surf's up ita 
+surf's up ita 
+she wants revenge 
+saras soft 
+sarassoft 
+sarassoft 
+sarassoft 
+n-box 
+n-box 
+500 person sex 
+this is forever 
+in rainbows 
+500 person sex 
+csi 
+dave gahan kingdom 
+csi 4 1 
+csi 4 
+csi 6 
+ita divx 
+dave gahan hourglass 
+divx ita 2007 
+surf's up: i re delle onde 
+porn 
+beastiality 
+mac 
+anal teen 
+anal teen 
+world of warcraft 
+elan 
+madona 
+games 
+warcraft 
+sophie sweet 
+amv 
+amv 
+loli 
+wrestling 
+womens wrestling 
+tampa 
+womens pro wrestling 
+brother bear 
+bob and doug 
+search files, youtube and liveleak 
+abby winters 
+abby winters 
+nwwl 
+viv thomas 
+girlfriends films 
+twilight women 
+rsp2psharespy 
+ultimato burner 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+smallville 
+satellite tv 
+smallville 
+fifa 
+fifa 2006 
+ambient 
+winning elleven 
+cable tv 
+tv 
+nero 8 
+chama 
+internet satellite tv player 
+dubai 
+eva green 
+sexo 
+craig david 
+sexo 
+eva green 
+eva green 
+atlarge recorder 
+eva green os sonhadores 
+copo de colera 
+avg 
+vista 
+vista 
+eva green 
+eva green dreamers 
+eva green dreamers scene 
+de la nux 
+joseph alessi 
+eva green scene 
+p v de la nux 
+os 
+mac os 
+nicole graves 
+office 
+office 
+office2007 
+office2007 
+office2007 
+nicole graves 
+nicole graves 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+intocable 
+secret 
+public nudity 
+naked 
+rosenberg 
+topless 
+jimmy rosenberg 
+less than three 
+brawl 
+brawl nintendo 
+streghe 03x06 
+korea 
+diablo 
+planetes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pthc 
+ivana 
+search files, youtube and liveleak 
+pedo 
+grillo 
+francesco e chiara 
+korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+weezer 
+weezer 
+placebo 
+prince 
+prince live 
+amateur home 
+amateur home 
+amateur home 
+amateur 
+prince 
+dvd ita 2007 
+porn] 
+porn 
+dvd 
+metallica 
+metallica 
+porn 
+raped 
+black 
+raven riley 
+nicole graves 
+next door nikki 
+next door nikky 
+next door nikky 
+autodesk animator 
+next door nikki 
+next door 
+kate's playground 
+kate 
+heroes ita 
+tottenham 
+interpol 
+heros ita 
+heroes ita 
+dvd ita 2007 
+search files, youtube and liveleak 
+live 
+cum 
+sexy 
+couple 
+cleevage 
+cleevage 
+cleevage 
+elisa 
+pop 
+lavasoft 
+pop 
+the mouse hunt 
+goes mi heart 
+goes mi heart 
+goes mi heart 
+amateur 
+goes mi heart 
+goes my heart 
+goes my heart 
+notte primadegli esami 
+24 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cannonball run 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+eric johnson 
+steve morse 
+team galaxy 
+malmsteen 
+kate bush 
+queen 
+abba 
+gimme more 
+eva henger 
+ambrus 
+alessia merz 
+valeria marini 
+valeria marini 
+ls 
+ls magazine 
+ls magazine 
+valeria marini 
+15 y 
+14y 
+14 y 
+web cam teen 
+web cam preteen 
+valeria marini 
+eva henger 
+jesse cook 
+futurama 
+ambrus 
+gitarre 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fiest 
+feist 
+feist 
+anal 
+feist 
+health 
+health 
+nutritional 
+centerfold 
+centerfold 
+centerfold 
+centerfold 
+giles 
+geiles 
+jannes thialf 
+jannes thialf 
+smallville 
+smallville 6 ita 
+smallville ita 
+castellammare di stabia 
+castellammare di stabia 
+castellammare di stabia 
+castellammare di stabia 
+castellammare 
+bonni bianco 
+jill 
+jaime pressly 
+castellammare 
+prison break 
+the unit 
+metallica one 
+click 
+cameltoe 
+cameltoe 
+cameltoe 
+guns n' roses 
+snowboard 
+metalica 
+metallica 
+metallica 
+nothing else matters 
+ower there 
+over there 
+tobias regner 
+over there episode 2:roadblock duty part 3 
+i stlii burn 
+i still burn 
+lolita models 
+lolita models 
+13 y 
+eat pre 
+eat pre 
+young 
+young 
+painkiller 
+judas priest 
+dune 
+pairs skating 
+windows vista 
+dune 
+amateur 
+do you know 
+homemade 
+search files, youtube and liveleak 
+kuschelrock 
+over there episode 2:roadblock duty part 4 
+enrique iglesias 
+reign 
+adamski 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias - love to see you cry 
+enrique iglesias 
+enrique iglesias - love to see you cry 
+love to see you cry 
+love to see you cry 
+meeuwis 
+dune 
+dune 
+do you know 
+evita 
+op zoek naar evita 
+feist 
+ist 
+fist 
+fist 
+feist 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+50 cent 
+xxx 
+xxx 
+xxx 
+13 
+eminem 
+search files, youtube and liveleak 
+ipod 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+babel 
+a mighty heart 
+feist 
+taxi 
+squirt 
+hot party 
+mmf 
+mff 
+mff 
+george michael 
+iwork 
+laura pausini 
+laura pausini 
+snatch 
+fight club 
+a mighty heart 
+anal 
+breast 
+pocketwijzer 2007 
+rocco 
+breast 
+breast 
+george michael 
+haze 
+samatha 
+samantha 
+silvia 
+spanking 
+99bb 
+tia tanaka 
+99bb 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+search files, youtube and liveleak 
+foto 
+barbie sexy 
+barbie sexy 
+nomadi 
+nomadi 
+sugarbabes 
+totò 
+totò 
+xfiles 
+toto' 
+totò 
+totò 
+mollo tutto 
+harry potter 
+harry potter ita 
+sylvie vartan ma liberte 
+sylvie vartan ma liberté 
+amanda lear 
+300 
+300 iso 
+amanda lear 
+300 
+amanda lear 
+masterbation 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vasco rossi 
+poweriso 
+google earthn 
+google earth 
+film ita 
+bitdefender 
+bitdefender total security 
+transformers ita 
+sugarbabes 
+crossroads festival 
+crossroads festival 
+harry potter 
+harry potter ita 
+sugarbabes 
+vasco rossi 
+female masterbation 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+snow patrol - run 
+snow patrol - run 
+blur 
+daniel bedingfild 
+uitzending gemist 
+billy talent 
+ice baby 
+top20 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tuning car 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+feist 
+fiat punto 
+i fell good 
+i want a break fee 
+i want a break free 
+higt school musical 
+wallpapers 1280 
+punto tuning 
+wallpaper 1280 
+disco 
+prison break 
+queen 
+sex 
+pop goes my heart 
+porno 
+drifter 
+search files, youtube and liveleak 
+horror ita 
+search files, youtube and liveleak 
+twsited sisterç 
+twsited sister 
+twsted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+horror ita 
+horror ita avi 
+divx ita horror 
+divx ita horror 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+divx ita rush hour 
+search files, youtube and liveleak 
+long feet girl 
+o 
+korn 
+molotov 
+office2007 
+pop goes my heart 
+download molotov 
+download molotov 
+pop goes my heart 
+office2007 
+office2007 
+office2007 
+download molotov 
+download molotov 
+office2007 
+office2007 
+pop goes my heart 
+pop goes my heart 
+microsoft vista 
+ita 
+microsoft vista 
+microsoft vista 
+microsoft vista 
+large tongue 
+vista 
+linkin park 
+vista 
+pop goes my heart 
+chicago 
+biggest xxx 
+dvx ita 
+dvx ita 
+voice 
+snuff 
+snuffx 
+snuff xxx 
+unfaithful 
+large vagina 
+large pussy 
+nero 
+bebe long tongue 
+bebe long tongue 
+babe long tongue 
+live for speed 
+save ryan 
+search files, youtube and liveleak 
+onvert 
+convert 
+convert 
+babe long tongue 
+big dick 
+shemale 
+squirt 
+dramatic chipmunk 
+model 
+dramatic chipmunk 
+dramatic chipmunk 
+defrag 
+o&o 
+dramatic chipmunk 
+dramatic chipmunk 
+juanita 
+car 
+heroes 
+omnipage 16 
+omnipage 
+dvdshrink 
+autocad 2008 
+autocad 2008 
+omnipage 
+omnipage 
+search files, youtube and liveleak 
+heroes 
+search files, youtube and liveleak 
+
+hot 
+pop 
+big 
+blond 
+blonde 
+two flags west 
+paris hilton 
+xxx 
+it crowd 
+jenna haze 
+lisa 
+amanda peat 
+betty 
+christy 
+naughty 
+legal 
+barely legal 
+campbell 
+anetta 
+erica 
+erica 
+sunny 
+amy 
+stardust 
+vivid 
+private 
+the invation 
+the invation 
+the invation 
+barbara 
+search files, youtube and liveleak 
+brozman 
+brozman lumiere 
+brozman 
+brozman 
+lumiere 
+lumiere bro 
+tanita tikaram 
+tanita tikaram 
+tanita tikaram 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+a 
+tanita tikaram 
+tanita tikaram 
+prison break s03e04 
+heroes s03e03 
+prison break s03e04 
+anime 
+earth world 
+slevin 
+heroes 
+ita slevin 
+donnie 
+magix music maker 
+vmware 
+vmware 6 
+vmware esx 
+gangbang 
+gangbang 
+gangbang 
+gangbang 
+amsterdam 
+rodney mullen 
+search files, youtube and liveleak 
+gay 
+gay 
+search files, youtube and liveleak 
+nederland 
+hard 
+motor 
+rape 
+hard 
+porno 
+sesso 
+green 
+asia 
+heroes s02e03 
+giorgia 
+giorgia 
+giorgia girasole 
+gateway 
+giorgia girasole 
+giorgia girasole 
+katherina feet 
+manuale 
+babes xxx 
+manuale d'amore 
+babe xxx 
+babe long tongue 
+manuale d'amore 
+private gladiator 
+private gladiator 
+gladiator 
+gladiator 
+greys anatomie 
+greys anatomie 
+search files, youtube and liveleak 
+dvd 
+dvd ita 
+borden 
+long tongue 
+lizzie borden 
+un impresa da dio 
+sweet tongue 
+sweet long tongu 
+sweet long tongue 
+sweet feet 
+50 cent 
+black label 
+sixx 
+nin 
+nine 
+britney spears 
+nine 
+black label 
+nero 
+nero 8 keygen 
+[pc-full-ita] adobe cs3 illustrator, indesign, fireworks, flash, dreamweaver 
+[pc-full-ita] adobe cs3 illustrator, indesign, fireworks, flash, dreamweaver 
+[pc-full-ita] adobe cs3 illustrator, indesign, fireworks, flash, dreamweaver 
+[pc-full-ita] adobe cs3 illustrator, indesign, fireworks, flash, dreamweaver 
+foo fighters 
+german 
+adobe cs3 illustrator, indesign, fireworks, flash, dreamweaver 
+dio 
+exceed 
+britney 
+skat 
+gladiator 
+gladiator 
+adobe indesign cs3 premium 
+adobe indesign cs3 premium italiano 
+vanessa del rio 
+ddd 
+search files, youtube and liveleak 
+ita divx 
+pandora tomorrow 
+blonde 
+adobe creative cs3 premium 
+adobe creative cs3 premium italiano 
+adobe creative cs3 premium ita 
+adobe creative cs3 premium 
+adobe creative cs3 premium it 
+adobe creative cs3 premium ita 
+nena 
+nena 
+tits 
+nomadi 
+rolling stones 
+pussy 
+phoneix 
+oblivion multiplication bug 
+oblivion multiplication 
+oblivion 
+oblivion 
+oblivion 
+cumshot 
+vagina 
+berkeyley course 
+berkeyley course 
+berkeyley course 
+windows xp 
+filerecovery 3.1 
+filerecovery 3.1 
+filerecovery 3.1 
+filerecovery 3.1 
+file recovery 3.1 
+file recovery 
+filerecovery 
+file recovery 
+porn 
+gay porn 
+top 10 goals 
+one piece 
+ita 
+game pc 
+game pc 
+ita 
+spoed 
+search files, youtube and liveleak 
+game ita 
+game pc 
+search files, youtube and liveleak 
+milf drunk 
+alma chua 
+titanic 
+titanic 
+zoo 
+pc games 
+divx ita enterprise 
+divx ita enterprise 
+pc adult games 
+beastiality 
+divx ita enterprise 
+snake 
+dog 
+horse 
+enterprise ita 
+pc games 
+train 
+anal 
+rattatoulie 
+ratatouille 
+nepali 
+anal 
+black friday 
+train model 
+virutal 
+virtual 
+stardust 
+album 
+vangelis 
+syngress 
+books 
+desi 
+exdesi 
+search files, youtube and liveleak 
+cherry ghost 
+grey anatomy 
+tomtom 
+chateau 
+young harlots 
+preteen 
+animal 
+massive r&b 
+train crach 
+nepalese cultural evening 
+nepalese cultural evening 
+nepalese cultural evening 
+nepalese cultural evening 
+wah maiya 
+johnny cash john henry 
+johnny cash john henry 
+johnny cash john henry 
+prison break 
+rape 
+cherry ghost people 
+dylan dog 
+futurama spanish 
+300 
+300 
+shrek terzo 
+futurama sp 
+prison break s03e03 
+complete 
+completa 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+amateur 
+l'esorcista ita 
+xbox 360 
+l'esorcista ita 
+l'esorcista ita 
+simpson 
+los simpson 
+300 
+300 spa 
+the kingdom 
+death sentence 
+trade 
+dragon naturally speaking 
+xxx 
+dragon naturally speaking serial 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+the kingdom 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+alles is liefde 
+shoot 'em up 
+futurama spa 
+codec 
+codec 
+codec 
+codec 
+codec 
+rape 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tintin en america 
+red dragon - before the silence 
+8mm 
+feet 
+foot 
+the agency 
+the agency 
+the missing 
+johnny cash john henry 
+johnny cash cd 
+massive r & b 
+massive r & b winter collection 
+300 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+2600 
+isazi 
+activation xp sp2 
+hacker 
+webcam 
+handjob 
+brasilian 
+brasilian 
+brasilian 
+brazilian 
+harry potter 
+harry potter and the order of the phoenix 
+wwe 
+tna 
+wwe 
+xxx 
+wwe 
+wwe 
+wwe 10 
+match 
+goo goo 
+deadwood 
+xxx 
+shrek 
+pirates of the carribean 
+adobe photoshop 
+fuck 
+sex party 
+sex party 
+parkour 
+parkour 
+johny cash 
+office:mac 
+search files, youtube and liveleak 
+gay 
+halo 3 
+gta 
+gta crack 
+film 
+gay dad 
+dad son 
+father figure 
+gay 
+craymo 
+gay 
+resident evil extinction 
+colton ford 
+anal 
+project vector 
+fall out boy 
+gay 
+gay 
+simple plan 
+femdom 
+gay 
+high heels 
+beethoven 
+la mamma 
+la figa 
+ocean thirteen 
+capoeira 
+foto 
+foto 
+search files, youtube and liveleak 
+ita 
+ita 
+house 
+house 
+animal 
+scott 
+pc games 
+tony scott 
+ulrich 
+southpark 
+scientology 
+virtual 
+virtual games 
+hentai 
+hentai games 
+priya rai 
+priya rai 
+alma chua 
+yovo 
+filipina 
+pinay 
+where the boys 18 
+where the boys 
+styles yamaha 
+anal 
+italy anal 
+italy 
+italy 
+psr 
+the frames 
+diva 
+spanish 
+dvdrip 
+python 
+cookbook 
+simulator 
+the frames 
+python 
+stage 
+hiroko 
+php 
+ubuntu 
+psr 8000 
+napoli xxx 
+amateur 
+napoli 
+napoli 
+napoli 
+foggia 
+ragazze 
+sexy girl 
+divx ita 
+webcam italy 
+telecamera nascosta 
+php 
+explosions in the sky 
+ebony 
+black 
+miles davis 
+miles davis 
+miles davis 
+miles davis 
+cs3 
+star wars kid 
+php 
+php 
+gladiator 
+2pac 
+dr. dre 
+dr.dre 
+dre 
+vno ncw 
+black 
+creampie 
+wii ps3 
+php 
+ghost footage 
+28 weeks later 
+prison break 
+madonna 
+the gossip 
+python 
+symfony 
+lost 
+lost sub nl 
+lost sub dutch 
+wii ps3 
+tolle 
+byron katie 
+columbo 
+course of miracles 
+dutch sub 
+dutch sub 
+search files, youtube and liveleak 
+sex 
+art of noise 
+mvc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+csi 
+rail road tycom 3 
+xxx 
+csi 
+csi 
+bassie en 
+rihnna 
+rihanna 
+paris hilton 
+beauty 
+nicole graves 
+nikki 
+trombonr 
+beauty 
+trombone 
+arie en 
+girls 
+le fate ignoranti 
+le fate ignoranti 
+le fate ignoranti 
+le fate ignoranti 
+le fate ignoranti 
+naak 
+naakt 
+mindy vega 
+pregnant 
+search files, youtube and liveleak 
+pregnant 
+lee ann rimes 
+trombone 
+studente 
+leann rimes 
+barely legal 
+le fate ignoranti 
+le fate ignoranti 
+le fate ignoranti 
+star trek 
+er 
+ragazze a beverly hills 
+live 
+search files, youtube and liveleak 
+http://www.youtube.com/ 
+eckharrt tolle 
+eckharrt tolle 
+eckhart tolle 
+super internet tv key 
+http://www.youtube.com/ 
+super internet tv 
+http://www.inbox.lv/ 
+http://www.inbox.lv/ 
+fifa 2008 
+fifa 2008 
+search files, youtube and liveleak 
+china 
+chinesse 
+chineese 
+chinese 
+korea 
+terror 
+thai 
+helo 
+pc games 
+rat 
+radiohead 
+radiohead 
+pamela 
+music 
+jannes 
+snow 
+prison break 
+prison break 304 
+snow 
+bach 
+saw 
+trombones 
+trombones 
+trombones 
+trombones 
+trombone 
+trombone 
+the unit 
+unit 
+unit 
+snow queen 
+snow queen 
+snow 
+top gear 
+wild hogs 
+fifa 2008 
+lost 3.14 
+lost 3 
+lost 
+lost season 3 
+massive attack 
+audio books 
+lost 3 
+heroes 
+ipod 
+stargate 
+lost episode 14 
+lost episode 14 
+lost season 
+keiser chiefs 
+just jinger 
+big brother 
+mac 
+results 
+lilly 
+mature 
+bangbros 
+game video 
+biggest 
+biggest 
+big 
+ines 
+big tits 
+mein kampf 
+mein kampf 
+verboten 
+verboten 
+banned 
+wwe 
+taxi4 
+"king of kong" 
+wwe 
+footloose 
+wwe 
+wwe raw 
+mp3 
+milk inc 
+the bold 
+regi 
+as world turns 
+regi pinxten 
+milk inc 
+search files, youtube and liveleak 
+toca race 3 
+tokio hotel 
+toca race 3 
+fergie 
+fergie 
+heroes 
+ome henk 
+ome henk 
+preuteleute 
+spanish 
+pc wolfenstein 
+pc wolfenstein 
+pc olimpics 
+pc olympics 
+pc submarine game 
+pc silent hunter 
+pc silent hunter 
+pc submarine 
+pc submarine hunter 
+pc tour off duty 
+umbarella 
+r.kelly 
+we are the campions 
+queen 
+search files, youtube and liveleak 
+arkadia 
+basso 
+katie melua star 
+star wars 
+katie+malua+star 
+katie+malua 
+katiemalua 
+katie malua 
+katie melua 
+katie melua star 
+katie melua 
+porn 
+hamburg 
+hamburg 
+bush 
+sailboot katie 
+lolita 
+lesbo 
+surfcum 2002 italiano 
+sex 
+sex 
+fuck 
+surfcam 2002 italiano 
+fuck 
+fuck 
+surfcam 2002 italiano 
+surfcam 2002 italiano 
+surfcam 2002 italiano 
+surfcam 2002 italiano 
+fuck 
+carcass 
+dutch 
+dutch subs 
+crash 
+u pana boga w ogródku 
+alex agnew 
+shemales 
+transsexual 
+sheila 
+sybian 
+hayden 
+hayden panettiere 
+hayden panettiere 
+hayden panettiere 
+jenna jameson 
+jenna jameson 
+amateur 
+shemale 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+resident evil 3 extiction 
+hayden panettiere 
+multiagent systems 
+andre van duin 
+duin 
+witte 
+front 242 
+happy butt 
+front 242 
+klinik 
+pdf 
+wwe 
+linux 
+billboard 
+mazÄÂ\81 dvÄÂ\93selÄ«te 
+jordan 
+seu merda 
+thundercats 
+http://fun.inbox.lv/ 
+http://fun.inbox.lv/ 
+http://fun.inbox.lv/ 
+resident evil 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+sex 
+sex 
+sex 
+sex 
+girls 
+knoked up 
+young 
+de lift 
+insomniac 
+certe notti 
+creepa kid 
+hardbass 12 
+hardbass 12 
+hardbass 12 
+hardbass 
+silverhawks 
+wife 
+dexter 
+dexter s02e02 
+groningen 
+4400 
+"marc collin" 
+wheelie aerox 
+wheelie aerox gelderland !!! 
+clapton 
+mark knopfler 
+scooter wheely on motorway 
+csi 
+gay 
+multiagent systems 
+multiagent systems 
+multiagent systems 
+kamal 
+vista wallpapers 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ebook 
+aisha 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jade 
+microsoft office 2007 
+agent 
+search files, youtube and liveleak 
+team america 
+james uk group 
+an introduction to multiagent systems 
+madness complete 
+michael wooldridge 
+introduction to multiagent system 
+multiagent systems - a modern approach to distributed artificial intelligence 
+james blunt 
+kate melua 
+quantum leap 
+james morrison 
+pablo montero 
+east west quantum leap 
+east west quantum leap 
+quake 3 
+try teens 
+tryteens 
+try teens 
+waves 
+waves mercury 
+symphonic orchestra gold pro 
+bioshcok 
+bioshock 
+quake wars 
+jordan capri 
+sex tae 
+sex tape 
+preteen 
+e-myth revisited 
+e-myth 
+e-myth 
+search files, youtube and liveleak 
+dr who 
+pedo 
+chamillioaire 
+jimi hendrix 
+in rainbows 
+barenaked ladies 
+syun aika 
+shun aika 
+jason alexander 
+opus iii 
+new order 
+the simpsons 
+red drawf 
+chemistry 
+house of ass 
+xana 
+bi apple 
+the elegant spanking 
+black glove 
+tristan 
+search files, youtube and liveleak 
+heroes 
+movie 
+divx 
+search files, youtube and liveleak 
+joey 
+joey young 
+sport 
+joey 
+private 
+search files, youtube and liveleak 
+asian 
+paris hilton 
+paris xxx 
+animal xxx 
+animal porno 
+animal japan 
+japan teen 
+japan 
+japan lesbian 
+asian lesbian 
+asian 
+porno 
+porno russian 
+hostel 
+csi 
+ncis 
+russian 
+renato zero 
+renato zero il cielo 
+vanishing point 
+era glaciale 
+apocalypto 
+dead parrot 
+who pais the ferryman 
+leonard bernstein norton 
+leonard bernstein 
+leonard bernstein 
+classical 
+chucl 
+chuck s01e03 
+teens 
+anal 
+hammerfall 
+sinergy 
+blok 
+het blok 
+het blok net 5 
+xxx 
+mika 
+bravo hits 
+simon 
+nicolle 
+nicolle 
+nicol 
+fifa 08 
+fifa 08 pc games 
+fifa 08 pc game 
+fifa 08 
+fishgame 
+fish game 
+search files, youtube and liveleak 
+pc games 
+fifa 
+jan 
+jan smit 
+smit 
+bzn 
+bzn 
+pavarotti 
+satellite tv player 
+satellite tv titanium 
+satellite tv titanium 
+internet satellite tv player 
+pissing 
+tv players 
+videofonino 
+tv 
+maial college 
+search files, youtube and liveleak 
+multiagent systems 
+http://www.afhakers.nl/media.asp?x=6695#afhurl 
+maya 
+xsi 
+xsi linux 
+3ds 
+het blok 
+blok 
+blok 
+visual studio 2005 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+visual studio 2005 
+search files, youtube and liveleak 
+visual studio 2005 
+prison break 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vasco rossi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lino banfi 
+300 
+resident evil 
+resident evil extincion 
+resident evil exctincion 
+resident evil extinction 
+resident evil extinction ita 
+ita 
+ps2 
+massive r&b 
+chuck s01e02 
+google 
+paris 
+google 
+google 
+ps2 
+google video 
+simpson 
+herman van veen 
+jlo 
+jlo 
+beyonce 
+sandee 
+gossip girl 
+soundtrack 
+soundtrack no reservations 
+soundtrack the nanny's diary 
+soundtrack 
+search files, youtube and liveleak 
+meat puppets 
+nirvana 
+candlebox 
+mondo marcio 
+mondo marcio 
+mondo marcio 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+acoustic guitar 
+shol 
+s6 
+superbad 
+pan's labyrinth 
+
+search files, youtube and liveleak 
+nursery rhymes 
+bonzo dog 
+what i got linkin park 
+what i got 
+what i've done 
+miami vice 
+numb/encore 
+star trek 
+chavez 
+stuffit deluxe 
+touhou 
+stuffit standard 
+stuffit compress 
+zip 
+compress files 
+nadine jansen 
+big breasts 
+r. kelly 
+r. kelly 
+r. kelly 
+transformers 
+reggae 
+the nun 
+the saint 
+huge tits 
+huge tits 
+huge tits 
+osx 
+file maker pro 
+the starlite desperation 
+entourage season 4 
+pornoraphy 
+pornography 
+sex 
+
+
+
+pornagraphy 
+porngraphy 
+porngraphy 
+
+soliloquists of sound 
+search files, youtube and liveleak 
+top gear 
+simpsons 
+bourne ultimatum 
+bourne ultimatum 
+resident evil extinction 
+medal of honour airborne 
+handjob 
+handjob 
+the kingdom 
+g3 
+yeah yeah yeah 
+steve vai 
+die bedrohung 
+red hot chili peppers 
+red hot chili peppers 
+
+2063 
+house german 
+scrubs german 
+need for speed 
+windows vista 
+bangbus 
+divx 
+amateur 
+cum 
+divx 
+over the top 
+fuck vag fuck anus 
+mongrain 
+mongrain 
+friday 
+ozzy 
+bioshock 
+barbara 
+fun 
+citty 
+kitty 
+colonial home 
+colonial house 
+three days grace 
+three days grace 
+
+gay 
+gay 
+alex jones 9-4-07 
+brandi starr 
+hilary scott 
+soliloquists 
+taylor rain 
+jessica simpson 
+poison 
+metallica 
+
+poison 
+metallica 
+metallica 
+metallica 
+
+300 
+gaber 
+
+entourage 
+entourage 4 
+ita 
+the three musketeers 
+pc 
+ita 
+carbon 
+dusty rhodes westminster 
+xxx 
+trieste 
+porn 
+bergamas 
+nordio 
+justin warfield 
+ween 
+lacucaracha 
+sammi 
+gay 
+little bri 
+little britain 
+janey do 
+search files, youtube and liveleak 
+guys and dolls 
+desenho chaves 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jenna 
+dita 
+dita 
+dita 
+youtube 
+youtube 
+xxx 
+hairypotter 
+gay 
+high school musical 2 
+hannah montana 
+hannah montana 
+cures' 
+cures 
+the cure 
+311 
+greek 
+greek.s 
+greek 5 
+greek 6 
+greek 7 
+greek 8 
+greek 9 
+greek 10 
+greek 11 
+battlestar gallactica 
+battlestar galactica 
+resident 
+simpson 
+famaly guy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+daily show 
+bob dylan 
+weired al 
+weird al 
+mac osx 86 
+rubberfunk 
+
+rubberfunk 
+rescue me s0e4e13 
+rescue me s0e4e13 
+rescue me s0e4e13 
+rescue me s0e4e13 
+avi 
+divx 
+movies 
+simpons dvdscr 
+li-ann pop the cherry 
+li-ann pop the cherry 
+nero 
+bioshock 
+ftv 
+bioshock 
+fifa 
+advisor 
+regcure 
+search files, youtube and liveleak 
+divx ita 
+divx 
+divx ita 
+grillo 
+florin salem 
+it 
+ita 
+lost 
+lost it 
+intervideo windvd 8 
+juno 
+juno 
+pro7 
+ärzte 
+ebony 
+
+nina 
+nina mercedez 
+search files, youtube and liveleak 
+private 
+xxx 
+die ärzte 
+virgin 
+virginity 
+zimmer 
+pedo 
+gilmore girls 
+creampie 
+hentai 
+ls magazine 
+lsm 
+sandra 
+sandra teen 
+axxofiles, youtube and liveleak 
+elvis 
+ihra drag racing 
+ihra professional drag racing 
+joe cocker 
+ihra professional drag racing 
+ihra professional drag racing 
+morecambe wise 
+twenty four 
+twenty four season 6 
+twenty four season 6 
+bicep 
+bicep 
+bicep 
+bicep 
+isomagic 
+alcohol 
+prison break 
+muscle cam 
+remind me 
+muscle cam 
+chris cam 
+hung huge 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+chapelle oprah 
+chapelle oprah interview 
+chapelle oprah 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hung huge 
+the brave one 
+war 
+ufc 
+sqirt 
+squirt 
+porn 
+search files, youtube and liveleak 
+business 
+conspiracy 
+conspiracy 
+conspiracy 
+
+a 
+a 
+wing chun 
+die 
+nelly 
+nelly 
+heavenly sword 
+heavenly 
+perquisite 
+kraftwerk 
+underworld 
+underworld 
+underworld 
+underworld 
+underworld 
+underworld 
+kraftwerk 
+underworld 
+tangerine dream 
+minimal music 
+oceans thiteen 
+oceans thirteen 
+fist 
+kraftwerk 
+kraftwerk 
+kraftwerk 
+dusk till down 
+britney spears 
+dead parrot 
+angelina jolie 
+paris hilton 
+search files, youtube and liveleak 
+plyboy 
+cristina aguilera 
+a4proxy 
+proxyway 
+hidemyip 
+hide ip 
+ghostsurf 
+sex video 
+dead parrot 
+wicked 
+the gopod the bad the wicked 
+the good the bad the wicked 
+the good the bad the wicked 
+wicked productions 
+wicked productions 
+lesbian 
+shana 
\83ã¤ã\83\86 
+proxyway 
+economic concepts 
+economic concepts 
+economic concepts 
+hitman blood money reloaded 
+hitman blood 
+search files, youtube and liveleak 
+streettivo 
+streettivo 
+cricket 
+cricket 
+cricket 
+pakistan 
+house 
+ita 
+italy 
+discografia 
+finley 
+ministry of sound 
+sound 
+sound 
+sound 
+prison break 
+ebook 
+dexter 
+take 
+dexter 
+the queen 
+gympl 
+gina wild 
+gina wild 
+the wind that shakes 
+naked 
+office 
+naughty 
+4400 
+tickling 
+mathematica 
+mathematica 
+solletico 
+dover 
+cosquillas 
+kitzeln 
+weeds season 3 
+weeds 
+ratatouille 
+axxo woodcock 
+search files, youtube and liveleak 
+dead parrot 
+talking heads 
+nero 8 
+police 
+kean 
+captain sensible 
+pil 
+public image limited 
+24 
+die 
+24 
+24 3 
+idiocracy 
+puppet mastaz 
+southpark 
+mina 
+mediadefender 
+dexter 
+mediadefender 
+mediadefender 
+
+mediadefender 
+mediadefender 
+mediadefender 
+axxo 
+the one 
+"the one" 
+the one jet li 
+primary colors 
+heroes 
+potter 
+tickling 
+tickling 
+24 s06e09 
+24 s06e10 
+24 s06e11 
+24 s06e13 
+24 s06e22 
+24 s06e23 
+southpark 
+orn 
+porn 
+asian 
+asian 
+incest 
+teen 
+wife and eldest 
+
+
+kanye west 
+sony ericsson 
+w810i 
+w810i 
+w810i 
+house md 
+korn 
+korn 
+korn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+woodcock 
+woodcock 
+woodcock 
+balls of fury 
+504 boyz i can tell 
+download manager 
+dead parrot 
+fossett 
+underage 
+marion raven 
+twelve 
+dead parrot 
+
+mediadefender 
+mediadefender 
+ladytron 
+tits 
+tits 
+softcore 
+woman 
+women 
+amateur 
+wu tang 
+rza 
+vai 
+funn 
+simpsons 
+poo 
+daria 
+daria 
+daria 
+daria 
+katie fey 
+superman 
+swinger 
+katie fey 
+katie fey 
+wächter der nacht 
+fucked 
+nochnoi dozor 
+the bourne ultimatum 
+william orbit 
+ozric tentacles 
+transformers 
+osx86 
+
+lacuna coil 
+sucks 
+adult sins 
+the bourne ultimatum 
+sucks cock 
+search files, youtube and liveleak 
+paris hilton 
+ivanovic 
+gay 
+gay 
+
+wu tang 
+blue states 
+
+orgy 
+search files, youtube and liveleak 
+two 
+two 
+two 
+brand new 
+bioshock 
+kreatief 
+blowjob 
+sex 
+brand new 
+ricj 
+rock 
+rock your body 
+alternative 
+scrubs 
+scrubs 
+tiesto 2007 
+american snuff movie 
+the bourne ultimatum 
+i wish 
+rock your body 
+wantens 
+tone loc wild thing 
+reckless 
+bodywork 
+scrubs 
+david lee roth 
+scrubs 4 
+filmes portugueses 
+patio das cantigas 
+take on me 
+jamiroquai 
+jamiroquai little l 
+naruto 
+naruto 
+ubuntu 
+eddie izzard 
+footbag 
+stargate 
+leave britney alone 
+adobe photoshop 
+cam girls 
+linux 
+prey 
+yiff 
+adobe 
+bleach 
+finale 
+finale 2008 
+one peice 
+one piece 
+galactica 
+text editor 
+tycoon 
+text editor 
+resident evil 
+ditor 
+text 
+beryl 
+
+
+blade runner 
+adobe 
+transformers 
+
+phish 
+indian 
+indian 
+indian 
+indian 
+desi 
+desi 
+indian 
+desi 
+desi 
+phish 
+phish 
+
+phish 
+olivia monijca 
+desi 
+lesbian 
+olivia mojica 
+amateur 
+enemy territory 
+indian 
+buerets 
+arab 
+arab 
+knoppix 
+lela 
+dude where's my car 
+24 
+vanessa hudgens 
+death 
+don't tase me bro 
+tintin 
+tintin dvd 
+deadparrot 
+dead parrot 
+harry 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pavarotti 
+traviata 
+traviata pavarotti 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+frangoulis 
+one piece vegapunk 
+handjob 
+handjob 
+weeds 
+karma police 
+rabbit in your headlights 
+daily show 
+colbert report 
+upskirt 
+abba 
+abba 
+abba 
+abba 
+abba 
+abba 
+abba 
+upskirt 
+simpsons movie 
+metal 
+metal 
+pop 
+pop 
+pop 
+pop 
+olodum 
+ivete sangalo 
+olodum 
+atalanta 
+muscle boy video 
+muscle boy video 
+muscle boy video 
+muscle boy video 
+muscle boy video 
+muscle boy video 
+310 to yuma 
+oasis 
+dvd 
+film 
+film 
+film 
+film 
+survivor 
+dragon ball 
+linkin park 
+search files, youtube and liveleak 
+milf 
+italian 
+dragon ball 
+dragon ball 
+dragon ball 
+trailer 
+far cry 
+madonna 
+britney spears 
+voetbal 
+search files, youtube and liveleak 
+pdf 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+psp metal gear solid 
+unreal tournament apple 
+unreal tournament 
+goku 
+lsbian 
+doom 3 
+search files, youtube and liveleak 
+doom 3 mac 
+lesbian 
+apple doom 3 
+goku 
+goku 
+apple unreal 
+eminem 
+counter strike 
+counter strike os x 
+unreal tournament os x 
+unreal tournament for apple 
+unreal tournament for mac 
+umbrella rihanna 
+masturbation 
+rihanna 
+as 
+a 
+briana banks 
+banks 
+webcam 
+webcam 
+http://www.freerapidaccount.com/free/?r=1057 
+http://www.freerapidaccount.com/free/?r=1057 
+http://www.freerapidaccount.com/free/?r=1057 
+fanny lu 
+fanny lu 
+me enamora - juanes 
+me enamora 
+juanes 
+juanes 
+search files, youtube and liveleak 
+pjk 
+microsoft windows vista 
+pjk 
+pjk rbv 
+horses 
+horses 
+pjk 
+pjk 
+pjk 
+vista 
+incest 
+incest 
+armin 
+armin the great escape 
+ps2 
+search files, youtube and liveleak 
+das streben nach glück 
+eagles of death metal 
+
+
+mediadefender 
+aniston 
+courtney cox 
+http://videolog.uol.com.br/video?205085 
+naruto dublado 
+puffy 
+naruto dublado 
+puffies 
+voab 
+naaktstrand 
+neger 
+negerlul 
+penis 
+big breasts 
+small breasts 
+rush 
+radiohead 
+fanny lu 
+van halen 
+coheed and cambria no world for tomorrow 
+no world for tomorrow 
+coheed and cambria 
+coheed and cambria running free 
+massive asses 
+massive asses 
+mother superior 
+coheed mother superior 
+coheed mother superior 
+kite 
+search all files 
+manowar 
+meat loaf 
+pino daniele 
+pino daniele 
+pino daniele 
+pino daniele 
+pino daniele 
+pino daniele 
+pino daniele 
+pino daniele 
+pino daniele 
+cashback 
+pino daniele 
+pino daniele 
+pino daniele 
+pino daniele 
+evening gloves 
+
+
+azores 
+azores 
+azores 
+azores 
+azores 
+angra 
+ps2 
+dvd 
+dvd 
+dvd 
+blanzing 
+terceira 
+i 
+pico 
+pico 
+genesis 
+britney 
+heroes 
+heroes.s01e3 
+heroes.s01 
+heroes 
+xxx 
+britney 
+britney 
+bleach 
+onepiece 
+one piece 
+one piece 
+one piece 324 
+one piece 
+l word 
+the l word 
+core rhythm 
+musical 
+shane carmen 
+you tube 
+you tube 
+the l word 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+avril lavigne 
+the l word italiano 
+the l word 
+l word 
+christina aguilera 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+l word 
+madonna 
+tiziano ferro 
+madonna 
+michael jackson 
+audioslave 
+xutos 
+twain 
+madonna 
+janet jackson 
+twain 
+bob 
+divx 
+bob 
+l word, youtube and liveleak 
+, youtube and liveleak 
+hairspray 
+rugby 
+eva mendes 
+photoshop 
+shabal 
+panda antivirus 
+virtumonde 
+gmail 
+preeteens 
+lost season 4 
+fist 
+faust 
+emma 
+chopin 
+handicapt 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mandarin 
+downtown 
+downtown matsumoto 
+foo 
+matsumoto 
+matsumoto comedy 
+bourne 
+foo fighters 
+lela star 
+suse 
+amater 
+amater 
+amater 
+amater 
+amater 
+amater 
+amater 
+korea 
+unreal tournament 
+unreal tournament 
+unreal tournament 
+unreal tournament 
+fifa 08 
+korea 
+domino 
+thai 
+hardcore 
+java 
+young teen 
+search files, youtube and liveleak 
+porn 
+californication 
+weeds 
+weeds 
+jenna 
+jameson 
+blowjob 
+samantha 
+def leppard 
+lee aaron 
+thunder dirty love 
+charlie 
+darrian 
+red blue 
+ozzy 
+dexter 
+lavoie 
+halo 
+hazzard 
+lynda carter 
+baywatch 
+hee haw 
+mandrell 
+alba 
+derek jeter 
+yankees 
+beyonce 
+shania 
+mcphee 
+kiss 
+lingerie 
+lynch mob river of love 
+charlie 
+charlie blowjob 
+football 
+playboy 
+gigi d'alessio 
+bundesliga 
+public tv 
+mtv 
+public tv 
+public tv 
+kbs 
+britney 
+britney 
+dora 
+search files, youtube and liveleak 
+pc iso 
+kween 
+jeeg 
+autobahn 
+dhl 
+oktoberfest 
+nero 
+hun 
+akon 
+bed boys ii 
+far cry 2 
+far cry 2 game 
+search files, youtube and liveleak 
+ficken 
+cubase 
+tenniphil 
+xxxtorrent 
+xxxtorrents 
+test 
+korea 
+milburn 
+spiderman 
+spiderman 
+wrc 
+these are the facts 
+xxx 
+mature 
+soccer 
+korea 
+search files, youtube and liveleak 
+guitarsynth 
+guitarsynth 
+guitar synth 
+guitar synth 
+these are the facts 
+preteen 
+preteen 
+preteen 
+exfreundin 
+freundin 
+teen 
+vs.php 
+vs.php 
+vs.php 
+lomu 
+korea 
+php 
+vb.net 
+vb.net 
+vb.net 
+vb.net 
+vb.net 
+net 
+vs.php 
+grafica 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+webserver 
+jolie 
+jolie 
+batman 
+salsa 
+merenghe 
+merengue 
+jolie 
+moto gp 2007 ps2 
+black 
+avi 
+bioshock 
+nas 
+nas my way 
+one mic 
+nas one mic 
+vasco 
+zucchero 
+zucchero 
+zucchero 
+ebony 
+american choppers 
+mothers 
+mothers 
+harry 
+milf 
+naruto 
+war 
+dalai lama 
+stephen colbert 
+dalla 
+luccio dalla 
+baby 
+children 
+psp 
+psp 
+wwe 
+bikini 
+pink floyd 
+rape 
+film 
+lebanese 
+nude 
+galactica 
+viv thomas 
+invasion 
+prison break 
+cricket 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+movie 
+search files, youtube and liveleak 
+trinspotting 
+kank 
+search files, youtube and liveleak 
+hum tum 
+pierino 
+hum tum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+movie 
+pierino 
+lomu 
+lomu 
+pierino in divx 
+divx ita 
+close combat 
+divx ita 
+divx ita 
+ita film 
+movie 
+movie 
+red hot chili peppers 
+red hot chili peppers 
+red hot chili peppers 
+search files, youtube and liveleak 
+debbie does dallas 
+amateur 
+amateur 
+amatoriale 
+amatoriale 
+thai vcd 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+amatoriale 
+spiaggia 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+pompino 
+amteur 
+foo fighters 
+amateur 
+foo fighter 
+britney spears 
+topa 
+german television 
+luci rosse 
+nude 
+avi to dvd 
+dvd 
+watch 
+nuda 
+korea 
+search all files 
+search all files 
+search all files 
+عÙุ± Ø§Ø¯Ù�¨ 
+kctvjeju 
+proxy way 
+anonimity 
+proxy 
+film porno 
+69 
+jannes 
+seredova 
+heroes 
+proxyway 
+search files, youtube and liveleak 
+korea 
+korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+korea 
+macromedia 
+briana 
+mature 
+blow jobs 
+mature 
+mature 
+milf 
+divx 
+divx ita 
+the.it.crowd 
+selen 
+selen 
+eva henger 
+eva henger 
+pirati dei caraibi 
+eva henger 
+pirati dei caraibi 
+tribler 
+tribler 
+the.it.crowd s02e01 
+the.it.crowd s02e01 
+eva henger 
+henger 
+erlang 
+sandoval 
+sandoval 
+arturo sandoval 
+arturo sandoval 
+search files, youtube and liveleak 
+radio recorders 
+radio recorders 
+psp 
+the it crowd 
+the it crowd 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+search files, youtube and liveleak 
+osama bin laden 
+halo 
+blowjob 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+arturo sandoval 
+guitrasynth 
+guitarsynth 
+guitarsynth 
+mina 
+pantyhose 
+airbag test 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fetish 
+sony mhc 5900 
+brandweer 
+sony mhc5900 
+sony mhc5900 
+brandweer 
+brandweer 
+search files, youtube and liveleak 
+love 
+walker brothers 
+brandweer 
+dion 
+brandweer 
+.rar 
+sampletank- trumpets.rar 
+sampletank 
+sampletank 
+sampletank 
+brandbestrijding 
+sampletank 
+sampletank 
+frankie goes to hollywood 
+sezen aksu 
+50 cent 
+nagios 
+zenoss 
+top 40 
+kardashian 
+hilton 
+heroes 
+breakfast 
+breakfast at tiffany's 
+divertenti 
+pixar 
+sigle pixar 
+sigle pixar 
+sigle cinema 
+sigle cinema 
+sigle deramworks 
+sigle dreamworks 
+sigle dreamworks 
+sigle shrek 
+sigle shrek 
+sigle film cinema 
+sigle film cinema 
+sigle pixar 
+sigle pixar 
+sigle pixar lamps 
+sigle pixar 
+sigle pixar 
+trance 
+pixar 
+pixar 
+trance 
+trance 
+pamela 
+sm 
+bdsm 
+colonne sonore 
+colonne sonore 
+search files, youtube and liveleak 
+colonne sonore film 
+baglioni 
+baglioni 
+baglioni 
+baglioni 
+guitar 
+guitar hero 
+guitar hero ps2 
+guitar hero 1 ps2 
+aman 
+top chef 
+atlantis 
+aman 
+guitar hero iso 
+aman 
+atlantis 
+apple logic studio 
+ps2 iso 
+manhunt 2 
+stargate 
+stargate atlantis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+britney spears 
+inzest 
+inzest 
+inzest 
+gay 
+the way i are 
+house german 
+scubs 
+scrubs 
+stargate 
+candyman 
+star trek german 
+candyman 
+candyman 
+no te pido la luna 
+justin timberlake 
+sexy back 
+sexy back 
+sexy back 
+spice girls 
+chakaron 
+i see girls 
+gwen stefani 
+search files, youtube and liveleak 
+borsato 
+borsato 
+avril lavigne 
+belinda 
+superbad 
+search files, youtube and liveleak 
+ggg 
+utorrent 
+utorrent 
+xxx 
+anal 
+deutsch 
+anal 
+prison break 
+suck 
+xp 
+vista 
+heroes 
+hot 
+crysis 
+kierra kiki 
+chopin 
+knocked up 
+jesse james 
+jesse james robert ford 
+fuck 
+planet terror 
+kim holland 
+chopin 
+fisting 
+bonn 
+nutten 
+huren bonn 
+huren 
+straßemstricj 
+straßemstrich 
+straßenstrich 
+telefonsex 
+lolits 
+lolita 
+kindfrau 
+zierlich 
+hot strip 
+morocco 
+shakira 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+morocco 
+bjsex 
+the mars volta 
+ratatouille 
+spiderman3 
+spiderman 3 
+axxo 
+electric masada 
+german 
+xxx 
+search files, youtube and liveleak 
+nuance 
+half life 
+csi 
+private 
+divx-ita 
+maradona 
+pirates 
+serie a 
+inter 
+udinese 
+football 
+soccer 
+arsenal 
+pirati 
+search files, youtube and liveleak 
+keith urban 
+keith urban 
+keith urban 
+private video 
+keith urban 
+leann rimes 
+brad paisley 
+cs3 
+alan jacksom 
+alam jackson 
+alan jackson 
+alan jackson 
+alan jackson 
+ndesign 
+indesign 
+dance 
+dance 
+dance 
+private 
+
+bbw 
+hard eight 
+310 to yuma 
+310 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+310 to yuma 
+balls of fury 
+metalocalypse 
+search files, youtube and liveleak 
+ebony 
+ebony 
+bang bros 
+toast crack 
+ableton 
+balls of fury 
+balls of fury 
+
+
+divx 
+divx 
+divx 
+divx 
+norton antivirus 2008 ita 
+norton antivirus 2008 
+bibi blocksberg 
+norton antivirus 2007 
+footjob 
+the maitrix 
+the maitrix 
+microsoft office 2007 
+zscudetto 2007 
+scudetto 2007 
+scudetto 2007 
+scudetto 
+scudetto 
+scudetto 
+scudetto2007 
+scudetto 2007 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porno 
+amatoriali 
+ramazzotti 
+miguel bose 
+o'riordan 
+shakira 
+microsoft front page 
+microsoft front page 
+microsoft front page 2007 
+microsoft front page 2007 
+microsoft front page 2007 
+microsoft front page 2007 
+jennifer lopez 
+jennifer lopez 
+microsoft front page 2007 
+adult 
+harry potter order of the pheonix 
+adult 
+anal 
+anal 
+casablanca 
+dragon wars 
+mr woodcock 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+filme 
+anal 
+journey of life 
+ufo 
+area 51 
+search files, youtube and liveleak 
+indian beauties 
+indian beauties 
+pregnant 
+high school musical 2 
+love 
+naked 
+porn 
+south park 
+rally 
+rally 
+rally 
+rally 
+sherk 
+sherk 3 
+sherk 3 
+matrix 
+ocean 12 
+braiana banks 
+jena jameson 
+superbad 
+superbad 
+superbad 
+superbad 
+superbad 
+asstrafic 
+porn 
+electrons 
+moto 
+baglioni 
+games 
+high school musical 2 axxo 
+eros ramazzotti 
+villeneuve 
+duri a morire 
+giochi 
+the best 
+gp 2007 
+gp f1 1978 
+the best 
+lara 
+lara 
+gp f1 1978 
+gp f1 1978 
+smith 
+gp f1 1978 
+gp f1 1978 
+smith 
+noiva 
+smith 
+smith 
+smith 
+smith 
+veritas backup 
+veritas backup 
+x 
+eva 
+bare minerals 
+ass 
+md formulations 
+search files, youtube and liveleak 
+the practice ita 
+mina 
+in tribunale con lynn 
+lomu 
+mina 
+prendilo 
+pane e tulipani 
+terminator 
+itunes 
+veritas backup 
+shemale 
+shemale 
+anal 
+house german 
+german 
+chaienn 
+pregnant 
+masturbate 
+mika 
+queen 
+resident evil extinction 
+pompini 
+female 
+heroes 
+search files, youtube and liveleak 
+rape 
+indian rape 
+indian rape 
+indian rape 
+indian rape 
+indian rape 
+little rape 
+little rape 
+rape 
+splinter cell 
+adult 
+battlefield bad company 
+battlefield bad company 
+stargate 
+stargate atlantis 
+lektor 
+lektor 2007 
+muzyka 2007 
+techno 2007 
+jackyll&hyde 
+akell hyde 
+stargate atlantis 
+electro 
+stargate atlantis 
+potter 
+hip hop 
+blakes 7 
+blakes 7 
+survivor 
+doggy 
+doggy style 
+doggy style 
+kill 
+smith 
+ger 
+war 
+dr who 
+pussy 
+zecchino doro 
+doctor who 
+doctor who invasion 
+dragonball z il legendario super sayan 
+dragonball z il legendario super sayan 
+dragonball z il legendario super sayan 
+dragonball z il film 
+rambo 
+i guerrieri della notte 
+i guerrieri della notte 
+i guerrieri della notte 
+i guerrieri della notte 
+i guerrieri della notte 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rings 
+naomi 
+lineage 
+rammstein 
+tera patrick 
+it crowd 
+rambo 
+rambo 
+nannini 
+rambo 
+titanic 
+titanic 
+i guerrieri della notte 
+i guerrieri della notte 
+titanic italiano 
+titanic italiano 
+titanic italiano 
+titanic italiano 
+doctor who invasion 5 
+world trade ceer 
+world trade center italiano 
+ebook 
+dragon wars 
+doctor who invasion 6 
+rtl 
+ebook 
+watch 
+newsom 
+nelly 
+countrygrammer 
+country grammer 
+country grammer 
+country grammer 
+krecik 
+krecik 
+krecik 
+krecik 
+krecik 
+krecik zegarmistrzem 
+krecik zegarmistrzem 
+krecik zegarmistrzem 
+krecik zegarmistrzem 
+chuck and larry 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+doda znak pokoju 
+dvx ita 2007 
+mauro nardi 
+de wandeling 
+crash 
+paul de leeuw 
+deja vu ita 
+un'impresa da dio 
+yes close to the edge 
+moody blues 
+samim 
+v2i 
+acronis 
+acronis 
+egy makulátlan elme örök ragyogása 
+zene 
+musik 
+zene 
+mika 
+sesso 
+search files, youtube and liveleak 
+mana 
+adobe 
+nds 
+nintendo ds 
+nintendo ds 
+nes 
+top 20 
+nl top 20 
+juiced 2 nds 
+cobra 
+week 38 
+week38 
+speedtv 
+speedtv 
+juiced 2 nds 
+gay 
+divx 
+divx ita 
+casa 
+can't turn you on 
+can't turn you loose 
+can't turn you loose 
+can't turn you loose 
+can't turn you on 
+can't turn you on 
+can't turn you loose 
+can't turn you loose 
+casa 
+casa 
+amore 
+arturo gatti 
+st. vincent 
+sex 
+bang bros 
+bang bros 
+search files, youtube and liveleak 
+metal 
+legal 
+karaoke 
+it crowd 
+sicko 
+rape 
+htid 
+softimage 
+3d studio max 
+linux xp 
+bang bros 
+cameltoe 
+search files, youtube and liveleak 
+german 
+nhl 
+pittsburgh penguins 
+live tv 
+search files, youtube and liveleak 
+porno 
+sex 
+lord of the rings 
+telefon tel aviv 
+prison break 
+gay 
+java 
+korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ben hur 
+ben hur 
+editorial 
+computer editorial 
+how to build a computer editorial 
+how to build a computer 
+how to build a computer 
+polloi 
+house 
+motorola 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+notte 
+search files, youtube and liveleak 
+wrc 
+asian xxx 
+xxx 
+marvel 
+comics 
+asian xxx 
+russian xxx 
+russian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anita meijer 
+eric clapton 
+search files, youtube and liveleak 
+premonition 
+premonition eng 
+asian 
+asian teen 
+teen 
+prata vetra 
+prata vetra 
+prata vetra 
+prata vetra mp3 
+prata vetra mp3 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+883 
+883 
+883 
+883 
+ps2 
+harry potter ita 
+spiderman 
+spiderman 
+spiderman 
+grow grow grow 
+bdsm 
+k3 
+k3 
+goo goo dolls 
+iris 
+sex 
+sleepers 
+brad pitt 
+carmen electra 
+angeline jolie 
+hard fi 
+goo goo dolls 
+boris kovac 
+dom lomp 
+opposites 
+fiction plane 
+bruce willis 
+die hard 
+die hard 
+die hard 
+die hard 4 
+die hard 4 
+angeline jolie 
+angeline jolie 
+angeline jolie 
+angelina jolie 
+elle mcpherson 
+stolen tape 
+homemade videos 
+tatort 
+tatort 
+doggie style 
+madonna 
+madonna 
+madonna 
+madonna 
+madonna 
+syllart 
+search files, youtube and liveleak 
+
+close to home 
+fracture 
+fracture movie 
+rush hour 3 
+fatman 
+fatman 
+blue 
+street 
+street line 
+street lan 
+fatman scoop 
+fatman scop 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+prison break 
+prison break ita 
+chiara 
+linux 
+sara 
+windows 
+hirens 
+bourne 
+heroes episode 19 
+search files, youtube and liveleak 
+when the mountains tremb 
+when the mountains tremble 
+shrak 3 
+house german 
+shrek 3 
+german 
+gloria 
+win-agenda 1.0 
+vrt 
+food 
+appletv 
+it crowd 
+dragonball z il film 
+monstertruck 
+shemale 
+sean paul 
+fuck 
+ass 
+ass 
+jenna 
+kingdom under fire 
+kingdom under fire 
+kingdom under fire xbox 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gay sex video 
+gay sex video 
+manciano 
+ace ventura 
+anita meijer 
+anita meijer 
+margie ball 
+andress 
+creedence clearwater 
+streisand 
+avinu 
+streisand 
+i simpson 
+i simpson il film 
+i simpson il film ita 
+superbad 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+flash business templates 
+flash business templates 
+flash templates 
+flash templates 
+search files, youtube and liveleak 
+300 
+flash 
+300 
+i go my wah 
+i go my way 
+i'll go my way 
+bob dylan 
+black christmas ita 
+black christmas 
+vasco rossi 
+vasco rossi 
+divx ita 
+soundtrack 
+pearl harbor 
+pearl harbor 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+l'ultima legione 
+desperado 
+desperado dvx 
+desperado dvix 
+desperado ita 
+desperado ita 
+dvd ita 
+heroes 
+heroes 2x 
+veronica zemanova 
+veronica zemanova 
+veronica zemanova 
+porno 
+sex 
+xxx 
+dvd ita 
+veronica zemanova 
+xxx 
+xxx 
+xxx 
+xxx 
+dvd ita 
+keba 
+hazes 
+hazes 
+hazes 
+hazes 
+hazes 
+hazes 
+l'ultima legione 
+boy on boy 
+crtani 
+crtani xxx 
+divx 
+xxx 
+divx ita 
+dragana 
+divx ita 
+divx ita 
+dragana 
+dragana 
+shrek 3 
+film ita 
+it crowd 
+dvd ita 
+the great world of sound 
+closing escrow 
+self medicated 
+kill bill 
+tnt 
+divx ita 
+tarkan 
+dvd ita 2007 
+turk 
+gigi d'agostino 
+sean paul 
+kelis 
+dvd ita 
+bones ita 
+dvd ita 
+jimmy reed 
+csi las vegas 7 
+divx ita 
+divx ita 2007 
+divx film ita 2007 
+shrek 3 
+search files, youtube and liveleak 
+porn 
+asian 
+shrek 3 film 
+asia carrera 
+asia carrera 
+lesbian 
+lesbian ebony 
+the simpson movie 
+the simpson movie ita 
+porno 
+the simpson ita 
+film 
+film ita 
+i simpson il film 
+film ita 
+il sette e l'otto 
+ficarra e picone 
+ficarra e picone 
+il sette e l'otto 
+foot 
+oceans thirteen 
+oceans thirteen 
+oceans thirteen ita 
+divx ita 
+film ale e franz 
+dutch 
+doctor 
+infermiere 
+pornostar 
+pornstar 
+movie ita 2007 
+movie ita 2007 
+lesbian foot 
+movie 2007 
+film 2007 
+lesbian 
+ed2k://|file|[divx%20ita]%20porky%20college%202%202007%20by%20minyc%20dj.avi|1408829714|6923a49c1d90a5e4677ba62e6394a2a0|/ 
+film ita 
+parky college 
+porky college 
+porky college film 
+porky college film 
+dvd ita 
+film ita 
+xxx 
+youporn 
+porno 
+sex 
+nl subt 
+pop 
+search files, youtube and liveleak 
+halo 3 
+sexe 
+deadwood hun 
+deadwood s2 
+teen 
+mozart 
+beethoven 
+paul simon 
+lightroom 
+simon and garfunkel 
+bee gees 
+phantom of the opera 
+les miserable 
+chicken shoot 
+jonathan livingston seagull 
+bikini 
+die hard 4 
+les miserables 
+phantom of the opera 
+shrek 3 
+shrek 3 ita 
+shrek ita 
+simon and garfunkel 
+simon and garfunkel 
+simon and garfunkel 
+simon and garfunkel 
+dvd ita 
+dvd il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+ficarra e picone 
+ficarra e picone 
+ale e franz 
+planet terror 
+dvd-ita 
+dvd-ita 
+planet terror ita 
+divx ita 
+planet terror ita 
+porn 
+divx 
+il dolce e l'amaro 
+games 
+il bacio che aspettavo 
+il bacio che aspettavo 
+manuale d'amore 2 
+album 
+manuale d'amore 
+manuale d'amore 
+manuale d'amore 2 
+manuale d'amore 2 
+manuale d'amore 2 
+c# 
+south park 
+southpark 
+linux 
+ubuntu 
+doctor who 
+photoshop cs3 
+resident evil 
+blonde 
+tntvillage 
+dvd ita 
+dvd ita 
+ita 
+search files, youtube and liveleak 
+sexcams 
+sexcams 
+ita 2007 
+xvid ita 
+sexcams 
+sexcams 
+sexcams 
+sexcams 
+sexcams 
+cartao de visita corel 
+cartao no corel 
+tania russof 
+ccorel 
+mika 
+mika 
+severance ita 
+vi dichiaro marito e marito 
+navigon 
+premonition ita 
+ita 
+ratatouille 
+ratatouille it 
+nomadi 
+search files, youtube and liveleak 
+laura 
+cazzi 
+hard 
+clasical 
+vista 
+twin peak 
+twin peax 
+twin 
+twinpix 
+twinpeax 
+vista 64 
+vista 64 
+vista 64 
+vista 64 
+twin 
+twin peaks 
+search files, youtube and liveleak 
+xxx 
+tanita 
+tanita 
+tanita tikaram 
+el polaco 
+red hat 
+mi pc 
+red hat linux 
+beos 
+3gp 
+arina 
+matrix os 
+game pc ita 
+game pc ita 
+game pc 2007 
+game pc 
+photoshop cs3 
+game 2007 
+game pc 
+tits 
+game pc 
+game pc 
+pes 7 
+pes 
+pes 
+pc game 
+pc game ita 
+pc game trailer 
+game trailer 
+alvaro vitali 
+lesbo 
+simpson 
+simpson ita 
+ita 
+shrek 3 
+shrek 3 ita 
+shrek ita 
+skinny 
+skinny 
+big clit 
+search files, youtube and liveleak 
+spiderman 3 
+rammstein 
+rammstein 
+moviemania 
+commodore 64 
+rammstein 
+amiga 
+blues brothers 
+electric six 
+electric six 
+wagner 
+wagner 
+wagner 
+wagner 
+wagner 
+battles atlas 
+battles atlas 
+ferrari 
+deluxe paint 
+amiga emulator 
+arkanoid 
+ratatouille 
+ho'oponopono 
+http://br.youtube.com/watch?v=mijqhp--od0 
+ass 
+dancing 
+booty meat 
+booty shake 
+ass 
+ass shake 
+lapdance 
+blowjob 
+booty meat 
+booty meat 
+fruit loops 
+tropa de elite 
+indra 
+playboy 
+soccer 
+bourne 
+bangbros 
+pussy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kylee reese 
+asian 
+xxx movie 
+halloween 1978 
+halloween 1978 
+chicken shoot 
+chuck 
+chuck -pronounce 
+heroes -s01 
+loose change 
+silk 
+spears 
+css 
+php 
+php 
+porn 
+private xxx 
+animal 
+zoo sex 
+zoo sex 
+sex farm 
+evergreen 
+evergreen 
+andy william 
+boulevard 
+scarface 
+biagio antonaccci 
+biagio antonacci 
+gigi finizio 
+lucio dalla 
+finizio 
+foto 
+video porno 
+eva henger 
+freedom 
+colonne sonore 
+monella 
+biancaneve sotto i nani 
+biancaneve sotto i nani film porno 
+biancaneve sotto i nani film porno 
+pinocchi porno 
+pinocchio porno 
+hungarian dance no. 8 
+hungarian dance no. 8 
+brahms 
+sex 
+brahms 
+film porno 
+brahms 
+brahms 
+brahms 
+sex 
+brahms 
+scarface ita 
+cuby 
+biancaneve film porno 
+sesso in classe 
+professoresse nude 
+the simpson 
+the simpson italiano 
+porno 
+eva henger 
+film porno 
+film porno 
+candy store 
+candy dulfer 
+love is gone 
+gemelli diversi 
+napoli 
+gemelli diversi 
+yoga 
+оÑÃ\90¾Ð±Ð¾Ðµ Ð¼Ð½ÐµÐ½Ð¸Ðµ 
+minority report 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+çÄ\84修正 
+電マ 
+電マ 
+distretto di polizia 6 
+see dick run 
+fun with dick e jane 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ghost raides 
+rocky 
+giochi playstation 2 
+playstation 2 
+distretto di polizia 6 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tre metri sopra al cielo 
+playstation 2 giochi 
+cultura moderna 
+cultura moderna 
+cultura moderna 
+cultura moderna gioco 
+cultura moderna gioco 
+cultura moderna gioco 
+gran turismo 
+moto gp ps2 
+spider man ps2 
+shrek 3 
+spider man ps2 
+jazz 
+metal gear ps2 
+thbled 
+the bled 
+new amsterdams 
+new amsterdams 
+search all files 
+new amsterdams 
+new amsterdams 
+new amsterdams 
+new amsterdams 
+pdf 
+ubuntu 
+kde4 
+music 
+bourne 
+chopin 
+chopin 
+classic 
+classic 
+classic 
+classic 
+classic 
+css 
+leg 
+photography 
+
+cock 
+topgear 
+seventeen 
+seventeen 
+seventeen 
+boobs 
+boobs 
+mediadef 
+mediadefender 
+public 
+ratatouille 
+mediadefender 
+public 
+defender 
+kaze no stigma 
+darker than black 
+tokyo majin 7 
+http://www.liveleak.com/view?i=c66_1176846833 
+"family tree maker 2008 
+"family tree maker 2008" 
+"familytree maker 2008" 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+no reservations 
+batman 
+wedding daze 
+bourne ultimatum 
+bourne 
+wedding daze 
+no reservations 
+"family tree maker 2008" 
+"family tree maker 2008" 
+family tree maker 2008 
+sosft 
+sisoft 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+http://www.liveleak.com/view?i=c66_1176846833 
+search files, youtube and liveleak 
+defender 
+cats 
+cats the musical 
+les miserables 
+andrew loyd webber 
+andrew loyd webber cats 
+zadornov 
+search files, youtube and liveleak 
+acronis migrate 
+teen 
+883 
+karaoke 
+karaoke brasil 
+883 
+karaoke brasil 
+prison break 
+brotherhood 
+search files, youtube and liveleak 
+californication 
+dvd ita 2007 
+dvd ita 2007 
+dutch 
+dvd ita 
+l'ultima legione dvd 
+mp3 
+xvid 
+xvid ita 
+xvid de luxe 
+italian subbed 
+zwartboek 
+high school musical 
+high school musical ita 
+xvid imc 
+tettone 
+tettone 
+tettone 
+movie 
+dutch movie 
+britney 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xvid ita 
+xvid 
+xvid 
+xvid 
+kurdistan 
+eruh 
+emmanuelle 
+objects of her desire 
+confessions of a groupie 
+confessions of a groupie 
+naked encounters 
+sin city diaries 
+fantasmes 
+my first female lover 
+bound cargo 
+playboy 
+oral 
+teenage cavegirl 
+teenage cavegirl 
+amateurs 
+miss italia 
+vanessa 
+death note 
+torrentsmd.com 
+torrentsmd.com 
+toots 
+thielemans 
+death note 
+mike 
+search files, youtube and liveleak 
+ocean 
+hot fuzz 
+hot fuzz ita 
+anita blonde 
+sylvia saint 
+timbaland 
+youtube 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cracked 
+emule 
+deutsch 
+[anime-ita] 
+original flavor 
+original flavor 
+cumshot 
+kenzie kenner 
+kenner 
+raven riley 
+eva angelina 
+lamas 
+lamas 
+lamas 
+rhcp 
+dirt collin\ 
+dirt collin 
+thai 
+soilwork 
+feet 
+feet lick 
+wrestle 
+oil 
+feet lick 
+teen 
+tokyo majin 7 
+halo 
+loretta goggi 
+jaguar 
+navtech 
+terminator 
+tv total 
+shrek terzo films 
+pirates of silicon valley 
+erotik 
+squirt 
+dragonball 
+pirates 
+pirates of silicone 
+dragonball gt 
+720p 
+720p king kong 
+king kong 
+king kong xvid 
+720p 
+hdtv 
+jade 
+1080p 
+search files, youtube and liveleak 
+dragon baal 
+1080i 
+pantyhose 
+omas 
+mature 
+lessen to love 
+level 2 
+granny 
+playstation 2 
+fifa 2008 
+bmx playstation 2 
+playstation 2 
+i listen to love 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+oma 
+tomb raider anniversary 
+tomb raider anniversary ps2 
+spider man 3 ps2 
+tomb raider anniversary ps2 
+zero assoluto 
+dragon ball z ps2 
+dragon ball z psp 
+giochi psp 
+giochi psp 
+psp 
+ps2 
+navteq 
+brazzers 
+bush 
+gary moore 
+q cad 
+dream theater 
+ubuntu 
+worried ray 
+pam tommy 
+incest 
+heroes 
+heroes s02 
+prison break 
+prison break s03 
+prison break s03e02 
+heroes s02 
+heroes s02 
+mom 
+mature.nl 
+mature 
+old 
+grandmother 
+grandma 
+grandmas 
+lusty grandmas 
+lusty 
+mature woman 
+alt 
+gilmour 
+fortzen 
+vieille 
+femme mure 
+femme 
+femme 
+mure 
+hand job 
+stroke 
+jobs 
+jobs 
+jobs 
+jobs 
+suck 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+arcangel 
+50 cent 
+good luck chuck 
+the game plan 
+dragon wars 
+office 2007 
+walking tall 2 
+walking tall 2 
+wrong turn 2 
+drivergenius 
+photoshooting 
+best of fashion tv 
+sex 
+女åÄ\92 
+女åÄ\92 
+sex 
+victoriassecret 
+natacha peyre photoshooting 
+natacha peyre 
+fotoshooting 
+kader loth 
+fotoshooting 
+prison break 
+prison break hr 
+voz propia 
+inxs 
+inxs 
+rock 
+zz top 
+zz top 
+sex 
+angelina jolie 
+pamela 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+te it crowd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ghulam ali 
+ghulam ali 
+ghulam ali 
+ghulam ali download 
+ghulam ali download 
+fener 
+blue planet 
+arab 
+arab 
+game 
+adult 
+arab 
+kuwait 
+shemale 
+asian 
+teen 
+kuwait 
+arab 
+Ø´Ù�ู�Â\84 
+عرب 
+heros s02e01 
+muft.tv 
+ghulam ali 
+ghulam ali 
+mmovie 
+family guy 
+house season 4 
+house alone 
+house s4ep1 
+divx 
+divx 
+divx 
+divx 
+divx 
+bartoli 
+selen 
+japan girl xxx 
+japan girl 
+japan teen 
+japan 
+russian incest 
+porno 
+insegnanti 
+insegnanti xxx 
+asian xxx 
+asian teen 
+xxx 
+simpson 
+simpson ita 
+sex asian 
+zoosex 
+japanese teenn 
+japanese teen 
+serenity 
+serenity ita 
+serenity ita 
+hot asian 
+hot asian xxx 
+asian xxx 
+maria - the album 
+maria - the album 
+maria 
+bartoli 
+zetsubou raw 
+shrek 
+pirates 
+wam 
+wet and messy 
+piss 
+pee 
+wam 
+selen 
+trasformer 
+sompson 
+i simpson 
+i simpson ita 
+i simpson 
+moana 
+rizzo 
+gessica 
+novita 
+*.* 
+dvd 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+trasformer 
+dvd 
+dvd ita 
+pirati dei caraibi 
+hotfax 
+dvd ita 
+dvd ita 
+dvd ita 
+dvd ita 
+friends 
+tra 
+friends ita 
+nansy 
+nansy 
+madonna 
+sparagna 
+sparagna 
+friends ita 
+friends ita 
+divx ita 
+divx ita friends 
+dvd ita 
+sex 
+search files, youtube and liveleak 
+xbox 
+dreamcast 
+xbox 
+divx ita 
+divx ita 
+divx ita 
+foo 
+foo 
+tango 
+legal 
+legaltorrents 
+legaltorrents 
+bbc ansi 
+bbc 
+bbc ansi 
+ansi 
+ansi bbc 
+bbc 
+korea 
+matia bazar grido 
+aqua teen hiung 
+aqua teen hiun 
+aqua teen hiun 
+aqua teen hiun 
+aqua teen hunger force 
+mac os 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+compiz 
+wolfram 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vmware 
+madonna 
+top gear 
+hack 
+malmsteen 
+malmsteen 
+yngwie 
+malmsteen 
+creative cow 
+schaak 
+german 
+xxx 
+seinfeld 
+cum 
+girls 
+seinfeld 
+girls 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sigur ros 
+sigur ros 
+erik mongrain 
+seinfeld 
+seinfeld 
+rey leon 
+licencia para casarse 
+adobe 
+todo poderoso 
+todo poderoso 
+slayer 
+crazy 
+crazy 
+orgy 
+lesbian orgy 
+chevelle 
+chevelle 
+medieval 2 total war 
+medieval 2 total war ita 
+lesbian orgy 
+hentai 
+teens 
+medieval 2 total war 
+medieval 2 total war 
+medieval 2 total war 
+phill collins 
+genesis 
+hp unix 6.05 
+phil collins 
+hpunix 6.05 
+lesbian 
+longman 
+lara pausini dove l'aria è polvere 
+laura pausini dove l'aria è polvere 
+private 
+private 
+chevelle 
+movie zwartboek 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+greatest opera show on earth 
+greatest opera show on earth 
+greatest opera show on earth 
+si ministre 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+acrobat 8 professional 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+die hard 
+die hard 
+laurel hardy 
+laurel hardy 
+beatles 
+ratatouille 
+inter sampdoria 
+inter sampdoria live 
+inter sampdoria live 
+foo 
+sugarbabes 
+grönemayer 
+grönemeyer 
+grönemeyer 
+720p 
+laurel hardy 
+tango 
+300 
+dvx ita 
+riverdance 
+riverdance 
+allo allo ep3 
+capleton 
+strumpfhose 
+strumpfhose 
+capleton 
+personal branding 
+bareback 
+fantastici 4 
+gay speedo 
+speedo 
+slingblade 
+lanois 
+cash 
+the war 
+fantastic four 
+cum guzzlers 
+vista 
+gay bareback 
+search files, youtube and liveleak 
+most evil 
+most evil 
+brotherhood 
+most evil 
+gay bare 
+gay spiderman 
+american history x 
+placebo 
+axxo 
+ubuntu 
+automated trader 
+automated trader 
+automatedtrader 
+trader magazine 
+trader magazine 
+trader magazine 
+trader and magazine 
+automated trader 
+claymore 
+search files, youtube and liveleak 
+die hard 4 
+teen 
+steeleye span 
+steeleye span 
+vista 
+hitzone 43 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 42 
+hitzone 
+joan of arcadia 
+joan of arcadia 
+joan of arcadia 
+joan of arcadia 
+lost 
+shine mitchell 
+friday night lights 
+groningen 
+transformer 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+
+
+search files, youtube and liveleak 
+python 
\8f�¢é\87\91å\88\9a 
+alicia keys 
+turnstall 
+kd 
+thurn 
+walt disney 
+kurnstall 
+kt turnstall 
+meatloaf 
+vinyl 
+1080 
+1080 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+naruto 
+britney spears 
+tango 
+porno 
+porno 
+elisa 
+hd 
+atlantis hd 
+stargate hd 
+vivaldi 
+atlantis hd 
+hd 
+manuela arcuri 
+vivaldi 
+electric six 
+casino royal 
+pc game 
+csi 
+csi 
+csi 
+c.s.i 
+figa 
+figa 
+porno 
+gta 
+moto gp 
+i soprano 
+porno 
+ac/dc 
+sex 
+settlers the game 
+hentai 
+hentai 
+maura 
+hentai 
+hentai 
+riverdance 
+hentai school 
+robin trower 
+hentai rape 
+immoral sisters 
+immoral sisters 
+hentai 
+hentai rape sex 
+hentai rape 
+hentai sex 
+rape hentai 
+schoolgirl hentai 
+hentai movie 
+immoral sisters 
+schoolgirl sex 
+schoolgirl sex 
+sex 
+hentai 
+the bourne ultimatum 
+the office 
+the bourne ultimatum 
+bangles 
+harry potter 
+busty 
+sigur ros 
+300 spartan workout 
+search files, youtube and liveleak 
+demonoid 
+dummies 
+torrent 
+bittorrent 
+bittorrent 
+dummies 
+torrent 
+torrent 
\8f�¢é\87\91å\88\9a 
\8f�¢é\87\91å\88\9a 
+blade 3 
+blade 3 
+blade 3 kvcd 
+blade 3 avi 
+blade 3 kvcd 
+blade 3 movie 
+blade 3 
+blade trinity 
+24 
+24s06e17 
+24s06e17 
+28 weeks later 
+24 
+24 
+24 
+24 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+24 hdtv 
+mr woodcock 
+weeds 
+weeds season 1 
+mac os x 
+harry potter mac 
+harry.potter mac 
+harry potter 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/-jmgjdesjwy"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/-jmgjdesjwy" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/-jmgjdesjwy"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/-jmgjdesjwy" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+http://br.youtube.com/watch?v=-jmgjdesjwy 
+styx 
+billy squier 
+complete discography 
+greatest hits 
+bourn 
+bionic 
+transformers 
+teen 
+asian 
+elton john 
+santana 
+coldplay 
+coldplay 
+bleach 
+wicked 
+vivid 
+vivid 
+inernet 7 
+bittorrent 
+dummies 
+vso 
+bittorrent 
+kvcd 
+run fat boy 
+matrix 
+prova 
+girl 
+asian 
+mp4 
+.mp4 
+.mp4 
+fossett 
+
+maxed out 
+algebra 
+algebra 
+course 
+course 
+1984 
+pokerface 
+tits 
+fix 
+asdlfkjs 
+musica 
+ipaq 
+you kill me 
+you kill me 
+elephants 
+knocked up 
+the 11th hour 
+fataal 
+the 11th hour 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fossett 
+legal torrents 
+america's next top 
+van der graaf 
+iceclimbing 
+kiwi flight 
+fossett 
+eurosong 
+fantastic four 
+neverwinter 
+bionic woman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mediadefender 
+lexx 
+prison break 
+24 
+morrissey 
+smiths 
+ayo 
+dj sy 
+mylene farmer 
+mylene farmer 
+cellfactor 
+cellfactor 
+cellfactor 
+mylene farmer 
+conan 
+nova 
+bill moyers 
+cum 
+xxx 
+history of disbelief 
+tony rice 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hold you 
+linkin park 
+hold you 
+hold you 
+hold you 
+spanish 
+hold you atb 
+hold you atb 
+hold you atb 
+enemy mine 
+linkin park 
+you tube 
+atb 
+simpson 
+simpson 
+mom 
+l 
+a 
+a 
+lily 
+pontiac gto 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+robin trower 
+van halen 
+mr. brooks 
+mr. brooks 
+eastern promises 
+search files, youtube and liveleak 
+lilly thai 
+gto 
+metalocalypse 
+tony moser 
+kraftwerk 
+milton ericksson 
+milton erickson 
+hypnos 
+search files, youtube and liveleak 
+diner spinner 
+the 11th hour 
+half life 
+joe rogan 
+de curtis 
+medioevo 
+ita 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+stanlio 
+powerthirst 
+powerthirst 
+ramazzotti 
+grignani 
+war 
+grignani 
+grignani 
+gianluca grignani 
+venditti 
+ramazzotti 
+acdc 
+acdc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+gianluca grignani 
+gianluca grignani 
+to love somebod 
+to love somebody 
+gianluca grignani 
+3gp hard 
+3gp xxx 
+gianluca grignani 
+gianni celeste 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+gianni togni 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+acrobat 
+kenner 
+kinzie kenner 
+photo edit 
+photoshop 
+rape bride 
+rape 
+rape bride 
+bestman bride 
+bride 
+incest 
+incest drug 
+incest rape 
+school days raw 
+incest 
+flag raw 
+rape 
+search files, youtube and liveleak 
+rape 
+totò 
+veritas backup 
+rape 
+rape 
+jennifer lopez 
+britney rears 
+britney spears 
+naruto episodes 
+search files, youtube and liveleak 
+policy 
+policy 
+the policy 
+de filippo 
+borat 
+eureka 
+age of empire 
+bionic woman 
+asian 
+tia tanaka 
+blowjob 
+asian blowjob 
+blowjob 
+jap 
+eragon 
+jap 
+jap 
+rape 
+public 
+sex 
+sex 
+sex 
+porn 
+amateur 
+flag 
+gils 
+girls 
+girls 
+girls 
+girls 
+sheen and rockets 
+sheena and rockets 
+sheena 
+hentai 
+sex tape 
+sims 2 
+photoshop 
+lucky star 
+lucky star 
+adobe cs3 
+zetsubou 
+lucky star 
+ultraman mebius 
+ladron que roba a ladron 
+andres lopez 
+house 
+purcell 
+russian girls 
+rocco 
+pick up artist 
+lucky star 
+lucky star raw 
+zetsubou 
+zetsubou sensei 
+debian 
+debian etch 
+debian iso 
+powerpc linux 
+stalin 
+silvia 
+silvia 
+star club 
+rebecca 
+anarchy 
+anarchy 
+blowjob 
+burst city 
+teacher 
+russia 
+silvia saint 
+hellsing 
+hellsing 
+hellsing raw 
+little april 
+little april 
+teen 
+wet pussy 
+vmware 
+knoppix 
+pc88 
+ayayo 
+ayayo 
+negima 
+negima 
+luciano pavarotti 
+nerima 
+chechen 
+resident evil 
+wet pussy 
+corel 
+jessica rizzoi 
+jessica rizzoi 
+jessica rizzo 
+jessica rizzo 
+jessica rizzo 
+porno 
+helvis 
+elvis presley. 
+elvis presley 
+linux 
+free energy 
+resident evil 
+codegear 
+photoshop 
+killing 
+spears 
+aha 
+dance hall days 
+linux 
+titans 
+bolin 
+underage 
+iphone 
+teen 
+teen cum 
+teen cum 
+bolin 
+moray 
+moray valve 
+moray valve 
+freaks of nature 
+madonna 
+madonna 
+jesus christ super star 
+jesus christ super star 
+jesus christ super star 
+traveling wilburys 
+the traveling wilburys 
+the travelling wilburys 
+the travelling wilburys 
+search files, youtube and liveleak 
+south park 
+mickey mouse 
+search files, youtube and liveleak 
+stargate sg1 
+stargate sg1 
+stargate sg1 
+stargate sg1 
+black cock 
+avast 4.7 professional 
+avast 4.7 professional 
+avast 4 
+eastern promises 
+desi 
+leviation 
+search files, youtube and liveleak 
+
+animal 
+ana ivanovic 
+tesla coil 
+induction coil 
+dream theater 
+rainbow 
+kicking coil 
+hookup wire tesla 
+eros ramazzotti 
+mice parade 
+blood brothers 
+blood brothers 
+blood 
+the office 
+across the universe 
+del amitri 
+madonna 
+madonna 
+erasure 
+erasure 
+erasure 
+erasure 
+joni mitchell 
+erasure 
+dexter 
+bbw 
+è¨ï¿½Â\97æ\88\91å\97\8e 
+james bond 
+james bond 
+james bond 
+james bond 
+piratebay 
+1408 
+ubuntu 
+jenna 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+tears for fears-mad world 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+napoli 
+napoli 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tori & dean 
+tori & dean 
+adobe 
+matlab 2007a 
+matlab 2007a 
+search files, youtube and liveleak 
+nadine 
+ad-aware 
+avg 
+scarlett 
+ellen 
+ellen 
+brass 
+tinto brass 
+grace 
+tirol 
+tirol 
+britney 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pc game 
+tiziano ferro 
+xxx 
+dunstan 
+fifa 
+fifa psp 
+psp fifa 08 
+moric 
+as marcianas 
+as marcianas 
+as marcianas 
+zeze di camargo e luciano 
+niplpe 
+nipple 
+heroes ita 
+heroes ita 
+tiesto 
+ita 
+a94ebd82a188beeac84272cc6e44762f5b3e7292 
+http://inferno.demonoid.com:3389/announce 
+indesign cs2 mac 
+medio 
+adobe indesign 
+adobe indisign 
+clapton 
+underage 
+300 
+chal 
+mountains of makkah 
+trailers 
+trailers 
+patner 
+war 
+lula 
+battle 
+tycoon 
+medieval 
+age of 
+vista 
+paganini 
+paganini 
+300 
+wagner 
+zuppy 
+. 
+ita 
+333 
+battle 
+battle 
+ling 
+king 
+corso 
+corso 
+corso 
+sail 
+ubuntu 
+300 
+wi-fi 
+wi 
+print 
+rise 
+300 
+knight 
+lula 
+napoletana 
+trans 
+grey 
+battle 
+star wars 
+ita 
+transformers 
+miss potter 
+transformers 
+gatos que hablan 
+blazing angels 2 
+tits 
+jan smit 
+f16 
+placebo 
+eragon 
+simpsons 
+resident evil 
+resident evil german 
+resident evil 3 
+get high 
+männer wie wir 
+proxyway 
+avg antispyware 
+fanfare 
+fanfare 
+haanstra 
+madonna 
+novarip 
+vibrazioni 
+vibrazioni 
+madonna 
+blazing angels 2 
+collin mcrea 
+collin mcrea dirt 
+colin mc rea dirt 
+dtm race driver 3 
+come and get it 
+2pac 
+2pac.mp3 
+changes.mp3 
+changes.mp3 
+transformes 
+transformers 
+oceans 13 
+shrek 
+halo3 
+halo 3 
+games 
+test 
+flyboys 
+flyboys 
+flyboys 
+deutsch spanisch kurs 
+deutsch spanisch 
+spanisch 
+501 verbs 
+501 
+vista 
+spanish verbs 
+travler 
+traveler 
+501 spanish verbs 
+winrar 
+hamilton 
+heroes 
+trans 
+no results 
+notpresentonanysystemfile 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666 
+checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666
666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch1111111111111111111111111111111112222222222222222222222222222222222222222333333333333333333333333333333333333333344444444444444444444444444444555555555555555555555555555666666666666666666666666666
66666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666checking_the_maximum_string_length_in_the_remote_search_feature_and_if_it_will_crasch111111111111111111111111111111111222222222222222222222222222222222222222233333333333333333333333333333333333333334444444444444444444444444444455555555555555555555555555566666666666666666666666666666666 
+alive 
+xxx 
+big black 
+big black 
+kean 
+katja kean 
+beethoven gilels 
+katja kean 
+search files, youtube and liveleak 
+young 
+hamilton 
+david hamilton 
+parker posey 
+1000 
+gay boy 
+milf 
+adobe indesing 
+adobe indising 
+dreamweaver cs3 
+dreamweaver cs3 
+adobe 
+esc 1990 
+terminator 
+terminator 
+blond 
+open source 
+finger 
+masturbate 
+esc 1989 
+database 
+adobe 
+slutty hot 
+adobe indesing 
+adobe indesing 
+adobe indesing 
+slutty 
+black 
+eurowings 
+adobe cs3 
+redacted 
+paprika 
+animal sex 
+horse 
+shemale 
+pthc 
+horse dick 
+wife 
+hairy 
+hairy 
+terry tate 
+search all files 
+terry tate 
+little children 
+kusturika 
+mr brooks 
+click 
+click 
+click 
+non siamo soli 
+ihra drag racing 
+bella menclova 
+fados carlos saura 
+search files, youtube and liveleak 
+nelly furtado 
+nelly furtado 
+little miss sunshine 
+justim timberland 
+justim timberland 
+justim timberland 
+pimsleur 
+woodwork 
+bob baumeister 
+search files, youtube and liveleak 
+ju 
+ju 
+ju 
+plaquinha magica 
+plaquinha magica 
+heleem 
+heleem 
+filmes 
+filmes 
+sex 
+deja.vu 
+deja 
+die siedler vi 
+gina 
+die siedler vi 
+sopranos 
+hoi 
+deja 
+public nudity 
+* 
+fishing 
+bind 9 
+bind9 
+o'reilly 
+windowblinds 
+corel paintshop pro 
+norton internet security 
+big tits 
+vetorizar corel 
+big tits 
+renato zero 
+renato zero 
+.avi 
+big 
+take that 
+vetorizar corel 12 
+vetorizar 
+heros ita 
+heros i 
+heros 
+heros ita 
+robbie 
+james blunt 
+high school musical 2 
+james blunt 1973 
+corel 12 
+lafee 
+pink floyd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+corel 12 vetor 
+vetor 
+video 200 
+video 2000 
+les 
+ls 
+heros ita 
+visual 
+visual studio 
+book 
+audio 
+audio 
+audio 
+audio 
+audio 
+audio 
+audio 
+audio 
+stephen king 
+noam chomsky 
+politic 
+korn 
+britney 
+nadine 
+one three hill 
+amsterdam 
+snd-webcam.monitor.3.48 
+webcam.monitor.3.48 
+webcam.monitor.3.48 
+webcam 
+webcam 
+lindsay lohan 
+lohan 
+lohan 
+3js 
+ilahi 
+3j's 
+het nieuws 
+midnight club 2 
+midnight club 2 
+midnight club 2 
+itil 
+smallville 
+itil 
+itil 
+smallville 
+smallville full 
+amara slide shows builder 
+amara slide shows builder 
+amara slide shows builder 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+thompson-necu izdat ja 
+thompson-necu izdat ja 
+thompson-necu izdat ja 
+thompson 
+necu izdat ja 
+necu izdat ja 
+thompson-necu izdat ja 
+roff 
+roff 
+nero 
+ubuntu 
+desperate hosewive 
+desperate housewive 
+desperate housewive s3 fr 
+aqu 
+desperate housewive s3 
+hits 
+hits 
+naruto fr 
+naruto 
+narut 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+linux xp 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+linux 
+video 
+video xxx 
+video xxx 
+7.10 final 
+7.10 final 
+xxx 
+7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+gusty 7.10 final 
+xxx 
+gusty 7.10 
+parol pou ri 
+parol pou 
+parole pou 
+morne a l'eau 
+guadeloupe 
+search files, youtube and liveleak 
+lv 
+black 
+what a feeling 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search all files 
+search all files 
+300 
+die hard 4 
+ratatouille german 
+binic woman 
+bionic woman 
+paris 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+journeyman 
+pirates 
+pirates worlds 
+reason 
+world in conflict 
+new 
+nokia 
+converter 
+ulead 
+u-lead 
+[2007] 
+popcap 
+nl 
+prison break 3x01 
+bourne 
+fat women 
+fat womens 
+fat 
+search files, youtube and liveleak 
+ana reverte 
+dvd5 
+dvd5 ita 
+ita dvd5 
+nude 
+infernal affairs 
+tristar 
+eva longoria home video hot milf 
+eva longoria 
+surf's up 
+prison break 
+i simpson - il film 
+search files, youtube and liveleak 
+ben amin 
+ben amin 
+ben amin 
+sherine 
+anna vissi 
+sam asadi 
+lilian haddad 
+katia abbas 
+may matta 
+warwa 
+najwa sultan 
+najwa 
+chevel 
+mazicana 
+boys 
+oscar athie 
+oscar athie 
+oscar athie 
+pasteles verde 
+los pasteles verde 
+los iracundos 
+sex 
+santo & yohnny 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+medieval 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+paulina rubio 
+paulina rubio 
+paulina rubio 
+thalia 
+rihanna 
+rihanna 
+rihanna 
+rihanna 
+rihanna 
+topless 
+naked 
+cumshot 
+cumshot 
+cumshot 
+stargate atlantis 
+stargate atlantis ita 
+stargate atlantis ita 
+stargate atlantis ita 
+stargate atlantis 
+ghost 
+fantasmi 
+fantascienza 
+crisys 
+crysis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+halo 2 
+search files, youtube and liveleak 
+suriname 
+ita 
+lamu 
+sport 
+live 
+pompino 
+tv 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+suriname 
+suriname 
+suriname 
+ita 
+search files, youtube and liveleak 
+live sport 
+live tv 
+pornography 
+dennis p 
+hand job 
+chuck und larry 
+pornography 
+porno 
+rotterdam 
+suriname 
+tribler 4.1.4 
+tribler 
+tribler 
+tribler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lamu 
+superbad deutsch 
+superbad german 
+superbad 
+murcof 
+murcof 
+zero 
+murcof 
+superbad ger 
+ascensore per l'inferno 
+tori amos 
+mauro astolfi 
+mauro astolfi 
+dr house 4x 
+aap 
+aap 
+aap 
+die 
+die 
+eros 
+eros xxx 
+eros 
+die 
+video erotici 
+erotismo 
+die 
+hustler 
+xxx 
+pthc 
+preteen 
+kinderficker 
+kinderficker 
+könig der wellen 
+hindi 
+johnny gaddar 
+johnny gaddar 
+sex 
+die 
+antiarp4.3.1 
+die 
+antiarp4.3.1 
+antiarp 
+anna karina 
+hallo jelle 
+poetry 
+spoken words 
+spoken word 
+audio book 
+couple 
+hardcore 
+black 
+group 
+group 
+transfoermer 
+transformers 
+group 
+simpsons 
+la ink 
+linux 
+la ink 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+la ink 
+die 
+die 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+hallo jelle 
+dashboard 
+dashboard 
+kiss 
+kiss 
+kiss 
+kiss 
+all 
+kiss 
+kingdom 
+pinnacle studio 
+teen 
+cum 
+southpark 
+southpark 
+girlfriend 
+girlfriend 
+ex 
+live leak 
+coelers 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+tozzi masini 
+horror 2007 
+horror 2007 
+horror 2007 
+horror 2007 
+horror 2007 
+horror 2007 divx 
+wraak 
+truhutje 
+tietjes 
+boobs 
+lotte 
+tietjes 
+wraak 
+divx horror 2007 
+voyeur 
+divx horror 
+divx ita horror 
+latex 
+die 
+latex 
+porno 
+svr 2008 
+alles is liefde 
+lucha libre 
+lucha libre 
+lucha libre 
+lucha libre 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+smackdown vs raw 2006 
+latex 
+latex 
+smackdown vs raw 2008 
+search files, youtube and liveleak 
+dvd 
+close combat 
+kiss 
+rubber 
+kiss 
+wwe 
+wwe smackdown vs raw 2007 
+wwe smackdown vs raw 2008 
+ku 
+kut 
+slikken 
+sperm 
+teen 
+wwe smackdown here comes the pain 
+amateur xxx 
+die 
+transformer 
+transformer 
+lost 
+fetish 
+lyx 
+adobe indesign 
+adobe indisign 
+adobe indisign 
+game 
+game 
+tombraider 
+commando 
+dreamwreaver cs3 
+dreamwreaver 
+dreamwreaver 
+adobe 
+serj tankian 
+to feggaraki 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stardust 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+daron malakian 
+serj tankian 
+stardust 
+mika 
+mika 
+akon 
+serj tankian 
+die 
+to feggaraki 
+californication 
+to feggaraki 
+halo 2 
+halo 2 
+greece 
+desparate housewives 
+transformer 
+school 
+mango 
+to feggaraki 
+mango 
+mango africa 
+to feggaraki 
+tiziano ferro 
+adaware 
+nino d angelo 
+harry potter 
+senza giacca e cravatta 
+gemelli diversi 
+insieme 
+dj francesco 
+o'reilly 
+o'reilly 
+o'reilly 
+o'reilly 
+salta 
+gigi d alessio 
+bob seger 
+dove sei 
+gigi finizio 
+bob seger 
+bob seger 
+wwe 
+ninananna 
+ni na nanna 
+alabama 
+alabama 
+alabama 
+alabama 
+office 2003 br 
+battlestar galactica 
+ancilia 
+cable guy 
+aero flux 
+surf up 
+ww2 
+sex4idiots 
+fenderù 
+fender 
+the crow 
+hot action cofee 
+counter strike 
+scarface 
+here comes the pain 
+jackass the game 
+smackdown just bring it 
+smackdown vs raw pc 
+fever for the flava 
+stuntman ignition 
+stuntman ignition game 
+erot 
+here comes the pain game 
+smackdown vs raw 2008 game 
+pro evolution soccer 2008 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+she male 
+rise and fall 
+pro evolution soccer 2008 game 
+hi dear delaila 
+hi dear delaila 
+sex 
+dear delaila 
+education 
+dear 
+dear 
+dear 
+dear 
+dear mr president 
+dear mr president 
+fetish 
+dear delaila 
+dear delilah 
+dear delilah 
+there delilah 
+hey there delilah 
+hey dear delilah 
+scooter 
+special d 
+old harrys game 
+cum drinking 
+clips 
+soriety 
+soriety 
+hazing 
+hazing 
+paris hilton 
+little children 
+eyes wide shot 
+search files, youtube and liveleak 
+meeuwis 
+fogerty 
+sting 
+sting 
+sting 
+sting 
+sting 
+sting 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tull 
+tull 
+tull 
+tull 
+vicky love 
+vicky love 
+vicky love 
+windows 
+eminem 
+a 
+sting 
+dog 
+the unit 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+negramaro 
+negramaro 
+sig oppa 
+big oppa 
+dj 
+fear and loathing in las vegas 
+fear and loathing in las vegas spanish 
+fear and loathing in las vegas espanol 
+fear and loathing in las vegas 
+the red violon 
+the red violin 
+the red violin 
+the red violin 
+the red violin 
+search files, youtube and liveleak 
+le violon rouge 
+dirty dancing 2 
+windows 
+kama sutra: a tale of love 
+the motorcycle diaries 
+search all files 
+the motorcycle diaries 
+the life is beautiful 
+v for vendetta 
+spy game 
+casablanca 
+pontello 
+faust 
+dildos 
+candy 
+bonn 
+pressing catch 
+cidade de deus 
+taxi driver 
+big fish 
+pedofilia 
+amores perros 
+memoirs of a geisha 
+xxx 
+osx 
+la cage aux folles 
+witch hunter 
+malua 
+elvis 
+elvis 
+kinky 
+frans bouwer 
+adobe 
+adobe bridge keygen 
+adobe bridge 
+adobe bridge 
+adobe bridge 
+tv west 
+ass collector 
+search files, youtube and liveleak 
+tv west 
+yes-r 
+yes-r 
+aziz el berkani 
+search files, youtube and liveleak 
+sean kingston 
+latex 
+cable guy 
+battlestar galactica 
+windows 2003 
+ecstasy rising 
+bianca buchamp 
+windows 2003 r2 
+ancilia tilia 
+lolita 
+lost s04 
+sex 
+russian 
+lolita 
+gigi finizio 
+metallica 
+gigi 
+dire straits 
+dire straits 
+ass 
+fist 
+pre-teen 
+preteen 
+childsex 
+teensex 
+interacial 
+interracial 
+interracial 
+surf's up: i re delle onde 
+dfx 
+gay sex wrestgay sexling 
+gay sex 
+gay sex 
+muscle wrestling 
+booty 
+muscle wrestling 
+muscle wrestling 
+transformers 
+muscle wrestling 
+gay hump 
+southpark 
+southpark 11 
+la ink 
+24 hours 
+24 hours 
+adamo 
+adamo 
+francais 
+fr 
+babyface 
+babyface 
+slightly stoopid 
+pinnacle studio 11 ultimate 
+spiderman 3 
+until death van damme 
+until death 
+air force two 
+xxx 
+xxx asian 
+dvd fab platinum 
+xxx asian teen 
+xxxjapanteen 
+xxx japan teen 
+russian incest 
+nina harley 
+nina hartley 
+sensual 
+russian 
+nudist 
+russian xxx 
+russian incest 
+asian teen 
+nicky 
+zett 
+gurren 
+.mpg 
+mpg 
+mpg 
+coitus 
+sexy 
+pthc 
+incest 
+r@ygold 
+interracial sex 
+interracial 
+rush hour 
+caramel 
+pthc 
+http://www.mybittorrent.com/info/968638/ 
+rush hour 
+kinder 
+kinderporn 
+svay pak 
+cambodia 
+passwordlist 
+lolita 
+mpg 
+californiacation 
+rail simulator 
+emma shapplin 
+learning 
+rush hour 
+learning 
+barbie 
+ita i soliti ignoti 
+i soliti ignoti 
+learning 
+lara fabian 
+lara fabian 
+learning 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+spoken english 
+il gladiatore 
+web 
+johnny 
+how i met your mother 
+how i met your mother s3e1 
+how i met your mother 301 
+how i met your mother 301 
+how i met your mother 
+music 
+wintersun 
+penn & teller 
+
+johnny mneumonic 
+white girl dancing 
+search files, youtube and liveleak 
+esx 
+tbs 
+tbs 
+diamonds are forever 
+james bond 
+ps2 
+search files, youtube and liveleak 
+dutch 
+fantastic four 
+windows 
+ocean 13 
+ubuntu 
+kaspersky 
+autoccad 
+autocad 
+bokito 
+ls 
+windows 
+dj 
+dalla 
+video angels 
+james bond jr 
+dragon ball 
+dragon ball digitally remastered 
+dragon ball remastered 
+dragon ball remastered 
+dragon ball remastered 
+james bond 
+never say never again 
+psp 
+james bond .cbr 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+.cbr 
+ita 
+cbr 
+bond cbr 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cbr 
+007 cbr 
+spy cbr 
+xbox360 
+free running 
+free running 
+free running 
+free running 
+xbox360 
+irish 
+big heat 
+big booty white girls 
+warcraft 
+big booty white girls 
+diamonds are forever 
+rotterdam 
+rotterdam 
+coelers 
+the kingdom 
+creampie 
+breaking benjamin 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tvu:\\53060 
+beata dubasova 
+karmen 
+dance 
+pavarotti 
+search files, youtube and liveleak 
+aloha shooby-doo 
+hight school musical 2 
+hight school musical 2 
+hight school musical 2 
+bear vs shark 
+bear vs shark 
+terror hawk 
+terrorhawk 
+bear vs shark 
+beck 
+beck 
+porno 
+xxx beast 
+xxx beast 
+beastiality 
+beastiality 
+beastiality 
+beastiality 
+incest 
+achi 
+a chi 
+sidera 
+sidera 
+ridera 
+diavolo biondo 
+diavolo biondo 
+diavolo biondo 
+dite a laura che l amo 
+dite a laura che l amo 
+boobs 
+di te a laura che l amo 
+dirte laura che l amo 
+laura che l amo 
+franco 1 franco4 
+ho scrito tamo sulla sabia 
+ho scrito tamo sulla sabia 
+ho scritto tamo sulla sabia 
+ho scritto tamo sulla sabbia 
+ho scritto t amo sulla sabbia 
+tanta voglia di lei 
+gino d angelo 
+nino d angelo 
+senza giacca e cravatta 
+i ricordi dell cuore 
+i ricordi dell cuore 
+ricordi dell cuore 
+amedeo minghi 
+i ricordi del cuore 
+ricordi del cuore 
+clowns und helden 
+und helden 
+ndw 
+neue deutsche welle 
+hackers 
+silicon valley 
+das modul 
+norton 
+caligula 
+caligula 
+caligola 
+the silence of the lambs 
+prison break flight 
+crocodile dundee 
+dangerdoom 
+danger doom 
+prison break go 
+prison break 21 
+crocodile dundee 
+g-unit 
+g-unit 
+g-unit 
+moby 
+orkester sound bank 
+the office 
+moby 
+asia 
+moby 
+orkester sound bank 
+moby 
+tenacious d pick 
+tenacious d pick 
+tenacious d pick 
+g-unit 
+tusenee 
+anita blonde 
+porno 
+sex 
+spiderman 3 
+kitchen nightmares 
+mythbusters 
+gpethes erben 
+goethes erben 
+goethes erben 
+asia 
+heatha hunter 
+heather hunter 
+teen 
+search files, youtube and liveleak 
+fifa 
+moby 
+vacanta mare 
+beastiality 
+search files, youtube and liveleak 
+private 
+search files, youtube and liveleak 
+you tube 
+ratat 
+il buio nell'anima 
+300 
+300 ita 
+bourne ultimatum 
+dance 
+ebony feet 
+reason 
+muschi 
+muschi 
+fotze 
+sex 
+lesbian 
+mega mindy 
+shrek 
+sports 
+ps3 
+playstation 
+naughty 
+paris letterman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tenacious d pick 
+
+letterman paris 
+pick of destiny 
+nero 
+frans bauer 
+andre hazes 
+ita 
+ita 
+search files, youtube and liveleak 
+forro 
+sex 
+mpg 
+dmg 
+andre hazes 
+andre hazes 
+andre hazes 
+andre hazes 
+andre hazes 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+muziek 
+software 
+software 
+audio 
+search files, youtube and liveleak 
+films 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+the seekers 
+seekers 
+the seekers 
+arabic 
+arabic music 
+shakira 
+<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/niua9kozgra"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/niua9kozgra" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object> 
+caribean 
+boy 
+boy nude 
+boy nude 
+boy nude 
+young boy 
+this boy's life 
+spanish 
+autos locos 
+8 
+shemale fuck guy 
+pedo boy 
+the simpsons 
+friends 
+straightheads 
+game plan 
+system of a down 
+wax 
+wax korea 
+wax korean 
+radio head 
+papa roach 
+papa roach 
+ratatouille 
+indonesia sex 
+1200 
+til lies do us part 
+southpark 
+tarzan x 
+seether 
+last legion 
+scientist 
+bjork 
+lost so3e17 
+ac/dc 
+the office 
+the office the injury 
+http://torrents.thepiratebay.org/3725005/top_1000_pop_hits_of_the_80s_(4.32gb)__.3725005.tpb.torrent 
+search files, youtube and liveleak 
+dexter 
+threes company 
+the prisoner 
+teens 
+wil ferell 
+sexy 
+barbara dennerlein 
+russian incest 
+pablo neruda 
+poesie 
+hungarian 
+linkinpark 
+linkin park 
+uverworld 
+uverworld 
+uverworld 
+stanlio 
+teen 
+stanlio 
+stanlio 
+teen in school 
+snowboard 
+naruto 
+naruto folge 1 
+cage fighting fury 
+midi 
+ps2 
+asian 
+last legion 
+the brave one dvdrip 
+last legion 
+rape 
+saw dvdrip 
+lilly the world 
+netball 
+sisster 
+sister 
+povia 
+superman return 
+simson 
+simson il film 
+zucchero 
+henti 
+hentai 
+gynslip 
+gymslip 
+forced th 
+forced tg 
+forced 
+femdom 
+swollow 
+swollow 
+swallow 
+nappy 
+transformed 
+pool change 
+pool 
+pool girl 
+pigtails 
+panties 
+panties 
+shopping gitl 
+shopping girl 
+shopping gitl 
+shopping girl 
+shopping girl 
+skirt pink 
+melua 
+anime feet xxx 
+kiss feet anime 
+http://www.fulldls.com/download-others-31710-test_drive_unlimited_pal_multi5_ps2dvdkudos.torrent 
+http://www.fulldls.com/download-others-31710-test_drive_unlimited_pal_multi5_ps2dvdkudos.torrent 
+chet baker 
+snuffx 
+snuffx xxx 
+vagina torture 
+mfx feet 
+xxx extreme 
+femdom 
+ptff 
+child feet 
+feet mature 
+feet soles 
+mikayla feet 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+padova 
+search files, youtube and liveleak 
+sex 
+squrting 
+squrting xxx 
+squrti xxx 
+squirt xxx 
+squirt xxx you tube 
+pumping pussy 
+pump pussy 
+layla 
+stretch 
+speculum 
+speculum 
+votze 
+muschi 
+pussy 
+open 
+mcdonalds 
+yanni 
+jesse cook 
+axxo 
+dvdrip 
+dvdrip 2007 
+scrubs 
+aerosmith 
+babyshambles 
+flamengo 
+search files, youtube and liveleak 
+linkin park 
+live in texas 
+album live in texas 
+album live in texas 
+album live in texas 
+knocked up 
+album live in texas linkin park 
+live in texas linkin park 
+linkin park 
+linkin park 
+linkin park 
+timbaland, timberlake, 50 cent 
+timbaland 
+timberland 
+timberlake 
+bondage 
+ponygirl 
+spears gimme 
+spears gimme gimme 
+javascript:deliverjwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny');search files, youtube and liveleak 
+jwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny') 
+jwlxxo3uve2bjosnksmfbyrqvfe3pfny('jwlxxo3uve2bjosnksmfbyrqvfe3pfny') 
+pussycat 
+spacehog 
+pussycat dolls 
+tatu 
+sex 
+halo 
+diy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stardust 
+precious 
+etheridge 
+alice bondage 
+ballbusting 
+leash 
+puppyplay 
+ponyplay 
+speaking 
+billy ocean ultimate collection 
+resturant empire 
+empire 
+afromania 
+gurren 
+php 
+photoshop 
+batman 
+tarzan 
+battle athletes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+daiundoukai 
+* 
+* 
+moana 
+drunk 
+sex 
+night cam 
+night cam 
+spy cam 
+spacehog 
+american gang 
+new 
+corrupção 
+corrupção 
+earth and fire 
+mix 
+steel 
+steel 
+steel 
+highschool 
+rounders 
+frankie goes 
+veronica 
+maksim 
+radio veronica 
+maksim 
+maksim 
+maksim 
+radio caroline 
+resident evil 
+symbian 
+crash 
+ghostrider 
+stunt 
+command 
+swat 
+disney 
+fatherland 
+lightness 
+disney 
+llead 
+ulead 
+code pack 
+upskirt 
+sister 
+naughty america 
+milf 
+granny 
+mature 
+older 
+search files, youtube and liveleak 
+mother 
+in law 
+spycam 
+sister 
+toilet 
+ringtone 
+realtone 
+handyman 
+battle 
+tomtom 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+francuski 
+regsupreme 
+swingers 
+swinger 
+hip hop hood 
+15 
+mature 
+mature 
+incest 
+incest 
+sharon stone 
+sharon stone 
+winxp black 
+black xp 
+rihanna 
+noriko sakai 
+ecdh 
+elliptic 
+elliptic curve cryptology 
+search all files 
+prive 
+pink 
+voyeur 
+search files, youtube and liveleak 
+change room 
+lockeroom 
+hellman 
+lockeroom 
+changingroom 
+showeroom 
+showe 
+shower 
+change room 
+change room 
+change room 
+hidden movies 
+hidden movies 
+handjobs 
+handjobs 
+handjobs 
+the kingdom 
+the brave one 
+browulf 
+the dark knight 
+30 days of night 
+dragon wars 
+hitman 
+the invasion 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+handjobs 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+we're getting nowhere 
+imagine me & you 
+penus 
+planet 
+pissen 
+kind 
+boys 
+pink handjobs 
+fucking 
+imagine me & you 
+fucking machines 
+device bondage 
+sex submission 
+hogties 
+hogtied 
+imagine me & you 
+jumpstyle 
+search files, youtube and liveleak 
+office 2007 
+microsoft 
+vuze 
+feed 
+sybian 
+supermoto 
+foto 
+a 
+a 
+a 
+a 
+die hard 4 
+divx 
+divx 
+jetzt oder nie jodie 
+jetzt oder nie jodie 
+jetzt oder nie 
+encarta 
+lost 
+u2 
+romanzo criminale 
+mirc 
+ts 
+shemale 
+incest 
+asian 
+tiesto 
+tiesto 
+å°Ã¦¾¤å\9c\93 
+tiesto 
+radiohead 
+oops 
+tomtom 
+tottenham 
+mature black 
+mature black 
+mature 
+celeb 
+appleby 
+portugal 
+orgasm the faces of estacy 
+davideo 
+vivid 
+lolo 
+emily18 
+snapfire 
+snapfire 
+picasa 
+de andre 
+windows xp 
+windows vista 
+spiderman 3 
+spiderman 3 nl 
+criminal minds 
+i'm a boy anorexic 
+anorexic 
+boy anorexic 
+when a man loves a woman 
+reason 
+dying to dance 
+gba rom ita 
+gba rom 
+i'm a boy anorexic 
+pirate radio 
+minotaur 
+squ 
+spiderman 
+annie lenox 
+annie lennox 
+hd dvd 
+annie lennox 
+pic 
+720p 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sport 
+ubuntu 
+hd dvd movies 
+wrestling 
+catfight 
+hd dvd 
+catfight 
+batman begins hd 
+slipknot 
+casting 
+darering 
+poker 
+sexgame 
+assparade 
+sexgame 
+mixed fight 
+assparade 
+dww 
+fkk 
+casting 
+ballet 
+nipple slip 
+spong 
+five finger 
+five finger 
+five finger 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+five finger death punch 
+speed up my pc 
+shrek 
+black ps2 
+atreyu 
+atreyu 
+kill bill ru 
+as i lay dying 
+flybar 
+lord of the rings 
+shrek 
+kurasawa 
+clubland extreme 
+clubland 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kagemusha 
+shichinin 
+samurai 
+samurai 
+the heartbreak kid 
+tool 
+avatar 
+tool 
+xbox360 
+xbox360 halo 3 
+tool music 
+snowboard 
+prison break season 3 
+robot chicken 
+robot chicken season 3 
+boondock 
+the boondock 
+boondocks cartoon 
+franky tunes 
+family guy 
+pink floyd 
+jan smit als de morgen karaoke 
+jan smit als de morgen 
+password list 
+junior songfestival 2007 
+torrent 2.0 
+roisin murphy 
+airport tycoon 
+happy feet jumpstyle 
+gokkast 
+gokkast 
+gokkast 
+force 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search all files 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+renato zero 
+u2 
+trample 
+face 
+santana 
+trampling 
+trampling 
+trampling 
+search files, youtube and liveleak 
+bon jovi 
+visual basic 6 
+visual basic 6 
+visual basic 6 
+vb6 
+vb6 
+heroes ita 
+heroes 11 ita 
+heroes 11 ita 
+sports 
+pro evo 
+pro evo 08 
+pro evo 08 
+pro evo 08 
+pro evo 
+pee 
+pis 
+666 
+pis 
+ggg 
+thompson 
+thompson 
+xp 
+pee 
+ggg 
+alles 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+harry potter ita 
+adult 
+roboform 
+roboform 
+stardust memories 
+annie hall 
+annie hall 
+piss 
+piss 
+hwk 
+hwk 
+ufs 
+surf's up ita 
+surf's up ita 
+surf's up ita 
+she wants revenge 
+saras soft 
+sarassoft 
+sarassoft 
+sarassoft 
+n-box 
+n-box 
+500 person sex 
+this is forever 
+in rainbows 
+500 person sex 
+dave gahan kingdom 
+csi 4 1 
+csi 4 
+csi 6 
+ita divx 
+dave gahan hourglass 
+divx ita 2007 
+surf's up: i re delle onde 
+halle barry 
+jenna 
+porn 
+mac 
+anal teen 
+korea 
+anal teen 
+world of warcraft 
+elan 
+madona 
+games 
+warcraft 
+firefly 
+amv 
+loli 
+womens wrestling 
+tampa 
+womens pro wrestling 
+brother bear 
+bob and doug 
+search files, youtube and liveleak 
+abby winters 
+abby winters 
+nwwl 
+viv thomas 
+girlfriends films 
+twilight women 
+rsp2psharespy 
+ultimato burner 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+smallville 
+satellite tv 
+smallville 
+fifa 
+fifa 2006 
+ambient 
+winning elleven 
+cable tv 
+tv 
+nero 8 
+chama 
+internet satellite tv player 
+firefly 
+firefly 
+dubai 
+eva green 
+sexo 
+craig david 
+sexo 
+eva green 
+eva green 
+eva green os sonhadores 
+copo de colera 
+la fabbrica illuminata 
+vista 
+vista 
+eva green 
+eva green dreamers 
+lindberg 
+christian lindberg 
+eva green dreamers scene 
+de la nux 
+joseph alessi 
+os 
+p v de la nux 
+mac os 
+nicole graves 
+office 
+office 
+office2007 
+office2007 
+office2007 
+nicole graves 
+nicole graves 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+imagine me & you 
+secret 
+public nudity 
+naked 
+rosenberg 
+topless 
+jimmy rosenberg 
+less than three 
+brawl 
+brawl nintendo 
+streghe 03x06 
+korea 
+diablo 
+planetes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pthc 
+search files, youtube and liveleak 
+ivana 
+pedo 
+grillo 
+francesco e chiara 
+korea 
+weezer 
+placebo 
+prince 
+prince live 
+amateur home 
+amateur home 
+amateur home 
+amateur 
+prince 
+porn] 
+porn 
+metallica 
+metallica 
+porn 
+raped 
+black 
+raven riley 
+nicole graves 
+next door nikki 
+next door nikky 
+next door nikky 
+next door nikki 
+next door 
+kate's playground 
+kate 
+heroes ita 
+tottenham 
+interpol 
+heroes ita 
+dvd ita 2007 
+search files, youtube and liveleak 
+live 
+sexy 
+couple 
+cleevage 
+cleevage 
+cleevage 
+lavasoft 
+pop 
+goes mi heart 
+goes mi heart 
+goes mi heart 
+amateur 
+goes mi heart 
+goes my heart 
+goes my heart 
+led 
+24 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pthc 
+sekt 
+pee 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gimme more 
+alessia merz 
+valeria marini 
+valeria marini 
+ls magazine 
+ls magazine 
+15 y 
+14y 
+14 y 
+web cam teen 
+web cam preteen 
+eva henger 
+ambrus 
+gitarre 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fiest 
+feist 
+feist 
+anal 
+feist 
+jannes thialf 
+jannes thialf 
+smallville 
+smallville 6 ita 
+smallville ita 
+castellammare di stabia 
+castellammare di stabia 
+castellammare di stabia 
+castellammare di stabia 
+night cam 
+castellammare 
+jill 
+jaime pressly 
+castellammare 
+hidden camera 
+the unit 
+metallica one 
+click 
+cameltoe 
+cameltoe 
+cameltoe 
+guns n' roses 
+metalica 
+metallica 
+metallica 
+nothing else matters 
+tobias regner 
+i stlii burn 
+i still burn 
+lolita models 
+lolita models 
+13 y 
+eat pre 
+eat pre 
+young 
+young 
+painkiller 
+judas priest 
+dune 
+pairs skating 
+dune 
+amateur 
+do you know 
+homemade 
+kuschelrock 
+over there episode 2:roadblock duty part 4 
+enrique iglesias 
+reign 
+adamski 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias 
+enrique iglesias - love to see you cry 
+enrique iglesias 
+enrique iglesias - love to see you cry 
+love to see you cry 
+love to see you cry 
+dune 
+dune 
+evita 
+op zoek naar evita 
+feist 
+ist 
+fist 
+fist 
+feist 
+xxx 
+xxx 
+xxx 
+13 
+eminem 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rapala 
+rapala pro fishing 
+fish 
+simtractor 
+john deere 
+fishing 
+fish 
+search files, youtube and liveleak 
+babel 
+a mighty heart 
+feist 
+squirt 
+hot party 
+mmf 
+mff 
+mff 
+laura pausini 
+laura pausini 
+snatch 
+fight club 
+a mighty heart 
+anal 
+breast 
+pocketwijzer 2007 
+rocco 
+breast 
+breast 
+amy winehouse 
+sugarbabes 
+amy winehouse 
+george michael 
+haze 
+samatha 
+samantha 
+silvia 
+spanking 
+99bb 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+fsi 
+barbie sexy 
+barbie sexy 
+nomadi 
+nomadi 
+sugarbabes 
+totò 
+totò 
+xfiles 
+toto' 
+totò 
+totò 
+mollo tutto 
+harry potter 
+harry potter ita 
+sylvie vartan ma liberte 
+sylvie vartan ma liberté 
+300 
+300 iso 
+300 
+masterbation 
+sugarbabes 
+sugarbabes 
+female masterbation 
+snow patrol - run 
+bimaxx 
+bisexual 
+snow patrol - run 
+blur 
+in the hands of god 
+daniel bedingfild 
+a 
+uitzending gemist 
+m 
+billy talent 
+ice baby 
+top20 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tuning car 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+feist 
+fiat punto 
+punto tuning 
+prison break 
+sex 
+pop goes my heart 
+porno 
+drifter 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+twsited sisterç 
+twsited sister 
+twsted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+divx ita horror 
+divx ita horror 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+twisted sister 
+divx ita rush hour 
+search files, youtube and liveleak 
+long feet girl 
+o 
+korn 
+molotov 
+office2007 
+pop goes my heart 
+download molotov 
+download molotov 
+pop goes my heart 
+office2007 
+office2007 
+office2007 
+download molotov 
+download molotov 
+office2007 
+office2007 
+pop goes my heart 
+pop goes my heart 
+microsoft vista 
+microsoft vista 
+microsoft vista 
+microsoft vista 
+large tongue 
+vista 
+vista 
+pop goes my heart 
+chicago 
+biggest xxx 
+dvx ita 
+dvx ita 
+snuff 
+snuffx 
+snuff xxx 
+unfaithful 
+large vagina 
+large pussy 
+nero 
+bebe long tongue 
+bebe long tongue 
+babe long tongue 
+live for speed 
+save ryan 
+search files, youtube and liveleak 
+onvert 
+convert 
+convert 
+babe long tongue 
+shemale 
+big dick 
+shemale 
+shemale 
+squirt 
+dramatic chipmunk 
+model 
+dramatic chipmunk 
+dramatic chipmunk 
+defrag 
+o&o 
+juanita 
+search files, youtube and liveleak 
+lou reed 
+heroes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+britney 
+britney 
+xxx 
+it crowd 
+jenna haze 
+lisa 
+amanda peat 
+betty 
+christy 
+naughty 
+legal 
+barely legal 
+campbell 
+anetta 
+erica 
+erica 
+sunny 
+amy 
+stardust 
+vivid 
+private 
+the invation 
+the invation 
+the invation 
+object 
+search files, youtube and liveleak 
+dr who 
+brozman lumiere 
+brozman 
+brozman 
+lumiere 
+lumiere bro 
+tanita tikaram 
+tanita tikaram 
+tanita tikaram 
+tanita tikaram 
+tanita tikaram 
+prison break s03e04 
+heroes s03e03 
+prison break s03e04 
+anime 
+earth world 
+vmware 
+vmware 6 
+vmware esx 
+sexy 
+sexy girl 
+sexy love 
+sexy love 
+sexy make love 
+make love 
+sex love 
+csi 4 
+gangbang 
+gangbang 
+gangbang 
+gangbang 
+search files, youtube and liveleak 
+gay 
+gay 
+search files, youtube and liveleak 
+nederland 
+hard 
+motor 
+rape 
+hard 
+porno 
+sesso 
+green 
+asia 
+heroes s02e03 
+gateway 
+manuale 
+manuale d'amore 
+manuale d'amore 
+greys anatomie 
+greys anatomie 
+search files, youtube and liveleak 
+live free or die hard 
+dvd 
+dvd ita 
+pussy 
+nirvana 
+tool 
+un impresa da dio 
+sweet tongue 
+sweet long tongu 
+sweet long tongue 
+sweet feet 
+50 cent 
+black label 
+sixx 
+nin 
+nine 
+nine 
+black label 
+nero 
+nero 8 keygen 
+foo fighters 
+german 
+dio 
+exceed 
+britney 
+skat 
+gladiator 
+gladiator 
+fonts 
+search files, youtube and liveleak 
+ita divx 
+pandora tomorrow 
+blonde 
+adobe creative cs3 premium 
+adobe creative cs3 premium italiano 
+adobe creative cs3 premium ita 
+adobe creative cs3 premium 
+adobe creative cs3 premium it 
+adobe creative cs3 premium ita 
+tits 
+nomadi 
+rolling stones 
+pussy 
+phoneix 
+oblivion multiplication bug 
+oblivion multiplication 
+oblivion 
+oblivion 
+oblivion 
+cumshot 
+vagina 
+berkeyley course 
+berkeyley course 
+berkeyley course 
+filerecovery 3.1 
+filerecovery 3.1 
+filerecovery 3.1 
+filerecovery 3.1 
+file recovery 3.1 
+file recovery 
+filerecovery 
+file recovery 
+one piece 
+ita 
+ladies tailor 
+train 
+game pc 
+game pc 
+ita 
+alma chua 
+titanic 
+titanic 
+zoo 
+pc games 
+pc adult games 
+beastiality 
+divx ita enterprise 
+snake 
+dog 
+horse 
+enterprise ita 
+pc games 
+train 
+nepali 
+meluha 
+melua 
+black friday 
+train model 
+virutal 
+virtual 
+pinnacle ultimate 11 
+pinnacle studio 11 
+album 
+vangelis 
+syngress 
+books 
+desi 
+exdesi 
+cherry ghost 
+grey anatomy 
+chateau 
+young harlots 
+preteen 
+animal 
+train crach 
+nepalese cultural evening 
+nepalese cultural evening 
+nepalese cultural evening 
+nepalese cultural evening 
+wah maiya 
+massive r & b 
+massive r & b winter collection 
+massive r & b 
+amy winehouse 
+prison break 
+rape 
+cherry ghost people 
+dylan dog 
+futurama spanish 
+300 
+300 
+shrek terzo 
+futurama sp 
+prison break s03e03 
+complete 
+completa 
+l'esorcista ita 
+madame butterfly 
+l'esorcista ita 
+l'esorcista ita 
+los simpson 
+300 
+300 spa 
+the kingdom 
+death sentence 
+trade 
+xxx 
+dragon naturally speaking serial 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+dragon naturally speaking ital 
+the kingdom 
+dragon naturally speaking ital 
+alles is liefde 
+shoot 'em up 
+rape 
+feet 
+foot 
+tintin en america 
+red dragon - before the silence 
+8mm 
+the agency 
+the agency 
+korn 
+the missing 
+porno 
+300 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+crak magix foto cd e dvd 6 
+2600 
+isazi 
+heroes 
+hacker 
+webcam 
+handjob 
+brasilian 
+brasilian 
+brasilian 
+brazilian 
+harry potter 
+harry potter and the order of the phoenix 
+wwe 
+tna 
+wwe 
+xxx 
+wwe 
+wwe 
+wwe 10 
+match 
+goo goo 
+deadwood 
+xxx 
+shrek 
+pirates of the carribean 
+adobe photoshop 
+fuck 
+sex party 
+sex party 
+parkour 
+parkour 
+johny cash 
+office:mac 
+search files, youtube and liveleak 
+gay 
+film 
+gay dad 
+dad son 
+father figure 
+gay 
+craymo 
+gay 
+resident evil extinction 
+colton ford 
+anal 
+project vector 
+fall out boy 
+gay 
+gay 
+simple plan 
+gay 
+high heels 
+beethoven 
+la figa 
+search files, youtube and liveleak 
+dutch movie 
+animal 
+scott 
+tony scott 
+ulrich 
+southpark 
+scientology 
+prison break s03e01 heb 
+prison break s03e01 
+× ×�×Â\98 
+where the boys 18 
+where the boys 
+styles yamaha 
+anal 
+italy anal 
+italy 
+italy 
+psr 
+transformers 
+transformers 
+psr 8000 
+napoli xxx 
+napoli 
+napoli 
+napoli 
+foggia 
+ragazze 
+sexy girl 
+webcam italy 
+telecamera nascosta 
+star wars kid 
+php 
+php 
+gladiator 
+2pac 
+dr. dre 
+dr.dre 
+dre 
+vno ncw 
+black 
+creampie 
+wii ps3 
+php 
+ghost footage 
+28 weeks later 
+madonna 
+the gossip 
+lost 
+lost sub nl 
+lost sub dutch 
+wii ps3 
+tolle 
+byron katie 
+columbo 
+course of miracles 
+dutch sub 
+dutch sub 
+leg 
+art of noise 
+art of noise 
+mvc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+csi 
+rail road tycom 3 
+csi 
+bassie en 
+rihnna 
+rihanna 
+paris hilton 
+arie en 
+le fate ignoranti 
+le fate ignoranti 
+le fate ignoranti 
+le fate ignoranti 
+naak 
+naakt 
+mindy vega 
+trombone 
+leann rimes 
+barely legal 
+le fate ignoranti 
+star trek 
+er 
+ragazze a beverly hills 
+search files, youtube and liveleak 
+http://www.youtube.com/ 
+eckharrt tolle 
+eckharrt tolle 
+eckhart tolle 
+http://www.youtube.com/ 
+http://www.inbox.lv/ 
+http://www.inbox.lv/ 
+search files, youtube and liveleak 
+china 
+chinesse 
+chineese 
+chinese 
+korea 
+terror 
+thai 
+helo 
+pc games 
+radiohead 
+music 
+jannes 
+macromedia 
+serial 
+snow 
+prison break 
+prison break 304 
+snow 
+saw 
+trombones 
+trombones 
+trombones 
+trombones 
+trombone 
+trombone 
+snow queen 
+snow queen 
+snow 
+top gear 
+wild hogs 
+massive attack 
+audio books 
+lost 3 
+heroes 
+ipod 
+stargate 
+lost episode 14 
+lost episode 14 
+lost season 
+keiser chiefs 
+just jinger 
+big brother 
+mac 
+results 
+lilly 
+mature 
+bangbros 
+game video 
+biggest 
+biggest 
+big 
+ines 
+big tits 
+mein kampf 
+mein kampf 
+verboten 
+verboten 
+banned 
+wwe 
+taxi4 
+footloose 
+milk inc 
+regi 
+regi pinxten 
+milk inc 
+search files, youtube and liveleak 
+toca race 3 
+tokio hotel 
+toca race 3 
+fergie 
+fergie 
+ome henk 
+ome henk 
+preuteleute 
+spanish 
+pc wolfenstein 
+pc wolfenstein 
+pc olimpics 
+pc olympics 
+pc submarine game 
+pc silent hunter 
+pc silent hunter 
+pc submarine 
+pc submarine hunter 
+pc tour off duty 
+umbarella 
+madworld 
+r.kelly 
+queen 
+search files, youtube and liveleak 
+arkadia 
+basso 
+katie melua star 
+search files, youtube and liveleak 
+star wars 
+katie+malua+star 
+katie+malua 
+katiemalua 
+katie malua 
+katie melua 
+katie melua star 
+katie melua 
+porn 
+hamburg 
+hamburg 
+bush 
+sailboot katie 
+fuck 
+crash 
+u pana boga w ogródku 
+alex agnew 
+shemales 
+transsexual 
+sheila 
+sybian 
+hayden 
+hayden panettiere 
+hayden panettiere 
+hayden panettiere 
+jenna jameson 
+jenna jameson 
+amateur 
+shemale 
+hayden panettiere 
+multiagent systems 
+andre van duin 
+duin 
+witte 
+front 242 
+front 242 
+klinik 
+mazÄÂ\81 dvÄÂ\93selÄ«te 
+jordan 
+seu merda 
+thundercats 
+resident evil 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+sex 
+sex 
+sex 
+sex 
+girls 
+young 
+certe notti 
+creepa kid 
+silverhawks 
+dexter 
+dexter s02e02 
+4400 
+"marc collin" 
+scooter wheely on motorway 
+csi 
+gay 
+multiagent systems 
+multiagent systems 
+multiagent systems 
+kamal 
+vista wallpapers 
+ebook 
+aisha 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jade 
+microsoft office 2007 
+agent 
+search files, youtube and liveleak 
+team america 
+james uk group 
+an introduction to multiagent systems 
+madness complete 
+michael wooldridge 
+introduction to multiagent system 
+multiagent systems - a modern approach to distributed artificial intelligence 
+james blunt 
+kate melua 
+quantum leap 
+james morrison 
+pablo montero 
+east west quantum leap 
+east west quantum leap 
+symphonic orchestra gold pro 
+bioshcok 
+bioshock 
+quake wars 
+jordan capri 
+sex tae 
+sex tape 
+preteen 
+dr who 
+pedo 
+chamillioaire 
+jimi hendrix 
+in rainbows 
+barenaked ladies 
+syun aika 
+shun aika 
+jason alexander 
+opus iii 
+new order 
+the simpsons 
+red drawf 
+chemistry 
+house of ass 
+xana 
+bi apple 
+the elegant spanking 
+black glove 
+tristan 
+search files, youtube and liveleak 
+sport 
+joey 
+private 
+search files, youtube and liveleak 
+asian 
+japan lesbian 
+asian lesbian 
+asian 
+porno 
+porno russian 
+russian 
+renato zero 
+renato zero il cielo 
+vanishing point 
+era glaciale 
+who pais the ferryman 
+leonard bernstein norton 
+leonard bernstein 
+leonard bernstein 
+classical 
+insomnia 
+funny' 
+funny 
+knocked up 
+anal 
+sinergy 
+blok 
+het blok 
+het blok net 5 
+nicolle 
+nicolle 
+nicol 
+fifa 08 
+fifa 08 pc games 
+fifa 08 pc game 
+fifa 08 
+fishgame 
+fish game 
+jan 
+jan smit 
+smit 
+bzn 
+bzn 
+pavarotti 
+satellite tv player 
+satellite tv titanium 
+satellite tv titanium 
+internet satellite tv player 
+tv players 
+tv 
+heos season 2,1,2,3, 
+heos season 2 
+heros season 2 
+heros season s02e02,03 
+maya 
+heros season s02e02e03 
+xsi 
+heros season s02e02e03 
+heros season s02 
+xsi linux 
+heros season 2 
+3ds 
+blok 
+fall in love with 
+fall in love with 
+fall in love with 
+fall in love with 
+blok 
+heros season 2 
+heros season 2.complete[2007]dvd 
+heros season 2.complete[2007] 
+visual studio 2005 
+prison break 
+heros season 2.complete[2007]dv... 
+heros.season.2.complete[2007]dv... 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lino banfi 
+300 
+resident evil 
+resident evil extincion 
+resident evil exctincion 
+resident evil extinction 
+resident evil extinction ita 
+power 
+massive r&b 
+paris 
+ps2 
+herman van veen 
+mondo marcio 
+mondo marcio 
+mondo marcio 
+rambo 
+bare 
+playboy 
+topless 
+lino banfi 
+mondo marcio 
+mario merola 
+mario merola 
+mario merola 
+mario merola 
+silver stallone 
+silver stallone 
+animali 
+machete ita 
+wildervank 
+playboy 
+red hot chilly peppers 
+red hot chilly peppers 
+red hot chilly peppers 
+search files, youtube and liveleak 
+laura 
+face of death 
+skate 
+se 
+search files, youtube and liveleak 
+csi 
+omnipage 16 activator 
+madness 
+surf's up: i re delle onde 
+sms - sotto mentite spoglie 
+porn 
+pdf 
+haitian tv 
+tivice 
+ti vice 
+tivice 
+sex 
+how to have sex 
+lessons 
+cum inside 
+gay 
+bisex 
+fuckingmachine 
+sex 
+breed 
+sex tape 
+gay inside 
+crampie gay 
+breed 
+bred 
+internal 
+sec 
+sex 
+asian 
+machine 
+group 
+education 
+psp 
+psp movie 
+transformers 
+the simpsons movie 
+transformers 
+surfs up 
+the kingdom 
+exploited 
+day watch 
+hustler 
+school 
+dvd 
+grindhouse 
+pirates 
+dvd 
+rape 
+soundtrack 
+avi 
+prince 
+johnny cash 
+the gateway 
+fith 
+mestre bimba 
+mestre bimba a capoeira ilumunada 
+a capoeira ilumunada 
+capoeira 
+war 
+cfosspeed 
+schranz 
+hide ip 
+ip hider 
+gide ip 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+sweet caroline 
+sweet caroline 
+caroline 
+sweet_caroline 
+sweet caroline 
+hide ip 
+john denver 
+hide ip 
+bound 
+girl 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ps2 
+ps2 
+suzuki bach cantatas 
+ebpny 
+ebony 
+limp bizkit 
+foo fighters 
+john denver 
+gide ip 
+hide ip 
+mask surf 
+lustiges 
+lustiges 
+lustiges 
+microsoft office xp 2007 italiano 
+microsoft office xp 2007 italiano 
+microsoft office xp 2007 italiano 
+tao 
+john denver 
+carpenters 
+k3 
+madonna 
+nnw 
+bbw 
+art 
+bbw 
+windows xp professional 
+bondage 
+motoqueiro fantasma 
+office 2007 
+ghost rider 
+die hard 
+shaun the sheep 
+shaun the sheep 
+shaun the sheep 
+shaun the sheep 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+planet terror ita 
+elisa 
+microsoft office 2007 
+james blunt 
+morrisin 
+morrison 
+james morrison 
+james morrison 
+zbrush 
+resident evil extinction ita 
+mp3 
+mp3 
+mp3 
+ita 
+jericho 
+clive barker's jericho 
+ita 
+ita 
+3d studio max 
+resident evil ita 
+avi ita 
+resident evil ita avi 
+resident evil 
+resident evil ita 
+mask surf 
+proxyway 
+surf 
+madonna 
+kamasutra 
+hide ip 
+hide ip 
+hide ip 
+grindhouse.planet.terror.2007.italian.md.dvdrip.xvid-gold 
+grindhouse.planet.terror.2007.italian.md.dvdrip.xvid-gold 
+planet terror ita 
+ip address changer 
+planet terror ita 
+ita 
+porn 
+ps2 ita 
+xbox ita 
+planet terror 
+far cry 
+resident evil ita 
+resident evil 
+soft image 
+maya 
+stardust ita 
+stardust ita 
+hide ip 
+hide ip 
+stardust 
+isolee 
+search files, youtube and liveleak 
+isolee 
+radiohead 
+stardust ita 
+hide ip 
+hide ip 
+ita 
+ita 
+pcmesh 
+easy hide 
+mp3 
+britney spears 
+search files, youtube and liveleak 
+anonymizer 
+anonymous 
+ghost 
+ita 
+drive snapshot 
+r-drive image 
+valeria marini 
+hide ip 
+il 7 e l'8 
+ik 
+mijn 
+slet 
+fear ita 
+jong 
+hide ip 
+ik 
+ikke 
+f.e.a.r ita 
+f.e.a.r 
+ultraiso 
+hide ip 
+hide my ip 
+ip hider 
+umbrella 
+rihanna 
+hide ip 
+ich und ich 
+gib uns nicht auf 
+matthias reim 
+xavier naidoo 
+eurovision 
+poweriso 
+search files, youtube and liveleak 
+zara whites 
+search files, youtube and liveleak 
+video 
+pictures 
+hidden tiger 
+my name is earl 
+fiest 
+old man leudeck 
+guns of navarone 
+pc silent hunter 
+pc silent hunter 
+american beauty 
+pc silent hunter 
+archive 
+onkelz 
+teen 
+aniamal 
+animal 
+animal 
+animal 
+animal 
+animal 
+animal 
+nudist 
+nudism 
+lolita 
+nudism 
+young 
+zoophilla 
+zoophilla 
+zoo 
+pussy 
+fisting 
+under the tuscan sun 
+adobe 
+adobe indesing 
+adobe 
+mysql 
+php/mysql 
+anal fisting 
+adobe master 
+adobe premium 
+adobe standard 
+adobe standard 
+adobe 
+commando daffy 
+piano 
+mehldau 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+commando daffy 
+action girls 
+commando 
+commando 
+dutch commandos 
+dutch commandos 
+dutch commandos 
+jenna 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ps2 
+pillu 
+shakira 
+fukalot 
+kari sweets 
+planet katie 
+teen 
+jenna haze 
+search files, youtube and liveleak 
+http://www.youtube.com/watch?v=zprznbcrfou 
+http://www.youtube.com/watch?v=zprznbcrfou 
+http://www.youtube.com/watch?v=zprznbcrfou 
+http://www.youtube.com/watch?v=zprznbcrfou 
+http://www.youtube.com/watch?v=zprznbcrfou 
+http://www.youtube.com/watch?v=zprznbcrfou 
+il signore degli anelli 
+i simpson 
+gigi finizio 
+resident evil ita 
+resident evil 
+ita 
+ordine della fenice 
+wow 
+wow illidan 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+overlocked 
+xxx 
+anal 
+prison break 
+prison break ita 
+caligula 
+bikini 
+inconvenient truth 
+strip 
+teen 
+30 rock 
+dexter 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+yutube 
+you tube 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube elvis presley 
+you tube 
+rmvb 
+heroes 
+heroes s02e02 
+heroes s02e03 
+heroes s02e01 
+heroes s02e03 
+anna 
+fondamenti di telecomunicazione 
+fondamenti di telecomunicazione 
+fondamenti di telecomunicazione 
+fondamenti di telecomunicazione 
+manuale di java 
+san francesco 
+san francesco 
+san francesco 
+san francesco 
+guitar 
+search files, youtube and liveleak 
+seeker 
+manuale di java 
+manuale di java 
+white teens 
+teens 
+bitreactor.to 
+bitreactor.to 
+bitreactor.to 
+granular 
+gag 
+bitreactor.to 
+08 
+roemenie 
+08 pc 
+18yo 
+abba 
+corton 
+carton 
+carton 
+carton 
+flash 
+fifa 
+radiohead 
+transformers 
+granular 
+granular 
+granular 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+operation kingdom 
+search files, youtube and liveleak 
+otorrent 
+torrent 
+teen 
+torrent kongdom 
+torrent kingdom 
+sex 
+teen 
+50 cent 
+operation: kingdom 
+operation: kingdom 
+torrent 
+teen 
+teen 
+dvdrip 
+dvdrip german 
+pes8 
+porn 
+xxx 
+scf 
+euromaster 
+euromaster 
+amsterdam 
+brother bear 
+brother bear 
+mini disco 
+high school musical 2 
+high school musical 2 
+the office 4x03 
+the office 4x02 
+granular 
+tna 
+earth 
+knocked up 
+milf 
+paganini 
+minz 
+minz 
+nero 
+milf 
+the simpsons movie 
+dusche 
+dusche 
+dusche 
+dusche 
+dusche 
+privat 
+software 
+creatief met word 
+creatief met word 
+bf2 
+battlefield 2 
+tribler 
+spiderman 3 ita 
+fantastici 4 
+giuliana lampis 
+giuliana 
+giordanelli 
+sassari 
+sassari 
+sassari 
+sassari 
+granular 
+carbonia 
+korn 
+pedo 
+pedo 
+blowjob 
+tribler 
+house 
+russian 
+pretty 
+ass 
+wwe 
+tna 
+russia 
+cs3 
+perfume: the story of a murderer 
+perfume the story of a murderer 
+shrek 3 
+fazz-jazz 
+fazz-music 
+cs3 production 
+rape 
+windows xp dutch 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+windows vista 
+windows vista 
+partition 
+battlestar galactica s3 
+http://www.youtube.com/watch?v=6yehunaljpg&feature=bz303 
+funkeiras rebolando 
+search files, youtube and liveleak 
+tribler 
+tribler_4.1.7 
+barbara miranda santos 
+bes cristiano ronaldo 
+erotic 
+big dicks 
+blonde ass 
+round ass 
+monster cock 
+urban ninja 
+kathy barry 
+kathy barry 
+kathy barry 
+search files, youtube and liveleak 
+tribler_4.1.7 
+tribler 
+iluzionista 
+iluzionist 
+illusionist 
+beatles 
+era 
+fellowship of the ring 
+lord of the ringd 
+lord of the rings 
+soundtrack 
+soundtrack rings 
+soundtrack lotr 
+soundtrack king 
+soundtrackretun 
+soundtrack retrun 
+soundtrack return 
+soundtrack lord 
+soundtrack ring 
+soundtrack ring 
+fellowship 
+fellowship 
+fellowship 
+tolkie 
+tolkien 
+ebook 
+rings 
+rings 
+rings 
+kryl 
+karel 
+karel 
+nohavica 
+era 
+metalica 
+metal 
+corss 
+corrs 
+the cors 
+dylan 
+dylan 
+steve martin 
+entourage 
+simpson 
+playboy 
+at five in the afternoon 
+at five in the afternoon 
+panj e asr 
+torrents 
+at five in the afternoon torrent 
+shrek terzo 
+porno 
+ita 
+encarta 2007 
+creampie 
+bones 
+rosanna fratello 
+celentano 
+celentano 
+adriano celentano 
+frank sinatra 
+torrent operation: kingdom 
+grindhouse 
+corbin fisher 
+corbin fisher 
+gay kiss 
+gay kiss 
+gay kiss 
+gay kiss 
+gay kiss 
+gay kiss 
+sean and cody 
+sean and cody 
+gimme more 
+bel ami 
+gay asian 
+gay 
+el cor de la ciutat 
+gay pedo 
+green day 
+cum 
+die ärzte 
+fuck 
+enrique iglesias-tired of being sorry 
+enrique iglesias 
+enrique iglesias 
+cff 
+paris hilton 
+brittany spears 
+britney spears 
+tom jones 
+ruggeri 
+tina turner 
+eros ramazotti 
+jan smit 
+nick en simon 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+peter beense 
+andre hazes 
+trasformazione vista 
+windows vista 
+harry potter 
+l'ordine della fenice 
+il dolce e l'amaro 
+catacoms 
+catacombs 
+i fantastici quattro 
+i fantastici quattro e silver 
+i fantastici quattro e silver 
+i fantastici quattro e silver 
+transformers 
+nba 
+nba pc 
+08 pc 
+黑目曈 
+黑目曈 
+黑目曈 
+黑目曈 
+黑目曈 
+水電工 
+éé\83½³¢ 
+av女åÄ\92 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+æ«»äºï¿½¨è� 
+av 
+schoolgirl 
+school girl 
+schoolware 
+baby 
+hentia 
+fazz music 
+jenna jameson 
+dvd rip german 
+bitreactor dvd rip german 
+fazz music 
+fazz music 
+scarface dvd rip german 
+linkin park 
+linkin park 
+pamela 
+sicko 
+sicko german 
+kingdom german 
+deutsch 
+tomtom 
+tomtom nokia n70 
+tomtom 
+pc games silent hunter4 
+need for speed 
+need for speed pro street 
+need for speed pro street pc 
+pc games silent hunter4 
+pc silent hunter4 
+pc silent hunter4 
+silent hunter4 
+baglioni 
+baglioni 
+iron maiden 
+caparezza 
+caparezza 
+kayne wayne sheperd 
+tokio hotel 
+kanny wayne shepherd 
+kenny wayne shepherd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+harry potter 2 
+harry potter 2 german 
+akvis enhancer 
+munich city nights 
+munich city nights 
+munich city nights 
+hardrock 
+hardrock 
+hardrock 
+tomtom 
+tomtom 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+av 
+search files, youtube and liveleak 
+ds9 
+ds9 
+pee 
+wet 
+hard 
+seemannslieder 
+gangbang 
+throat 
+medal of honor 
+medal of honor pc dvd 
+magix music maker 
+magix music maker 
+magix music maker 
+magix music maker 
+akon 
+che guevara 
+dont matter 
+akon 
+a bay bay 
+truespace7.5 
+beyounce 
+truespace7 
+truespace 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+beyounce 
+eva cash 
+t pain 
+gohst rider 
+3d studio max 
+ghost rider 
+ghost rider german dvd rip 
+ghost rider ger 
+sean kingston 
+zbrush 
+don't matter 
+don't matter 
+akon 
+this is why i'm hot 
+magix music maker 
+akon 
+qbase 4.0 
+ayo technology 
+eve 
+spissrotgang 
+danseband 
+danseband 
+danseband 
+elvis 
+rihana 
+rihana 
+rihanna 
+hellbillies 
+hellbillies 
+hellbillies 
+ole ivars 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+get me bodied 
+jamesbond 
+wwe 
+jamesbond 
+film 
+wwf 
+3d studio max 
+boob 
+tribler 
+german 
+bbc 
+physics 
+sport 
+game 
+spss 
+gis 
+linux 
+ubuntu 
+get me bodied 
+cubase studio 
+cubase studio 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+natural 
+liefde 
+j.lo 
+bitreactor 
+ea games 
+pc dvd 
+medvídek 
+tomtom 
+dadyyanki 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+torrent.to 
+cubase studio 
+cubase studio 
+d bo 
+d bo san souci 
+d-bo 
+bushido 
+pcdvd 
+tribler 
+blond 
+blond 
+bruna 
+bionda 
+school 
+school 
+blond 
+girl 
+anita dark 
+ramazzotti 
+ramazzotti eros 
+paprika 
+culo 
+la figa 
+angelica 
+angela 
+sara 
+300 
+get me bodied 
+film azione 
+azione 
+azione 
+commedia sexi 
+commedia sexi 
+azione 
+erica egresia 
+erica egresia 
+erica egresia 
+erica egresia 
+mia martini 
+ghana 
+ghana 
+twi 
+amakye 
+nino d'angelo 
+nino d'angelo 
+sara tommasi 
+jessica alba 
+pompini 
+pompini 
+pompini 
+sucked 
+pompini 
+pompini 
+porn 
+italian 
+sesamstraat 
+italiane 
+italian girl 
+films 
+films 
+magazzino 
+magazzino 
+magazzino 
+school 
+home 
+harryptter2 
+harrypotter2 
+pamela anderson 
+belmondo german 
+rape 
+blow 
+hand 
+home 
+all 
+italian 
+italiane 
+italian film 
+italian 
+handjob 
+italian 
+donna 
+bella 
+sara 
+porn 
+handjob 
+italian porn 
+hot 
+adult 
+belmondo german 
+belmondo german 
+nederlands 
+dutch 
+granular 
+mixw 
+belmondo german 
+hard 
+sola 
+house 
+molto incinta 
+stardust 
+2061 
+impresa da dio 
+resident evil extintion ita 
+resident evil extintion 
+resident evil extintion italiano 
+molto incinta 
+sonata arctica 
+impresa da dio 
+asstraffic 
+kein bund fürs leben 
+bitreactor 
+harry potter e l'ordine della fenice 
+celentano 
+adriano celentano 
+ficarra e picone 
+ita 
+resident evil 
+resident evil ita 
+molto incinta 
+stardust ita 
+common 
+mariah carey 
+sonata arctica 
+stardust 
+vasco 
+ita 
+ita 
+lost season 1 episode 
+invader zim 
+heroes 
+heroes ita 
+heroes sub ita 
+kids in the hall 
+sonata arctica 
+nba 08 
+nba 08 pc 
+nba 08 
+nascar 
+burnout 
+kids in the hall tea please 
+kids in the hall 
+aquarium 
+busbnies 
+business continuity 
+"business continuity" 
+tartaruga 
+zecchino d'oro 
+search files, youtube and liveleak 
+duffle bag boy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+superman 
+xxx 
+angelica bella 
+selen 
+handjob 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+miles davis 
+marley 
+gatto 
+gatto 
+gatto 
+gatto documentario 
+dvdshrink 
+antivirus 
+ma che bella fattoria 
+troy 
+windows xp 
+zuigt 
+zuigt 
+windows vista 
+pirates 
+sleeping policeman 
+sleeping policeman 
+sleeping policeman 
+sleeping policeman 
+michael clayton 
+sleeping policeman 
+sleeping policeman 
+waitress 
+sleeping policeman 
+maroc 
+the grift 
+marocco 
+marocco 
+marocco 
+star trek 
+wwe 
+il vento fa il suo giro 
+handjob 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+trhote 
+deepthroat 
+deepthroat 
+silent hunter 
+screen saver 
+norton 360 
+norton 360 
+norton 360 
+grey's anatomy 
+simpsons 
+youtube 
+youtube 
+cnn 
+zecchino d'oro 
+44 gatti 
+matrix 
+signore degli anelli 
+signore degli anelli 
+signore degli anelli 
+matrix 
+le due torri 
+ritorno del re 
+molto incinta 
+le ragioni dell' aragosta 
+porn 
+xxx 
+mp3 
+divx ita 
+joshua 
+hot summer in the city 
+next 
+next ita 
+next italian 
+next 
+invasion 
+invasion ita 
+"hot summer in the city" 
+28 settimane dopo 
+trojan 
+heritage 
+tosh 
+iron maiden 
+madonna 
+muse 
+over the hills and far away 
+picturing the past 
+vinicio 
+capossela 
+google earth 
+kid nation 
+kid nation 
+kid nation episode 1 
+kid nation 01 
+kid nation 02 
+kid nation 
+kid nation s01.e03 
+pirates 
+divx ita 
+divx ita 
+handbook 
+beckman 
+28 settimane dopo 
+ratatuille 
+ratatouille 
+intramontabile effervescenza 
+intramontabile effervescenza 
+c'era una volta in america 
+mp3 
+.mp3 
+marketing 
+divx ita 
+divx ita 
+divx 
+divx ita 
+ratatouile 
+ratatouille 
+invation 
+marketing 
+baglioni 
+e tu 
+astro 
+astro 
+stream flash video server 
+stream flash 
+blade runner 
+sonata arctica 
+iron maiden 
+fear of the dark 
+charlie the unicorn 
+invasion 
+divx ita 
+flight of the conchords 
+720p 
+flight of the conchords 
+720p 
+hd 
+search files, youtube and liveleak 
+nod32 
+hills have eyes 2 
+zuigt 
+apartment 
+korean 
+asian mpg 
+asian 
+dragonforce 
+guitar hero 
+search files, youtube and liveleak 
+gravity 
+lactation 
+creation 
+search files, youtube and liveleak 
+music 
+music 
+beyonce 
+linux 
+beyonce 
+beyonce 
+beyonce 
+rihanna 
+ayo technology 
+chamillionaire 
+nero 8 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+cose molto cattive 
+dvd cose molto cattive 
+dvd-r ita 
+search files, youtube and liveleak 
+dvd-r ita 
+dvd 
+dutch sub 
+dutch sub 
+barbara tucker 
+barbara tucker 
+barbara tucker 
+dutch sub 
+dutch sub 
+barbara tucker 
+barbara tucker 
+dvd 
+dvd 
+austin powell 
+austin powell 
+dvd austin powell 
+dvd austin powell 
+dvd austin powell 
+porco dio 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+hide ip 
+proxyway 
+mask surf 
+dj 
+beyonce 
+ggg 
+helen duval 
+helen duval 
+kim holland 
+handyman 
+sfondi nokia 
+sfondi nokia 
+sfondi nokia 
+sfondi nokia 
+mariniers kapel 
+marinierskapel 
+keng 
+keng leiden 
+concordia leiden 
+belladonna 
+belladonna 
+k&g leiden 
+ps2 
+keng leiden 
+angelo ruoppolo 
+beatles 
+amigo leiden 
+dvs katwijk 
+fountainhead 
+sharon stone 
+pat bone 
+barletta 
+alessandria 
+simona ventura 
+divx 07 
+divx 07 ita 
+xvid 07 ita 
+molto incinta 
+invasion ita 
+next ita 
+film 
+film ita 
+rrar 
+search files, youtube and livereelleak 
+reelleak 
+reel 
+rbf 
+new found glory 
+alla deriva 
+porn 
+naked 
+fuck 
+mom 
+rolling stones 
+trompetterkorps 
+michelle marsh 
+freaks of cock 
+huge cock 
+huge cock 
+cock 
+euro 2008 qualifying matches 
+euro 2008 
+soldier destiny's child 
+psp 
+psp iso 
+2008 euro soccer 
+harry potter psp 
+wayne dyer 
+saw 4 
+dance music 
+porno 
+divx ita 
+divx ita 
+divx ita 07 
+lo chiamavano trinità 
+lo chiamavano trinità 
+dark messiah 
+xvid ita 07 
+k3 
+shawn lee's ping pong orchestra 
+soulfood & jadoo 
+ita bud spencer 
+ita divx 
+lali puna 
+motorhead 
+hommade 
+homemade 
+lali puna 
+kaya project-elixir 
+stargate 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dutch movie 
+dutch 
+heros 
+comedy 
+ocean 13 
+crush test dammies 
+navteq 
+navtec 
+navtech 
+vdo 
+dayton 
+search files, youtube and liveleak 
+mirai nagasu 
+mirai nagasu 
+smallville s06e12 
+smallville 6 12 
+smallville 6 11 
+smallville 6 11 
+dayton 
+bokito 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+navtech 
+search files, youtube and liveleak 
+axxo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dvd 
+vista 
+comedy 
+vdo 
+dvd 
+sonata arctica 
+cpu 
+cpu 
+avi 
+44 gatti 
+dragonhart 
+iso 
+divx ita 
+bin 
+damien rice 
+damien rice 
+damien rice 
+damien rice 
+zip 
+damien rice 
+gay 
+gay 
+gay homemade 
+damien rice 
+gay homemade 
+gay 
+damien rice 
+damien rice 
+damien rice 
+beatles 
+gay cum inside ass 
+gay 
+living things 
+living thing 
+bowling for soup 
+axxo 
+shemale bukkake 
+shemale 
+divx 
+divx ita 
+gay mpeg 
+gay mp4 
+gay mpeg 
+gay mpeg 
+gay mpeg 
+gay mpeg 
+mpeg 
+queer as folk 
+gay mpeg 
+texas chainsaw massacre 
+pc silent hunter 
+streets 2008 
+baantjer 
+baantjer 
+baantjer 
+baantjer 
+baantjer 
+search files, youtube and liveleak 
+sleep don't weep 
+sleep don't weep 
+sleep don't weep 
+sleep don't weep 
+il prescelto 
+sleep don't weep 
+film il prescelto 
+il prescelto 
+transformers 
+transformers 
+transformers 
+transformers 
+transformers 
+transformers 
+transformers italiano 
+transformers ita 
+prison break 
+sonata artica 
+sonata arctica 
+
+sonata arctica 
+musiche popolari 
+musiche popolari 
+musiche popolari 
+sonata arctica 
+musiche popolari 
+musiche popolari 
+canzoni popolari 
+search files, youtube and liveleak 
+dog 
+dog 
+dog 
+bike 
+bike 
+bike 
+bike 
+bike 
+sex 
+sex 
+sex 
+sex 
+hot music 
+cat 
+beck the information 
+beck 
+u2 
+rem 
+adobe 
+prison break 
+\ 
+gay 
+normaal 
+gay mpeg 
+gay live 
+gay live 
+gay mpeg 
+father daughter 
+la linea 
+gay homemade 
+rolling stones 
+rolling stones 
+zappa 
+miles 
+jarrett 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+sex 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+microsoft 
+wired 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+barletta 
+steve ballmer 
+scrubs german 
+pixelperfect 
+paris 
+andria 
+talvin singh 
+gay 
+direttoregenerale als bat 
+direttore generale als bat 
+webcam 
+rolling stones 
+giovanni 
+search files, youtube and liveleak 
+telfon telaviv 
+avril 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rise of legends 
+telfon telaviv 
+mature 
+transformers 
+maya 
+maya 
+office 
+transformers 
+heroes 
+teen 
+room 1408 
+absolute 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mary 
+ftv 
+metart 
+jenny rom 
+shemale 
+grindhouse 
+ita grindhouse 
+dvd grindhouse 
+search files, youtube and liveleak 
+dvd ita 
+search files, youtube and liveleak 
+stewart 
+grindhouse 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+windows 
+molto incinta 
+divx ita 
+office2007 
+ratt 
+search files, youtube and liveleak 
+s14e03 
+silverchair 
+s14e03 er 
+ratatouille 
+logos 
+chemical brothers 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+cisco 
+bourne ultimatum 
+l'ultima legione 
+ultima legione 
+amores perros 
+search files, youtube and liveleak 
+house 
+divx ita 
+tribler 
+britney 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ostro sledovane vlaky 
+vlaky 
+vlaky ostro 
+vlaky ostre 
+vlaky ostre 
+vlaky ostre 
+vlaky 
+menzel 
+jiri menzel 
+observed trains 
+observed trains 
+vanity 
+observed trains 
+rocco 
+rocco 
+rocco 
+rocco 
+sindleruv seznam 
+schindleruv seznam 
+lord of the rings 
+lord of the rings 
+lord of the rings 
+lord of the rings extended 
+idee 
+pcm 
+linux 
+pamas 
+rocco 
+rocco teen 
+pamas 
+rocco 
+brokesky 
+brookesky 
+ivana 
+anal plug teen 
+plug teen 
+toy teen 
+dutch 
+bionic woman 
+oblivion 
+ena 
+enea 
+enae 
+era 
+eradion 
+dion 
+the last legion 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+can't stop the beat 
+transformers 
+xbox 
+xbox360 emulator 
+xbox emulator 
+xbox e 
+xbox 
+vista 
+msn 
+fantastic 4 
+osx 
+osx 
+chris brown 
+chris browncole 
+cole 
+keyshia cole 
+sky 
+kiss kiss 
+nero8 
+nero8 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sting 
+sting 
+sting 
+sting 
+4 hour work week 
+4 hour work week 
+four hour work week 
+the police 
+four hour work week 
+vivid 
+sex 
+story of o 
+dlery 
+clery 
+troy 
+call on me 
+if you leave me now 
+jaques brel 
+search files, youtube and liveleak 
+pirates of caribean 
+fatih akin 
+crossing the bridge 
+pirates of caribean 
+pirates of caribean 
+pirates of caribean 
+pirates of caribean 
+marc almond 
+pirates of caribean 
+pirates of caribean 
+apocalypse now 
+pirates of caribean 
+tomtom 6 
+tomtom 7 
+thai cuties 
+platoon 
+platoon 
+platoon 
+battisti 
+kate bush 
+battisti 
+battisti 
+mina 
+michael stipe 
+mina 
+your ghost 
+tettone 
+tettone 
+tettone 
+tettone 
+tettone 
+tettone 
+big boobs 
+big boobs 
+tomtom 6 
+harry potter 
+harry potter italian 
+celentano 
+adriano celentano 
+morandi 
+baglioni 
+natale a new york 
+natale a new york 
+natale a new york 
+natale a new york 
+natale a new york de sica 
+vacanze di natale a neew york 
+vacanze di natale a neew york 
+natale a miami 
+natale a miami 
+vacanze di natale a miami 
+bella 
+marcella bella 
+300 
+il gladiatore 
+il gladiatore italiano 
+il gladiatorein italiano 
+il gladiatore in italiano 
+il gladiatore 
+la grande abbuffata 
+film 
+film in italiano 
+big breats 
+big breasts 
+italian film 
+striptease 
+beatles 
+beatles 
+ubuntu 
+wpa 
+the queen 
+strip 
+bondage 
+abba 
+wpa 
+i pooh 
+nina private session 
+the pooh 
+nina hartley 
+private sessions 
+the pooh italian 
+audio italian 
+the pooh 
+battisti 
+private sessions 
+mina 
+nina hartley 
+battisti 
+sherek 3 
+4 hour working week 
+shrek 3 
+shrek 3 italiano 
+shrek 3 italiano 
+shrek 3 italian 
+shrek 3 
+dramweaver cs3 
+weeds 
+weeds s03e09 
+shrek 
+heroes 
+search files, youtube and liveleak 
+creatief met kurk 
+dutch subs 
+harry potter 
+bourne 
+harry potter 
+heroes ita 
+mozart 
+bach 
+beethoven 
+debussy 
+ravel 
+stardust 
+beatles 
+stardust ita 
+nero 8 
+n.e.r.d. 
+nerd 
+pharell 
+neptunes 
+13yo girl 
+brian setzer 
+schubert 
+schumann 
+monteverdi 
+segovia 
+lucia 
+paco lucia 
+fitzgerald 
+ellington 
+basie 
+west 
+prison break s02 
+prison break s03 
+nero 
+nero linx 
+nero 3 
+nero v3
+prison break s03 
+video files 
+avril lavigne 
+nero linux 
+nero linux 
+nero ahead 
+ahead 
+pro evolution 
+fifa 08 
+fifa 08 pc 
+tony hawk pro skater 4 
+fifa 08 pc english 
+spiderman 3 ita 
+brian setzer 
+winx 
+winx 
+mark knopfler 
+winx-ita 
+winx 
+14 enne 
+bg audio 
+cronache di provincia 
+tenc 
+whore 
+barbie torrents 
+blowjob 
+search files, youtube and liveleak 
+barbie 
+mackay 
+mackay 
+transformer 
+transformers 
+castellina pasi mp3 
+castellina pasi 
+castellina pasi 
+castellina pasi 
+castellina pasi 
+castellina pasi 
+castellina pasi 
+search files, youtube and liveleak 
+vista 
+transformers proper 
+rescue me 
+xxx 
+vista 
+vista 
+vista 32 
+vista 32 
+big boobs 
+massive meloons 
+massive melons 
+massive melon 
+long tongue 
+rescue me season 
+tettone 
+vista 32 64 
+vista 32 64 
+vista 
+office 
+porno big boobs 
+office 
+scopata big boobs 
+office2007 
+video porno 
+long tongue 
+akon 
+akon 
+dailyshow 
+akon 
+office2007 
+office2007 
+office2007 
+office2007 
+office2007 
+akon boss ac 
+office 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+office 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+akon boss ac 
+office 
+akon boss ac 
+akon boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+boss ac 
+akon 
+office 
+office 
+akon boss 
+office 
+this is why 
+50 cent 
+chris brown 
+kabouterbond 
+kabouterbond 
+kabouterbond ozi 
+ozi 
+ozi top25 
+elvis 
+windows 
+office 
+ubuntu 
+pro evolution 
+fifa 08 eng 
+video 
+youtube 
+youtube 
+hardcore penguin 
+hardcore penguin 
+hardcore penguin 
+hardcore penguin 
+hardcore penguin 
+drama 
+hardcore penguin 
+melue 
+melua 
+joplin 
+kim karda 
+sex 
+drama prairie 
+drama prairie 
+apocalypto 
+elvis 
+drama prairie 
+nora jones 
+drama prairie 
+rob de nijs 
+rob de nijs 
+rob de nijs 
+gerard van maasakker 
+tomtom 
+kabouterbond 
+ozi explorer 
+works 
+travel 
+psr2700 
+psr2700 yamaha 
+yamaha 
+yamaha psr 
+yamaha psr 
+vista crack 
+lulu santos 
+lulu santos 
+vista crack 
+nero 
+nero burning 
+dvd ita 
+italian 
+bollywood 
+two and a half men 
+search files, youtube and liveleak 
+vista 
+heroes 
+tell me more dutch 
+carrie underwood 
+blondes 
+blondes 
+booty 
+ass 
+perfect ass 
+brunette 
+blonde 
+mark knopfler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stones 
+bushido 
+tribler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nba live 
+nba live pc dvd 
+bollywood 
+search files, youtube and liveleak 
+sextapes 
+eagles of death metal 
+german 
+eagles of death metal 
+küzey rüzgari 
+eagles of death metal 
+absolute linux 
+absolute 
+logan´s run 
+nick simon 
+thai 
+küzey rüzgari 
+saban 
+türksex 
+sextape 
+shab deurne 
+shab deurne 
+shab deurne 
+absloute 
+abslute 
+absolute 
+search files, youtube and liveleak 
+petek 
+hold that train conductor 
+petek dincöz 
+petek dincöz sex 
+petek dincöz sextapes 
+petek dincöz sextapes 
+petek dincöz sextapes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+walt disney 
+tesna koza 
+domaci 
+peter gunn 
+dvd ita 
+italiani 
+italiani 
+sex 
+work song 
+.torrent film download 
+scandal 
+skandal 
+türk 
+reding 
+otis 
+rolling 
+star wars theme 
+star wars theme 
+star wars 
+star wars youtube 
+miles 
+schiffer 
+neu 
+victoria 4 
+star wars theme 
+death sentence 
+death sentence german 
+stolen 
+symphony of hope 
+desperate house wifes 
+mary had a little lamb 
+bush 
+borsato 
+bullock 
+jon gindick 
+symphony of hope 
+prison break 
+sex 
+search files, youtube and liveleak 
+armi 
+hacking 
+victoria 4.1 pro bundle 
+victoria 4.1 pro bundle 
+victoria 4.1 pro bundle 
+victoria 4.1 pro bundle 
+victoria 4.1 pro bundle 
+search files, youtube and liveleak 
+korn 
+cradle 
+star 
+hentai 
+winx 
+winx 
+desperate housewifes 
+sponge bob 
+fgdfg 
+house 
+18 
+18 
+adobe indesign cs2 
+stargate 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+curtis mayfield 
+curtis mayfield 
+search files, youtube and liveleak 
+summer palace 
+italian 2007 
+linux 
+windows vista 
+6.75 
+6.75 
+ufo 
+six feet under 
+simpsons 
+death in june 
+vasco rossi 
+surf's up - i re delle onde 
+surf's up 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+behind the mask 
+vasco rossi 
+vasco rossi 
+surf's up ita 
+i re delle onde 
+vasco rossi 
+vasco rossi 
+invasion 
+behind the mask death in june 
+behind the mask douglas 
+blutharsch 
+graumahd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anna 
+liz vicious 
+suicide girls 
+simpsons 
+suspension 
+ufo 
+lesbo 
+lesbo 
+babes 
+colonna sonora heroes 
+heroes 
+heroes 
+heroes song 
+babes 
+how 
+babes 
+half life 
+exploited college girls 
+half life 
+half life 
+travis 
+jeff dunham 
+rogue wave 
+rogue wave 
+rogue wave 
+babes 
+olga 
+sexy 
+tna 
+youtube 
+transformer 
+girls 
+pamela 
+pamela anderson 
+pamela anderson 
+martina stella 
+jessica 
+300 
+zzz 
+sony vegas 
+cocconut records 
+nighttiming 
+nighttiming 
+cocconut 
+risiko 
+troia 
+xxx 
+risiko 
+magix 
+7.10 
+magix radio 
+radio software 
+dvd 
+software 
+abba 
+flight of the concords 
+flight of the concords hbo 
+hbo: one night stand flight of the concords 
+hbo: one night stand 
+jim reeves 
+akufen 
+estelle joseph 
+alkohol120 
+osx 
+osx adobe 
+alcohol120 
+scurbs 
+keygen 
+allkeygen 
+allsoftware 
+allsoftware 
+search files, youtube and liveleak 
+dvdrip ita 
+ritorno al futuro 
+plug&playsoftware 
+plug&playsoftware 
+plug&play 
+plug&play 
+software 
+software 
+software 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+il gladiatore 
+fetish 
+ass 
+ass 
+clone dvd 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+domination 
+domination 
+top 100 juckebox 
+top 100 jukebox 
+top 100 jukebox 
+top 100 jukebox 
+top 100 jukebox 
+the death of cartman southpark 
+awesom-o 
+xxx 
+asia 
+jessica andrews 
+search files, youtube and liveleak 
+bound for glory 
+bryan adams 
+bryan adams 
+tna 
+bryan adams 
+bryan adams 
+adams 
+adams 
+simpsons 
+simpsons 19 
+resident evil 
+dl.tv 
+chines 
+taiwan 
+youtube 
+liveleak 
+arsch 
+halo 
+jack 
+microsoft 
+halo 
+computer 
+spiegel 
+stern 
+bbc 
+windows vista ultimate 
+spiegel 
+pro7 
+china 
+windows vista ultimate activator 
+endgame 
+monday night raw 
+the death of cartman southpark 
+monday night raw 
+monday night raw 
+heroes 02 04 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+monday night raw 
+saving face 
+naruto 
+naruto 251 
+naruto 4th movie 
+samurai champloo song 
+fmily guy 
+family guy ep 156 
+family guy 156 
+family guy 13 
+family guy 
+cartoon newwork 
+current 93 
+prison break 
+windows vista 
+femalestrippers 
+female strippers 
+search files, youtube and liveleak 
+csi 
+csi ita 
+senza traccia 
+zombie holocaust 
+csi miami 
+csi miami ita 
+whore cum 
+eros ramazzotti 
+oracle 
+cameltoe 
+divx - ita 
+die verdammten 
+divx - ita 300 
+divx - ita 300 
+divx ita 
+your inner awakening 
+byron katie 
+ita 
+ita avi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+logan´s run 
+juggs 
+big tits 
+claudia ferrari 
+karma rosenberg 
+suicide girls 
+cum on juggs 
+blade runner 
+nokia 
+ufo 
+dvd ita 
+domplayer 
+domplayer 
+domplayer 
+prison break 
+magicisomaker 
+magic iso maker 
+booty 
+monica 
+monica 
+monica 
+booty 
+booty 
+booty 
+workshop manual volvo v40 2001 
+workshop manual volvo v40 
+workshop manual volvo v40 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+sitter 
+search files, youtube and liveleak 
+heroes 
+heroes s02 e04 
+search files, youtube and liveleak 
+xxx 
+dvdxxx 
+video xxx 
+dvx xxx 
+rocco 
+search files, youtube and liveleak 
+little britain 
+bourn supremacy 
+mr beans holiday 
+avido 
+search files, youtube and liveleak 
+portable apps 
+pinacle 
+pinaclle 
+pinaclle 
+search files, youtube and liveleak 
+brian setzer 
+brian setzer 
+damien rice paradiso 
+damien rice paradiso 
+damien rice paradiso 
+damien rice paradiso 
+damien rice 
+damien rice 
+rogue assassin 
+rogue 
+assassin 
+lions for lambs 
+the kingdom 
+adobe osx kegen 
+adobe osx keygen 
+the invasion 
+osx 
+rendition 
+cartoni animati 
+cartoni animati 
+maxic 
+magix 
+cartoon 
+mac 
+macos 
+mac.os.x.tiger 
+mac.os.x.tiger 
+mac.os.x.tiger 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x.tiger.10.4.9.x86-hotiso 
+mac.os.x. 
+mac.os.x. 
+mac.os.x. 
+mac.os.x. 
+mac.os.x.tiger 
+mac.os.x.tiger 
+mac.os.x.tiger 
+passions 
+dumeter 
+ska 
+shakira 
+nba 08 
+nba 08 pc 
+half life 2 
+perry rhodan 
+perry rhodan 
+perry rhodan 
+perry rhodan 
+perry rhodan 
+perry rhodan 
+perry rhodan 
+rushhour 3 
+stormbreaker 
+rescue me 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+eva 
+milf 
+trei hetcher 
+teri hetcher 
+.avi 
+.avi 
+.avi 
+.avi 
+piraten 
+a chi dice 
+fugalo 
+wwe 
+bzn 
+bzn 
+breath easy 
+tiziano ferro 
+magiciso 
+twins 
+twins 
+celorico 
+casey 
+casey parker 
+shane world 
+surfer girls 
+casey parker 
+casey puerto 
+surfer girls shane 
+surfer girls 
+surfer girls 
+casey parker 
+bnn 
+bnn 
+teens 
+teen 
+abba 
+purcell 
+eros ramazzotti 
+creature comforts 
+bob the builder 
+hollander 
+holland 
+lolita 
+playboy 
+beastiality 
+fist 
+project pitchfork 
+superman return italiano 
+machined 
+machine 
+simpson 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+simpson italiano 
+tokio hotell zimmer 483 
+tokio hotell zimmer 483 
+simpson film italiano 
+simpson film 
+desperados 
+black girls 
+black girls 
+mekong delta 
+mekong delta 
+mekong delta 
+lurking fear 
+high on fire 
+epica 
+epica the divine conspiracy 
+mekong delta fear 
+bi teens 
+divx ita 
+bisexual 
+mero nepal 
+king kong 
+christine debell 
+christine de bell 
+christine debell 
+alice in wonderland 
+independence day 
+logan's run 
+kids from kaper 
+mero nepal 
+mero nepal 
+fifa 2008 
+new zoo review 
+stargate 
+the man who fell to earth 
+mero nepala 
+spiderman2 
+spiderman2 
+spiderman2 
+transformers 
+prey 
+9 songs 
+caligula 
+alice in wonderland 
+mickey mouse 
+classic disney 
+tchaikovsky 
+oh calcutta 
+debbie does dallas 
+nina hartley 
+traci lords 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ilife 08 
+search files, youtube and liveleak 
+ilife 08 
+beethoven 
+bach 
+nightingale 
+saw 
+mac 
+orphaned land 
+orphaned land 
+orphaned land 
+metallica 
+metallica 
+osx 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+metallica 
+orphaned land 
+kataklysm 
+kataklysm 
+kataklysm 
+walt disney 
+walt disney 
+fellini 
+exploitedteens 
+overkill 
+search files, youtube and liveleak 
+californication 
+overkill wrecking everything 
+rome 
+wrecking everything 
+king kong 
+californication 
+nightingale 
+lord of war ita 
+lord of war 
+romancing the stone 
+lord of war ita 
+search files, youtube and liveleak 
+ilife 08 
+search all files 
+search all files 
+search all files 
+herosse2 
+heros season 2 
+nightingale+ 
+nightingale 
+heros season 2 
+search files, youtube and liveleak 
+banfi 
+ggg 
+pedo 
+sex 
+scat 
+vomit 
+simona ventura 
+simona ventura 
+porca 
+peto 
+ggg 
+kiss 
+search files, youtube and liveleak 
+madonna 
+shrek 
+een eigen huis 
+xxx 
+rene froger 
+redlight 
+porn 
+madonna 
+fun 
+honda 
+sperma 
+scat 
+scat 
+scat 
+search files, youtube and liveleak 
+land 
+devs 
+discrete event systems 
+discrete event systems 
+hed kandi 
+ministry of sound 
+craig david 
+spice girls 
+swing 
+swing jazz 
+blouble 
+bouble 
+bouble 
+swing 
+buble 
+tna bound for glory 
+setzer 
+porno valley 
+porno valley 
+vista 
+abby 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sachlamares 
+porno 
+x 
+ableton 
+cumshots 
+heroes 
+the hallowed ground 
+the hallowed ground 
+squirt 
+hight school 
+high school 
+. 
+heroes 
+house 
+wwe 
+tna 
+encyclopedia britannica 
+search all files 
+blue 
+lawrence of arabia 
+a chi mi dice 
+11th hour 
+breath easy 
+zzz 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+creature comforts 
+pireates 
+worlds end 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+disney 
+30 days of night 
+dltv 
+ecw 
+wwe 
+eztv 
+http://www.youtube.com/watch?v=vntxuspwkpc 
+http://www.youtube.com/watch?v=vntxuspwkpc 
+http://www.youtube.com/watch?v=vntxuspwkpc 
+http://www.youtube.com/watch?v=vntxuspwkpc 
+http://www.youtube.com/watch?v=vntxuspwkpc 
+zzz 
+800 
+nwwl 
+broodwar 
+search files, youtube and liveleak 
+the comebacks 
+the comebacks movie 
+war jet li 
+the come backs 
+the comebacks 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the comebacks 
+the comebacks dvd 
+ratatouille.dvdrip.xvid-spiral 
+ratatouille.dvdrip 
+ratatouille 
+residet evil 
+residet evil axxo 
+residet evil axxo 
+resident evil axxo 
+we own the night *xvid 
+we own the night 
+tom and jerry 
+tom jerry 
+hostel part ii ita 
+hostel part ii ita 
+hostel part ii ita 
+hostel part ii ita 
+simpson 
+simpson ita 
+tom jerry 
+pink panther 
+hostel ita 
+porn 
+hitman 
+hitman 2 
+seventeen 
+sextet 
+alles is liefde 
+franco bagutti 
+southpark 
+wwe 
+lil boosie 
+nero 8 
+bubble gum 
+rasheeda 
+rasheeda 
+wipe me down 
+bubble gum 
+rasheeda 
+nero 
+driver detective 
+tony hawk 
+tambourine 
+beyonce 
+search files, youtube and liveleak 
+mr bean 
+little women 
+little women 
+little women 
+little women 
+little women 
+little women 
+kelly rowlands 
+avril lavigne 
+elephant man 
+shaba ranks 
+shaaba ranks 
+shaba ranks 
+shaba ranks 
+shaba ranks 
+shaba ranks 
+shaba ranks 
+lover man 
+clture 
+culture 
+reggae 
+high school 
+pro evolution 
+burning spear 
+burning spear 
+raven riley 
+raven riley 
+raven riley 
+raven riley 
+rugby 
+pro rugby 
+rugby 
+boys to men 
+boys to men 
+modo 301 
+jonny wilkinson 
+far cry 
+hit & run 
+the simpsons hit & run 
+search files, youtube and liveleak 
+michael jackson 
+cabaret 
+eddy murpy 
+mac os x 10.4.8 [jas amd intel sse2 sse3 ppf1 + ppf2] - for intel and amd/sse2 or sse3 
+mac os x 10.4.8 [jas amd 
+navigon 
+tomtom 
+fifa 08 psp 
+fifa 08 psp ita 
+fifa 08 pc 
+generations 
+fifa 08 pc ita 
+colombo 
+baby boomers 
+search files, youtube and liveleak 
+rio de janero 
+damien rice 9 
+lou reed 
+lou reed perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+tiken 
+tiken 
+tiken 
+tiken 
+tiken 
+tiken 
+tiken jah fakoly 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+perfect day 
+corel 
+perfect day 
+ali b j o 
+ali b met g o 
+ali b met 
+gio 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+gio verdonk 
+pixel 
+pixelperfect 
+pixelperfect-- 
+pixelperfect--00 
+pixelperfect--00 
+pixel 
+foo fighters 
+high definition 
+foo fighters hyde park 
+foo fighters hyde park motorhead 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+nitin sawhney 
+nitin sawhney 
+nitin sawhney nadia 
+nitin sawhney nadia 
+nitin sawhney nadia 
+the cranes 
+the cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+cranes 
+gare du nord pablo 
+gare du nord pablo 
+gare du nord 
+gare du nord 
+gare du nord 
+gare du nord 
+nitin sawhney 
+nitin sawhney 
+u.n.k.l.e 
+u.n.k.l.e 
+unkle 
+amputated gargling 
+marie kiss la joue 
+marie kiss la joue 
+marie kiss la joue 
+air 
+jarod 
+air 
+search files, youtube and liveleak 
+air sexy boy 
+air sexy boy 
+playboy 
+american dad season 
+coldcut 
+softcam.key 
+coldcut 
+coldcut 
+druivensuiker 
+druivensuiker 
+druivensuiker 
+druivensuiker 
+playboy 
+playboy 
+playboy 
+playboy 
+tricky 
+tricky 
+billy joel 
+search files, youtube and liveleak 
+just jack 
+just jack 
+jarod camaleonte 
+ez3kiel 
+wax tailor 
+wax tailor 
+wax tailor 
+wax tailor 
+beth gibbons 
+zucchero 
+zucchero un chilo 
+un chilo 
+un chilo zucchero 
+un chilo zucchero 
+la uno bianca 
+laura pausini 
+gianni nanni 
+franco staco 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+adobe 
+laura pausini 
+a broken heart 
+heart 
+leopard 
+tribler 
+hypnosis 
+scrubs 
+scrubs ita 
+hypnography 
+hypnography 
+hypnography 
+laura pausini 
+search files, youtube and liveleak 
+ilife 08 
+elena paparizou 
+cars 
+biagio antonacci 
+search files, youtube and liveleak 
+sport ro 
+sport ro 
+sport ro 
+sport ro 
+partytv 
+party tv 
+riccardo scamarcio 
+my number one 
+nr. one 
+number one 
+elena paparizou 
+
+nba 08 
+lapo elkann 
+2k8 
+austrumi 
+live cd windows 
+sweetes girl 
+babylon 
+fanning 
+tribler 
+michael jackson 
+heat 
+stevie ann 
+teen idol 3 
+exploited 
+japaneese 
+japan 
+asian 
+nagraj 
+superman 
+spiderman 
+ladyboy 
+thai 
+korea 
+china 
+chinesse 
+chinese 
+russian 
+ukraine 
+czech 
+tiny 
+bangbus 
+florida 
+international 
+contest 
+win 
+lolita 
+gang 
+kingdom 
+ladyboy 
+german dvd rip 
+ladyboy 
+alison krauss 
+down to the river to peay 
+down to the river to pray 
+go to sleep you little 
+spiderman 
+operation mindcrime 
+man of constant sorrow 
+man of constant sorrow 
+search files, youtube and liveleak 
+ford mustang 
+queesryche 
+empire 
+queensryche empire 
+led zepplin 
+led zeppelin 
+cramberies 
+cramberies 
+the corss 
+the corrs 
+cramberries 
+cranberries 
+.ita 
+.italian 
+sistem works 
+norton 
+norton 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+ford mustang 
+sugarbabes 
+langerova 
+now dance 2008 
+now dance 2008 
+now dance 2008 
+dj zone 
+dj zone 53 
+hapkido barcelona 
+hapkidobcn 
+hapkidobcn 
+corset 
+search files, youtube and liveleak 
+een vrouw stapte met hevige hoofdpijn een ziekenhuis binnen in mumbai. de dokters schrokken toen ze de patiënte onder de scanner schoven. 
+nelly furtado 
+guns roses 
+lopez 
+beyonce 
+dog 
+dog 
+dog 
+zzz 
+the killers 
+search files, youtube and liveleak 
+porno 
+zzz 
+high school musical 
+kama sutra 
+kama sutra 
+kama sutra 
+angel & airwaves 
+kama sutra 
+kama sutra 
+kama sutra 
+kama sutra 
+kama sutra 
+gloria guida 
+kama sutra 
+kama sutra 
+click 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+300 
+marco paolini 
+marco paolini 
+stardust 
+pan's labyrinth 
+htc 
+mature 
+smartphone 
+paolini 
+kingdom 
+kingdom of heaven 
+last king of scotland 
+paolini 
+genesis 
+genesis 
+genesis 
+marco paolinio 
+marco paolinii 
+marco paolini 
+vajont 
+vajont 
+west side story 
+cyberlink 
+cyberlink ita 
+un'impresa da dio 
+cyberlink 
+vistaclean 
+vistaclean 
+poweriso 
+vistaclean 
+vista clean 
+traffel pro 
+erba di casa mia 
+erba di casa mia 
+beatles 
+adriano celentano 
+adriano celentano 
+adriano celentano 
+celentano 
+celentano chi non lavora 
+pierino 
+beatles 
+winrar italiano + crac 
+unattended 
+vista 
+vista unattended 
+india 
+india sex 
+india fuck 
+lesbian 
+lesbian 
+xp 
+vista 
+axxo 
+ass 
+cum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+2007 
+panzer 
+2007 
+panzer pc game 
+panzer game file 
+search files, youtube and liveleak 
+porn 
+single 
+panzer elite action 
+scottish 
+scotland 
+porn 
+milf 
+nude 
+sexy 
+anal 
+sex 
+*.wmv 
+king crimson 
+heroes ita 
+axxo 
+24 ita 
+divx 24 ita 4x 
+ubuntu 7.10 
+ubuntu 
+divx 24 ita 4x 
+heroes s02 
+heroes s02e04 
+divx house ita 
+ncis 5 
+marco paolini 
+jaco pastorius slang 
+lo svarione degli anelli 
+zzz 
+jaco pastorius slang 
+pink floyd 
+jaco pastorius 
+battlefield gun sound 
+victor vooten 
+victor wooten 
+pulp fiction 
+pulp fiction ita 
+teen 
+pulp fiction 
+cemento armato 
+magic music maker 
+teen 
+one night in paris 
+sex 
+teen 
+abschied nehmen 
+second love 
+pedo 
+boy 
+pain of salvation second love 
+pain of salvation second love 
+pain of salvation second love 
+boy 
+boy gay 
+boy gay 
+pedo 
+pedo 
+pedophilia 
+ubuntu 
+pain of salvation second love 
+pain of salvation second love 
+tribler 
+concha buika 
+tribler 
+tribler 
+tribler_4.1.7.exe 
+muscle men 
+muscle men 
+muscle men 
+muscle men 
+muscle men 
+legaltorrent 
+eigh tlives down 
+eight lives down 
+muscle men 
+muscle men 
+muscle men 
+muscle men 
+u2 
+pinh floyd 
+pink floyd 
+pink floyd, pulse 
+pink floyd, pulse 
+negramaro 
+styx 
+kansas 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+film riccardo scamarcio 
+film roul bova 
+film alvaro vitali 
+biagio antonacci 
+canzoni biagio antonacci 
+canzone iris biagio antonacci 
+nek 
+for downling canzoni di nek 
+download porno 
+download film moana pozzi 
+moana pozzi 
+alles is liefde 
+fish 
+pain of salaion 
+pain of salvation 
+pain of salvation hallelujah 
+any dvd hd 
+search files, youtube and liveleak 
+windows 
+vista 
+the heardbreak kid 
+the heartbreak kid 
+tribler 
+19942 
+1942 
+superman 
+creampie 
+creampie 
+creampie 
+creampie 
+full metal jaket 
+full metal jaket 
+stardast 
+stardast 
+stardust 
+ps3 
+severange 
+severange 
+superman 
+simulatore 
+vasco rossi 
+stardust 
+the last legion 
+ratatouille 
+ass to 
+aqu 
+fetish 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+black adder 
+malcolm in the middle 
+black books 
+laura pausini 
+two and a half men 
+massive r&b 
+dom 
+domination 
+mouth 
+palmaria 
+ass to mouth 
+protal 
+chinese 
+russian 
+sweden 
+private 
+faust 
+food 
+model 
+video 
+nanni 
+palmaria 
+rocco 
+media center 
+saturnino 
+shark tale 
+xilisoft 
+super converter 
+frans 
+nederlands 
+blowjob 
+hardcore 
+rape 
+ass 
+lucy pinder 
+norton 
+kansas 
+ratatouille 
+blof 
+prince 
+jenna jameson 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+shifty 
+spiderman 3 
+nero 
+csi 
+roots 
+james 
+james uk 
+blondie 
+chuck berry 
+heroes sub 
+pc game 
+aerosmith - crying 
+divx 
+u2 
+james best of 
+u2 best of 
+matrix 
+football manager 2008 
+uomo ragno 
+uomo ragno 
+uomo ragno 
+phil collins 
+tette 
+tette 
+dutch 
+tette 
+tettone 
+divx ita 
+playboy 
+playboy nederland 
+divx ita 
+nederland 
+tettone 
+tettone 
+musica 
+musica 
+eros ramazzotti 
+cher 
+cher best of 
+rod stewart 
+vasco rossi 
+vasco rossi 
+deja vu 
+vasco rossi 
+vasco rossi 
+laura pausini 
+laura pausini 
+transformer 
+divx transformer 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nero dvd 
+oblivion 
+search files, youtube and liveleak 
+oblivion 
+nero 6 dvd 
+oblivion 
+oblivion 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+nero dvd 
+nero dvd 
+vista vmdk 
+vmdk 
+nero 
+nero dvd 
+serious business 
+the serious business of happiness 
+serious business of happiness 
+happiness 
+centerfold 
+centerfold dictator 
+estelle 
+estelle 
+soulja boy 
+estelle 
+swiss beatz 
+swiss beatz 
+swiss beatz 
+tna 
+british 
+harry potter 
+linux 
+nero 
+nero 
+www.torrentspy.com 
+russian 
+roger waters 
+smallville 
+torchwood 
+shipudden episode5 
+shipudden episode 5 
+shipudden episode 5 
+russian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lolita 
+search files, youtube and liveleak 
+the 4400 
+spiderma3 
+spiderman3 
+hd 
+hd italy 
+salsa 
+el cantante 
+pthc 
+kabul 
+search files, youtube and liveleak 
+dark shadows 
+smiling in a war zone 
+dark side 
+darkside 
+hd spanish 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+map 
+vmdk 
+hd italy 
+fights 
+el cantante 
+2007 
+paswoord 
+wachtwoord 
+de wereld draait door 
+bus 
+blond 
+black 
+private 
+bus 
+celeb 
+blond 
+search files, youtube and liveleak 
+xxx 
+the wall 
+nude 
+house 
+lost 
+legend 
+cissp harris 
+orange box 
+world in conflict 
+alain delon 
+winrar 3.71 
+iphone 
+300 
+transformers 
+zzz 
+abba 
+iphone 
+search files, youtube and liveleak 
+heros 
+heroes 
+milf 
+small breast 
+puffy 
+rape 
+extreme 
+zzz 
+sex 
+teen 
+amateur 
+http://www.vuze.com/download/ms6c72pvhxgknv2yknobkzjxj6346awx.torrent?referal=cdp 
+amateur 
+homemade 
+drunk 
+sex 
+die hard 
+transformers 
+gouden kooie 
+gouden 
+oops 
+clit 
+fortwo 
+sex 
+teen 
+shrek3 
+lolita 
+teen 
+young 
+zzz 
+skihut 
+die hard 
+gimme more 
+gimme more 
+http://www.vuze.com/download/z3aunbhkksaybzuajlvmmbzt6kpo26y4.torrent?referal=cdp 
+reaper 
+cissp harris 
+cissp 
+sex training 
+cissp 
+jericho 
+orange box 
+orange box 
+girls 
+girls 
+girls 
+three kings 
+orange box 
+orange box 
+search files, youtube and liveleak 
+the 
+blink 
+roots 
+blink 
+shins 
+blink 
+blink 
+gothic 
+gothic 
+s01e01 
+the unit 
+pokemon 
+chess 
+the kingdom 
+linkin park bleed it out 
+crackaddict 
+crack addict 
+crack addict 
+teoman 
+andress 
+guida 
+fenech 
+fenech 
+massive 
+windows 
+floor 
+floorplan 
+turbofloorplan 
+young 
+keine angst 
+overdeveloped 
+electric torture 
+j.river 
+music match 
+shrek ita 
+shrek ita 
+kalifornia ita 
+kalifornia ita 
+kalifornia ita 
+j.river 
+marco paolini 
+marco paolini 
+marco paolini 
+creampie 
+search files, youtube and liveleak 
+metal 
+queen stone age 
+pink floyd 
+steve jensen 
+steve jensen 
+queen stone age 
+queen 
+teen 
+2007 
+ubuntu 7.10 
+2007 
+ubuntu 7.10 
+2007 
+ambient 
+canary effect 
+canary effect 
+canary effect 
+sicko 
+ncis 
+ncis 5 
+ncis 5 
+csi 4 
+criminal minds 
+criminal minds 3 
+spiderman 3 
+qi 
+turistas 
+windows xp for mac 
+le colline hanno gli occhi 
+le colline hanno gli occhi 
+sunshine 
+sunshine ita 
+sunshine 
+superman 
+qtec 
+qtek 
+qtek 
+qtek 
+norton 
+search files, youtube and liveleak 
+öblivion 
+öblivion 
+öblivion 
+dune 
+capturenx 
+capture 
+öblivion 
+key nx 
+forest of death 
+prince 
+blow 
+the ring 
+transformers 
+spiderman 3 
+transformers 
+vista 
+kira reed 
+nederland 
+nederlands 
+strictly 
+norton internet security 
+playboy calender 
+prada 
+ita 
+gay 
+search files, youtube and liveleak 
+high stakes poker 
+porno holocaust 
+ea sports 
+ea games 
+xxx 
+saturnino 
+smith ken 
+smith ken 
+smith ken 
+smith ken 
+smith ken 
+eva longoria 
+radiohead 
+dido 
+blunt 
+java 
+need for speed carbon pc 
+io canto 
+io canto 
+io canto 
+io canto 
+io canto 
+milf 
+l'era glaciale 
+michael franti 
+non aprire cuela porta 
+sms 
+batman begin 
+nuns 
+non aprite quella porta 
+non aprite quella porta 
+porno 
+porno 
+porno 
+porno 
+film - dvx ita - non aprite quella porta - l'inizio 2006 - horror 
+el invensible 
+el invensible 
+el invensible 
+film dvx ita 
+film dvx 
+pc game 
+fifa 08 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+grey's anatomy 
+hentai 
+manga 
+hentai 
+nederlands naakt 
+nederlands 
+nederlands 
+nederlands 
+nederlands 
+tyfla 
+space 1999 
+xxx 
+ufo 
+eastenders 
+spider pork 
+spider pork 
+starsisters 
+spooks 
+network security portable reference 
+illustrated tcp-ip - a graphic guide to the protocol suite 
+illustrated tcp-ip - a hacking the cable modem (2006) 
+illustrated tcp-ip - a hacking the cable modem (2006) 
+illustrated tcp-ip - a hacking the cable modem 
+illustrated tcp-ip - a hacking 
+illustrated tcp-ip - a hacking 
+hacking 
+salsa 
+sasha 
+mature 
+forbidden 
+forbidden 
+mesmer 
+tomtom 
+search files, youtube and liveleak 
+an awfully big adventure 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+five branded women 
+frankie martinez 
+frankie martinez 
+frankie martinez 
+salsa ny 
+salsa ny 
+salsa ny 
+salsa ny 
+salsa ny 
+salsa 
+severage 
+tagli al personale 
+tagli al personale 
+tagli al personale 
+tagli al personale 
+un impresa da dio 
+molto incinta 
+the simpson 
+search files, youtube and liveleak 
+sms 
+sotto mentite spoglie 
+cemento armato 
+hairspray 
+kama sutra 
+film ita 
+ubuntu 
+bath 
+film spanish 
+lesbian 
+kryštof 
+kryštof 
+ubuntu 
+2pac - hit 
+fatlip - the salmon dance 
+akon - swete 
+akon 
+kanye west - the best life 
+kanye west-the best life 
+kanye west 
+onyx - slam harder 
+slam harder 
+fm concepts 
+stacy keibler 
+tarkan 
+tarkan 
+tarkan 
+nicole 
+candy 
+kurtlar vadisi 
+kurtlar vadisi 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+kenza 
+vista 
+mac os x 
+mac os x 
+nfs u2 mod 
+nfs u2 mod 
+bikini 
+source code 
+source code 
+source code 
+source code 
+source code 
+justin timberlake 
+ayo technology 
+justin timberlake 
+dark rising 
+harlot 
+harlots 
+young harlots 
+search files, youtube and liveleak 
+lesbian 
+lesbian 
+search files, youtube and liveleak 
+potter 
+young harlots 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lesbian 
+vanoza 
+vanoza 
+vivere 
+vanoza 
+search all files 
+der eisige tod 
+search files, youtube and liveleak 
+system of a down 
+ratatouille 
+yelle 
+aztec camera 
+ligtning seeds 
+lightning seeds 
+lightning seeds 
+roddy frame 
+bitreactor.to 
+bitreactor.to 
+sal solo 
+search files, youtube and liveleak 
+high school musical 
+enttarnt 
+high school musical dvd rip 
+enttarnt 
+enttarnt 
+enttarnt 
+postal 
+postal 
+postal 
+ratatouille 
+enttarnt 
+planet terror 
+planet terror deutsch 
+der kleine könig macius 
+alton towers 
+alton towers 
+der sternwanderer 
+age of empires 
+imperialism 
+caeser 
+swinger 
+der sternwanderer 
+die drei räuber 
+die vorahnung 
+die vorahnung 
+enttarnt 
+tutorial 
+lynda.com tutorial 
+linux+tutorial 
+linux+ tutorial 
+ltutorial 
+the invasion 
+the invasion 
+the invasion 
+könige der wellen 
+enttarnt 
+enttarnt 
+chuck und larry - wie feuer und flamme 
+nero dvd 
+chuck und larry - wie feuer und flamme 
+xmlspy 
+ratatouille 
+spider man 3 
+bizar 
+pervers 
+transformers 
+breath easy 
+sperm 
+transformers 
+transformers 
+transformers 
+squirt 
+transformers 
+harry potter und der orden des phönix 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+next 
+a chi mi dice 
+achmed the terrorist 
+das perfekte verbechen 
+das perfekte verbechen 
+das perfekte verbechen 
+insurting 
+evan allmächtig 
+flyboys - helden der lüfte 
+how i met your mother 
+insirting 
+high school musical 2 
+motel 
+roots 
+disturbia 
+are you afraid of the dark 
+are you afraid of the dark voiceover 
+tiziano ferro 
+die liebe in mir 
+hände weg von mississippi 
+rennschwein rudi rüssel 2 
+s.h.i.t. - die highschool gmbh 
+goodbye bafana 
+spider man 3 
+spider man 3 german 
+andrea bocelli 
+dezember boy german 
+dezember boys german 
+dezember boys 
+dezember boys 
+dezember boys 
+ratatouille german 
+enttarnt german 
+tha dogg pound 
+snoop 
+john frusciante 
+john frusciante 
+john frusciante 
+slim shady lp 
+slim shady 
+for a friend 
+the chronic 
+chronic 
+the chronic 
+ice cube 
+friday 
+ren som 
+fucking 
+the office 
+resident evil extinction 
+atheism 
+dirt 
+heheryt 
+sum 41 
+google 
+system 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+orkestbanden 
+beddingfield 
+john stamos 
+superbad 
+jenna 
+red vs blue 
+die hard 
+die hard fr 
+die hard french 
+transformer french 
+bsg 
+mac os x 
+l'arc en ciel 
+tits 
+larc en ciel 
+muse 
+dutch 
+nederlands 
+harry 
+starwars 
+pirate 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rise the vieno 
+mrs robinson 
+mrs robinson 
+mrs robinson 
+mrs. robinson 
+me and julio down by the schoolyard 
+me and julio down by the schoolyard 
+me and julio down by the schoolyard 
+hey jude 
+metalocalypse 
+let it be 
+metalocalypse 
+metalocalypse 
+the office 
+criminal minds 
+metalocalypse season 
+half life 2 
+scarface 
+half life 2 orange box 
+psv 
+foo pretender 
+pretender 
+search all files 
+metalocalypse 
+photoshop ita 
+photoshop tutorial 
+muse 
+muse 
+photoshop video 
+photoshop tutorial 
+photoshop video 
+photoshop sphere 
+photoshop 
+photoshop video 
+30 days of night 
+homeworld 
+demonoid 
+young harlots 
+trasformer 
+.mp3 
+metalocalypse 
+metalocalypse 
+metalocalypse 
+melissa 
+simpsons 
+quicken 
+windows 
+shrek 
+minisode 
+metalocalypse 
+naked 
+nacked 
+naked 
+eros 
+breast 
+amateur 
+pics 
+house 
+video 
+search files, youtube and liveleak 
+the officec 
+the office 
+search files, youtube and liveleak 
+final fantasy 
+final fantasy movie 
+final fantasy 
+charles darwin 
+search files, youtube and liveleak 
+dawkins 
+massage 
+atlantis 
+breasts 
+breasts 
+ubuntu 7.10 
+babysitter 
+babysitter 
+maid 
+massage 
+nurse 
+redhead 
+marshall 
+marshall mathers 
+marshall mathers 
+marshall mathers lp 
+breast exam 
+breast 
+alicia 
+lennox 
+cerberus 
+prey 
+invisible 
+private 
+transformers 
+private 
+women loving women 
+search files, youtube and liveleak 
+nos 
+cargo train 
+nederlands 
+najib 
+netwerk 
+edoardo bennato 
+charles 
+dawkins 
+big 
+amateur 
+anal 
+feist 
+playboy 
+domplayer 
+hd 
+rape 
+bond 
+dom player 
+axxo player 
+va house 
+dana 
+breasts 
+pregnant 
+dog 
+kitchen 
+public 
+breasts 
+massage 
+dorm 
+sister 
+ridley walker 
+massage 
+search files, youtube and liveleak 
+a 
+babysitter 
+a 
+x 
+remixland 
+remix 
+the dark is rising 
+usenext 
+tribler 
+andreas dorau 
+c64 
+search files, youtube and liveleak 
+idol 2007 
+kaspersky 7 
+search files, youtube and liveleak 
+preteen 
+lolita 
+dylan 
+kaspersky 7 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+christofer hiding 
+christoffer hiding 
+christoffer idol 
+master chief 
+chris daughtry 
+master chief sucks 
+http://tracker.prq.to/announce 
+rtatouille 
+ratatouille german 
+ratatouille german 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+live nterior 3d 
+alias 
+supernatural 
+lost 
+lost ita 
+lost 
+resident evil ita 
+ita 
+ventures 
+ventures 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tom tom 6 
+surf's up 
+robyn 
+colin mc rae dirt 
+colin mc rae dirt 
+sicko 
+search files, youtube and liveleak 
+pirates of the carribean 
+aria 
+porno 
+hey there delilah 
+funk 
+jeff dunham 
+jethro 
+elisa 
+soul 
+skye edwards 
+jazz 
+kispál és a borz 
+maicrosoft encarta 2007 
+search files, youtube and liveleak 
+shrek 
+reggae 
+u-roy 
+u-roy 
+u-roy 
+big youth 
+picture 
+reggae 
+hip hop 
+funk 
+funk 
+nephew 
+funk 
+funk 
+szabadság szerlem 
+jazz funk 
+return to house haunted hill 
+php 
+traktor 
+heroes 
+senza parole 
+heroes s2e5 
+vasco 
+heroes s2 e5 
+lily allen 
+batép 
+batép 
+*.mp3 
+*.mp3 
+shoot 'em up 2007 
+shoot 'em up 
+supra 
+supra 
+gps 
+windows 
+metrowerk 
+popolus 3 
+rail simulator 
+ubuntu 
+linspire 
+php 
+x51r 
+windev 
+windev 
+windev 
+windev 
+windev 
+asus notebook 
+windev 
+windev 
+windev 
+windev 
+windev 
+windev 
+windev 
+pcsoft 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+windows 
+cognos 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+overpowered 
+the office 
+rammstein 
+voyeur 
+girls gone wild 
+apple 
+ashlynn 
+south park 
+search files, youtube and liveleak 
+lost 
+lost tv 
+half life 2 
+bud driver 
+bus driver 
+diggnation 
+diggnation 
+bizottság 
+diggnation 
+topo 
+waszlavik 
+rail 
+home video 
+trance 
+yaoi 
+yaoi 
+yaoi 
+resident evil 
+porn 
+the seeker 
+xxx 
+perfect keylogger 
+levees 
+levees 
+when the levees broke 
+when the levees broke requiem four parts 
+senza parole 
+senza parole 
+senza parole vasco rossi 
+vasco rossi 
+vasco rossi 
+mystery 
+privateville 
+private ville 
+big fish 
+jar head 
+jar head 
+bruce springsteen 
+robin van persie 
+senza parole 
+space 1999 
+blake's 7 
+the mighty boosh 
+blood diamonds 
+blood diamonds 
+blood diamonds 
+search files, youtube and liveleak 
+nin 
+moroe 
+strap on 
+baris manco 
+baris manco 
+baris manco 
+baris manco 
+xxx 
+kemal sunal 
+bbw porn 
+porn 
+porn bbw 
+assfuck 
+search files, youtube and liveleak 
+the.magnificent.butcher.(dvd.rip.rus.by.gigon) 
+dario argento 
+the simpson movie 
+suspiria 
+film 
+rude girls 
+filprison break 
+prison break 
+rude girls 
+prison break ita 
+stardust ita 
+running 
+strip 
+bang bros 
+baby got boobs 
+my sisters got friend 
+my sisters hot friend 
+naughty america 
+toon boom 
+rape 
+surf's up 
+surfs up 
+surf's up 
+incest 
+drunk 
+orgy 
+office 
+creampie 
+revenge of the nerds 
+why did i get married 
+handel 
+tremors 
+tug 
+candid 
+vanoza 
+vanoza 
+gray-man 
+xxx 
+karma 
+tutti frutti 
+t bed 
+j holiday 
+chris brown 
+luther vandross 
+blue magic 
+the kingdom 
+hentai 
+gossip girl 
+chris rea 
+half life 
+ufo 
+oktoberfest 
+dfx audio 
+parachutes 
+horse 
+horse 
+dvd burner 
+dvd ripper 
+piss 
+piss 
+jericgo 
+jericho 
+search files, youtube and liveleak 
+snehurka 
+south park 
+futurama 
+cairo 
+hongkong movie 
+mirigy 
+nike 
+caliban 
+arab 
+like it hot 
+arab 
+marwa 
+marwa 
+harry potter goblet 
+harry potter half 
+death note special 
+search files, youtube and liveleak 
+tomtom one 
+house s04 
+the office s04 
+flac 
+jazz 
+warhammer 
+cuore 
+donna 
+stefania sandrelli 
+stefania sandrelli 
+laura antonelli 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+filmes dublados 
+filmes dublados 
+selen 
+wii 
+selen 
+selen 
+selen 
+selen 
+selen 
+sexy luna 
+sexy luna 
+sexy luna 
+wii rockstar 
+cristiano malgiolio 
+cristiano malgiolio 
+wii pal 
+883 
+883 
+883 
+883 
+883 
+selen 
+selen 
+selen 
+selen 
+selen 
+selen 
+selen 
+sexy luna 
+wii pal 
+house s04 
+movie 
+pinnacle 
+pinnacle hun 
+pinnacle 
+lessons 
+lessons 
+lessons 
+lessons 
+daft 
+kraftwerk 
+zagar 
+zagar 
+drawn s04 
+drawn s03 
+house s04 
+tera patrick 
+the office s04 
+office mac 
+naruto shippuuden 31 
+naruto shippuuden 31 ita 
+house 4 4 
+search files, youtube and liveleak 
+scrubs s04 
+scrubs s03 
+scrubs s02 
+scrubs s01 
+scrubs 
+simpsons s19 
+teen 
+teen porn 
+kispál 
+lunch 
+search files, youtube and liveleak 
+ferry corsten junk 
+family guy 
+vista 
+search files, youtube and liveleak 
+die hard 4 
+tiesto 
+resident evil extinction 
+family guy 
+teen 
+resident evil extinction 
+tiesto 
+milf 
+ocean's 13 
+heroes 
+south park 
+family guy 
+funny 
+honda crf230 
+pacman on crack 
+pacman on crack - blood hound gang 
+blood hound gang 
+lordi 
+red dwarf 
+hand 
+gay 
+hogtied 
+kink 
+panties 
+p.o.v. 
+pov 
+voyeur 
+spy 
+babe 
+trainspotting 
+blow 
+oral 
+suck 
+boa 
+john q 
+heros 
+heroes s02 
+naked 
+natural 
+pretty 
+faitless 
+faithless 
+search files, youtube and liveleak 
+surfs up 
+homem aranha 
+homem aranha 
+homem aranha 
+homem aranha 
+homem aranha 
+homem aranha 
+not another teen movie 
+jackass2 
+search files, youtube and liveleak 
+simpsons 
+search files, youtube and liveleak 
+tits 
+tits 
+boobs 
+porn 
+tits 
+arcade fire 
+sex 
+sex 
+games 
+stuff 
+the girl next door 
+the girl next door 
+the girl next door movies 
+the girl next door movies 
+knocked up 
+knocked up 
+girls 
+sesame street 
+mázs 
+ubuntu 
+stafkaarten 
+wilco 
+lynda.com 
+eureka 
+dexter 
+smallville 
+kozure ôkami 
+zion train 
+zion train 
+ufo 
+k ville 
+kiscsillag 
+call of duty 
+search files, youtube and liveleak 
+kiscsillag 
+search files, youtube and liveleak 
+usmle 
+usmle 
+usmle 
+sex 
+girs 
+girls 
+regclean 
+uninstaller 
+http://video.google.com/videoplay?docid=3896964583024194307search files, youtube and liveleak 
+http://video.google.com/videoplay?docid=3896964583024194307search files, youtube and liveleak 
+linkin park 
+donna 
+cemento armato 
+flachau 
+sylvian 
+queensryche 
+sylvian 
+lisa lipps 
+comedy 
+hofi 
+fripp 
+fripp 
+sylvian 
+handjob 
+geoff tate 
+busty dusty 
+loloferrari 
+maxi mount 
+chelsea charms 
+plenty up top 
+big clit 
+triky 
+vanessa del rio 
+buddha bar flac 
+triky 
+triky 
+big clit 
+genesis 
+lolo ferrari 
+anal 
+starcraft 
+starcraft 2 
+requiem 
+massive atack 
+che guevara 
+gib 
+massive atack 
+massive atack 
+300 ita 
+sex 
+battisti 
+chinesepod 
+transcript 
+nella 
+300 ita 
+yuri's 
+heroes 
+chinese 
+l'ultima legione 
+halo 3 
+annácska 
+kelemen anna 
+i fantastici 4 
+anna kelemen 
+méhkirálynÅÂ\91 
+300 
+thai 
+thai 
+michel clayton 
+good night and god luck 
+good night and good luck 
+domino 
+domino ita 
+la spada nella roccia 
+pornó 
+pornó 
+goog night and goog luck 
+porn 
+non aprite quella porta 
+scary movie 
+scary movie ita 
+300 ita 
+molto incinta 
+300 
+300 ita 
+star wars ita 
+star wars 
+search all files 
+search all files 
+search all files 
+sex 
+la spada nella roccia 
+game 
+tankcsapda 
+tankcsapda 
+search files, youtube and liveleak 
+resident evil 
+007 
+apple 
+xxx 
+awr 
+suzuki 
+suzuki 
+suzuki 
+suzuki 
+ipod 
+resident evil 
+ebook 
+monteverdi 
+monteverdi 
+superbad 
+mpl-studios 
+met-art 
+source code 
+squirt 
+squirt 
+modflow 
+ulver 
+ulver 
+landdesktop 
+land desktop 
+xxx 
+sunshine 
+divx ita sunshine 
+007 
+007 casino royal 
+ankara 
+ankara 
+ankara 
+ankara 
+ankara 
+nero 8 
+search files, youtube and liveleak 
+duran duran 
+sylvia saint 
+magyar 
+shakespeare sisters 
+shakespeare sisters 
+shakespeare sisters 
+army of lovers 
+windows 
+ratatouille 
+extinction 
+fallen 
+halo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+heroes 
+matrix 
+armstrong 
+trough it all 
+little big adventure 
+"bill evans" 
+little big adventure 
+iconix 
+iconix 
+uml 
+uml 
+uml 
+the wire 
+black sun empire 
+"bill evans" 
+amateur 
+"bill evans" 
+"bill evans" 
+"bill evans" 
+"bill evans" 
+e-book 
+"bill evans" 
+"charles mingus" 
+mp3 
+system of a down war 
+system of a down 
+system of a down 
+kozmix 
+"charles mingus" 
+hun 
+katie melua 
+szabadon 
+madonna 
+baez 
+hungarian 
+michelle wilde 
+bros 
+inxs 
+inxs 
+tokyo drift 
+trust 
+martin donovan 
+once were warriors 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+karib 
+pirates 
+ida 
+keygen 
+galaxy hitchhikers 
+galaxy hitchikers 
+oasis 
+hitchikers 
+emil rulez 
+steam 
+portal 
+lesbian 
+shemale 
+mature 
+kaiji 
+incest 
+regulate 
+portal 
+portal 
+xxx 
+tranny 
+pacman 
+tranny 
+pacsteam 
+mike oldfield 
+david 
+shemale 
+milf 
+goth 
+portal 
+model 
+gofast 
+paccafe 
+tom waits 
+anal 
+mac 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+teens 
+deep purple 
+lady 
+search files, youtube and liveleak 
+born to run 
+http://tracker.megashara.com:2710/b21eb57047ec49648358693885c15fa0/announce 
+xxx 
+postal2 
+prison break 
+belladonna 
+katsumi 
+prison break s03e05 
+clara 
+nin 
+portal 
+portal 
+nine inch 
+nine inch 
+south park 
+portal 
+south park s11e10 
+handcuffs 
+handcuffs 
+deep purple 
+bondage 
+msp 
+afetishpage 
+girls cuffed 
+south park s11e10 
+jethro 
+paccafe 
+pacda 
+pacsafe 
+crack 
+vista 
+shrek 3 hungarian 
+shrek 3 hun 
+shrek 3 
+playboy 
+playboy 
+playboy 
+portal 
+nero linux 
+heroes linux 
+portal 
+portal 
+portal 
+portal 
+portal 
+portal 
+winamp 
+arsen 
+urine 
+pissing 
+pissing 
+hdtv 
+deep purple 
+colbert 
+deep purple 
+deep purple 
+deep purple 
+sigur heima 
+heroes 
+hey you 
+angel 
+beggars opera 
+squirt 
+clip 
+mike oldfield 
+beggars opera 
+prison break 
+it crowd 
+joes vera 
+jenna jameson 
+come back pearl jam 
+pearl jam 
+come back 
+winning eleven 11 
+winning eleven 11 
+immagine in cornice 
+image in a frame 
+pearl jam 
+immagine in cornice 
+radiohead 
+curvead air 
+ac mega 
+curvead air 
+curvead air 
+curvead air 
+curvead air 
+deep purple 
+deep purple 
+battlestar season 
+barbie il lago dei cigni 
+barbie il lago dei cigni 
+toy story 
+toy story 
+toy story 
+pynk floyd 
+torture 
+pink floyd 
+cum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bmw 
+tiesto 
+led zeppelin 
+rosetta 
+bmw navigation 
+rosetta 
+rosetta 
+metallica 
+slayer 
+slayer 
+matthew garrison 
+barbie 
+barbie ita 
+mercedes navigation 
+die hard 
+david lynch 
+la foresta magica 
+need for speed 
+need for speed pro street 
+akos 
+republic 
+trixie 
+search files, youtube and liveleak 
+jojo mayer 
+lost 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+machine head 
+trixie 
+topanga 
+the silence of the lambs 
+the silence of the lambs 
+tipper 
+republic 
+fodera 
+akos 
+deep purple 
+vad fruttik 
+beatles 
+dan hartman 
+hypnotized 
+mario bart 
+sibelius 
+license to wed 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rolling stones 
+film 2007 ita 
+paris 
+vst 
+britney 
+hd 
+hd 
+doors 
+pogues 
+rocco 
+toto 
+styx 
+rocco 
+strawbs 
+sicko 
+big wet 
+big rack attack 
+big wet 
+big wet 
+big wet 
+asia 
+big rack attack 
+bella 
+rory gallagher 
+vanessa 
+mathilde 
+mathilde 
+draghixa 
+rocco 
+search files, youtube and liveleak 
+moody blues 
+spooky tooth 
+van der graf generator 
+rocco 
+rocco 
+la strategia della lumaca 
+lost 
+adriano celentano film 
+paolo rossi 
+paolo rossi 
+ufo 
+carmelo bene amleto 
+carmelo bene 
+procol harum 
+atomic rooster 
+chitarra classica 
+sandro penna 
+rolling stones 
+ungaretti 
+deep purple 
+cartoni animati 
+led zeppelin 
+kansas 
+catene film amedeo nazzari 
+curvead air 
+catene film amedeo nazzari 
+amedeo nazzari filmi 
+amedeo nazzari filmi 
+curved air 
+barry ryan 
+painful 
+steeleye span 
+blackmores night 
+yes 
+tekken2 iso 
+tekken2 iso 
+king crimson 
+tekken2 iso 
+tekken2 iso 
+tekken2 iso 
+vanilla fudge 
+gentle giant 
+credence clearvater revival 
+credence clearwater revival 
+sasha 
+sasha 
+search files, youtube and liveleak 
+black rain 
+celebrity porn 
+jethro 
+red hot jam 
+bbc 
+jethro 
+jethro 
+jethro 
+jethro 
+doors 
+pink floyd 
+jackas 3 
+stri[tease 
+striptease 
+striptease 
+from hand to mouth 
+hand to mouth 
+tits 
+stockings 
+fuck 
+search all files 
+stockings 
+xxx 
+windows xp sp2 nl 
+you got the key 
+heroes 
+house 
+seems to be the hardest word 
+tera patrick 
+love is the key 
+an inconvenient truth 
+google 
+google 
+google 
+cute 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jetro tull 
+cartoon 
+search files, youtube and liveleak 
+hentai 
+jetro tull 
+jetro tull 
+big.rack 
+jetro tull 
+jetro tull 
+search files, youtube and liveleak 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+jetro tull 
+the beatles 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+il destino di un guerriero 
+ferry corsten 
+ferry corsten junk 
+ferry corsten junk 
+gray-man 
+manu chao 
+the angrez 
+il destino di un guerriero 
+mettalica 
+manu chao 
+manu chao 
+manu chao 
+eldest 
+britney spears 
+xxx hd 
+xxx2007 
+american girl in paris 
+jack johnson 
+brook april 
+il gladiatore 
+film ita 
+prison.break 
+prison.break 
+prison break 
+mtv scarred 
+dave gahan paper monster 
+dave gahan papermonster 
+dave gahan paper monster 
+paper monsters 
+south park 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rauch 
+rauch 
+black 
+brook april 
+mature 
+jazz 
+riley mason 
+wacky races 
+nightwish amaranth 
+nightwish 
+nightwish 
+manhunt 
+spongebob 
+lesbian 
+teen 
+shivering isles 
+halo 2 xp 
+shivering isleshivering isles 
+shivering isleshivering isles 
+shivering isle 
+shivering isles 
+breakdance 
+shivering isles 
+teen 
+naked 
+amateur 
+ederbringhausen 
+bromskirchen 
+annika 
+yannik 
+sahra 
+kevin 
+luca 
+sims 
+wild hogs 
+befor 
+all for one 
+ein engel fliegt in die nacht 
+ben 
+glashaus 
+monrose 
+einen stern 
+gangsta 
+the invasion 
+seeker 
+my name is earl 
+terror 
+shoot em u 
+trixie 
+topanga 
+little april 
+teen 
+trixie 
+fela 
+golf 
+search files, youtube and liveleak 
+little april 
+search files, youtube and liveleak 
+dark star 
+dark star one 
+e book 
+mehkiralyno 
+kelemen 
+anna 
+14 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+teen 
+nigerian 
+nigerian 
+beast 
+entrepreneurship 
+arte dokumentation 
+thirakwa 
+dusche 
+umkleide 
+umkleidekabine 
+private 
+pornochic 
+olivia mojica 
+il ruffiano 
+pthc 
+vsti 
+michele strogoff 1975 
+lino banfi 
+business 
+michele strogoff 
+business 
+ita 
+la frontiera del drago 
+nero 
+winrar 
+manifold 
+cmd32 
+dm32 
+32 
+porn 
+nod32 
+dreamgirls 
+beyonce 
+nexus 
+nexus jupiter 
+nexus jupiter 
+supernatural 
+black sluts 
+black 
+culture club 
+sakuraba rampage 
+sakuraba 
+rampage jackson 
+pride 
+abba 
+felecia 
+bbc 
+culture club 
+culture club 
+culture club 
+culture club 
+felecia 
+sonja 
+sonja 
+baez 
+lolita 
+preteen 
+florida teen 
+florida 
+sandra 
+nastia 
+culture club 
+ogniem i mieczem 
+porno 
+sport 
+gina wild 
+stargate 
+stargate 
+stargate atlantis 
+in da club 
+mixmania 
+saw4 
+hitbox 
+brad pitt 
+brainiac 
+brainiac 5 
+alazai 
+2pac 
+avril lavigne 
+phil eagles 
+florida 
+florida teen 
+sweet melonee 
+melonee 
+modelingdvds 
+phil eagles 
+toxicity 
+radiohead in rainbows 
+radiohead in rainbows 
+stan getz 
+preteens 
+supergirl 
+naruto 
+naruto porn 
+porn 
+naughty america 
+naughty america 
+brazzers 
+tricosta 
+axx0 
+colin 
+diamanton 
+diamanden cop 
+diamanden 
+diamanten 
+gimme more 
+prison break 
+prison break s03e05 
+heroes s03e05 
+heroes s02e05 
+heroes s02e05 
+heroes 
+hitman 
+justice - dance 
+justice - dance 
+justice - dance 
+justice - dance 
+teen 
+tottenham 
+horse 
+shrek 3 
+shrek 3 rip 
+heros of might and magic 
+search all files 
+transformers 
+club life 028 
+weeds 
+kyle 
+28 weeks later 
+radiohead ok computer 
+sex 
+radiohead ok computer 
+walt disney 
+walt disney ita 
+walt disnay 
+walt disnay 
+walt disnay 
+walt disnay 
+horror ita 
+horror ita 
+sandra ptsc 
+sandra ptsc 
+ls magazine 
+whores 
+ls magazine 
+black sluts 
+black whores 
+black sex 
+avatar 
+avatar 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+30 rock 
+arrested development 
+crime and punishment 
+avena lee 
+shaving 
+shaves 
+shaved 
+japanese 
+asian 
+japanese 
+shaving 
+shaving 
+shaving 
+shaving 
+exam 
+lamb of god 
+psp 
+yello 
+belew 
+belew 
+king crimson 
+bob marley 
+bob marley 
+bob marley 
+bob marley 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+californication 
+bourne 
+nude 
+house 
+the shining 
+scrubs 
+fiatal 
+csisztu 
+csisztux 
+magyar 
+bang bros 
+poltergeist 
+dawn of the dead 
+scrubs 
+achool 
+alchool 
+40 inches plus 
+big tits round asses 
+btra 
+young 
+college fuck fest 
+milf 
+alcohol 
+halloween 
+halloween 
+return of michael myers 
+muse 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+girls 
+girls 
+the evil dead 
+bmw 
+search files, youtube and liveleak 
+mom 
+jethro 
+vivid 
+sasha grey 
+daemon tools 
+gloria guida 
+gloria guida 
+gloria guida 
+teagan presley 
+gloria guida 
+teagan 
+bigfish 
+blowjob 
+curb 
+daemon 
+daemon 
+avere vent'anni 
+film 
+film ita 
+transformer il film ita 
+transformer ita 
+film ita 
+transformer ita 
+heroes ita 
+heroes stagione2 ita 
+heroes stagione 2 ita 
+heroes stagione 1 ita 
+heroes stagione completa ita 
+heroes stagione completa 
+transformer ita 
+prison break 3 
+prison break 3 sub ita 
+prison break 3 ita 
+prison break 3 sub ita 
+prison break 3 completa 
+prison break 3 
+search files, youtube and liveleak 
+zagar 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+zagar 
+zagar 
+chris cunningham 
+grand funk 
+* 
+imogen heap tiesto hide and seek 
+* 
+tiesto 
+codecharge studio 3 
+sanskrit 
+sanskrit 
+sanskrit 
+sanskrit 
+sanskrit 
+sanskrit 
+vedic 
+vedic 
+vedic 
+diventerai una star 
+umbrella 
+finley 
+queen 
+queen greatest hits 
+kitt 
+all night long 
+lionel riche 
+k.i.t.t. 
+k.i.t.t. 
+kitt 
+love today 
+love today 
+guns and roses 
+saturnino 
+hide and seek 
+elect the dead 
+kitt 
+touran 
+finding beauty in negative spaces 
+fifa crack 
+imogen heap 
+search files, youtube and liveleak 
+ruzsa magdi 
+prison break 
+ruzsú 
+rúzsa 
+visual webdev 
+import-export 
+search files, youtube and liveleak 
+könig der wellen 
+könig der wellen 
+die vorahnung 
+guitar pro key 
+a href='http://torrentz.ws/torrent/156966/hokkabaz-2006-dvd-rip-xvid-61' title='hokkabaz (2006) dvd-rip xvid-61 torrent ownload'>hokkabaz (2006) dvd-rip xvid-61</a> 
+die alten rittersleut 
+a href='http://torrentz.ws/torrent/156966/hokkabaz-2006-dvd-rip-xvid-61' title='hokkabaz (2006) dvd-rip xvid-61 torrent ownload'>hokkabaz (2006) dvd-rip xvid-61</a> 
+search files, youtube and liveleak 
+andrew sisters 
+divx heroes ita 
+heroes ita 
+heroes ita 1x01 
+die blechbüchsenarmee 
+die blechbüchsenarmee 
+die blechbüchsen 
+king arthur 
+search files, youtube and liveleak 
+barbarella 
+heroes ita 1x0 
+heroes ita 
+lecce 
+dialetto leccese 
+pirates 
+tette 
+troy 
+troy hans zimmer 
+troy 
+kalifornia 
+troy 
+troja 
+teen 
+sesso 
+spogliarello 
+spogliarello 
+ratatouille ita 
+streep tease 
+streep tease 
+streep tease 
+streep tease 
+search all files 
+ray charles 
+good 
+andrew sisters 
+andrew sisters 
+nella 
+nella redtube 
+kings singers 
+boroka balls 
+search files, youtube and liveleak 
+kisvakond 
+resident evil 
+manhunt 
+gorillaz 
+andrew sisters 
+manhunt 
+halo 
+resident evil 
+reon 
+lili carati 
+canaglie 
+al volante 
+al volante 
+ford 
+una ford 
+una ford 
+al volante di una ford 
+succo d'arancia 
+canaglie 
+prison 
+sigla 
+sigla 
+sigla 
+house ita 
+monkey island 
+legend of kyrandia 3 
+legend of kyrandia 
+space quest 
+kelly brook 
+lord of the rings 
+axxo 
+pirates of the caribean world's end 
+pirates of the caribbean world's end 
+jennifer lopez 
+jennifer lopez 
+jennifer lopez 
+picture ace 
+search files, youtube and liveleak 
+scooter 
+nsked 
+naked 
+scooter 
+scooter 
+ita 
+nl 
+high school musical 2 
+high school musical 2 ita 
+monrose 
+monrose 
+jimi blue 
+alex c 
+du hast den geilsten arsch der welt 
+geilster arsch der welt 
+sexy 
+alex c. 
+striptease 
+strip tease 
+playboy 
+another world 
+bachman 
+planet terror 
+schönster arsch der welt 
+schönsten arsch der welt 
+sha 
+evan almighty scr avi 
+the question is what is the question 
+evan almighty full 
+monrose 
+american gangster 
+any dvd hd 
+search all files 
+open season 
+napoleon hill 
+landa 
+sims2 
+sims2 
+sims2 
+2007 
+2007 
+search files, youtube and liveleak 
+mrbigdickshotchicks 
+domination zone 
+phantom limb 
+my first black monster cock 
+wehrmacht 
+beckett 
+public 
+gay 
+gay 
+folsom 
+jimi blue 
+stardust 
+search files, youtube and liveleak 
+rtl klub 
+serial mom 
+acrobat 
+acrobat 8 ita 
+shower 
+porntube 
+porntube 
+porntube 
+justin dance 
+monrose 
+radiohead 
+tool 
+the thing 
+gorillaz 
+justice dance 
+the thing game 
+dopeman 
+hitman 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dopeman 
+top gun 
+norton 
+kamasutra 
+s 
+a 
+mai thuli bhaichu re 
+blowjob 
+von löwen und lämmern 
+ndour 
+n'doure 
+worldmusic 
+worldmusic 
+world music 
+world music 
+arabic 
+arabic 
+limp bizkit 
+gabriel 
+billy talent 
+peter gabriel 
+prison break 
+game 
+transformers 
+simpsons 
+anneke grohnlo 
+ben x 
+anneke gronloh 
+ben x 
+plop 
+freddy got fingered 
+freddy finger 
+girl next door 
+girl next door 
+battlefield 
+battlefield 
+home alone 
+brave one 
+encarta 2008 
+vier minuten 
+resident evil 
+painted viel 
+knocked up 
+encarta 
+evening 
+georgia rule 
+office 
+office 2003 
+grandma's boy 
+bleach 
+hors de prix 
+dvd 
+horror 
+horror 
+horror 
+horror 
+horror 
+horror 
+horror 
+grindhouse 
+spiderman 
+next 
+porn 
+porn 
+porn 
+porn 
+adobe after effects 
+adobe a 
+adobe 
+adobe 
+2007 
+adobe 
+adobe after 
+black porn 
+nero 
+godfather 
+black porn 
+perfume 
+porn 
+porn 
+jessica rizzo 
+porn 
+porn 
+50 cent 
+american gangster 
+sex 
+house m.d. 
+house 
+musica araba 
+araba 
+canzoni arabe 
+heroes 
+heroes 1e18 
+heroes 18 
+heroes 18 ita 
+heroes 18 fft 
+big tit 
+heroes 1 18 
+heroes 1 18 ita 
+xxx 
+cum tit 
+heroes 1 18 ita 
+heroes 1 18 fft 
+musica danza del ventre 
+heroes 1 18 fft 
+justice dance 
+joggs 
+big bow 
+big tits 
+heroes 
+heroes ita 
+heroes 18 ita 
+eastern 
+gay 
+eating out 
+naruto 60 episode dubbed english 
+naruto episode 60 (3/3) english dubbed 
+silver surfer 
+rape 
+naruto episode 60 (3/3) english dubbed 
+naruto episode 60 english dubbed 
+naruto episode 61 english dubbed 
+naruto episode 61 english dubbed 
+siberia 
+syberia 
+nederlands 
+dutch 
+50cent 
+50cent 
+50cent 
+eating out 
+cubase 
+hamam 
+the hamam 
+dont taze me 
+gay 
+tom & jerry 
+linkin park 
+invasion 
+imogen heap 
+sopranos 
+sopranos season 1 
+sopranos 
+porn 
+girl masturbating 
+movie 
+into the wild 
+into the wild 
+home alone 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+opeth 
+david gilmour 
+skateboarding 
+porn 
+tia tanaka 
+ass parade 
+opeth 
+circa survive 
+john mayer-kid a 
+kid a 
+ok computer 
+kinski 
+kinski 
+kinski 
+kinski 
+kinski 
+kinski 
+pocketpc 
+freedom project 
+family guy s06 
+nannie diaries 
+nanny diaries 
+diamond 
+mighty heart 
+import/export 
+import/export 
+operation: kingdom 
+the kingdom deutsch 
+the kingdom ger 
+the kingdom 2007 
+the kingdom 2007 ger 
+keira knightley 
+the kingdom 2007 
+enttarnt 
+abbyy 
+abbyy 
+abbyy corporate 
+abbyy corporate 
+abbyy 
+iraq 
+radio 
+raygold 
+ra@gold 
+stereo mcs 
+porn 
+dp anal 
+crow 
+crow angelica 
+angelica 
+crow 
+crow 
+crow 
+crow 
+trailer 
+rambo trailer 
+vidcast 
+search files, youtube and liveleak 
+itaù 
+ita 
+űrbalekok 
+teens 
+young 
+the dark is rising 
+house ita 
+traveler 
+heros 
+atb 
+ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+puke 
+vomut 
+vomit 
+scat 
+thugsjob 
+babysit 
+thugsjob 
+thugs job 
+thugs 
+il gladiatore in italiano 
+il gladiatore 
+il gladiatore in dvd 
+film il gladiatore 
+kodomo 
+matrix 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+leaf 
+aaliyha 
+aaliyah 
+rapsodia romana 
+search files, youtube and liveleak 
+sergio 
+frans bauer 
+u2 
+sergio mendes timeless 
+sergio mendes timeless 
+sergio mendes 
+search files, youtube and liveleak 
+xxx 
+search files, youtube and liveleak 
+program 
+xxx 
+video 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nero 
+cum facial 
+cum compilation 
+bicho de sete cabeças 
+gay 
+gay 
+gay 
+gay 
+gay 
+diggnation 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+blackadder 
+matrix 
+matrix ita 
+ratatuille 
+ratatouile 
+ratatouile 
+ratatouile 
+ratatouile 
+kent 
+kent ingenting 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+blackadder season 2 episode 2 
+grindhouse 
+ed edd eddy 
+grindhouse 
+ed edd eddy ita 
+sternwanderer 
\83Ù\83Å\81ãÅ\9fã\83\8bãĵãÄ°ã³ã\83\89 
\83ãÂ\83�Â\83Â\94ãĵã\83\9bãĵã³ã\83γÂ\83�Â\83�Ä\9fã\83\91ãÄ´ãĵã\83\89 
+chuck und larry 
+grand history of the realms 
+ghostrider 
+orc king 
+catia crack 
+catia 
+heroes 
+home of the brave ita 
+baabrika 
+super demo 
+super 
+timeattack 
+time attack 
+jorma 
+jorma kaukonen 
+neuromarketing: is there a 'buy button' in the brain? selling to the old brain for instant success 
+neuromarketing 
+sleepy kitten 
+simpson 
+kari tapio 
+kari tapio 
+kari tapio ja ville valo 
+kari tapio 
+simpson 
+simpson 
+simpson 
+simpson 
+kari tapio ville valo 
+pohjantähden 
+kari tapio 
+sex 
+skirts 
+search files, youtube and liveleak 
+shakatak 
+nero 
+ndubz 
+ndubz 
+n dubz 
+lady chatterly 
+lady chatterly 
+lady chaterly 
+lady chatterly 
+naruto 
+naruto 
+naruto 
+naruto 
+naruto 
+brigitta 
+naruto 
+hentai 
+regueton 
+" fast " furiust 
+regueton 
+naruto 
+2fast 2 furiust 
+2 fast 2 furiust 
+emulatore ps2 
+sto para 5 
+the pursuit of happyness 
+the pursuit of happyness 
+exdesi 
+johny gaddar 
+windvd 
+vanaja 
+lady chatterley 
+robin hood connery 
+robin hood 
+robin hood praed 
+film ita 
+billboard 
+heros 5 
+heroes 5 
+eating out 
+heroes s2e5 
+heroes s2e2 
+spiderman 3 ita 
+heroes s2e2 
+heroes 
+mina 
+daily show 
+colbert 
+colbert.report.10.22 
+twilight zone 
+i simpsons 
+i simpsons ita 
+i simpsons 
+history channel 
+traveler 
+shoot 'em up (2007) dvdrip xvid.avisearch files, youtube and liveleak 
+shoot 'em up 
+8mm 
+agnes18 
+arab 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sixth sense 
+resident evil 
+ladies tailor 
+ho voglia di te 
+ho voglia di te 
+metallica 
+300 ita 
+albano 
+cinema 
+rock it 
+search files, youtube and liveleak 
+rihanna 
+mal enseñado 
+leo mattioli 
+leo mattioli 
+future weapons 
+carolina 
+reggeto 
+reggeton 
+pearl jam verona 
+ojalai 
+jesica 
+jesica 
+jesica 
+mal enseñado 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nedved 
+search files, youtube and liveleak 
+nedved 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+south park 
+numbers 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+muppets 
+muppet 
+300 
+2 fast 2 furius 
+boob 
+search files, youtube and liveleak 
+beatles 
+harrison 
+antivirus 
+greg trooper 
+color climax 
+arcade fire 
+ponderland 
+californication 
+webcam 
+nokia 
+balls of fury 
+baby when the light 
+search files, youtube and liveleak 
+3d 
+mature 
+crank that 
+rocky lv 
+utility 
+apologize 
+rocky 4 
+west stronger 
+rocky the film 
+*.iso 
+incest 
+*.iso 
+ratatouille 
+madonna 
+transformers 
+nude 
+saw lv 
+saw 4 
+seed of chucky 
+pre teen feet 
+dave berry 
+childs play 
+ros stewart 
+rod stewart 
+saw 
+snuffx 
+balls of fury 
+snuffx sex 
+snuffx 
+lost 
+lost 
+beppie kortekaas 
+zwartboek 
+paul de leeuw 
+paul de leeuw 
+paul de leeuw 
+buddah bar 
+buddha bar 
+saw 4 
+pedo 
+britney spears - gimme more remix 
+hou 
+regrow hair 
+mp3 
+pearl 
+sex 2007 
+pearl 
+pearl 
+xxx 2007 
+orgy 2007 
+plain white 
+james blunt 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+buddha bar 
+musica orientale 
+orientale 
+nicchigni 
+genere 
+search files, youtube and liveleak 
+resident evil apocalypse 
+offspring 
+traveler 
+spice girls 
+take that 
+mac osx 
+mac osx 
+mac osx 
+madness 
+mac osx 
+mac osx 
+mac osx 
+madness 
+search files, youtube and liveleak 
+the police 
+the police 
+the police 
+the police 
+the police 
+the police 
+the police 
+the police 
+moltoincinta 
+xvid 
+mooltoincinta 
+moltoincinta 
+enrique iglesias 
+molto incinta 
+six feet under 
+six feet under 2 
+mac osx 
+mac osx 
+tribler 
+daft punk 
+cold duck complex 
+ahmad 
+xxx 
+pope town 
+smokin 
+plump djs 
+plump djs 
+plump djs 
+dj icey 
+plump djs 
+search files, youtube and liveleak 
+breakdance techno 
+black holes 
+sailing 
+musa 
+prison break 
+jorge falconcantante 
+jorge falconcantante 
+jorge falconcantante 
+frisky dingo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mac 
+quantum 
+coheed 
+heroes 
+magic tricks 
+jenna jamison 
+search files, youtube and liveleak 
+teen topanga 
+girls gone wild 
+kaiji 
+aika 
+little non 
+power of balance 
+1984 
+burst city 
+hikage 
+star club 
+knyacki 
+knyacki 
+sandy mp3 
+sandy 
+partition magic 
+billboard 
+thomas the train 
+.avi 
+alex jones 
+wiso 
+internet tv 
+dj 
+woodman 
+trailer 
+trailer 
+colbert 
+search files, youtube and liveleak 
+jessica ross 
+anal 
+pregnant 
+lattex 
+jessica ross 
+elena grimaldi 
+sonia eyes 
+rapes 
+spiderman 
+top 100 
+james blunt 
+andrew blake 
+bizzare 
+bizzare 
+dildo 
+toys 
+hungarian 
+magyar 
+hungary 
+giant 
+xxx 
+anna 
+pink prison 
+prison 
+pov 
+hard 
+bdsm 
+lesbian 
+fist 
+metallica 
+metallica 
+metallica 
+chamical brothers 
+end of all hope 
+end of all hope 
+end of all hope 
+end of all hope 
+end of all hope 
+nederlands dutch 
+dutch 
+search files, youtube and liveleak 
+dutch 
+dutch 
+dutch 
+nero 8 keygen. 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+lil wayne 
+nero 8 keygen. 
+nero 8 keygen. 
+nero 8 keygen. 
+birdman 
+nero 8 
+birdman 
+birdman 
+fisting 
+fisting 
+fisting 
+virtually 
+search files, youtube and liveleak 
+devil woman 
+painkiller 
+painkiller 
+ricky gervais 
+painkiller 
+painkiller 
+language tribler 
+boogerd 
+asimo 
+elvis presley 
+wilson pickett 
+wilson pickett 
+wilson pickett 
+charlie rich 
+charlie rich 
+john fogerty 
+john fogerty 
+john fogerty 
+john fogerty 
+john fogerty 
+john fogerty 
+lesbian 
+charlie rich 
+jerry reed 
+jerry reed 
+eagles 
+bob seger 
+bob seger 
+feet lover's 
+feet 
+mikayla miles 
+mikayla miles 
+mikayla miles 
+shemale 
+heroes ita 19 
+heroes ita 
+gay 
+bi 
+femjoy 
+memory 
+nl subs 300 
+http://www.youtube.com/watch?v=r-66cuatjmq 
+http://www.youtube.com/watch?v=r-66cuatjmq 
+ti dumaisch schto ja durak 
+ti dumaisch schto ja durak 
+ti dumaisch schto ja durak 
+malcolm in the middle 
+malcolm in the middle 
+signore anelli 
+prison break s3e1 
+prison break s3e2 
+jiban 
+prison break s3e1 
+jiban 
+angel dark 
+angeldark 
+prison break s3e3 
+angel dark 
+dark 
+teagan 
+prison break s3e3 
+sasha grey 
+vivid 
+the haunted 
+vivid 
+friends 
+jesse jane 
+keutenberg 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cum 
+search files, youtube and liveleak 
+gregorian 
+timberland 
+the who 
+take that 
+tears for fears 
+adam ant 
+housemartins 
+gaisha 
+gregorian 
+geisha 
+neverne bebe 
+arrested development 
+101 80s hits 
+what goes around 
+what goes around 
+simple 
+incest 
+search files, youtube and liveleak 
+momo 
+madonna 
+saw 4 
+saw 4 
+exdesi 
+search files, youtube and liveleak 
+ita 
+ita catwoman 
+divx ita 
+pooh 
+superbad 
+la donna del mio amico 
+superbad 
+il caimano 
+rocky alboa 
+tikaran 
+la donna del mio amico 
+la donna del mio amico 
+la donna del mio amico 
+la donna del mio amico 
+cash 
+search files, youtube and liveleak 
+babysitters 
+sachin deb burman 
+tribler 
+tanita 
+babysitters 
+denver 
+babysitters teaches lolita 
+lolita 
+the oc 
+pes 6 
+natale a new york 
+al di là di tutti i limiti 
+lolita 
+chappelle 
+whose garden is this 
+whose garden is this 
+whose garden is this 
+whose garden is this 
+tribler 
+porn 
+carnhy 
+carny 
+full game 
+blades of glory 
+family guy 5 
+family guy 5 
+family guy 5 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+guns and roses 
+rob zombie 
+shoot em up 
+rob zombie 
+rob zombie live 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+chad baker 
+chat baker 
+yr 
+lolita 
+search files, youtube and liveleak 
+lolita 
+ita 
+south park ita 
+lolita 
+lolita 
+homemade 
+amateur 
+south park ita 
+elephant man 
+otakus 
+beenie man 
+search files, youtube and liveleak 
+ita 2007 
+airpcap 
+airpcap 
+albanien 
+dr. house 
+house.s04 
+house 
+airpcap 
+mario barth 
+james blunt 
+bruce springsteen 
+nightwish 
+ratpack 
+monrose 
+coda 1.0.5 
+mark knopfler 
+rihanna 
+annett louisan 
+annie lennox 
+soundtrack 
+ich + ich 
+bob dylan 
+nelly furtado 
+timbaland 
+david gilmour 
+linkin park 
+jennifer lopez 
+mika 
+benedetti 
+the protector 
+enzo greggio 
+rammstein 
+rostenstolz 
+rosenstolz 
+reinhard mey 
+reinhard may 
+the housemartins 
+katie melua 
+artic monkeys 
+artic monkeys 
+laith al-deen 
+artic monkeys 
+laith al deen 
+artic monkeys 
+vertical horizon 
+simpsons 
+vertical horizon 
+soundtrack 
+roxio 
+nero 
+nl 
+nek 
+giochi w300 
+jar for w300 
+jar for w300 
+w300 
+mandriva 
+suse 
+driver magic 
+simona ventura 
+simona ventura 
+simona ventura 
+google 
+driver magic 
+privat 
+privat 
+driver magic 
+nackt 
+blonde raped 
+driver magic 
+nackt muschi 
+frau 
+led zeppelin 
+vrt disk 
+i simpson il film 
+auto 
+k 2000 
+haite 
+ginger lynn 
+nackt 
+hey you pink floyd 
+roxio 
+sex 
+nackt 
+sex 
+homepage 
+altenkessel 
+hey you 
+nashville 
+gif 
+mac 
+kool and the gang 
+my sims 
+kool and the gang 
+richy martin 
+gigi d'alessio 
+thomson twins 
+neverne bebe 
+heroes 
+italy tatangelo 
+heroes 1x18 ita 
+heroes 1 18 ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+big tit 
+big tit 
+heroes 
+xxx tit 
+heroes 
+office 2007 
+mark ronson 
+pink 
+lemon popcycle 
+lemon popsicle 
+pink 
+pink 
+oceans 12 
+home made gangbang 
+gangbang 
+gangbang 
+worlds deadlest catch 
+gangbang 
+gangbang 
+gangbang 
+gangbang 
+kameleon 
+kameleon schippers 
+nl subs 
+exagerado 
+lolita 
+exagerado - cazuza 
+2005 
+animusic 
+masturbate 
+madonna 
+the bourne ultimatum 
+small jackets 
+teendreams 
+capitan harlock 
+capitan harlock 
+capitan harlock 
+capitan harlock 
+capitan harlock 
+i simpsons.italia 
+simpsons.italia 
+simpsons 
+valentino rossi 
+valentinorossi 
+catacomb 
+film.italia 
+film italia 
+film italia 
+film italia 
+film italia 
+mac os x leopard 
+search files, youtube and liveleak 
+love hina 
+resident evil 
+jennefer lopez 
+j. lopez 
+lopez 
+friends hot mom 
+naughty america 
+big boobs 
+big tits 
+psp 
+fuck tit 
+teachers pet 
+gay 
+gay sex 
+gay hard 
+gay sex movies 
+avatar 
+bending battle 
+movies 
+porn movies 
+gay sex 
+mago de oz 
+ita 2007 spiderman 3 
+spiderman 3 ita 
+cartoni animati 
+cartoni animati 
+camera cafè 
+camera cafè 
+ita 
+camera ita 
+south park ita 
+tropa de elite 
+avatar 
+gay anal 
+two for the money 
+recoil sub ambiente 
+recoil sub ambient 
+recoil sub 
+die hard 
+brother bear 
+brother bear 2 
+uniblue 
+uniblue 
+uniblue 
+weeds 
+hellsing 
+hellsing 
+hellsing 
+sex 
+bookworm 
+bookworm bitches 
+bookworm bitches 
+bookworm bitches 
+bookworm 
+office 
+30 rock 
+kid nation 
+interracial 
+kid nation 5 
+teresa dziuk 
+teen 
+modest mouse 
+search files, youtube and liveleak 
+beatles 
+the pigeon detectives 
+momsonfilm 
+banged 
+the pigeon detectives 
+ally mcbeal 
+barton fink 
+blu notte 
+no country for old men 
+no country for old men 
+no country for old men 
+no country for old men 
+no country for old men 
+beatles 
+beatles 
+beatles 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+black cheerleader search 
+preteen 
+preteen 
+rammstein 
+megaherz 
+nazis 
+search files, youtube and liveleak 
+weißwurscht 
+weißwurscht 
+weißwurscht 
+beatles 
+*ffice 
+ffice 
+wurscht 
+halloween 
+sexy 
+busty 
+big tits 
+boobs 
+fuck 
+titty 
+titty fuck 
+homemade 
+homemade porn 
+porn 
+porn 
+porn 
+porn 
+porn 
+search files, youtube and liveleak 
+* 
+kaena 
+internet tv 
+chemical brothers 
+pavarotti 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+search files, youtube and liveleak 
+renato zero 
+rem 
+pirates of caribbean 
+kaylani lei 
+sabrine maui 
+compagni di scuola 
+prison break 
+ocean's 13 
+ocean's 13 
+ocean's 13 
+beyonce 
+search files, youtube and liveleak 
+video kings 
+brazzers 
+feist 
+* 
+pirates of caribbean 
+crazy frog 
+iceking 
+renedepaula 
+leopard 
+back4win 
+fifa 08 crack 
+maryha carey 
+xp fast 
+fast 
+phil collins 
+mathieu 
+matthieu 
+michael jackson 
+masturbating 
+pussy 
+danni ashe 
+alison angel 
+alison angel 
+alison angel 
+alison angel 
+orgy 
+gordon haskell 
+gordon haskell 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+movi hun 
+movi 
+movi 
+movi 
+resident evil 
+resident evil extinction 
+kaede 
+duncan dhu 
+search files, youtube and liveleak 
+saturday night live 
+saturday night live 
+friday 
+paul mcartney 
+artic monkeys 
+artic monkeys 
+artic monkeys 
+n dubz 
+mango 
+windvd 
+documentary 
+black cat white cat 
+live and let die 
+brazzer 
+brazzer 
+black cat white cat 
+mango 
+mango 
+mango 
+enola gay 
+search all files 
+search all files 
+mango 
+underground 
+underground 
+ti amo cosi 
+ti amo cosi 
+ti amo cosi 
+telugu 
+mango 
+mango 
+mango ti amo cosi' 
+mango ti amo cosi' 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tits 
+tupa 
+tupac 
+weeds 
+underground 
+girls aloud 
+dschingiskhan 
+voyeur - hidden camera 
+lions men 
+220 kg bankdrücken 
+bankdrücken 
+rambo 4 trailer 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+genesis 
+genesis 
+sting 
+still alive 
+dan le sac 
+wrestling 
+latex 
+desperate housewifes 
+search all files 
+desperate housewifes 
+desperate housewifes 
+desperate housewifes 
+desperate housewifes 
+progdvb 
+os x 
+muse 
+produkt key windows xp 
+produkt key windows xp 
+norton ghost 
+my friends hot mom 
+pass the marijuana 
+tits 
+bad brains 
+btra 
+horse 
+horse 
+dog 
+elton 
+portal 
+lost 
+wild 
+girls 
+mp3 
+shakira 
+shak 
+u2 
+tv internet 
+bitter forces and lame race horses 
+cs3 
+elo 
+elooffice 
+elo office 
+wiso börse 
+harry potter 
+microsoft office 2007 
+hörbücher 
+schuld und sühne 
+donna leon 
+lluis llach 
+adobe 
+adobe acrobat 
+lluis llach 
+italiensich 
+latein 
+ragtime 
+microsoft office 
+italienisch 
+hörbuch 
+adobe 
+adobe acrobat 
+lluis llach 
+saegusa 
+saegusa 
+tropa de elite 
+search files, youtube and liveleak 
+teen 
+allamvizsga 
+harry potter und der 
+donna leon 
+elo 
+elo digital 
+elo office 
+microsoft office 
+microsoft 
+cecilia bartholy 
+adobe acrobat 
+adobe photoshoü 
+adobe photoshop 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ratatouille xvid 
+the alamo 
+the alamo 
+the alamo 
+purple turtles 
+pedofilian 
+naturist 
+naturist 
+naturist 
+naturist 
+naturist 
+dancing 
+dancing 
+dancing 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+holynature 
+arctic tale 
+2007 
+2007 
+2007 
+2007 
+funny 
+puppy 
+dog 
+eating 
+dragon 
+simpsons 
+simpsons 
+jesus 
+a 
+2007 
+2007 
+1408 
+lesbian 
+interkey 
+rejected cartoons 
+groove 
+go 
+rejected cartoons 
+rejected cartoons 
+teen 
+lolita 
+pedo 
+preteen 
+logos 
+libronix 
+libronix 
+hebrew 
+csi 
+csi season 8 episode 3 
+csi season 8 
+csi season 8 
+csi 
+greek 
+biblical 
+bart 
+nusamamp 
+nusammap 
+nusammap 
+interkey 
+southpark 
+thumb 
+emperor's new groove 
+tengo thumb 
+tengo 
+tengo 
+tengo 
+tengo thumb 
+11 
+ratta 
+rattatouille 
+xmc 
+mxc 
+deadliest catch 
+pthc 
+scrubs 
+rammstein 
+scrubs 
+mashing pumpkins 
+ÅÂ\9bwiry day 
+mashing pumpkins halloween 
+http://www.liveleak.com/view?i=d4e_1189170009 
+http://www.liveleak.com/view?i=d4e_1189170009 
+mashing pumpkins hallowe'en 
+grandma 
+mashing pumpkins hallowe'en mashup 
+paul potts 
+outkast 
+psycodrama 
+psychodrama 
+psych odrama 
+psycho drama 
+trillian password recovery 
+instant messenger password recovery 
+how to recover trillian passwords 
+trillian 
+ita 
+spazio 1999 
+radiohead 
+radiohead 
+radiohead 
+spazio1999 
+sms sotto mentite spoglie 
+michael clayton 
+michael clayton ita 
+stardust 
+stardust ita 
+resident evil extinction 
+resident evil extinction ita 
+resident evil ita 
+quel treno per yuma ita 
+un'impresa da dio 
+un impresa da dio 
+hairspray - grasso è bello 
+hairspray - grasso è bello 
+hairspray - grasso è bello 
+hairspray grasso è bello 
+hairspray 
+hairspray ita 
+invasion ita 
+sex 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+walpapers 
+program 
+radiohead 
+game 
+radiohead live 
+radiohead live 
+stereophonics you gotta go there to come back 
+stereophonics 
+placebo 
+the spinto band 
+radiohead live 
+belle sebastian 
+kings of leon live 
+kings of leon 
+lma manager 
+trillian 
+scarface 
+dipset 
+starcraft 
+radiohead in rainbows 
+radiohead in rainbows 
+dj who kid 
+coldplay 
+coldplay 
+coldplay 
+coldplay 
+coldplay 
+10000 days 
+10,000 days 
+stereophonics 
+mars volta 
+mars volta frances the mute 
+mars volta amputechture 
+mars volta live 
+mars volta ep 
+mars volta live 
+omar rodriguez 
+se dice bisonte no bufalo 
+mars volta deloused 
+mars volta comatorium 
+stereophonics 
+oasis 
+the deadly snakes 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+football manger 2008 
+football mangaer 2008 
+football manager 2008 
+lma manager 2008 
+lma manager 
+meet the twins 
+where the boys 
+all 
+footballl manager 2008 
+ja so warn's 
+utube 
+utube 
+utube 
+utube 
+beck 
+720p 
+wmv 
+wmv 720 
+footballl manager 2008 
+rattatoi 
+ratatouille 
+ratatouille[2007]dvdrip[eng]-axxo 
+in the army now 
+ratatouille-axxo 
+ratatouille swe 
+missoni 
+tits 
+footballl manager 2008 
+in the army now status quo 
+sofa 
+breast 
+xbox 360 
+xbox 360 
+figa 
+.iso 
+tittyland 
+ass 
+uk sluts 
+uk amature 
+uk amateur 
+hot fuzz 720p 
+hot fuzz 
+slip 
+hot fuzz dvdr 
+pompini 
+bocchini 
+bryan adams 
+perizoma 
+roberta missoni 
+bad manners 
+transformers 720 
+spartiti chitarra classica 
+dvd ita 
+dvd divx ita 
+eva 
+eva enger 
+eva 
+programmi mac 
+eva enger 
+luana 
+boobs 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+sex 
+niples 
+carol 
+carol gold 
+starch 
+oil 
+oil 
+clipfischer 
+clipfisher 
+clipfisher 
+ratatouille 
+ratatouille 
+ratatouille swe 
+ratatouille dvd 
+ratatouille dvdr 
+search files, youtube and liveleak 
+x 
+pinat 
+pinay 
+wmv 720 
+transformers 720 
+tribler 
+boston tea 
+boston tea 
+boston 
+boston tea 
+subsmission 
+submission 
+theo van gogh 
+gone baby gone 
+raffaello 
+raffaello 
+"prison break" 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+the streets 
+a man comes around 
+its too late 
+the streets 
+the sims 
+the streets 
+mars volta de loused 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mars volta amputechture 
+mars volta live 
+mars volta ballroom 
+monrose 
+dj size 
+mars volta 
+blur 
+oasis live 
+oasis live knebworth 
+oasis 
+perfect circle 
+oasis 
+oasis knebworth 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+omar rodriguez 
+mars volta de loused 
+artic monkeys 
+vollidiot 
+highlander 
+eminem 
+eminem 
+eminem 
+fütisent 
+fütisent 
+fütisent 
+fütisent 
+fuftisänt 
+eminäm 
+oasis live 
+funny 
+funny you tube 
+you tube 
+modest mouse 
+porno 
+bagno 
+hitman 2 
+curtis 
+video 
+curtis 
+n dubz 
+bionda 
+blonde 
+lesbian 
+pigon detectives 
+pijpen 
+lma manager 
+lul 
+bloot 
+football manager 
+world racing 2 
+lma managr 
+lma manager 
+world racing 
+world racing 
+fate/stay night 
+fate/stay night 
+fate/stay night 
+fate/stay night, .avi 
+gossip 
+standing in the way of control 
+fate/stay night 
+artic monkeys 
+gossip 
+girls, kiss 
+favourite worst nightmare 
+gossip 
+standing in the way of control 
+the sims 
+sims 
+lucy pinder 
+porn 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+menice too society 
+menice too society 
+dvd ita 
+hentai 
+cartoon sex 
+photoshop 
+photoshop nl 
+photoshop dutch 
+football manager 
+medium 
+nfl 
+the simspons 
+simspsons 
+the simpsons movie 
+haze 
+aurora 
+snow 
+medium 
+ryan dan 
+ryan dan 
+madonna 
+ryan dan 
+ryan dan 
+ryan dan 
+prison break 
+brothers sister 
+brothers sisters 
+sms sotto mentite spoglie 
+vegas 
+led zeppelin stairway 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+my name is earl 
+gigolo 
+4400 
+dirt 
+windows xp 
+office 2007 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kiss 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+k 
+enourmous 
+monster 
+large 
+pirates 
+the witcher 
+medal of honor 
+medal of honor 
+boston 
+myggan 
+jericho 
+bioshock 
+quake 
+quake 
+ubuntu 7.10 dvd 
+bones 
+bones season 3 
+online tv 
+taiwan online tv 
+taiwan tv 
+chinese tv 
+chinese tv 
+wizard of oz 
+wizard of oz 
+rush hour 3 
+ong bak 
+mo5712 
+vdo dayton 
+300 
+leopard 
+leopard 
+batmam 
+search files, youtube and liveleak 
+allie 
+girlfriend 
+sex 
+sex 
+msn 
+msn 
+msn 
+msn 
+girl 
+babe 
+british 
+all murray 
+xxx 
+rocco 
+asstraffic 
+xxx 
+teen 
+all murray 
+teenage 
+sex teenage 
+honey 
+thai 
+thai vedio 
+thai vedio 
+thai vedio 
+teenage sex 
+sex 
+addison-wesley 
+guys 
+zion 
+wisin 
+daddy yankee 
+search files, youtube and liveleak 
+filmes 
+preteen 
+pedo 
+10yo 
+10yo 
+10yo 
+10yo 
+10yo 
+10yo 
+preteen 
+preteen 
+preteen 
+preteen 
+preteen 
+preteen 
+preteen 
+teen 
+teens 
+kiddie 
+teen 
+alternative 
+porn 
+pron 
+porn 
+sex 
+search files, youtube and liveleak 
+google 
+google 
+i now pronounce you chuck and larry 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+antwerpen 
+car 
+air crash 
+air crash 
+air crash 
+air crash 
+dvd ita 
+search files, youtube and liveleak 
+bus xxx 
+bukkake 
+japan 
+nadia ali 
+blowjob 
+time after time 
+vrt 
+xxx 
+time after time 
+japan xxx 
+japan 
+animalsex 
+hotel californa 
+animalsex 
+animalsex 
+hotel california 
+animalsex 
+dog love 
+dog cum 
+suck dog 
+asian porn 
+asian porn 
+extrem sex 
+jenna jameson 
+animal sex 
+24 
+animal cum 
+love cum 
+pompini 
+pompini 
+pompini 
+pompini 
+pompino 
+pompino 
+facial 
+japan 
+asian 
+monday noght raw 22 october 2007 
+suck 
+dvd ita 
+bus 
+train 
+monday noght raw 22 october 2007 
+abuse 
+wwe monday night raw - october 22 2007 
+big cock 
+animalsex 
+animal sex 
+bestiality 
+dog love 
+dog xxx 
+dog cum 
+suck dog 
+sex 
+akon 
+suck horse 
+the protector - la legge del muay thai 
+sex horse 
+divx ita - the protector 
+video films 
+video films 
+18 
+animal 
+jungle 
+gay brutal 
+gay brutal 
+gay rape 
+rape 
+mature 
+mature xxx 
+little 
+suck cock 
+disney 
+love 
+gay 
+oral 
+babe 
+young 
+girls kiss 
+young 
+xxx 
+xxx italian 
+xxx ita 
+search files, youtube and liveleak 
+ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+zoofgilia 
+zoofilia 
+zoofilia 
+sex 
+oral gay 
+sperm 
+bukkake 
+bukkake xxx 
+xxx 
+reaping 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+asian 
+j 
+polo hofer 
+hofer 
+hofer 
+adult 
+adult 
+cum oral 
+xxx oral 
+xxx 
+cazzo 
+gigi 
+gigi 
+uncensored 
+katie melua 
+blowjob 
+babes 
+xxx 
+triber 
+bob dylan 
+andre nickatina 
+bookmaker 
+abba 
+abba 
+becoming jane 
+greys anatomy 
+greys anatomy season 2 
+abba 
+avatar 
+avatar 1 
+abba 
+abba 
+greys anatomy season 2 
+bones sason 3 
+bones season 3 
+bones season 3 
+bones season 3 
+bones season 3 
+ghost whisperer 
+avatar 
+avatar 307 
+abba 
+abba 
+abba 
+abba 
+cuby en blizzard 
+harry muskee 
+gay 
+blondie 
+search files, youtube and liveleak 
+blondie 
+blondie 
+q65 
+chris rea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+facialabuse 
+clit 
+chris rea 
+tina tunner 
+tina tuner 
+porcupine tree 
+inventor 
+inventor 
+abba 
+golden earing 
+golden earing 
+search files, youtube and liveleak 
+lesbian 
+csi ita 
+lesbian sex 
+csi las vegas 7x ita 
+csi las vegas 7x ita 
+ita smallville 6x 
+pro evolution soccer 2008 
+prison break heb 
+prison break 06 
+prison break 07 
+saint 
+silvia saint 
+ps2 ita pro evolution soccer 2008 
+owen hart 
+wwf 
+exes and ohs 
+morandi 
+monia 
+sassari 
+carbonia 
+cagliari 
+sardegna 
+james brown 
+jenna 
+falcon 
+zwartboek 
+sex 
+fucking 
+de beatles 
+rolling stones 
+rolling stones 
+rolling stones 
+nude 
+eros 
+divx ita 
+el orfanato 
+mana 
+the cats 
+mana 
+cats 
+orfanato 
+cats 
+cats 
+laura pausini 
+laura pausini 
+laura pausini 
+abba 
+abba 
+laura pausini 
+laura pausini 
+laura pausini 
+luv 
+kaly malou 
+kaly malou 
+katy malou 
+madonna 
+katy malu 
+inventor 
+inventor 
+football manager 2008 
+aarts 
+aarts 
+football manager 2008[multi 9] 
+football manager 2008 [multi 9] 
+football manager 2008 
+adobe 
+autodesk inventor 
+football manager 2008 [multi-9] 
+football manager 2008 
+search all files 
+search all files 
+barletta 
+resident evil 3 
+barletta 
+resident evil 3 
+resident evil 3 ita 
+resident evil 3 
+the simpsons 
+sugababes 
+umbrella 
+vomit 
+diamond 
+hitzone 43 
+hitzone 41 
+hitzone 41 
+tomtom 
+morricone 
+mozart 
+britten 
+hitzone 41 
+teen 
+wii 
+brian regan 
+brian regan 
+wii mario 
+3 allegri ragazzi morti 
+3 allegri ragazzi morti 
+wii mario 
+stevie ann 
+stevie ann 
+arab 
+stevie ann 
+closer to the heart 
+closer to the heart ann 
+closer to the heart 
+damien rice 
+diana krall 
+diana krall 
+damien rice 
+john legend 
+avid 
+avid xpress 
+michael clayton 
+madonna 
+jan keizer 
+erik huizebosch 
+erik huizebosch 
+erik huizebosch 
+erik huizebosch 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tomtom6 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+zodiac 
+all videos 
+zodiac swe 
+best videos 
+best videos 
+zodiac 
+best videos 
+transformers 
+best videos 
+best videos 
+best videos 
+best videos 
+transformers 720p 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+zodiac 
+search files, youtube and liveleak 
+720p 
+transformers 720p 
+007 james bond ita 
+007 james bond 
+007 james bond 
+007 james bond 
+transformers 720p 
+007 vendetta privata 
+007 vendetta privata 
+007 vendetta privata 
+007 vendetta privata 
+007 vendetta privata 
+vendetta privata 
+mission impossible 
+mission impossible ita 
+mission impossible ita 
+mission impossible ita 
+mission impossible 
+bebe lilly 
+adobe photoshop 
+donovan 
+donovan 
+donovan 
+jan smid 
+jan smid 
+jan smid 
+jan smid 
+jan smid 
+jan smid 
+jan smid 
+idols 
+everybody 
+monica bellucci 
+rod de nijs 
+monica 
+sandra bullock 
+monica bellucci 
+rob de nijs 
+maria sharapova 
+sex 
+maria sharapova nude 
+maria sharapova nude 
+monica bellucci 
+monica bellucci 
+monica bellucci 
+search files, youtube and liveleak 
+eric steckel 
+maria sharapova' 
+maria sharapova 
+harry potter 
+harry potter ita 
+die hard ita 
+stardust 
+stardust ita 
+duran duran rio 
+duran duran rio 
+michael claitonita 
+michael ita 
+michael clayton ita 
+hazes 
+eric steckel 
+eric steckel 
+eric steckel 
+eric clapton old love 
+nuggets 
+cbt 
+cbt 
+behind 
+ef tale 
+cbt 
+grey's 
+heroes 
+alex jones 
+sicko 
+search files, youtube and liveleak 
+spiderman 
+search files, youtube and liveleak 
+youtube 
+transformers ita 
+spiderman dvd 
+i fantastici 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+youtube 
+tessa 
+tessa 
+tessa 
+blonde big tits 
+stockings 
+jenna 
+virtual sex 
+sexvilla 
+jenna 
+kruistocht 
+kruistocht 
+bourne 
+dvd 
+dvd 
+2007 
+2007 
+xx 
+rip 
+clemson maryland 
+xc 
+z 
+polaco tiraera 
+polaco 
+polaco 
+polaco 
+don omar 
+good luck chuck 
+good luck chuck 
+vida guerra 
+vida guerra 
+vida guerra 
+pbs 
+north korea 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fun 
+the bourn ultimatum 
+the bourn ultimatum ita 
+spiderman 3 dvd ita 
+spiderman 3 dvd ita 
+spiderman 3 dvd ita 
+dire straits 
+barao vermelho 
+exe 
+nikki anderson 
+federica guttuso 
+guttuso 
+tribler 
+"would that make you love me" david harness 
+lesbien 
+lesbien sex 
+atb-in love with the dj 
+umbrella 
+sex 
+wiedźmin 
+witcher 
+jericho 
+jericho 1 
+linux 
+heroes 1 
+heroes so1 
+heroes s01 
+agepe 
+search files, youtube and liveleak 
+friday night lights 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+em 
+axo 
+rus 
+grey's anatomy 
+saxon 
+saxon 
+saxon 
+liveleak 
+mr brooks 
+porntube 
+porntube 
+porntube 
+porntube 
+porntube 
+porntube 
+xxx 
+garage band 
+sarenna lee 
+sarenna lee 
+sarenna lee 
+sarenna lee 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+julia ann 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+julia ann 
+julia ann 
+julia ann 
+julia ann 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ava lustra 
+ava lustra 
+ava lustra 
+ava lustra 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+on the sunny side of the street 
+sting 
+bill evans 
+julia ann 
+4 non blond 
+4 non blond 
+4 non blonds 
+4 non blonds 
+ballet heels 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anime 
+pthc 
+gilmore girls 
+pthc 
+gilmore girls - season 4 episode 16 
+girls 
+yo 
+10yo 
+preteen 
+nablot 
+smoke 
+preteen 
+incest 
+natasha 
+estonia 
+young 
+peachez 
+prison break 
+search files, youtube and liveleak 
+campeones hacia el mundial 
+endig crono trigger 
+rape 
+omanko 
+ashampoo 
+sammy 
+one three hill 
+one three hill 
+one three hill 
+one three hill 
+one three hill 
+isreal kamakawiwo`ole 
+prostreet 
+fifa 2008 
+ps2 
+over the rainbow 
+jimy hendrix 
+zoo 
+animal sex 
+tory lane 
+alektra blue 
+bakers dozen 
+mp3 
+alektra blue 
+carmella bing 
+pov 
+slut 
+mp3 
+the shadows 
+facial 
+katsumi 
+nashville 
+i am easy 
+nashville, i am easy 
+nashville, i am easy 
+carradine, nashville, i am easy 
+carradine, nashville, i am easy 
+carradine, nashville, i am easy 
+nashville, i am easy 
+nashville, i am easy 
+genesis, supper's ready 
+the beatles 
+lesbian 
+jenna jameson 
+belladonna 
+taryn thomas 
+service animals 
+strap attack 
+strap attack 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tomb raider anniversary 
+tomb raider anniversary saves 
+nanni moretti 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+caro diario 
+sex 
+tomtom per cellulare 
+tomtom per cellulare 
+tomtom 
+caro diario 
+e2 
+eros ramazzotti 2007 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+slumber party 
+ocean's 11 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+plain white t`s 
+plain white t`s 
+plain white 
+blow me sandwich 
+animal sex 
+pron 
+weezer 
+weezer 
+weathus 
+blink 182 
+blink 182 
+blink 182 
+blink 182 
+blink 182 
+blink 182 
+blink182 
+blink182 
+blink182 
+blink182 
+blink182 
+blink 182 
+blink 182 
+enema of the state 
+the cut 
+the cut love 
+love 
+sonic temple 
+weathus 
+the weathus 
+the wheatus 
+the wheatus 
+wheatus 
+goo goo dolls 
+goo goo dolls january friend 
+goo goo dolls 
+c# 3.0 nutshell 
+c# 3.0 
+queen 
+magic 
+hard 
+film hard 
+film hard 
+film porno 
+sonic temple 
+bond 
+hardtied 
+gimpl 
+boerenkapel 
+gimpl 
+office 
+medicopter 
+ps2 game 
+nou toe maar 
+search files, youtube and liveleak 
+ramsey kitchen 
+ruvo di puglia 
+net ff anders 
+bari 
+feest kapel 
+feestkapel 
+noutoemaar 
+dvs 
+dvs katwijk 
+captain 
+captain general 
+concordia leiden 
+exelcior delft 
+concour sciedam 
+concours sciedam 
+love letter 
+party 
+amateur 
+couple 
+sex tape 
+april 
+linux 
+nokia 
+nokia 770 
+nokia 800 
+maemo 
+dvd ita il destino di un guerriero 
+il destino di un guerriero 
+mathland 
+disney magic 
+dvd ita 
+disney cartoon 
+hidden 
+security 
+system of a down 
+system of a down steal this album 
+golden ratio 
+"unsquare dance" 
+"unsqueare dance" 
+"brubeck" 
+security cam 
+security cam 
+freeware 
+linux 
+tribler 
+cute 
+security cam 
+amateur 
+girlfriend 
+1408 
+1408 
+1408 
+search files, youtube and liveleak 
+saw 4 
+saw 3 
+la caccia 
+la caccia 
+pc 
+missione parigi 
+sluts 
+pc 
+missione parigi 
+lay the kat 
+missione parigi 
+missione parigi 
+missione parigi 
+ratatouille 
+cemento armato 
+cemento armato 
+cemento armato 
+ratatouille 
+io non sono quì 
+in questo mondo libero 
+distretto di polizia 7 
+distretto di polizia 7 
+dutchman 
+dutchman 
+dutchman 
+quel treno per juma 
+quel treno per yuma 
+voyeur 
+hidden 
+gallactica 3 season 
+college 
+erotic 
+gallactica season 3 
+porno 
+baccara yes sir 
+baccara yes sir 
+mickey mouse 
+windows vista ultimate 
+sesamstraat 
+ernst en bobbie 
+deep purple 
+route66 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+nero 
+hostel2 
+audio boek 
+audio book 
+voetbal 
+kaylani 
+leanni 
+asian 
+squirt 
+search files, youtube and liveleak 
+eyes wide shut 
+erotic 
+clit 
+stay alive 
+spiderman 
+stay alive dvd ita 
+eyes wide 
+chained 
+chained 
+chained 
+chained 
+blackout 
+chained heat 
+2061 
+2061 
+2061 
+busty teen hardcore 
+leopard 
+maria de filippi 
+maria de filippi 
+maria de filippi 
+barlettai 
+barletta 
+barletta 
+barletta 
+search files, youtube and liveleak 
+trondheim 
+cytherea flower 
+cytherea 
+breast 
+dream theatre 
+dream theatre once in a livetime 
+dream theatre 
+sim city rush hour 
+dream theatre 
+dream theatre 
+dream theatre 
+rush hour 
+rush hour 
+rush hour 
+rush hour 
+sim city 
+sim city 
+lost 
+beatles 
+teen 
+beatles 
+farkas 
+paola turci 
+eng227-compaq 
+hitzone 41 
+tomtom6 
+tomtom6 
+dvd-ita 
+dvx 
+dvd-ita 
+king kong 
+king kong 
+king kong 2005 
+nero7 
+stay alive 
+stay 
+stay alive 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+big fish 
+arthur pieter 
+nazarian arthur 
+kienhuis 
+jan boers 
+boris titulaer 
+boris keep in touch 
+boris things 
+cat stevens 
+cat stevens 
+boris 
+boris 
+boris 
+boris 
+cat stevens 
+skater boy 
+search files, youtube and liveleak 
+turkse meid 
+turkse meid 
+kasbergen 
+turkse chick 
+search files, youtube and liveleak 
+quark 
+prison break 2x14 ita 
+prison break 2x14 ita 
+the messengers 
+excalibug 
+superbad 
+canary effect 
+nero 
+norton 
+norton ghost 
+sim city 4 
+fifa 08 
+avatar 
+party 
+kimberly holland 
+paris hilton 
+party hardcore 
+saw 4 
+weed grow guide 
+weed grow guide 
+weed grow guide 
+sim city 4 
+sha 
+macroquest 
+die hard ita 
+turistas ita 
+horror ita 
+the ring 
+the ring 3 
+invasion 
+smallville 
+madonna 
+dragon ball 
+naruto 
+acdc 
+ac/dc 
+naruto serie 2 
+neruto italiano 
+acdc 
+acdc 
+acdc 
+youtube acdc 
+youtube acdc 
+youtube acdc 
+youtube acdc 
+online 
+big fish 
+death proof 
+invasion ita 
+after dark 
+divx ita 
+corel draw 
+search files, youtube and liveleak 
+compilation 
+compilation 
+compilation 
+the messengers ger 
+24 season 
+bangbus 
+anime 
+video kill 
+oblivion 
+spears 
+vicio latino 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+paris latino 
+fugawi 
+fugawi 
+big fish 
+search files, youtube and liveleak 
+animal sex 
+big fish 
+animal with animal sex 
+paris hilton 
+12 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+13 yr sex 
+14 yr sex 
+15 yr sex 
+16 yr sex 
+17 yr sex 
+17 years old sex 
+10 years old sex 
+11 years old sex 
+12 years old sex 
+13 years old sex 
+best of coldplay 
+frank sintra 
+frank sinatra 
+dean martin 
+search files, youtube and liveleak 
+corel draw 
+reason 
+myseq 
+salsa 
+salsa 
+drunken sex orgy 
+reegeton 
+gavioes da fiel 
+gavioes da fiel 
+music 
+the kingdom 
+resident evil 
+dildo 
+dildo 
+enormous 
+hindi movie 
+hindi movie 
+sextet 
+tivo 
+seria 
+technotronic 
+i cesaroni 
+rebeldes 
+español en nueva york 
+search files, youtube and liveleak 
+the cure 
+video 
+totti 
+search files, youtube and liveleak 
+.mkv 
+mkv 
+pompini 
+sesso 
+paprica 
+paprica 
+hentai 
+pompini 
+pompini 
+breakfast at tiffany 
+sesso 
+sesso 
+moana 
+moana pozzi 
+ard 
+block party 
+the cureculture club 
+culture club 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+debora capriolio 
+lily thai 
+anal 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+hentai 
+scopate 
+scopate 
+reegeton 
+pompini 
+camasntra 
+camasntra 
+reegeton 
+reegeton 
+ratat 
+merenge 
+merenge 
+salsa 
+rock 
+dady yanky 
+dady yanky 
+don omar 
+don omar 
+winsi y yandel 
+american gangster 
+we own the night 
+michael clayton 
+3:10 
+dragon wars 
+dvd.avi 
+dvd rip french 
+industrial landscape 
+fuck 
+battlestar 
+corinne bailey rae 
+corinne bailey rae 
+squirt 
+filipino 
+filipina 
+mexican 
+latina 
+tengo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kiss kiss chris brown 
+good life 
+the office season 1 
+good life 
+good life kanye west 
+baby bash cyclone 
+crank that 
+alicia keys 
+leopard 
+the guardian 
+superman 
+titanic 
+transformer 
+èï½²æ\88\92 
+èï½² æ\88\92 
+tube 
\8f�¢é\87\91å\88\9a 
+prisonbreaker 
+prisonbreak 
+lost 
+èï½² 
+èï½² 
+stock 
+sex 
+sex 
+avatar 09 
+avatar 10 
+chingo bling 
+the simpsons movie 
+perreo 
+la cubanita 
+labyrinth 
+labyrinth piece of time 
+la cubanita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jamie cullum 
+cars divx ita 
+blacksonblondes 
+whiskey in the jar 
+windows xp professional 
+micheal clayton 
+linux.for.dumimes 
+linux.for.dummies 
+linux.for.dummies 
+reason 
+till sky falls down 
+tiesto 
+kanye west 
+monk 
+the monk 
+seduced 
+my friends hot mom 
+my friends hot mom 
+my friends hot mom 
+[divx ita] pathfinder 
+russian rape 
+windows xp professional 
+ring tones 
+windows xp professional 
+final cut 
+fadobe 
+[divx ita] italiano 
+blond 
+sister 
+nl 
+teacher sex 2007 
+teach me sex 2007 
+teacher sex 2007 
+teacher sex 2006 
+teacher 2007 
+sex teacher 2007 
+sexteacher 2007 
+andrea bocelli 
+[divx ita] film 
+[divx ita] coeleone 
+[divx ita] corleone 
+young milfs 
+young milfs 
+milfs 
+[divx ita] 300 
+porno 
+diskeeper 
+dahlya grey 
+andrew blake 
+pc games 
+pc games ita 
+teen 
+hot friends 
+sisters hot friends 
+my sisters hot friends 
+sisters hot friends 
+teen anal 
+anal virgin 
+naughty america 
+latvia 
+search files, youtube and liveleak 
+teen 
+anal teen 
+search files, youtube and liveleak 
+diskeeper 
+teen 
+julie strain 
+misty mundae 
+julie silver 
+anal 
+anal virgin 
+diskeeper 
+lolita 
+misty mundae 
+julie strain 
+penthouse 
+penthouse 
+penthouse 
+stolen tape 
+pet 
+amiga 
+amigaos 
+elizabeth ita 
+windows xp professional 
+dutch 
+driver magic 
+omnipage 
+omnipage 
+omnipage 
+winuae 
+nhl 
+amiga emulator 
+ghostface 
+games 
+call of duty 4 
+need for speed pro street 
+cdis 
+csi 
+csi bloopers 
+modern warfare 
+smokin on hay 
+hay 
+salma hayek.avi 
+salma hayek 
+diggilo 
+bengtzing 
+booty talk 
+jada fire 
+bondage 
+aniston 
+ilse delange 
+ferrari 
+alicia silverstone 
+big tits 
+search files, youtube and liveleak 
+kjjklyuil 
+key gen nero 8 
+search files, youtube and liveleak 
+anal 
+pthc 
+pthc 
+pthc 
+die ermordung des jesse james 
+the assass 
+the assass ger 
+könige der wellen 
+könige der wellen 
+könige der wellen 
+könige der wellen 
+könige der wellen 
+könige der wellen 
+könige der wellen 
+pthc 
+sex and the city 
+porn 
+soldier of fortune 
+xcom 
+rgdgdfg 
+íỲ르ëà¸\98 
+korean slut 
+korean sex 
+windows xp professional 
+simpsons 
+antivirus 
+search files, youtube and liveleak 
+esi bosh 
+topgear 10x03 
+topgear s10 ep03 
+topgear 
+top 
+top gear 
+top gear 10x03 
+search files, youtube and liveleak 
+my name is earl 
+my name is earl ita 
+my name is earl 
+camera caffè 
+babyj 
+babyshivid 
+pthc 
+preteen 
+mimi 
+mariah carey 
+level 42 
+californication 
+californication 
+harry potter 
+harry potter vii 
+a chi mi dice 
+a chi mi dice 
+a chi mi dice 
+a chi mi dice 
+windows xp professional 
+windows xp professional 
+psp 
+metalica 
+metalica 
+metalica 
+metalica 
+metalica 
+metalica 
+simon and garafunkel 
+simon 
+fdhfgdghdgh 
+simon 
+simon 
+simon 
+simon and garfunkel 
+pompino 
+pompino 
+blowjob 
+porn 
+search files, youtube and liveleak 
+bush 
+shakira 
+shakira 
+shakira 
+baby bash 
+wait for you 
+wait for you yamine 
+wait for you yamin 
+no one alicia 
+flo rida 
+flo rida low 
+until the end of time 
+justin timberlake 
+justin timberlake beyonce 
+justin timberlake until 
+like this 
+mims 
+like this 
+hey baby ashanti 
+gay porno 
+hypnotized 
+gay porno 
+gay porno 
+gay 
+shawty is a ten 
+walk it out 
+the office season 1 
+walk it out 
+dj unk 
+big poppa 
+nitorious big 
+blaque 
+poop 
+meow 
+britney spears 
+flledwood mac 
+fleedwood mac 
+fleedwood mac 
+deep purple 
+shakira 
+storage 
+adobe 
+discography 
+search files, youtube and liveleak 
+mon amour 
+shrek terzo 
+shrek terzo 
+peter vries 
+family guy 
+prison break 
+storage 
+ciara 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search all files 
+tom jones 
+mac os x 
+william petersen 
+lee sarenna 
+jenna jameson 
+jenna jameson 
+vba 
+hybrid 
+horror 
+clips 
+virus 
+ppc 
+nokia 
+excel tools 
+clips 
+clips 
+excel 
+50 cent 
+computeridee 
+computer idee 
+50 cent 
+computer 
+idee 
+adobe 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+50 cent 
+timberland 
+timberland 
+timberland 
+timberland 
+timberland 
+timberland 
+timbaland 
+timbaland 
+timbaland 
+timbaland 
+timberland 
+timberland 
+timberland 
+timberland 
+tommy lee 
+tommy lee 
+tommy lee 
+tommy lee 
+convoy 
+games 
+games 
+metroid prime 3 
+games pc 
+games pc 
+1704 
+habitacion 1704 
+habitacion 1408 
+habitacion 1408 
+habitacion 1408 
+1408 
+zimmer1408 
+zimmer 1408 
+tiger woods 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+chills and fever 
+kiss prince 
+kiss tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+tom jones 
+bbc 
+beatles 
+yobs 
+bbc 
+dicograohy 
+dicography 
+bright house 
+just for gags 
+just for laugh 
+golden ratio 
+3:10 
+osx86 
+osx86 10.5 
+osx86 
+rap 
+tengo 
+crysis 
+ubuntu 
+ubuntu 
+the wire 
+the concretes 
+you say party we say die! 
+bloc party 
+modest mouse 
+teddybears 
+sugarettes 
+the sugarettes 
+menomena 
+menomena 
+john motta 
+dottor house 
+cazzo 
+cazzo 
+ratatouille 
+ratatoille 
+ratatoille 
+ratatoille 
+cartoon 
+porno 
+die hard 
+die hard 
+die hard 
+58 minuti per morire 
+58 minuti per morire 
+die hard 2 
+molto incinta 
+mortal combat 
+dottor house 
+dottor house 
+dottor house 
+dottor house 
+porno 
+xxx 
+dragon ball 
+sailor monn 
+sailor mon 
+sailor moon 
+cartoon 
+.jpg 
+ragazza 
+paris hilton 
+adult 
+molto incinta 
+adult 
+dragon 
+bruce lee 
+horse 
+horse 
+hause 
+hause 
+home 
+taxi 
+taxxi 
+eagles long road out of eden 
+sailor moon 
+ranma 
+dragon ball 
+city hunter 
+ncis 
+ncis 5 
+csi 8 
+csi 6 
+bones 3 
+amici 2005 
+saranno famosi 
+dennis fantina 
+jameson megapack 
+jameson 
+monstertitten 
+monster tits 
+monster boobs 
+riesentitten 
+tits 
+busty 
+search files, youtube and liveleak 
+ufo 
+ufo afterlight 
+windows xp 64 nl 
+vsp 
+microsoft vista 
+films 
+films ita 
+salieri 
+pecorina 
+grazie zia 
+directx 10 di crysis 
+divx 
+divx ita 
+gigi 
+becali 
+dvd2one2 
+dvd2one2 
+dvd2one 
+becali 
+golden ratio 
+lost season 4 
+dvdfab 
+dvdfab 
+old boy 
+sex scandals 
+mms 
+dps 
+delhi public school 
+delhi public school 
+mms 
+dvdfabù 
+dvdfab 
+ramazzotti 
+ramazzotti 
+adobe 
+adobe premiere elements 
+adobe premiere elements it 
+trade 2007 
+gigi d'alessio 
+gigi 
+d'alessio 
+avatar 10 
+my name is earl 
+search files, youtube and liveleak 
+* 
+* 
+heroes s2 e5 
+heroes s2 e5 
+smackdown vs raw 2008 ps2 
+heroes s2 e5 
+heroes s2e5 
+heroes 
+jericho 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+copying beethoven 
+the painted veil 
+sex 
+sex 
+sex 
+sex 
+sex horse 
+sex horse 
+sex horse 
+sex horse 
+sex 
+sex 
+stacktraxx 
+elizabeth 
+elizabeth the gold age 
+gta 
+adobe premiere elements 3 it 
+molto incinta 
+ratatoville 
+negroamaro la finestra 
+negroamaro la finestra 
+negroamaro la finestra 
+negroamaro la finestra 
+negroamaro la finestra 
+julia ann 
+julia ann 
+julia ann 
+vivid pictures 
+quebase 
+tickling 
+heroes 6 
+robot 
+mr.bean 
+mr.bean 
+tickling 
+crocodile 
+merkel 
+vista 
+300 
+cake 
+bee movie 
+beatles 
+tickling 
+bondage 
+faithless 
+august rush 
+27 dresses 
+27 dresses 
+27 dresses 
+27 dresses 
+nrc next 
+nrc 
+rape 
+lopez 
+search files, youtube and liveleak 
+wow 
+cheerleader 
+vista 
+massage 
+vista 
+forced sex 
+the invasion 
+trible 
+tribler 
+secretary 
+nurse 
+waxing 
+wax 
+dutch 
+japanese 
+nl sub 
+nl sub 
+chemistry 
+chemistry 
+chemistry 
+chemistry 
+nl 
+chemistry 
+windows xp 
+windows xp german 
+feet 
+longhorn 
+the reaping 
+pregnant 
+swingger 
+swinggers 
+swing 
+vista rtm 
+swinger 
+nfs 
+vista rtm 
+prison break 
+waitress 
+maid 
+outlands instances 
+windows millenium 
+windows mediacenter german 
+office xp deutsch 
+office xp german 
+windows xp 2003 german 
+windows xp 2003 
+sex 
+windows 2000 german 
+antony santos 
+linux 
+mother 
+daughter 
+mvcd german 
+eva 
+eva 
+2007 
+transformers german 
+linux 
+office xp german 
+prison.break 
+cserháti zsuzsa 
+prison.break 
+prison.break 
+prison.break 
+prison.break 
+deutsch 
+eating 
+harry potter deutsch 
+search files, youtube and liveleak 
+gigi finizio 
+deutsch 
+szécsi pál 
+daniele stefani 
+hoerbuch 
+lolita 
+nude 
+jordi savall 
+zámbó jimmy 
+apostol 
+hoerspiel 
+search files, youtube and liveleak 
+blowjob 
+tindo 
+uninstall tool 2.2.1 
+virus 
+virus 2008 
+wallpaper 
+firewall 
+browser 
+ho voglia di te 
+explorer 
+secure 
+windows german 
+evan almighty 
+big ones 
+secure 
+sas 
+max secure 
+music and lyrics 
+security 
+windows 98 
+windows aio german 
+windows aio 
+windows ultimate 
+windows xp ultimate 
+windows xp 2008 
+windows 2008 
+aio 
+microsoft 2000 
+microsoft 2001 
+microsoft 2002 
+microsoft 2003 
+window server german 
+windows server german 
+windows server deutsch 
+santana 
+windows server 2008 
+windows 2003 
+il dolce e amaro 
+dolce e amaro 
+windows lite 
+wow 
+brave one 
+brave one 
+brave one 
+windows xp sp3 
+24 season 
+crazy xp 
+crystal xp 
+microsoft 2004 
+windows xp bill gates 
+windows xp 2006 
+windows xp premium 
+windows xp 2007 
+windows xp 2008 
+windows longhorn 
+vista home basic 
+search files, youtube and liveleak 
+dr. j 
+islam 
+springsteen 
+.iso 
+windows iso 
+pro basketball 
+wwe - cyber sunday 
+lost 
+perdidos 
+lost 
+perdidos 
+lost 
+windows power 
+xp power 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+anal 
+indonesia 
+xp experience 
+microsoft 2008 
+mac os 
+mac os windows 
+mac os theme xp 
+islam 
+cake 
+ircommoncom 
+mocom 
+omniweb 
+lotus 
+leopard 
+negramaro la finestra 
+avrilm lavigne 
+leopard 
+ibm 
+thinkfree office 
+versiontracker 
+versiontracker 
+versiontrackerpro 
+nero 
+nero 
+vista 
+http://www.mininova.org/get/898328 
+world inferno friendship society 
+search files, youtube and liveleak 
+golden ratio 
+search files, youtube and liveleak 
+two and a half men 
+two and a half men 
+red green show 
+transformers 
+steamhammer 
+steamhammer 
+search files, youtube and liveleak 
+kacey 
+amy 
+amy shine 
+amy shine 
+search files, youtube and liveleak 
+millennium season 3 
+millennium s3 
+millennium s3 
+millennium s3 
+millennium s3 
+millennium s3 
+robot chiken 
+robot chicken 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+inconvenient truth 
+pursuit 
+dj tiesto 
+ibiza sex 
+ibiza sex 
+taxxi 
+taxi 
+taxxi 
+fat 
+i need love 
+emmanuel 
+salieri 
+mario salieri 
+jessica 
+divx 
+games 
+games pc 
+top 20 
+ceasar 
+ceasar 
+power dvd 
+stone 
+basic 
+ufo 
+ufo afther 
+ufo after 
+power dvd 
+power dvd pc 
+pis 
+piss 
+jamaican 
+suite cs3 ita 
+suite cs3 ita 
+suite cs3 ita 
+suite cs3 ita 
+suite cs3 ita 
+suite cs3 ita 
+suite cs3 ita 
+suite cs3 ita 
+suite cs3 ita 
+jamaican porn 
+search files, youtube and liveleak 
+porn 
+coochie 
+sex 
+sex 
+pr0n 
+pr0n 
+prison.break s3 
+suite cs3 
+suite cs3 
+suite cs3 
+
+
+? 
+search file 
+search files 
+
+
+
+
+teen 
+teen 
+teen 
+natasha 
+ann wilson 
+ann wilson 
+ann wilson 
+xxx 
+search files, youtube and liveleak 
+
+teen 
+melua 
+liefde 
+blof 
+.avi 
+avi 
+ramazzotti e2 
+leopard 
+alex jones 
+waking life 
+level 42 
+iso buster 
+acronis 
+blade runner 
+treu image nl 
+backup 
+arab 
+el greco 
+unlock cell 
+blackberry 
+arab 
+discogarphy 
+discography 
+sex 
+xxx 
+pichunter 
+pichunter 
+amateurs 
+nas live 
+google 
+λεβενÏÃ\8e·ÏÂ\83 
+λεÏï¾\8eµÎ½ÏÃ\8e·ÏÂ\83 
+λεβενÏÃ\8e·ÏÂ\83 
+λεβενÏÃ\8e·ÏÂ\83 
+pedo 
+commedie 
+commedie de filppo 
+commedie de filppo dvd 
+pedo 
+horse sex 
+avrilm lavigne 
+linux 
+avrilm lavigne 
+avril lavigne 
+ghost 
+ghost dutch 
+dutch 
+negramaro 
+tazenda 
+search files, youtube and liveleak 
+renault k4m 
+k4m 
+halo 2 
+leopard osx 
+gem boy 
+roxio media creator 
+roxio easy media creator 
+roxio easy media creator 10 
+gothic 3 
+antivirgear 3.8 
+antivirgear 3.8 
+antivirgear 3.8 
+the sims 2 pets 
+ugly betty 
+acdc 
+gothic 3 
+pets 
+renault 
+special exercises 
+sneakers 
+socks 
+white socks 
+logan 
+eva 
+eva vassidy 
+eva vassidy 
+eva vassidy 
+eva vassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+eva cassidy 
+cassidy 
+burnout 
+ray davies 
+prison break 
+need for speed 
+xp 
+logan 
+need for speed pro street 
+wow scolded 
+fucked and bound 
+adobe 
+bound 
+xxx 
+prsin break 
+prison break 
+toe sucking 
+k4m 
+toes 
+seventeen 
+tickeld 
+justin timberlake 
+justin timberlake 
+justin timberlake 
+tickling 
+roxio 
+fitnis 
+new york 
+new york 
+cake 
+faithless 
+foo fighters 
+richard cheese 
+porn 
+george michael 
+2061 - un anno eccezionale 
+carny effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+canary effect 
+top 100 
+heroes 
+xxx 
+lio 
+lion 
+xxx 
+renault 
+conspiracy 
+vice city 
+soundtrack 
+eva cassidy 
+50 cent & justin timberlake & timbaland ayo technology 
+heroes 1 
+heroes s01 
+heroes.s02e0 
+heroes.s02e0 
+heroes.s02e0 
+heroes s02 xor 
+momoi 
+ned. 
+ned 
+ondert 
+horro 
+bure 
+neighb 
+voy 
+strip 
+invasion 
+eros 
+vasco la compagnia 
+the simpsons 
+cedega 
+search files, youtube and liveleak 
+jericho 
+kamelot 
+sonata arctica 
+demetri martin 
+demetri martin person 
+comedy central demetri 
+metallica 
+du hast 
+k4m 
+shakespeare 
+j 
+kamelot 
+march of mephisto 
+at the end of paradise 
+edge of paradise 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+du hast 
+eminem 
+d12 
+bleach 145 
+sonata arctica 
+fabrizio de andrè 
+rammstein 
+pink floyd 
+saw 4 
+30 days of night 
+nanni moretti 
+nanni moretti 
+taxy driver 
+american gangsta 
+30 days of night 
+ich wil 
+ich will 
+ich wil 
+dean martin 
+tribute 
+de andrè 
+de gregori 
+de andrè 
+fabrizio de andrè 
+de gregori 
+rino gaetano 
+sims 2 
+jenna 
+compiz 
+de andrè 
+vecchioni 
+ita 
+italiano 
+porn 
+moana 
+moana 
+ligabue 
+vecchioni 
+franco battiato 
+roberto vecchioni 
+search files, youtube and liveleak 
+finardi 
+jorge palmas 
+finardi 
+finardi 
+hendrix 
+the doors 
+power flower 
+roberto cecchioni 
+roberto vecchioni 
+guccini 
+adobe premiere 
+adobe 
+sims 2 patch 
+sims2 patch 
+sims 2 patch 
+roberto vecchioni 
+sebastian 
+aatje 
+aatje 
+east is east 
+pirates of the carribean at worlds end 
+pirates of the carribean at worlds end 
+test 
+ita 
+california ita 
+. 
+.exe 
+access97 
+cars 
+ita manga 
+ita house 
+ita dr.house 
+ita dr. house 
+lush life 
+jenna jameson 
+ita 
+ita divx 
+ita divx 
+ita divx 
+bresek 
+breserk 
+breserk ita 
+breserk ita divx 
+ita berserk 
+spiderman 3 pcdvd 
+pc dvd 
+christy canyon 
+christy canyon 
+christy canyon 
+christy canyon 
+christy canyon 
+christy canyon 
+christycanyon 
+christy_canyon 
+christy 
+christy canyon 
+christy canyon 
+heroes s01 
+heroes s01e01 
+k4m 
+logan 
+renault clio 
+heroes s01e01 
+break.com 
+deer hunting deer 
+deer hunting bird 
+renault clio 
+worlds end] 
+renault clio 
+cracks 
+wp 
+driver 
+ring tones 
+renault clio 
+foot soldier 
+search files, youtube and liveleak 
+die hard 
+sms 
+sms 
+mango 
+mango 
+sms sotto mentite spoglie 
+mango 
+porno 
+russ 
+mongol 
+window media player ita 11 
+window media player 11 
+dragon 
+hypnotize 
+karol 
+papa woitila 
+papa woitila 
+karol woitila 
+karol woitila 
+ppa karol woitila 
+fucili 
+papa kerol woitila 
+papa kerol woitila 
+bondage 
+pro evolution soccer 2008 
+hacked 
+hacked wireless network 
+hacking wireless network for dummies 
+onion 
+tst 
+test 
+onion 
+mosconi 
+mosconi 
+mac osx 
+.avi 
+sassari 
+dejavu ita 
+dejavu dvd ita 
+app 
+game ita 
+pc game ita 
+hair ita divx 
+jesus christ superstar ita divx 
+jesus ita divx 
+number 23 dvd ita 
+jesus ita 
+mac osx 
+manga ita 
+pc game 
+dvd 
+italian 
+prison brake 
+prison brake 
+prison brake 
+mojica 
+prison brake 
+nelly furtado 
+tazenda 
+tazenda loneros 
+tazenda loneros domo mia 
+tazenda loneros domo mia 
+xbox 
+bob marley 
+bob marley 
+bob dylan 
+p 
+bob marley 
+bruce almighty 
+bourne 
+catania 
+martian child 
+avast 
+taxus 
+trucchi ricamo 
+punto croce perfetto 
+disegni punto croce 
+pooh 
+ich will 
+tokio hotel milano 
+strippoker 
+playboy 
+playboy november 2007 
+prison brake 
+prison break 
+blood rayne 
+journeyman 
+blood rayne 
+blood rayne 
+ryandan 
+prison.break 
+prison.break 
+kate nash 
+mature 
+hey you 
+hey you 
+hey you 
+frak 
+freak 
+amateur 
+alles is liefde 
+paola turci 
+jennifer lopez 
+top gear 
+stockholm 
+je t' aime 
+je 't aime 
+je 't aime 
+je 't aime 
+bmw navigation 
+milf 
+french 
+bmw 
+french dvd 
+miss marple 
+fergi big girls dont cry 
+om shanti om 
+dhan dhana dhan goal 
+pes 2008 
+halo 3 
+moto gp 2007 
+die hard 3 ita 
+football manager 2008 
+dota 
+dota 
+dota 
+try teens 
+softperfect 
+weeds 
+weeds s01e01 
+flashget 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+die hard 3 ita 
+yandel 
+search all files 
+tokio hotel 
+la terza madre 
+the bourne ultimatum 
+the bourne ultimatum ita 
+il caso thomas crawford 
+michael clayton 
+xxx 
+michael clayton ita 
+tideland il mondo capovolto 
+kamael.rar 
+naruto folge 1 part 1/1 
+naruto folge.1 part 1/1 
+naruto episode 1 part 1/1 
+naruto episode 1 part 1/1
+naruto episode 1 part 1/1 
+naruto episode 1 part 1/1 deutsch 
+naruto episode 1 part 1/1 deutsch 
+naruto folge 1 part 1/1 deutsch 
+naruto folge 1 part 1/1 deutsch 
+naruto folge 1 
+chi vuol essere milionario seconda edizione 
+chi vuol essere milionario seconda edizione 
+kamasutra 
+divx ita 2007 
+kamasutra 
+pro tools 
+kamasutra 
+lil wayne 
+akon 
+illegal 
+code1 
+navy cis 
+nana 37 
+nana 37 
+nana 37 
+nana 37 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 36.6 
+nana 
+nana 36 
+nana ita 
+nana ita 
+high school 
+nana 
+nana 
+nana 
+nana 
+spy 
+bourne 
+aria 
+the office 
+the practice 
+flood 
+flood 
+the flood 
+30 nights 
+ludicris 
+ludacris 
+ludacris 
+2pac 
+search files, youtube and liveleak 
+divx ita 
+camera cafe ita 
+camera cafe ita 
+pozzetto ita 
+essential 
+essential 
+guide 
+one u2 
+pes 2008 
+pes 2008 xbox 360 
+colin mcrea 
+wrc 
+xbox 360 
+xbox360 
+dogma ita 
+beastiality 
+dogma 
+hot fuzz 
+horse 
+pthc 
+heroes s02e02 
+heroes s02e01 
+heroes s02e03 
+little lupe 
+amos milburn 
+fucking 
+fuckin horses 
+guns n'roses 
+little lupe 
+animal sex 
+pthc moscow 
+hussyfan 
+hellgate 
+movies 
+south park 
+kruistocht 
+search files, youtube and liveleak 
+avril lavigne 
+laura fuck 
+teen fuck 
+wii 
+lost 
+lost 
+movie 
+alexandra oneil 
+alexandra oneil 
+critisize 
+bad ass 
+alexander oneal 
+horoor movies 
+horror movies 
+alexander o'neal 
+beatles 
+las vegas 
+montecito 
+reddwarf 
+skinned deep 
+search files, youtube and liveleak 
+skinned deep 
+tpain 
+avril lavigne 
+avril lavigne 
+harry potter 
+run, fat boy, run 
+run fat boy run 
+windows 2000 
+pj 
+windows xp 
+girls gone wild 
+ratatoulle 
+disney 
+sex 
+globo 
+blonde 
+hannah montana 
+pictures 
+the who 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+axxo 
+bluetech 
+jiminy glick 
+dvdrip 
+.mpg 
+russia 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+æ¨è±\86 é»Ã©Â\87Â\91å\91¨ 
+æ¨è±\86 é»Ã©Â\87Â\91å\91¨ 
+æ¨è±\86 
+csi.s08e05 
+csi 8 
+csi 8 
+autocad 2007 
+csi 8 
+csi 8 
+mystery jets 
+josh hawkins 
+josh hawkins 
+saferase 
+pink floyd 
+naruto ita 
+un'impresa da dio ita 
+un'impresa da dio 
+bourn ultimatum 
+bourn ultimatum ita 
+bourn ultimatum 
+bourn ultimatum ita 
+il capo dei capi 
+doraemon 
+avril lavinge 
+harry potter 
+50 cent 
+pirates of caribbean the world end 
+windows xp 
+pc games 
+windowsxp 
+windows xp 
+sex tape 
+eagles 
+ita 
+pregnant 
+search files, youtube and liveleak 
+denise richards 
+os windows xp 
+windows vista 
+asia 
+trish 
+mina il cielo nella stanza 
+mina 
+mina il cielo in una stanza 
+il cielo in una stanza 
+il cielo in una stanza 
+il cielo in una stanza 
+csi 8 
+top gear 
+ncis 
+csi 4 
+csi 6 
+ncis 5 
+csi 4 
+hustle 
+hustle4 
+hustle 4 
+hustle 
+csi 4 
+king kong 
+dodge charger r/t 
+toyota 
+toyota 
+land cruiser 
+mtv 2008 
+mtv 2008 
+mtv 2007 
+hit 2007 
+anime ita 
+santana 
+hagigawa 
+hagigawa 
+asian 
+asian 
+xxx 
+xxx 
+xxx 
+noa 
+noa i dont now 
+noa i don't now 
+noa i don't now 
+noa i don't now 
+noa 
+noa 
+noa 
+noa 
+don't now 
+i don't now 
+noa 
+cubase 
+cubase 
+windows 98 
+windows 2000 professional 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+kate nash 
+u2 
+miracle drug 
+pro evalutio soccer 
+pro evalutio 
+navision 
+navision 
+navision 
+navision 
+pro evalutio 
+pro evalutio 
+tildes birojs 2005 
+tildes birojs 2005 
+tildes birojs 2005 
+microsoft office 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cum 
+winzip 
+mature 
+winrar 
+fisting 
+anime 
+lost 
+rocco 
+american gangster 
+heroes 
+wic 
+juelz santana-from me to u 
+season1 
+hentai 
+heroes s02e02 
+heroes s02e03 
+heroes s02e03 mkv 
+tildes birojs 
+heroes s02e04 mkv 
+tildes birojs.lv 
+my name is earl 
+pantyhose 
+juelz santana 
+aesop rock 
+sixtoo 
+sixtoo 
+nylonic 
+aesop rock 
+finger 
+rim 
+rim job 
+the wire 
+doctor who 
+cubase 
+ava devine 
+emiry 
+felicity 
+tenn 
+tenen 
+teen 
+nudist 
+swish 
+the wire 
+reno 911 
+jamie cullum 
+bossa nova 
+songbook 
+songbook 
+songbook 
+jazz guitar 
+jazz guitar 
+jazzguitar 
+jazzguitar 
+jazz 
+capozzi 
+capozzi 
+la la la 
+trade 2007 
+trade 
+soul 
+mowtown 
+heroes s02e03 
+heroes s02e03 mkv 
+northern soul 
+la famiglia robinson 
+heroes s02e05 mkv 
+heroes s02e06 mkv 
+heroes s02e07 mkv 
+heroes s02e05 mkv 
+nudist 
+porno 
+danilewicz 
+nicklemback 
+nickemback 
+nickenback 
+nickenback 
+nick 
+nickelback 
+akon 
+naturist 
+nudist 
+nudist photo 
+nudist photo 
+nudist photo 
+nudist photo 
+xxx photo 
+xxx 
+vid 
+xx 
+xx 
+fr 
+ti 
+wycleff 
+wycleff 
+wycleff 
+wycleff 
+akon 
+alicia keys 
+jars of clay 
+jars of clay 
+nelly 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+yellow jackets 
+police 
+search all files 
+osx leopard 
+smallville 
+tildes birojs.lv 
+smallville season 7 
+microsoft 
+pulp fiction ita 
+ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+heroes 
+heroes.s02e01 
+heroes s02e01 
+pro evolution 
+spanking 
+spanking 
+diabolika 
+emanuele inglese 
+pop killer 
+aerosmith 
+abba 
+spanked 
+chelentano 
+plain white'is 
+plain white is 
+plain 
+tchelentano 
+celentano 
+spanking 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+webreaper 
+metallica 
+torrent tracker 
+tracker 
+turks fruit 
+nerderlandse film 
+first time 
+steve ann 
+search files, youtube and liveleak 
+xxx 
+access 
+access 
+access 
+vba 
+search files, youtube and liveleak 
+ado 
+ajax 
+ruby 
+grails 
+perl 
+spears 
+paris hiulton 
+paris hilton 
+victoria 
+huge 
+microsoft 
+kurt angle 
+hdr 
+hdr 
+hdr 
+photomatix pro 
+photomatix pro 
+photomatix pro 
+photomatix pro 
+photomatix pro 
+photomatix pro 
+book 
+phrack 
+roberto 
+wigan casino 
+toto cutugno 
+ubutu 
+ubuntu 
+search files, youtube and liveleak 
+madonna 
+discostars80 
+discostars80 
+discostars80 
+rock 80 
+disco 80 
+comidy 
+total training 
+army of darkness 
+south park 
+core 
+hilton 
+wwe 
+spiel 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hilton 
+easyart 
+candice night 
+sexy 
+search files, youtube and liveleak 
+bbc 
+michael palin 
+bubble girls 
+aria giovanni 
+film 
+mz.krazy 
+lil rob 
+lil rob 
+unreal 3 
+new age 
+karunesh 
+la terza madre 
+la terza madre ita 
+adobe 
+a come andromeda 
+la terza madre ita 
+kapitein rob 
+ass 
+kapitein rob 
+kapitein rob 
+candice night 
+candice night 
+atonement 
+k3 en de kattenprins 
+timboektoe 
+il capo dei capi 
+fleedwood mac 
+boob 
+tit 
+bones 
+german 
+prison break seconda serie 
+deutsch 
+musik 
+subway 
+bond 
+blue 
+ubuntu 
+paul 
+28 
+heroes 
+heroes 5 
+spiegel 
+tizziano ferro 
+tiziano ferro 
+stardust 
+air sexy boy 
+air sexy boy mp3 
+frank galan 
+dutch 
+mobile 
+pilates 
+pilates 
+fonky family 
+pittige tijden 
+search files, youtube and liveleak 
+anatomy 
+anatomy hunted 
+anatomy s04 
+anatomy s04 e05 
+heroes s01 
+heroes s01 e04 
+die hard 4 
+homo 
+budweiser 
+budweiser toad 
+susteren 
+nero for linux 
+nba 
+niki nova 
+adult 
+bang bus 
+train wreck 
+night of the scarecrow 
+dark night of the scarecrow 
+battle star galactica 
+memory 
+memory 
+audio books 
+search files, youtube and liveleak 
+dvd movies 
+japanese 20 
+monster cock 
+moovies 
+shrek axxo 
+divx for mac 
+divx for mac 
+asia 
+melita 
+lolit 
+lol 
+xxx 
+xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jemeni 
+porn 
+cherokee 
+cherokee 
+cherokee d'ass 
+porn 
+real butts 
+dirty latina 
+latina maid 
+dirty latina 
+pirates 
+balzak 
+lubov 
+need for speed porche 
+tom 
+hilda 
+yuma 
+incest 
+incest 
+incest 
+daa converter 
+daa 
+converter 
+uif 
+plastilina moch 
+plastilina moch 
+plastilina moch 
+kim possible 
+nip tuck ita 
+nip tuck 
+gay 
+hig scool 
+hig scool musical 
+hig scool musical 
+hig scool musical 
+hig scool musical 
+hig scool musical 
+hig 
+high school 
+high school musical 
+emmanuel 
+caniba 
+torture 
+search files, youtube and liveleak 
+dropkick murphy 
+peelers 
+dropkick murphy 
+the mahones 
+the mahones 
+web 
+irish 
+weeds 
+brotherhood 
+brotherhood s02e03 
+the mahones 
+the mahones 
+the mahones 
+the mahones 
+the mahones 
+larkin 
+larkin brigade 
+larkin brigade 
+flogging molly 
+flogging molly 
+floggin molly 
+anal 
+wat zie ik 
+star trek 
+troia 
+tits 
+diktrom 
+irish 
+canibaal 
+peelers 
+ped 
+hentai 
+leopard 
+ratatuille 
+pinkfloid 
+bruce 
+hip pop 
+myrna goossen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+team fortress 
+wat zien ik 
+avs4you 
+avs4you 
+avs4you 
+video clips musica 
+linda de mol 
+silvia millecam 
+avs4you 
+avs4you 
+avs4you 
+avs4you 
+avs4you 
+avs4you 
+katty b. 
+verbaan 
+mecano 
+gorginaverbaan 
+g.verbaan 
+buurvrouw 
+avs4 
+avs4 
+avs4 keygen 
+search all files 
+a chi mi dice 
+blue 
+paul panzer 
+hentai 
+virtual cd 
+nero 8 
+anime 
+winzip 
+virtual 
+nero 
+nero 8 
+country 
+vistaxp theme 
+avatar 10 
+kamasutra 
+.iso 
+all 
+al 
+porn 
+shemale 
+hardcore 
+shemale 
+czdab 
+therbathia 
+bridge to terabithia 
+axxo 
+ultimatum 
+.mp3 
+mp3 
+avi 
+italian 
+galactica 
+bourne 
+bourne italian 
+avi 
+hot 
+mp3 
+italian 
+jericho 
+vista 
+windows 
+juiced 
+pc 
+pc 
+adobe photoshop lightroom 
+grey's 
+adobe photoshop ger 
+ita 
+petter 
+petter p 
+petter album p 
+petter albums 
+petter albums 
+petter albums 
+petter 
+arcade fire 
+movie 
+petter 
+mp3 
+mp3 musik 
+mp3 albums 
+mp3 albums 
+mp3 albums 
+mp3 
+mp3 petter 
+mp3 petter 
+mp3 petter 
+mp3 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+swedish hiphop 
+porn 
+porn 
+chitsujo 
+musik 
+moodyz 
+cocoa 
+tomtom navigator 
+cocoa's room 
+naked mile 
+cocozella 
+small penis 
+small dick 
+sunglasses 
+small penis humiliation 
+tiny dick 
+tiny pecker 
+beirut 
+la terza madre 
+la terza madre 
+penis humiliation 
+nihon 
+cicciolina 
+nihon penis 
+cicciolina 
+nude male contest 
+cicciolina 
+body painting 
+resident evil 
+la terza madre 
+haynes volvo v40 
+brigitta bulgari 
+pillow book 
+vomit 
+jonn holmes 
+jonn holmes 
+jonn holmes 
+un impresa da dio 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+il sette e l'otto 
+stardus 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+moana pozzi 
+rocco sifredi 
+rocco sifreddi 
+rocco sifreddi 
+prison break 
+prison break fr 
+need for speed 
+girls 
+rio de janeiro 
+need for speed 
+need for speed 
+radiohead 
+comptia 
+bdsm 
+navigator 
+trade 2007 
+trade 
+stylexp 
+microsoft office 
+the unit 
+sunshine 
+bourne ultimatum 
+pthc boy 
+pthc boy 
+pthc boy 
+boy pedo 
+unit 
+bourne italian 
+windows vista 
+ocean's 13 
+games 
+ocean's thirteen 
+ocean's 13 
+talk to me 
+talk to me movie 
+american gangster 
+stardust 
+mistress 
+bdsm 
+adobe acrobat 
+iron man 
+kurt 
+wiked game 
+isac game 
+welcome jungle 
+2008 
+movie 
+about you now 
+welcome jungle mp3 
+business 
+http://tracker.zerotracker.com:2710/announce 
+xp pro europe os 
+xp pro europe 
+xp pro europe an 
+european xp-pro 
+european xp-pro 
+european xp-pro 
+european xp-pro 
+timboektoe 
+aquarium 
+charmane 
+charmane 
+aquarium 
+fakebook 
+transformers 
+youtube 
+youtube.com 
+youtube. 
+hindi movies 
+punjabi 
+waris 
+raaz 
+bdsm 
+bdsm 
+hindi 
+bdsm 
+fallen 
+fallen 
+men 
+www.sumotorrents 
+gay 
+www.sumotorrents.com 
+www.sumotorrents.com 
+mitti wajan mardi 
+guys gone wild 
+hkhd 
+hindi movies 
+the craft 
+indian movies 
+x-men 
+bhool bhuliyaa 
+indian 
+tan lines 
+tan lines 
+tan lines movie 
+the seeker the dark is rising 
+punjabi 
+dungeons dragons wrath of the dragon god 
+porn 
+gay porn 
+the seeker the dark is rising 
+tomtom benelux 
+mistress 
+porn 
+cum 
+the craft 
+locoroco 
+tunstall 
+high school musical 
+video converter 
+trojan remover 
+alcohol 120 
+hollandse 
+hollandse 
+hollandse 
+holland 
+jan 
+nederland 
+nederlandse 
+piraat 
+piraat 
+piraat 
+jan keizer 
+jan keizer 
+jan keizer 
+spyware doctor 
+hollands 
+piraten 
+freediving 
+bdsm 
+piraat 
+piraat 
+muziek 
+nieuwe 
+the big 
+brave one 
+atb 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bean 
+bean 
+bean 
+mrs. bean 
+comedy 
+sean cody 
+atc 
+bruce 
+bruce 
+bruce 
+denzal 
+denzal 
+denzal 
+dooh 
+bruce 
+bruce springsteen 
+bruce springsteen 
+bruce springsteen 
+bruce springsteen 
+aha 
+david 
+gung ho 
+bruce springsteen 
+atb 
+bruce springsteen 
+at 
+live 
+gung ho movie 
+atb 
+gung ho 1986 
+gung ho michael keaton 
+music 
+atb 
+you tube 
+you tube 
+you tube 
+you tube 
+you tube 
+potter 
+you tube 
+you tube 
+heroes 
+mr. brooks 
+mr books 
+ps2 
+mr brooks 
+talk to me 
+flyboys 
+naughty office 
+clayton ita 
+osx 
+a 
+osx 
+.mp3 
+bobbe melle 
+bobbe malle 
+piossasco 
+jenna 
+aria giovanni 
+vasco rossi 
+stardust 
+ita 
+bourne 
+bourne ultimatum ita 
+star dust 
+pro street 
+everest 
+crystal 
+team fortress 2 
+team fortress 2 
+rossz pc játékok 
+search files, youtube and liveleak 
+windows media center 
+windows media center 
+http://www.youtube.com/watch?v=wauyjmwpopc 
+windows media center 
+windows media center 
+pdf 
+maya 
+maya tutorial 
+nature 
+lake 
+pc booster 
+nick cave 
+beethoven 
+comic 
+[rus 
+latvia 
+solsuite 
+solsuite 
+solsuite 
+hitman 
+comic xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+britney 
+final cut studio 2 
+hörspiel 
+teen 
+lisa sparxxx 
+dilli dalli 
+nirvana 
+deus 
+mango 
+eminem 
+beowulf 
+fabri fibra 
+ramazzotti 
+sotto mentite spoglie 
+gigi d'alessio 
+joana's guide 
+alessio 
+beyonce 
+sotto mentite spoglie 
+napoletana 
+high school musical 2 
+high school musical 
+beyonce 
+avril lavigne 
+eminem 
+dragon ball 
+patterns 
+singuila 
+sjors 
+antikiller2 rusù 
+antikiller2 rusù 
+antikiller2 rus 
+happy feet 
+handy 
+mobile 
+mobile 
+mobile 
+mobile 
+search files, youtube and liveleak 
+mandi 
+seal 
+enemy territory quake wars 
+10cc 
+heroes 
+search files, youtube and liveleak 
+asian schoolgirls 
+harry potter 
+star wars 
+krieg der sterne 
+debbie 
+youp van het hek 
+inrainbow 
+inrainbows 
+wonderwoman 
+the who 
+win xp 
+supertramp 
+pink floyd 
+varmkorv boggie 
+varmkorv 
+clockwork orange 
+varmkorv 
+jurrasic park 
+porno italiani 
+porno italiani 
+porno italiani 
+porno italiani 
+porno italiani 
+porno 
+tom tom 
+wonderwoman 
+star 
+completo 
+search files, youtube and liveleak 
+eye candy 
+24 season 
+alien skin 
+alien 
+lucky twice 
+cinderella 
+rus 
+latv 
+latv* 
+weeds 
+weeds s03 fr 
+latviski 
+boy 
+boy 
+rfid 
+youtube 
+youtube 
+youtube 
+search files, youtube and liveleak 
+youtube blip.tv 
+stargate atlantis 
+youtube 
+éponge 
+blip.tv 
+youtube 
+youtube 
+bombalurina 
+search files, youtube and liveleak 
+adobe photoshop cs3 
+search files, youtube and liveleak 
+axxo 
+www.blip.tv 
+pj 
+french 
+gigi d'alessio 
+gigi d'alessio 
+trade 2007 
+discografia gigi d'alessio 
+weeds french 
+french 
+fedora 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dmx 
+dmx 
+french 
+french 
+hebrew 
+mom 
+mom 
+mom 
+mom 
+mom son 
+mom boy 
+teen boy 
+bleisch 
+moshav 
+beatles revolver 
+beatles 
+seeed 
+french 
+collection 
+pink floyd 
+jethro tull 
+ska 
+eaul cremona 
+raoul cremona 
+raoul cremona 
+raoul cremona 
+zelig 2007 
+bombalurina 
+russian dvdrip 
+russian dvdrip 
+godzilla 
+godzilla 
+godzilla 
+gta liberty city 
+udelej se 
+media center 
+search files, youtube and liveleak 
+high school musical 
+ita 
+rus 
+search files, youtube and liveleak 
+peep 
+peep 
+media center 
+english 
+gangsters 
+american gangsters 
+stalker 
+the bourne supremacy 
+the bourne identity 3 
+the bourne 3 
+the bourne 3 ita 
+the bourne 3 ita 
+the bourne 3 
+funeral party 
+funeral party 
+funeral party 
+tera copy pro crack 
+tera copy pro 
+adult 
+adult 
+tera copy pro 
+modern talking 
+silent hill dvd 
+ita 
+you tube 
+windows media center 
+shandi 
+windows media center 
+nero 8 
+la terza madre ita 
+film porno 
+film porno 
+film porno 
+justice 
+hard tech 
+hardtech 
+una settimana da dio 
+una settimana da dio 
+un giorno da dio 
+funeral party 
+hardtech 
+hardtech 
+electro 
+una impresa da dio 
+un'impresa da dio 
+una impresa da dio 
+funeral party 
+hot mom 
+hot mom 
+un impresa da dio 
+mother 
+mother 
+mother 
+film hard 
+bridge.to.terabithia[2007]dvdrip[eng]-axxo 
+bridge.to.terabithia[2007]dvdrip[eng]-axxo 
+bridge.to.terabithia[2007]dvdrip[eng]-axxo 
+bridge.to.terabithia[2007]dvdrip[eng]-axxo 
+blood.diamond 
+corel 
+bridge.to.terabithia[2007]dvdrip[eng]-axxo 
+bridge.to.terabithia[2007]dvdrip[eng]-axxo 
+das konklave 
+nl subs 
+nl subs 
+pc game 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+pc game 
+300 
+morther 
+mother 
+mrs 
+vasco rossi 
+aunt 
+renegade 
+ratatuille 
+renegade pc 
+soldier of fortune 
+soldier of fortune pc 
+mom 
+avg anti virus 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+harry potter 
+mother 
+aunt 
+andre hazes 
+son 
+george micheal 
+pau de eeuw 
+paul de eeuw 
+mom son 
+paul de leeuw 
+mom son 
+mom son 
+incest 
+german inzest 
+german 
+divx ita 
+county 
+country 
+cccp 
+csi 
+mom 
+amatoriale 
+car 
+pc game 
+glorious 
+alles is liefde 
+mortyr pc 
+mortyr 
+pc 
+soldier of fortune 
+soldier of fortune pc 
+24 season 4 
+call of duty 
+call of duty pc 
+recepten 
+search files, youtube and liveleak 
+call of duty 3 pc 
+uber soldier 
+leonie van veen 
+nl subs 
+"norton 360" 
+"norton 360" 
+norton 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+star trek 
+das konklave 
+go west 
+go west 
+go west 
+grazie 
+gianna nannini 
+el pecado 
+el pecado 
+esorcista 
+esorcista 
+asia argento 
+zucchero 
+castellano 
+castellano 
+espanol 
+la terza madre 
+piratas 
+la terza madre 
+amor 
+gaucho 
+gaucho 
+folclore 
+tango 
+olipa kerran 
+olipa kerran 
+olipa kerran 
+kerran 
+trade 
+cars 
+suom 
+cars 
+osx 
+crysis 
+amatoriale 
+amatoriale 
+amatoriale ita 
+sesso amatoriale ita 
+rocco selen 
+hard pissing 
+rush 
+magic iso 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lesbian 
+salieri 
+roccaforte 
+salieri 
+salieri 
+porno 
+bitdefender 
+kissing 
+french 
+bioshock 
+mature 
+solo 
+solo 
+lesbians 
+lesbians 
+lesbians 
+nipples 
+milf 
+milflesbian 
+milf lesbian 
+nude 
+surfs up dutch 
+surfs up 
+heroes s01e01 
+osx 
+search files, youtube and liveleak 
+children of bodom 
+children of bodom 
+superbad 
+seinfeld 
+bee 
+ratatu 
+ratatouille 
+license to wed 
+heroes 
+heroes 07 
+war 
+stardust 
+rush hour 
+3:10 
+the assassination 
+simpsons 
+resident 
+transformers 
+vista 
+vista 
+deep purple 
+deep purple 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+vista 
+glamazon 
+hme 
+home 
+glamazon 
+glamazon 
+school 
+home 
+strapon 
+school 
+strapon 
+femdom 
+glamazon 
+femdom 
+lift 
+carry 
+glamazon 
+lift 
+femdom 
+bunny 
+country 
+rar password cracker 
+thinstall 
+cheerleader 
+database 
+cheerleader 
+britny spears 
+britney spears 
+machine fuck 
+forced sex 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jazz 
+smooth 
+level 
+miller 
+wooten 
+lennon 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+corel 
+adobe 
+ezgenerator 
+jazz 
+jazz 
+mobile phone tools ita 
+motorola 
+facial 
+cum face 
+cumshot 
+search files, youtube and liveleak 
+harry potter 
+harry potter dutch 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game nld 
+harry potter game 
+harry potter game 
+harry potter nld 
+harry potter nld 
+harry potter nld 
+harry potter nld 
+harry potter nld 
+irene grandi 
+ita 
+alkohol 120 
+eve 
+kim kard 
+pam anderson 
+mase 
+t.i. 
+alcohol 120% 
+architettura 
+casa 
+architettura 
+2 girls 1 cup 
+antivirus 
+microsoft office 
+ok computer 
+utilite 
+utilites 
+programs 
+volksmusik 
+dragon ball z all episodes 
+dragon ball z 
+workgroup 
+microsoft works 
+works 
+igrafx 
+igrafx 
+grafx 
+grafix 
+grafik 
+micrografix 
+micrografx 
+micro 
+porn 
+adult 
+sex 
+ggg 
+sperma 
+sperm 
+schlager 
+bisex 
+bi 
+orgie 
+xxx 
+bisex 
+handynummer 
+adresse 
+privat 
+langeoog 
+puff 
+reeperbahn 
+bios 
+kiez 
+rotlicht 
+pervers 
+kontakt 
+bordell 
+delfi 
+cleener 
+megasex 
+computer cleener 
+massenfick 
+google 
+prono 
+porno 
+mega xxx 
+kungfu 
+daa 
+search all files 
+open files daa 
+open files daa 
+open files .daa 
+open files .daa 
+daa 
+swingersakce 
+groupsex 
+groupsex 
+teen 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+young 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+kungfu 
+wu zu 
+architettura 
+architettura 
+architettura 
+architettura
+matematica 
+film 
+ingegneria 
+legge 109 
+legge 109 
+search files, youtube and liveleak 
+dublado 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+filmes 
+porno 
+photoshop cs3 
+warden 
+ilsa 
+search files, youtube and liveleak 
+japan 
+data warehouse 
+data warehouse 
+apple 
+data warehouse 
+business object 
+calsical 
+calssical 
+calssical 
+x japan 
+x japan 
+x japan 
+x japan 
+jessica alba 
+jessica alba 
+jessica biel 
+vicky 
+vicky 
+hussyfan 
+kingpass 
+kingpass 
+carmen electra 
+shakira 
+alizze 
+ratatouille ita 
+wallpapers 
+bondage 
+roma 
+searcgh 
+eastern promises 
+tribler 
+test 
+tribler 
+i the legend 
+search files, youtube and liveleak 
+legend 
+country 
+dexter 
+convert daa 
+puscifer 
+katie melua 
+liefde 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pro evolution 
+tribler 
+ho voglia di te 
+v is for vagina 
+pino palladino 
+tribler 
+amiga 
+tribler 
+ggg 
+superman 
+tutorial 
+search files, youtube and liveleak 
+italia 
+italia 
+maria bethanea 
+maria bethanea 
+ggg 
+guns in roses 
+ggg 
+ggg 
+ggg 
+maria rita 
+elton jhon 
+tribler 
+katie mailua 
+katie melua 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+siffredi 
+osx 
+x gonna give it to ya dmx 
+tribler 
+x gonna give it to ya dmx 
+x gonna give it to ya dmx 
+xzibit 
+dmx 
+dmx x gonna give 
+dmx 
+lord give me a sign 
+dmx 
+dmx x 
+dmx x gonna 
+tribler 
+dmx x gonna 
+dmx x gonna 
+dmx x gonna 
+dmx x gonna 
+music x 
+dmx 
+music dmx 
+music x gonna 
+music 
+music x gonna 
+music x gonna 
+xzibit 
+xzibit 
+xzibit 
+xzibit 
+stardust 
+we 
+weeds 
+jessi 
+biel 
+alba 
+rar passord cracker 4.12 keygen 
+rar passord cracker 4.12 keygen 
+rar password cracker 4.12 keygen 
+rar password cracker 4.12 
+brian adams 
+rar password cracker keygen 
+rar password cracker keygen 
+rar password cracker keygen 
+rar password cracker keygen 
+rar password cracker 
+teen 
+led zeppelin 
+antonio albanese 
+ceto 
+laqualunque 
+run fat boy run 
+9nv9ydyfodq 
+alba 
+age of empire 
+age of empire iii mac os 
+age of empire iii mac 
+age of empire iii mac 
+age of empire iii 
+age of empire iii dmg 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+harry sacksioni 
+harry sacksioni 
+haydn 
+mozart 
+k3 
+superbad 
+britnay spares album 
+britney spears album 
+3.10 to yuma 
+3.10 to yuma 
+3.10 to yuma 
+miller 
+frank miller 
+ford 
+harrison ford 
+tarantiono 
+tarantino 
+traveler 
+transformers 
+new 
+apocalypse now 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sexy love 
+high school musical 
+christine lavin 
+cctv 
+google tech talks 
+search files, youtube and liveleak 
+beal 
+jessica 
+simply 3d 
+webcam girl 
+bunny 
+bunny 
+bunny 
+glamazon 
+lift 
+lift 
+lift 
+bunny 
+glamazon 
+eminem 
+porn 
+software 
+program 
+programs 
+program 
+software 
+programs 
+program 
+picture it 
+picture 
+bunny glamazon 
+skalls 
+skall 
+naked girls 
+porn 
+naked chicks 
+naked chick 
+naked 
+pussy 
+vagina 
+bressts 
+bresst 
+naked girls 
+naked girl 
+naked 
+amfest 
+bunny 
+lift and carry 
+gang 
+bollicine 
+hostel 
+phenomena 
+glamazon 
+pokerbot 
+dario argento 
+dario argento 
+dario argento 
+ass 
+atlantis 
+atlantis - der versunkene kontinent 
+atlantis - der versunkene kontinent 
+traffic 
+ass traffic 
+bunny glamazon 
+lift and carry 
+combustion 
+cristal 
+pornero 
+love 
+gun 
+gunball 
+dildo 
+lucy lee 
+lucy lee 
+mazinger 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+mazinger español 
+nydahl 
+legend 
+nydahl 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+pedos 
+mixed wrestling 
+pedos 
+pedos 
+pedos 
+pedos 
+beionce 
+beyonce 
+moana 
+pozzi 
+pozzi 
+pedos 
+pedofilia 
+pedofilia 
+pedoporno 
+xxx 
+anal 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cum 
+pedo 
+pedos 
+babes 
+thai 
+thailand 
+thailand 
+china 
+thai 
+thai 
+thai creampie 
+yes 
+thailand 
+thailand 
+thailand 
+thailand 
+thailand 
+thai 16 
+xp ita 
+vasta ita 
+vista ita 
+pedos babes 
+babes 12 
+anal 
+anal cum 
+anal 
+battisti 
+photofont 
+eva enghen 
+eva engen 
+eva enger 
+science 
+fiction 
+japanese 
+español 
+español 
+chinese 
+thai 
+mixed wrestling 
+sumo 
+female suno 
+female sumo 
+female sumo 
+mixed sumo 
+amazon 
+amazon amanda 
+tribler 
+537 cuba 
+amanda 
+usher 
+blithe 
+titanic 
+cuba 
+537 cuba 
+lissi und der wilde kaiser 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+cuba 
+537 cuba 
+amazon 
+galactica 
+usher 
+lion king 
+kim holland 
+amazon lift 
+animal sex 
+goddess 
+amazon woman 
+shoulder lift 
+caryy 
+carry 
+nomadi 
+monica carry 
+monika lift 
+bollywood naked 
+bollywood naked 
+bollywood naked 
+bollywood nude 
+nomadi 
+bollywood nude 
+bollywood nude 
+bollywood nude 
+hollywood nude 
+monica lift 
+hollywood nude 
+venditti 
+lifting 
+venditti 
+heroes.s02 
+heroes 
+venditti 
+ponyplay 
+gruselkabinett 
+freischütz 
+ponyplay 
+schwarze auge 
+dsa 
+dsa 
+auge 
+sergj 
+wächter 
+dsa 
+hustler 
+playboy 
+hustler 
+powerpoint 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+larry brent 
+www.youtube.com 
+dämonenkiller 
+textur 
+search files, youtube and liveleak 
+porn 
+rickshaw 
+http://youtube.com/watch?v=pcmbmjprme0 
+human rickshaw 
+naruto 
+porn 
+kim holland 
+sylvia saint 
+pony play 
+rihanna 
+vista 
+ponygirl 
+search all files 
+search all files 
+convert daa 
+convert daa 
+ponyboy 
+the joker live 
+black horse live 
+american gnangster 
+american gangster 
+plan 9 from outerspace 
+lil wayne 
+pearl harbor 
+pearl harbor - dvd 
+pearl harbor - dvd 
+usher 
+fast and furious 
+[xxx]russian_student_orgy_full_movie-smkd.wmv 
+[xxx]russian_student 
+[xxx]russian_student 
+russian student 
+gina 
+tits 
+teeny 
+españolwilliam pitt 
+william pitt 
+h6 
+hazes 
+samen 
+surf up 
+keltisch 
+hintergrund 
+dutch 
+horror 
+neighbour 
+avira antivirus security premium 
+avira antivirus security premium 
+myth 
+avira antivirus security premium 
+rambo 
+myth 
+ransie 
+five 
+strega 
+sims 2 
+sims 2 castaway 
+alkohol 120 
+alkohol 
+alcohol 
+video converter to psp 
+osborne 
+martin gore 
+martin gore 
+barletta 
+bologna 
+short history of time 
+brief history of time 
+invasion 
+house s03 
+house s03e01 
+ponygirl 
+ponygboy 
+ponyboy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+elo.digital.office 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ponygirl 
+bunny glamazon 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tribler 
+big 
+amfest02_chunk_1 
+sumo 
+woman sumo 
+sumo girl 
+osx 
+lady sumo 
+mixed sumo 
+femdom 
+search files, youtube and liveleak 
+xxx 
+xuxa 
+porno 
+boy 
+dragonball gay 
+gay rape 
+porno 
+porno 
+sesso 
+pompino 
+the bourne ultimatum 
+64 bits 
+nl subs 
+daa 
+uif 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+poweriso 
+heroes 
+heroes e07 
+bizut 
+性 
+奸 
+强奸 
+强奸 
+强奸 
+强奸 
+强奸 
+强奸 
+sex 
+simpsons 
+simpsons 
+simpsons 
+simpsons 
+simpsons 
+echo and the bunnymen 
+echo and the bunnymen 
+echo and the bunnymen 
+om shanti om 
+cosmica 
+the meteors 
+more greatest hits 
+bollywood 
+bollywood 
+bollywood nude 
+acdc 
+coral draw 
+corel draw 
+loins of punjab movie 
+rape 
+girls 
+girls getting raped 
+skidrow 
+skid row 
+skid row thickskin 
+skid row thick skin 
+mission 
+imposseble 
+impossible 
+300 
+capoeira angola 
+dragonball budokai 
+ps" dragonball budokai 
+ps2 dragonball budokai 
+sevp 
+scm bradd 
+straigtcollegemen bradd 
+straigtcollegemen 
+straightcollegemen 
+straight college men 
+college men 
+hot men 
+gay 
+sybian 
+ratatuille 
+dutch 
+paint shop 
+andré 
+hazes 
+illustrator 
+new york 
+lunikoff 
+enkelz 
+onkelz 
+nordfront 
+onkelz 
+lunikoff 
+landser 
+kategorie 
+vollkontact 
+vollkontact 
+vollkontact 
+vivere o morire 
+kc 
+rape 
+jogos mortais 
+pateta 
+f8-rc3-live kde 
+f8 
+f8 kde 
+the simpsons 
+westlife 
+westlife 
+draw & paint 
+ms paint 
+lightwave 3d 
+station pro 
+imagine 
+dream 3d 
+dreams 3d 
+true space 
+type styler 
+amorphium 
+darw & paint 
+draw & paint 
+3blow 
+3blow 
+search files, youtube and liveleak 
+ken 
+guerriero 
+spiderman 
+aldo giovanni giacomo 
+ita ebook 
+ita e-book 
+ita e-book 
+ita e-book 
+ita ebook 
+ita 
+selen 
+mp3 
+light wave 
+xvid italian 
+la terza madre 
+italian 
+search files, youtube and liveleak 
+laura pausini 
+conquistando o impossivel motivação pessoal 
+conquistando o impossivel motivação pessoal 
+conquistando o impossivel motivação pessoal 
+conquistando o impossivel motivação pessoal 
+motivacional 
+motivacional 
+videos 
+rocco 
+conquistando o impossivel 
+conquistando o impossivel 
+conquistando o impossivel 
+video motivação 
+video motivação 
+access 
+accessfatture 
+fatture 
+fattura 
+corso access 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+video motivação 
+autocad 
+the sims 
+rocco 
+the sim 
+gif 
+non siamo soli 
+winrar 
+non siamo soli 
+zecchino d'oro 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+rocco 
+rocco 
+rocco 
+rocco 
+albano 
+games 
+games iso 
+nirvana 
+vasco 
+need for speed carbon iso 
+sicko 
+xvid italian 2007 
+the gost 
+the gost 
+the gost 
+la terza madre 2007 silent 
+la terza madre 2007 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+la terza madre 
+search files, youtube and liveleak 
+hape kerkeling 
+hape kerkeling 
+hape kerkeling 
+hape kerkeling 
+hape kerkeling 
+xvid italin 2007 
+.mp4 
+.mpg4 
+.mpg4 
+movie lat 
+slut 
+britney 
+britney 
+britney 
+italian 
+horror italian 
+horror 
+italian 
+saw 4 
+saw 4 
+misoux 
+fuck 
+access 
+encarta 2008 
+encarta 2008 
+encarta 2008 ita 
+xvid italian 
+3d max 
+xvid italian 
+galactica 
+enrico ruggeri 
+enrico ruggeri 
+genesis 
+enrico ruggeri 
+mac 
+sony ericsson w300i 
+sony ericsson w300 
+w300 
+corel 
+jennifer lopez 
+teen 
+king 
+singers 
+gregoriaans 
+ouverture 
+galactica 
+playboy 
+bhool bhulaiya 2 
+bridget 
+antonacci 
+bridget maasland 
+katja 
+dvd 
+pamela 
+dragonball 
+dvd ita dragonball 
+ita dragonball 
+dragonball 
+amatoriale 
+aria fist 
+aria 
+boys 
+red 
+girls 
+boys 
+xvid la terza madre 
+bondage 
+bondage 
+ramazzotti 
+film 
+aladin 
+i7 e l8 
+sex 
+preteen 
+porno 
+sidnee 
+sidnee 
+pausini 
+adriano celentano 
+adriano celentano 
+adriano celentano 
+marco masini 
+porno 
+girlfriend 
+girlfriend 
+girlfriend 
+girlfriend 
+girlfriend 
+superman 
+foletti 
+foletti 
+pinocchio 
+renegade 
+celentano 
+amateur 
+tracy lords 
+porno 
+beo 
+wife 
+celine dion 
+beowulf 
+prison break fr 
+ableton live 
+crazytalk 
+mature 
+crossdressing 
+spank 
+spank 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+adult 
+spanking 
+a walk to remember 
+a walk to remember/ movie download 
+a walk to remember/ movie 
+a walk to remember 
+amazon blithe 
+blithe 
+blithe 
+amazon girl 
+amazon women 
+amazon 
+prison brake 
+prison brake 
+prison brakee 
+prison brake 
+prison brake 
+prison brake 
+prison break 
+eros ramazzotti 
+eros ramazzotti etazzenda 
+tazzenda 
+eros ramazzoti 
+eros ramazzotti 
+tazzenda 
+tazenda 
+eros 2007 
+italian 2007 
+justin timberlake 
+justin timberlake mp3 
+tribler 
+tribler 
+domo mia 
+test 
+elephant 
+elephant 
+ubuntu 
+test 
+splinter[2006]dvdrip 
+splinter*dvdrip 
+splinter 
+amazon susan 
+hairspray 
+justin timberlake mp3 
+justin timberlake mp3 
+justin timberlake mp3 
+justin timberlake 
+apogolize 
+apogolize 
+reno.911 
+reno911 
+reno 911 
+reno 911 
+goddess 
+shemale 
+nude 
+saw 
+naked 
+borati 
+borrati 
+borroti 
+borroti 
+boroti 
+boroti 
+mixed wrestling 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+goddess 
+mixed wrestling 
+corel draw 
+the last legion 
+charmed 
+heroes 
+charmed 
+charmed 
+uitzending gemist 
+uitzending gemist 
+crissy moran 
+crissy moran 
+crissy moran 
+crissy moran 
+maximum 
+peter gabriel 
+search files, youtube and liveleak 
+stargate atlantis 
+acronis true image 
+bourne 
+autodesk 
+acronis true image 
+ets 
+troy pescara 
+troy pescarese 
+will miami 
+yes 
+lost season 3 
+lost season 4 
+lost season 3 
+babysitter 
+symbian 
+chuck 
+chuck so1eo6 
+chuck so1e06 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+vulcao etna 
+vulcao etna 
+etna 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+http://www.bbc.co.uk/portuguese/avconsole/player/vid100.shtml?lang=pt-br&clippos=6.093999999999999&clipurl=http://www.bbc.co.uk/portuguese/meta/dps/2007/04/nb/070405_volcanoreunion_console_4x3_nb!asx&title=rios%20de%20lava&wintype=200&rhs=http://www.bbc.co.uk/portuguese/news/avfile/2007/04/nb_wm_070405_volcanoreunion_console.shtml&cs=news&fsname=nb_wm_fs&bw=nb 
+super 
+mobile 7 
+taxi 4 
+720p 
+creampie 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+google video 
+life 
+stargate atlantis 
+life 
+dragon ball 
+swimming 
+pirates 
+starcraft brood war 
+young 
+transformers 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+djay 
+girls gone wild 
+accepted 
+hape kerkeling 
+mixed wrestling 
+search files, youtube and liveleak 
+concept draw 
+grappling 
+grapplinggirls 
+amazon 
+linux 
+antivirus 
+no tears anymore 
+cascada 
+lösch mir die möse mit sperma 
+lösch mir die möse mit sperma 
+norton 
+chikan 
+chinese 
+asian 
+hongkong 
+hong kong 
+eboney 
+ebony 
+fr 
+french 
+sesso 
+french 
+tribler 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+* 
+gay 
+italian 
+doors 
+doors 
+rolf zuckofski 
+rolf zuckowski 
+rolf zuckowski 
+den-o 
+eri clepton 
+clepton 
+nomadi 
+french 
+prison break 
+prison break 
+prison break 
+gangbang 
+slave 
+slavegirl 
+fetish 
+tribler 
+tribler 
+tribler 
+delft 
+delft 
+peer 
+peer 
+ratatouille pl 
+ps2 
+fedora 8 
+tribler 
+snl 
+buonanotte italia 
+buonanotte italia 
+sex 
+madonna 
+madonna 
+chris brown 
+blowjob 
+shakatak live 
+shakatak live 
+toast 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+alles is liefde 
+cnn 
+bdsm 
+window vista ultimate 
+window vista ultimate italiano 
+window vista ultimate italiano 
+window vista ultimate italiano 
+window vista ultimate 
+window vista ultimate crack 
+window vista ultimate ita crack 
+house 
+house. 
+mp3 
+window vista italiano 
+window vista ultimate italiano 
+window vista ultimate ita 
+burqa 
+les experts pc 
+les experts pc game 
+csi pc game 
+paki 
+heroes season 2 
+paki 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+madame fate 
+prisonbrake 
+prison brake 
+prison brake 
+prison brake 
+prison brake 
+prison brake 
+prison break 
+spiderman 
+play private eye - greatest unsolved mysteries 
+play private eye 
+play private eye 
+private eye 
+search files, youtube and liveleak 
+shalwar 
+python 
+python 
+avatar 
+hijab 
+dutch 
+bumba 
+vale tudo 
+veil 
+free fight 
+bellydance 
+settlers 
+turkish bellydance 
+didem 
+new 
+paki 
+avatar 
+free fight 
+free fight 
+free fight 
+sturmgeist 
+sturmgeist89 
+latino muscle men 
+. 
+games 
+sato 
+ulead pro 
+ulead 
+http://cgi.omroep.nl/cgi-bin/streams?/id/vara/rest/2007/vara_101158543/bb.20071030.asf 
+bob 
+ita 
+heroes 
+valentijn 
+pquadro 
+la terza 
+sexy back 
+58 minuti per morire 
+58 minuti per morire 
+30 days 
+helsing 
+helsinjk 
+helsink 
+helsing 
+van helsing 
+van helsing 
+the unit 
+hoeren 
+galactica 
+enterprice 
+enterprise 
+osx86 
+nine inch nails 
+dresden dolls 
+dresden dolls 
+placebo meds 
+placebo 
+placebo 
+dresden dolls 
+backstabber 
+head like a hole 
+head like a hole 
+head like a hole 
+head like a hole 
+head like a hole 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+kmfdm 
+the cure 
+if nly tnight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+if only tonight we could sleep 
+nine inch nails 
+nine inch nails 
+nine inch nails 
+nine inch nails 
+lullaby 
+etabs 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+la tigre e la neve 
+etabs 
+sap2000 
+suspiria 
+inferno 
+la tigre e la neve 
+ratatuille ita 
+dresden dolls 
+spiderman 3 ita 
+dresden dolls 
+spiderman 3 ita 
+dune 
+harry potter 
+harry potter e l'ordine 
+dresden dolls 
+the smashing pumpkins 
+the smashing pumpkins 
+the smashing pumpkins 
+the smashing pumpkins 
+adriano celentano 
+godlike 
+godlike 
+godlike 
+godlike 
+godlike 
+godlike 
+godlike 
+kmfdm 
+paz 
+paz! 
+paz 
+la guerra dei anto 
+la guerra dei anto 
+je t'aime paris 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+paris je t'aime 
+roberto benigni 
+roberto benigni 
+roberto benigni 
+roberto benigni 
+roberto benigni 
+hentai 
+hentai 
+hentai 
+take that 
+lesbians 
+older 
+milf 
+mothers id like 
+mothers 
+oral 
+cunnilinguis 
+cunnilingus 
+devil prada 
+take that 
+nuffnang 
+japan 
+van morrison 
+hellgate 
+solo 
+fusca 
+masturbation 
+kissing 
+kissing 
+kissing 
+tongue 
+tongue 
+tongue 
+feet 
+legs 
+feet 
+amateur 
+vmware 
+matuer 
+matuer 
+matuer 
+mature 
+milfs 
+sex 
+nipples 
+tits 
+xxx 
+sex 
+rubbing 
+fucking 
+over 40 
+women over 40 
+sex over 40 
+saphos 
+sapphos 
+sappho 
+kissing 
+dmx 
+porn 
+lesbians fisting 
+diehard 
+diehard 
+sex 
+starrcraft 2 
+starcraft 2 
+starcraft 
+smallville.s07e07 
+dog fuck 
+autocad 2007 
+mattafix big cuty live 
+mattafix big city live 
+autocad 2007 
+x gonna give it 
+x gonna give it to ya 
+search files, youtube and liveleak 
+shrek 
+ps2 shrek 
+mattafix big city life 
+magazine 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+oxygen 
+heroes season 1 
+search files, youtube and liveleak 
+* 
+auteurs 
+home movies gril 
+home movies grill 
+klysma 
+enema 
+natale acasa dj 
+heroes 
+heroes 6 
+natale a casa deejay.avi 
+la terza madre 
+film 2007 
+la terza madre 
+la terza madre 
+asian sluts 
+asian sluts 
+asian sluts 
+little drummer 
+little drummer 
+drummer 
+ll cool j 
+ll cool j 
+ll cool j 
+dr dre 
+dvd 
+lightroom 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ska collection 
+seeed 
+obrint 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+marley 
+negra 
+mano 
+manu 
+chao 
+dancehall 
+natale a casa deejay.avi 
+snow patrol 
+sawariya 
+search files, youtube and liveleak 
+ricky gervais 
+2010 
+comic 
+kiraro 
+kitaro 
+salsa 
+salsa 
+hoogvliet 
+focus 
+wende snijders 
+diskeeper 
+la terza madre 
+stealth 
+timers 
+ãżã¤ã\83\9eãĵãÅ\9f 
+timers bootleg 
+timers boot 
+gay 
+timers 
+brazzers 
+druksexorgy 
+partyhardcore 
+audio recording 
+recording audio 
+camstudio 
+solution 
+camel 
+aika 
+timers 
+emerson lake and palmer 
+girls out west 
+girls out west 
+pregnant 
+nipple 
+piss 
+sekt 
+girls out westishotmyself 
+ishotmyself 
+ishotmyself 
+profondo rosso 
+csi 
+profondo rosso 
+csi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+brutal rape 
+temi vista 
+eros 
+biagioantonacci 
+biagio antonacci 
+temi xp 
+xp 
+themes xp vista mac linux 
+themes xp vista 
+themes for xp vista 
+themes for xp 
+java for sony ericsson w300i 
+java for sony ericsson w300i 
+java 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rick wakeman 
+brian wilson 
+thm 
+.thm 
+themes.thm 
+miquel bosè 
+miquel bosè 
+papito 
+sognami 
+brian wilson 
+brian wilson 
+compilation 
+brian wilson 
+brian wilson 
+brian wilson 
+brian wilson 
+compilation love 
+moto gp french 
+richard hawley 
+french 
+2006 
+2005 
+2003 
+2004 
+van kooten en de bie 
+2002 
+2001 
+tiromancino 
+richard hawley 
+love 
+only you 
+traci lords 
+heroes 
+papito 
+sailor moon 
+moana 
+windows vista 
+sailor moon il film 
+xxx 
+teens 
+tomtom 
+cinthia mulherao 
+dvd 
+dvd 
+ciblythe 
+blythe 
+blythe 
+amazon blythe 
+kayak 
+amazon astrid 
+amazon astrid 
+roger waters 
+spice girls 
+astrid 
+svejk 
+dailly motion 
+tango 
+koremans 
+xxx 
+gandalf 
+cum 
+amazon taanna's 
+lucinda 
+goddess severa 
+cum 
+hardcore 
+oral 
+a tribute to r d burman 
+severa 
+mallu 
+adult 
+mario salieri 
+ita divx 
+tango argentino 
+dracula 
+pics xxx 
+xxx 
+piggyback 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode 
+depeche mode# 
+supertramp 
+daily 
+blythe 
+adope 
+cnn 
+xxx 
+film xxx 
+film xxx 
+matrix 
+matrix ita 
+search files, youtube and liveleak 
+beatles 
+adult 
+amateurs 
+divx 
+divx ita 2007 
+divx ita 2007 
+divx ita 
+layo bushwacka 
+divx ita 
+divx ita 
+divx ita 
+divx ita 2007 
+nora jones 
+the invisible ita 
+the invisible 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible ita 
+the invisible 
+dvdrip ita 
+dvdrip ita 2007 
+shemales 
+bareback 
+anal 
+cnn 
+don omar 
+don omar 
+ita 
+don omar 
+don omar 
+don omar 
+multilanguage 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+don omar 
+james last 
+james last 
+james last 
+james last 
+spiderman ned. 
+spiderman 
+tom tom go 
+don omar 
+divx ita 
+danny tenaglia 
+m2o 
+m2o 
+mp3 
+biagio antonacci 
+antonacci 
+antonacci 
+antonacci 
+goku 
+naruto 
+divx 
+divx ita 
+spaanse les arjan 
+mp3 
+nikki tyler 
+vivid 
+wicked 
+don omar 
+nino d'angelo 
+nino d'angelo 
+nino d'angelo 
+nino d'angelo 
+nino d'angelo 
+hoerbuch 
+audiobook 
+film 
+film azione 
+film 
+film xxx 
+film xxx 
+film xxx 
+film hostel part 2 
+film azione 
+alessandro siani 
+latina 
+latina 
+mortal kombat 
+teen pussy 
+teen pussy 
+triple x 
+jethro 
+shemale 
+paki 
+private 
+boobs 
+breast 
+student 
+naked 
+naked 
+gay 
+penis 
+kali 
+tony hawk 
+princess kali 
+mistress 
+groped 
+oakley hall 
+molested 
+ride 
+wushu 
+shoulder carry 
+downblouse 
+shoulder lift 
+david török 
+search files, youtube and liveleak 
+seks 
+atomic drop 
+body slam 
+bach 
+scoop slam 
+bach 
+fireman's carry 
+markus heilmann 
+mafia 
+jordan 
+fireman's carry 
+funny movies 
+de vogeltjes dans 
+funny movies 
+slam 
+blade 
+bokito 
+bokito 
+bokito 
+bokito 
+bokito 
+bokito 
+funny movies 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+paki 
+search files, youtube and liveleak 
+ass rape 
+ass rape 
+livorno 
+blondes anal 
+blondes 
+patrizio 
+selen 
+milli d'abbraccio 
+jessica gaile 
+jessica gaile 
+alessia mell 
+manuel 
+samantha del rio 
+ita 
+disco inferno 
+50 cent 
+rush hour 3 
+search files, youtube andaika liveleak 
+aika 
+rosetta stone 
+rosetta stone 
+smallville 
+pantyhoseline 
+pantyhose 
+tiny 
+pantyhosetales 
+pantyhosescreen 
+pantyhose screen 
+panties 
+blow 
+search files, youtube and liveleak 
+anal 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tiffany towers 
+maxi mounts 
+maxi mounds 
+souza 
+susan de nero 
+linzie mckenzie 
+linzie dawn 
+linzie dawn nckenzie 
+linzie dawn nckenzie 
+rape 
+fucking 
+fucking young girl 
+star wars 
+axis&allies 
+axis allies 
+tropico 
+sim earth 
+shogun 
+the last dragon 
+plaisir de nuire 
+tous au paradis 
+search files, youtube and liveleak 
+ita 
+ita 
+ita 
+chelsey charms 
+chelsey charms 
+weihnacht 
+watch us fuck 
+watch us fuck 
+watch us fuck 
+watch us fuck 
+watch us fuck 
+age of empires 3 
+age of empires 3 
+age of empires 3 
+autocat 
+autocad 
+autocad ger 
+autocad 
+tune up utilities 
+ita 
+ita 
+ita 
+ita 
+http://wiki.ubuntuusers.de/downloads/gutsy_gibbon/statisch?action=attachfile&do=get&target=ubuntu-7.10-dvd-i386.iso.torrent 
+http://wiki.ubuntuusers.de/downloads/gutsy_gibbon/statisch?action=attachfile&do=get&target=ubuntu-7.10-dvd-i386.iso.torrent 
+http://wiki.ubuntuusers.de/downloads/gutsy_gibbon/statisch?action=attachfile&do=get&target=ubuntu-7.10-dvd-i386.iso.torrent 
+fedora 
+casino royale 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+quuen - somebody to love 
+queen - somebody to love 
+queen - somebody to love 
+aika 
+yhe shield 
+pc.football.manager.2007.italian.torrent 
+abbyy 
+charms 
+chrystal star 
+busty dusty 
+busty dusty 
+lazio 
+ronaldinho.a.day.in.the.life.of 
+ronaldinho 
+pompom 
+hohoho 
+sajdik 
+halasz 
+monty python 
+farkashazy 
+xxx 
+spybot 
+spy bot 
+spy bot 
+playboy 
+south park 
+inconveniant truth 
+inconvenient truth 
+premiere pro cs3 
+dvd*avi 
+dvdavi 
+ramones 
+calinforication dvd 
+calinfornication dvd 
+calinfornication 
+calinfornication 
+calinfornication 
+calinfornication 
+calinfornication 
+calinforn 
+calin 
+cali 
+californication 
+softwares 
+google earthe 
+google earth 
+google earth pro 
+hk tv 
+moves 
+movus 
+giann dany 
+giann deny 
+giann deny 
+giann deny 
+giann deny 
+giann deny 
+giann deny 
+giann deny 
+gianni deny 
+gianni dany 
+gianni dany 
+gianni dany 
+sawariya 
+search files, youtube and liveleak 
+led zeppelin 
+harry potter und * 
+harry potter und der orden des phönix 
+harry potter und der orden des phönix 
+harry potter und der orden des phönix 
+search files, youtube and liveleak 
+jenna 
+harry potter und der orden des phönix 
+search files, youtube and liveleak 
+games 
+harry potter und der orden des phönix 
+die hard 4 
+cumshot 
+facial 
+die hard 4 
+amatoriale 
+underworl 
+vista 
+amatoriale 
+vanessa incontrada 
+resident evil 
+adobe 
+resident evil apocalipse 
+resident evil apocalisse 
+search files, youtube and liveleak 
+mature 
+clone dvd 
+anti virus 
+back stage 
+back stage 
+paris hilton 
+300 
+prision breack 
+capon 
+nipple 
+omnipcx 
+resident evil 
+pc games need for speed pro street 
+ratatui 
+13 fantasmas 
+die hard 4 
+neox 
+spiderman 
+google 
+bbc 
+spiderman 
+spiderman 
+netfront 
+school of rock 
+pocoyo 
+el sindrome de ulises 
+pocke pc 
+eva dahlgren 
+bratz 
+eva dahlgren 
+eva dahlgren 
+hombre cubo 
+winx 
+search files, youtube and liveleak 
+jovanotti 
+maroon 5 
+tomita 
+deva ghan 
+dave gahan 
+avatar 
+hourglass 
+nomadi 
+bisex 
+cruyf 
+schiedam 
+search files, youtube and liveleak 
+windows 
+office 2007 
+24 
+visual basic 2007 
+visual basic 
+opera 
+blade 
+beverwaard 
+nero 
+blender 
+ulises 
+300 
+avril lavigne 
+wii 
+kaspersky 
+rolling stones 
+flood 
+the simpsons the movie 
+fedora 7 
+led zeppelin 
+depeche mode 
+scarface the film 
+wordpress revolution 
+snike squadron 
+smoke squadron 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+asia sex 
+dvd ita 
+funny clip 
+funny clip 
+funny clip 
+funny clip 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+grey's 
+eros ramazzotti 
+renato zero 
+jameson 
+mom 
+game 
+pc game 
+beowulf 
+pc shooter game 
+pc fps game 
+pc 
+ken follett mondo senza fine 
+ken follett 
+follett 
+search files, youtube and liveleak 
+battlefield tour: 
+timbaland 
+modern talking 
+moddern talking 
+talking 
+modern 
+need for speed 
+dog 
+vanoza 
+gay fuck 
+kerez 
+gabriella 
+bella 
+rocco 
+adult gay 
+pyramid 
+ever after 
+visual basic 
+mac leopard 
+ik herb schijt 
+ik heb schijt 
+ik heb schijt 
+ik heb schijt 
+ik heb schijt 
+ac 97 
+ac 97 
+ac 97 
+ac 97 
+realtek 
+realtek 
+realtek 
+ac'97 
+ac'97 
+gta 
+fifa 
+pes 2008 
+mappe tt 
+mappe tomtom 
+i vichinghi 
+film ita 
+avatar 
+respectable 
+premiere 
+linkin park 
+fort minor 
+power dvd 
+vomit 
+diamond 
+terminetor 
+mina 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+adriano celentano 
+dolly 
+atb 
+video 
+rihanna 
+acronis 
+doom 3 
+jlo 
+tmf 
+pussycat 
+call of duty 
+call of duty ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+call of duty multilanguage 
+battlefield 
+madonna 
+call of duty 
+rugby 
+madden 
+madden 
+rugby 
+ray charles 
+summertime 
+summertime 
+summertime 
+ray charles 
+die ard vivere o morire 
+crack fifa 08 
+shemal 
+amici miei 
+amici miei 
+amici miei 
+amici miei 
+die hard 
+ligabue 
+dragon wars 
+cfnm 
+film 
+enchanted 
+ramsay 
+iron chef 
+kitchen 
+motovation 
+bee movie 
+porno 
+cellulare 
+sexy 
+funny 
+american ninja 
+demon tools 
+donne di mafia 
+tosca d'aquino 
+mafia 
+osx86 
+nin 
+nine inch nails 
+nine inch nails 
+wireless 
+bdsm german attac 
+vicente celestino 
+porta aberta 
+enquanto o sol brilhar 
+enquanto o sol brilhar 
+enquanto o sol brilhar 
+facinação 
+facinação 
+facinação 
+facinação 
+champagne 
+champagne 
+champagne 
+carruagem de fogo 
+groove armada goodbye country 
+groove armada 
+carruagem de fogo 
+carruagem de fogo 
+ave maria 
+ave maria 
+star wars 
+air kung fu 
+jay brannan 
+sybian 
+tied 
+gyno 
+gyno 
+yikes 
+dormroom 
+vibrator 
+die hard 
+gay 
+stargate 
+17 and 
+matrix 
+doctor who season 3 
+matrix 1080p 
+doctor who season 3 dvd 
+×Â\9eש×Â\94 ×¤×¨×¥ 
+mumbai salsa 
+gloryhole 
+glory hole 
+criminal minds 
+whore 
+bdsm 
+bdsm 
+bdsm 
+forced 
+andromeda ita 
+wende 
+davilex 
+davilex software 
+davilex software 
+software 
+fall out boy 
+test 
+l'ultima legione 
+legion 
+nero 
+resident evil 
+scoter 
+scooter 
+criminal minds 3 
+drupal 
+witchblade 
+hentai 
+[bakakozou_-_conclave]witchblade 
+[bakakozou_-_conclave]witchblade 
+foxy lady 
+search files, youtube and liveleak 
+source 
+rockabilly 
+hot coffee 
+amazing grace 
+anal 
+anal 
+pixar 
+kurumi 
+haendel concerti grossi 
+vivaldi 
+zimmer 483 
+pasolini 
+zimmer 483 
+calcio 
+calcio 
+calcio 
+mare 
+relient k 
+dildo 
+taylor rain 
+porpora 
+dr housedr house md 
+dr house md 
+spiderman 2 
+spiderman 2 ita 
+kurumi 
+house 
+spiderman 2 
+search files, youtube and liveleak 
+supertramp 
+my sims 
+mikayla 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+winx 
+mikayla latin 
+mikayla 
+lichelle marie 
+my sims.jar 
+codec 
+doraemon 
+dame chocolate 
+juanes, me enamoras 
+mumbai salsa 
+peillo 
+enrique 
+enrique naranjo 
+angela 
+hernandez 
+hernandez enrique 
+brazt 
+bratz 
+search files, youtube and liveleak 
+la terza madre 
+barbie la peliculas 
+barbie la pelicula la reina de los animaless 
+barbie la pelicula la reina de los animales 
+barbie la pelicula la reina de los animales 
+barbie la pelicula la reina 
+annie cruz 
+lily thai 
+a letto con le quarantenni 
+jonn holmes 
+milly d'abraccio 
+bocche di comesse 
+selen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+asus 
+asus eee 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+zooskool 
+beast 
+dog 
+pig 
+snake 
+machined 
+wired 
+wired 
+machined 
+bondage 
+nino d'angelo 
+witchblade 1-24 eng softsub mkv complete 
+witchblade witchblade 1-24 eng softsub mkv complete 
+witchblade 1-24 eng softsub mkv complete 
+witchblade 1-24 eng softsub 
+eng softsub 
+telugu 
+telugu youtube 
+musoc 
+music 
+music 
+music gryan adams 
+music bryan adams 
+anime 
+timberland 
+dove sei gigi d'alessio 
+timberland 
+sternwander germa 
+sternwander german 
+gigi d'alessio 
+prison break 08 
+anal 
+hot mom 
+rafaello 
+friends 
+miragio 
+d'angelo 
+nino d'angelo 
+monros 
+monros 
+nino d'angelo 
+nino d'angelo 
+happy movie 
+telugu movie 
+nino d'angelo 
+avatar 
+settlers 
+nlsub 
+nl sub 
+finizio 
+planet terror 
+finiziojessica massaro 
+jessica massaro 
+jessica massaro 
+jessica 
+rosario miragio 
+rosario miragio 
+nino d'angelo 
+rocco sinfredi 
+rocco sinfredi 
+acdc 
+psv live 
+rocco sinfredi 
+dj ötzi 
+mika 
+elektor 
+dj ötzi 
+dj ötzi 
+foxy 
+mika 
+search files, youtube and liveleak 
+video 
+search files, youtube and liveleak 
+filme 
+anastacia 
+video 
+filme pt-br 
+musica brasileira 
+peillo 
+alessio 
+video porno 
+alessio 
+video porno 
+pocoyo 
+video 
+video 
+pupo 
+pupo 
+pupo 
+p2p 
+search files, youtube and liveleak 
+nino d'angelo 
+transformers german 
+hombre cubo y pocoyo 
+hombre cubo y el perro 
+dp 
+hombre cubo 
+hombre cubo 
+sex 
+carton 
+carton 
+nino d'angelo 
+hombre cubo episodio2 
+girsll 
+girsll 
+winx 
+aquatica 3d 
+3d fish tank 
+pompino 
+pompino 
+pompino 
+pompini 
+pompini 
+pompini 
+pompini 
+milan campione 
+marachesh express 
+endless 
+asia argento 
+dj orzt 
+dj otzi 
+dj otzi 
+dj otzi 
+rihanna 
+bigfish 
+bigfish 
+bigfish crack 
+cicciolina 
+bigfish patch 
+gemball 
+aquaball 
+giorgia 
+puzzle jisaw 
+reflexive 
+reflexive 
+mika 
+stand up for the champian 
+standup 
+orlowski 
+stand up for 
+kuschelrock 
+stand up for the champions 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rule the world - radio edit 
+rule the world 
+burqa 
+rule the world 
+take that rule the world 
+nino d'angelo 
+nino d'angelo 
+powerpoint 
+publisher 
+publisher 
+iran 
+nieuwsbrief 
+sex 
+history 
+statistiek 
+statistica 
+mathematica 
+statistiek 
+veil 
+dokkum 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+publisher 
+statistica 
+statistica.exe 
+statistica.zip 
+statistica 
+spss 
+posthumus 
+spss 
+titanic 
+publisher 
+mario merola 
+publisher 2007 
+portal pc 
+gigi d'agostino 
+american gangster 
+lift veil 
+lifting veil 
+irai 
+irani 
+30 days of night nl 
+saw 4 nl 
+anchorman 
+andre hazes 
+andre hazes 
+hijab 
+fantascienza 
+waitress 
+giant 
+giantess 
+search files, youtube and liveleak 
+foxy lady 
+justin timberlake 
+justin timberlake 
+justin timberlake 
+premiere pro cs3 
+teacher 
+premiere pro cs3 
+premiere pro cs3 
+beyoncé - b'day 
+titanic 
+titanic 
+beyoncé b'day 
+beyonce day 
+night 
+noodweer 
+noodweer 
+nino d'angelo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+blonde 
+search files, youtube and liveleak 
+stardust 
+nature 
+cars 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+hotmom 
+hot mom 
+private 
+hilarius 
+search files, youtube and liveleak 
+harry potter 
+los nocheros 
+pavaroti 
+pavaroti 
+pavaroti 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+franz ferdinand 
+franz ferdinand 
+franz ferdinand 
+franz ferdinand 
+zetsubou 
+ã°ãÄ´ã³ãÄ°ãŹã³ 
+ã°ãÄ´ã³ãÄ°ãŹã³ 
+gurren 
+911 
+911 terror 
+pak fa 
+digimon the movie 
+digimon 
+elizabeth 
+vincenzo ampolo 
+ubuntu 
+legione 
+papa roach 
+fome 
+fome 
+kama 
+kama 
+kilometer 
+israel 
+sex 
+soares 
+soares 
+jo soares 
+lula 
+lula 
+rape 
+me enamora 
+xp home 
+me enamora 
+me enamora 
+el polaco 
+vista 
+wanda nara 
+bananero 
+wanda nara 
+beast 
+dog 
+american french 
+gang 
+gang 
+italian 
+moulin 
+moulin 
+moulin 
+tabu 
+tabu 
+tabu 
+tabu 
+70ties porn 
+toon boom 
+toon boom 
+feist 
+alizee 
+beck 
+shaun of the dead 
+beck 
+nyce 
+nyce 
+nadia 
+no good 
+rehab 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+http://endeavor.isat.com.br/ 
+http://endeavor.isat.com.br/ 
+xp sp2 
+deus ex 
+wii 
+http://endeavor.isat.com.br/ 
+blender 
+i think i love my wife 
+exploited black teens 
+airwolf 
+a-team 
+simon and garfunkel 
+roling stones 
+rolling stones 
+chris rea 
+pink floid 
+pinc floid 
+pink floid 
+pink floyd 
+fantascienza 
+friday night lights 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+ubuntu 
+ubuntu 
+ubuntu 
+ubuntu 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+michelle duits 
+poze 
+pictures 
+pictures 
+pictures 
+sex 
+break.com 
+gay 
+shrek trzeci 
+huge nipple 
+huge 
+clit 
+squirt 
+spanish 
+metheny 
+taboo 
+orge 
+shrek 
+shrek_pl 
+teletubbies 
+sanct 
+don omar 
+don omar 
+don omar 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+preteen lolita sex 
+preteen sex 
+lolita sex 
+gay 
+nude boys 
+finereader 
+cumshot 
+facial 
+lions for lambs 
+young boys 
+buddha 
+boys 
+travaglio 
+nude boy png 
+nude boy 
+sex boys 
+dandini 
+marta 
+fica 
+porn 
+lions for lambs 
+winavi 
+winavi 
+tracy lords 
+gay hardcore 
+pink floyd 
+tracylords 
+culo 
+tracylords 
+tracylords 
+tracylords 
+tracylords 
+tracy lords 
+tracy lords 
+tracy lords 
+chubby 
+teen 
+lions for lambs 
+tracy lords 
+tracy lords 
+happy hardcore 
+mental theo 
+charly lownoise 
+cascada 
+search files, youtube and liveleak 
+mental theo 
+vista ultimate 
+cascada 
+danielle derek 
+derek 
+veronica zemanova 
+francine dee 
+sophia rossi 
+jessica cirioç 
+jessica cirio 
+detective conan 
+detective conan capitulo 1 
+detective conan capitulo 21 
+detective conan capitulo 50 
+detective conan la pelicula 
+bananero 
+animal 
+stay alive 
+the ring 
+vmware windows 
+vmware windows 
+vmware windows 
+tera patrik 
+tera patrik 
+tera patrik 
+nino rota 
+selen 
+selen 
+rocco 
+de/vision 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+de/vision 
+de/vision 
+nino rota 
+rocco siffredi 
+moana pozzi 
+spain 
+piratas caribe 
+shy love 
+spy 
+prison break season 3 
+resident evil 3 
+mu online 
+shrek 3 
+por ti 
+te quiero 
+si alguna vez 
+angel 
+naruto 
+austin lounge lizards 
+tastes like chicken 
+drunk 
+battle hymn of the republic 
+sognami 
+sognami 
+andy williams 
+andy williams 
+tiziano 
+il diario di bridjet jons 
+battle hymn of the republic 
+il diario 
+il diario 
+adobe 
+search files, youtube and liveleak 
+vintage cheese 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+adobe cs3 master 
+vintage cheese 
+search files, youtube and liveleak 
+avatar 
+dvdrip ita 2007 
+sex 
+kardashian 
+dvdrip ita 2007 
+dvdrip ita 2007 
+jij bent zo 
+boom 
+blunt 
+bach 
+beethoven 
+beethoven 
+mp3 
+techno 
+lunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+blunt 
+magix foto su cd e dvd 
+blunt 
+james blunt 
+james blunt 
+t david guetta ft. cozi 
+david guetta 
+film 
+david guetta 
+blunt 
+robyn 
+simpson il film 
+simpson il film italiano 
+simpson 
+xxx 
+m 
+a 
+b 
+techno 
+t 
+dvdrip ita 2007 
+i 
+a 
+a 
+magix 
+magix foto 
+porcupine tree 
+porcupine tree voodoo 
+porcupine tree festival 
+porcupine tree 
+umbilical brothers 
+a 
+eminem 
+e 
+eminem 
+search files, youtube and liveleak 
+encarta 2008 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+tarzan 
+tarzan ita 
+tarzan ta 
+tarzan ita 
+tarzan 
+foxy lady 
+musiq soulchild 
+xxx 
+alles is liefde 
+search files, youtube and liveleak 
+la terza madre 
+http://youtube.com/watch?v=ljmnao2pnrq 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dream theater 
+oblivion 
+oblivion 
+sims 2 
+aliens 
+spanish 
+pcsecuresystem 
+peratas del caribe 
+piratas del caribe 
+spanish 
+mina 
+reptilians 
+zodiac 
+porno 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+66 
+v 12 
+nokia 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bush 
+hansma 
+hansma 
+ireland 
+dockinga 
+thie 
+little lupe 
+little lupe anal 
+anal 
+thai 
+anal slut 
+wagner 
+ebony slut 
+ebony 
+wagner 
+wagner 
+wagner 
+ebony porn 
+asian porn 
+pthc 
+sarah young 
+xxx 
+50 cent 
+fist fuck 
+paris 
+goa 
+xxx mpg 
+thai xxx 
+thai 
+mpg 
+avi 
+rocco mpg 
+anal mpg 
+filipina mpg 
+r@ygold mpg 
+bitch mpg 
+anal avi 
+anal avi 
+anal avi 
+august rush 
+august rush 
+invasion 
+bon jovi 
+nokia 
+smallville 
+family guy 
+nino d'angelo 
+il gladiatore 
+il gladiatore 
+il gladiatore 
+bangbus 
+search files, youtube and liveleak 
+slipknot 
+the office 
+regina spektre 
+seymour 
+seymour butts 
+cassius toop 
+cassius toop 
+nude 
+xxx 
+phil collins no jacket required 
+no jacket required 
+no jacket required 
+phil collins 
+no jacket required 
+phillip 
+phil 
+philipp 
+autodesk 
+bangkok 
+jap 
+indian 
+phil collins 
+thai 
+falling down duran 
+falling down duran 
+duran falling down 
+thai 
+thai 
+fantasy 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pearl 
+heros 
+heroes 
+heroes four months earlier 
+heroes four months 
+daywatch 
+alien vs predator 
+paris hilton 
+linux 
+shrek 
+black 
+shrek 
+annie body 
+"annie body" 
+colbert' 
+colbert 
+brianna banks 
+annie body 
+all pissed off 
+annie body 
+annie body 
+"anastasia x" 
+"mr. horsecock" 
+squirting 
+texas twins 
+"texas twins" 
+philippine 
+boom 
+pda 
+dvdrip ita 2007 
+dvdrip ita 2007 
+dvdrip ita 2007 
+pocket 
+stripping nude 
+dj zany 
+stripping nude 
+homw 
+divx ita 2007 
+divx ita 2007 
+ita 2007 
+rem 
+ps2 
+ps2 singstar 
+ps2 singstar 
+fixkes 
+ps2 is0 
+singstar 
+ps2 
+voyager 
+happy feet 
+acdcsee 9 
+acdcsee 9 
+acdcsee 9 
+acdcsee 9 
+acdcsee 
+acdcsee 
+ps2 singstar 
+ps2 
+ps2 karaoke 
+juegos singstar 
+signs 
+shrek 
+ps2 fifa 2008 
+5o cent 
+buschido 
+buschido 
+buschido 
+g unit 
+fast and furious 
+ita 2007 
+ita 2007 
+ita 2007 
+the doors 
+ita 2007 
+ita 2007 
+ita 2007 
+ita 2007 
+ita 
+mp3 
+italian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+italian 
+italian 
+winx, español.ç 
+winx, español. 
+winx 
+thalia 
+thalia 
+thalia 
+thalia 
+thalia 
+mango 
+mango 
+winx la pelicula 
+amedeo minghi 
+amedeo minghi 
+amedeo minghi 
+amedeo minghi 
+renato zero 
+renato zero 
+anime 
+smallville 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville ita 
+smallville 
+sciarada divx ita 
+sciarada 
+smallville ita 
+charade 
+smallville ita 
+smallville ita 
+ita 2007 
+italian 2007 
+dvdrip italian 2007 
+dvdrip italian 2007 
+dvdrip italian 2007 
+http://www.youtube.com/watch?v=jdc4gfx1mdu 
+afro samurai 
+afro samurai - ep1/1 
+the.invasion 
+avi dvdrip ita 
+avi dvdrip ita 2007 
+avi dvdrip ita 2007 
+avi divx ita 2007 
+avi divx ita 2007 
+erwin 
+avi divx ita 
+avi divx ita 
+avi 2007 ita 
+dvdrip avi 2007 ita 
+dvdrip avi 2007 ita 
+dvdrip avi 2007 ita 
+dvdrip avi 2007 ita 
+kill bill 2 
+dvdrip avi 2007 ita 
+painter 
+painter 
+heroes seconda serie 
+acdsee 
+acronis 
+la vie an rose 
+rose 
+edith piaf 
+spyware doctor 
+vie en rose 
+statistica 
+piaf 
+history 
+statistiek 
+dvd ita 
+csi 
+settlers 
+ceasar 
+caesar 
+settlers 
+caesar 
+rose 
+sposs 
+spss 
+spss 15 
+cartoon 
+child 
+csi las vegas 
+csi las vegas ita 
+csi las vegas ita 
+csi las vegas 
+winrar 
+search files, youtube and liveleak 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+csi hard evidence 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+nora johnes 
+nora jo 
+inimigos da hp 
+inimigos da hp 
+inimigos da hp 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+viejo camaradas 
+linkin park 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vivi fernandes 
+porno 
+de lama,s verboden te lachen 
+porno 
+de lama,s verboden te lachen 
+scheila carvalho 
+sexy 
+doors 
+scheila carvalho 
+moody bleus 
+moody bleus 
+you tube 
+carla peres 
+funk 
+sexo no salão 
+sexo no salão 
+brasileirinhas 
+search files, youtube and liveleak 
+alessio 
+pooh 
+pooh 
+pooh 
+pooh 
+pooh 
+pooh 
+linux 
+anime 
+afterlife 
+stardust 
+the golden compass 
+the dark is rising 
+space miami 
+david guetta 
+eye of the tiger 
+final countdown 
+eye tiger 
+* 
+beatles 
+beatles 
+beatles 
+miami nikki 
+paul simon 
+guano apes 
+beatles 
+guano apes 
+3 doors down 
+dean martin 
+crazy talk 
+realusion 
+tutte le donne della mia vita 
+i vicere' 
+i vicere' 
+i vicere' 
+i vicere' 
+i vicere' 
+i vicere 
+as i am 
+as i am 
+steve morelli 
+steve morelli 
+steve morelli 
+steve morelli 
+steve morelli 
+maria bellucci 
+selen 
+canzoni nel tempo 
+the new deal 
+the new deal 
+autocad car model 
+autocad car 
+datenbank 
+slave 
+slave 
+tribler 
+fachinformatiker 
+große 
+big 
+blonde 
+não é o fim 
+não é o fim 
+não é o fim novo som 
+novo som 
+green day 
+linkin park 
+britney spears 
+windows xp 
+pantera 
+opeth 
+nfs most wanted 
+nba live 2008 
+green day 
+novo som 
+nfs underground 
+novo som 
+novo som 
+lesbicas 
+lesbicas 
+little lupe 
+fratellis 
+lesbian 
+novo som 
+novo som 
+novo som 
+novo som 
+novo som 
+novo som 
+novo som 
+green day 
+green day 
+green day 
+brasil 
+my chemical romance 
+lesbian 
+novo som 
+juliana paes 
+across the universe 
+akon 
+smallville 
+house 
+aquecimento global 
+war at home 
+war at home 
+smallville 
+aquecimento global e o nosso futuro 
+pool 
+nfs most wanted 
+paris hilton 
+britney sears 
+akon 
+britney sears 
+britney spears 
+rhino 
+sexo 
+porno 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jessie jane 
+tracy lords 
+tracy lords 
+tracy lords 
+tracy lords 
+porn 
+ratatouille 
+slayer 
+justin slayer 
+die hard 
+300 
+ratatouille 
+die hard 
+big 
+black 
+mandingo 
+lex steele 
+steele 
+naruto 
+lupin 
+ligabue 
+film 
+anal 
+tomtom 
+tomtom ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+vasco 
+wealth 
+wealth 
+wealth 
+wealth 
+ass 
+video 
+wealth 
+wealth 
+wealth 
+wealth 
+wealth 
+crack 
+simpson 
+simpson 
+sex 
+gay 
+anal 
+monique 
+ita 
+gay 
+delerium 
+gay 
+song 
+warefare 
+warefare 
+avi 2007 ita 
+das wiegenlied vom tod 
+hot 
+dialogys 
+dialogys 
+gossip girl 
+mika 
+the valet 
+new device 
+new device 
+new device 
+device 
+divx ita 
+divx ita 2006 
+divx ita 2007 
+divx ita 2007 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+divx ita 2007 
+divx ita 2007 
+divx ita 
+2007 ita 
+2007 
+2007 
+2007 
+2007 
+crysis 
+crysis 
+folk music 
+an introduction to probability theory and its applications 
+an introduction to probability theory and its applications 
+an introduction to probability theory and its applications 
+an introduction to probability theory and its applications 
+shark 
+search files, youtube and liveleak 
+nero photoshow 
+ibiza 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sacred love 
+sting 
+ibiza 
+ibiza 
+ibiza 
+machete 
+machete 
+machete 
+the invisible 
+the invisible 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita 
+spiderman 3 ita avi 
+spiderman 3 ita avi 
+avi spiderman 3 ita 
+spiderman 3 
+la leggenda di beowulf 
+la leggenda di beowulf 
+hot fuzz 
+pthc 
+pthc 
+pthc 
+pthc 
+asian 
+asian 
+stargate 
+justice league 
+heroes season 1 
+firefly 
+justin timberlake 
+michael jackson 
+akon 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+l.i.e 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+video 
+hairspray 
+×Â\9eש×Â\94 ×¤×¨×¥ 
+ocean thirteen 
+pirati dei caraibi la maledizione del forziere fantasma 
+pirati dei caraibi ai confini del mondo 
+silverfall 
+hairspray 
+×Â\94×æ\87\81ר×Â\90×Â\9c×Â\99×Â\9d 
+heroes.s02e08 
+lolitas 
+xxx 
+xxx 
+xxx 
+the mist 
+cum in mouth 
+karaoke 
+karaoke vietnam 
+viet karaokes 
+demjen 
+kks 
+kks 
+kks 
+warum rappst du 
+smoky 
+smoky 
+genesis 
+bach 
+bach 
+mp4 
+mp4 
+mp4 
+greek 
+ferrari 
+alan jackson 
+skery mouvi 
+full album 
+full album 
+full album 
+john legend 
+john legend 
+john legend 
+john legend 
+john legend 
+fergie big girls don't cry\ 
+fergie big girls don't cry 
+fergie big girls don't cry 
+life house 
+doctor who 
+doctor who 
+doctor who 
+the fall 
+"the fall" 
+sean kingston 
+"the fall" 
+zwartboek 
+super 
+fergie 
+search files, youtube and liveleak 
+star wars 
+shark 
+search files, youtube and liveleak 
+cabo frio 
+cabo frio 2007 
+cabo frio, brazil, 2007 
+star wars 
+star gate 
+teens 
+perverse 
+american 
+witch doctor 
+harry potter 
+beo wulf 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+skeri mouvi 
+skeri mouvi 
+skery mouvi 
+disney 
+maya 
+chaves 
+chaves 
+chaves 
+naruto 
+scary movie 
+scary movie 
+scary movie 
+skery mouvi 
+skery mouvi 
+skery mouvi 
+skery mouvi 
+skery mouvi 
+skery mouvi 
+skery mouvi 
+skery mouvi 
+mister beean 
+mister beean 
+skery mouvi tradus in romaneste 
+skery mouvi in romaneste 
+skery mouvi tradus 
+skery mouvi cu traducere 
+skery mouvi cu traducere 
+skery mouvi 
+harry potter 
+harry potter czech 
+fenixuv rad 
+corrs 
+musa eroglu 
+vivid 
+hairspray 
+gangbang 
+ronaldo 
+galatasaray 
+prison 
+porno 
+revelation 
+doors 
+advert 
+terminator 
+hudge dick 
+hudge dick 
+privat 
+nero 
+hairy 
+nero 
+seventeen 
+unit 3 7 
+bob dylan 
+guthrie 
+joan baez 
+katyÅÂ\84 
+nohavica 
+de gregori 
+napoleon 
+whores of warcraft 
+allan 
+italian 
+spanish 
+jenna jameson 
+search files, youtube and liveleak 
+sexo 
+camera escondida 
+camera escondida 
+camera escondida 
+30 rock 
+anime 
+animal 
+warcraft 
+warcraft 
+warcraft 
+whorecraft 
+vivaldi 
+apple 
+apple 
+apple 
+apple 
+feet 
+foot fetish 
+animal 
+licking 
+animal 
+rim job 
+asshole 
+cum swap 
+fingers 
+fingering 
+wet pussy 
+search files, youtube and liveleak 
+wet 
+sicko 
+youtube 
+æ¥ï¿½®ï¿½¯ 
+tits 
+youtube 
+horse 
+zoo 
+dog 
+any dvd 
+virtual dj 
+sex 
+teens 
+teens 
+preeteens 
+lolita 
+vmware 
+win98.mdk 
+*.mdk 
+rihanna 
+sexy 
+search files, youtube and liveleak 
+games 
+muziek 
+muziek 
+sleeping with the enemy 
+babysitter 
+family guy 
+sopranos 
+software 
+porno 
+porno 
+rolling stones 
+rolling stones 
+rolling stones 
+rolling stones 
+bee gees 
+disco 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+irish 
+irishgeorge micheal 
+disco 
+jamiroquai 
+sting 
+das geheimnis der geisterinsel 
+operation: kingdom 
+operation:kingdom 
+operation:kingdom 
+operation kingdom 
+hack 
+avg 
+keygen 
+power2go 
+hacking 
+windows 
+donna summer 
+tune up 
+linux 
+cartoons 
+cartoons 
+cartoons 
+simply red 
+search files, youtube and liveleak 
+the cats 
+the cats 
+jan smit 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+hot stolen porn from some persons 
+myseq 
+a rush of blood to the head 
+a rush of blood to the head 
+a rush of blood to the head 
+a rush of blood to the head 
+dialogys 
+dialogys 
+dialogys 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+screwballs 
+manga 
+sex 
+teens 
+next door 
+crysis 
+animalfarm dogs 
+bio shock 
+animalsex dogs 
+animalsex dogs +horses 
+animalsex dogs +horses 
+animals 
+harry potter e l'ordine della fenice 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+neyo 
+ne-yo in my own words 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+because of you 
+chris brown 
+ne-yo 
+bobobo 
+bobobo ,en español 
+tarzoon 
+marco borsato 
+louis de funes 
+marco borsato 
+ne-yo 
+avelgem 
+bamako 
+pregnant wife 
+horse 
+couple 
+craig david 
+usher 
+pro evolution soccer 
+fifa 2008 
+gerard vermeersch 
+bambi 
+harlequin 
+harlequin 
+harlequin 
+beth kery 
+beth kery 
+beth kery 
+beth kery 
+beth kery 
+beth kery 
+beth kery 
+beth kery 
+beth kery 
+ebook 
+fantastic four rise of the silver surfer dutch 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+lost 
+lollita 
+web 
+web 
+robbie williams 
+search files, youtube and liveleak 
+sexo 
+sexy 
+incesto 
+tabooincesto 
+taboo 
+poweriso 
+taboo 
+winmx italiano 
+moggel 
+alaura 
+saves the day 
+thursday 
+daft punk - stronger 
+daft punk 
+daft punk 
+gorillaz 
+daft punk 
+musikvideos 
+seks grugs gehandicapt 
+american gangster 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+magic iso 
+liebrand 
+firefly 
+omd 
+iron chef 
+simon and art garfunkel 
+iron chef 
+iron chef 
+iron chef 
+iron chef 
+iron chef 
+city select 
+search files, youtube and liveleak 
+left behind 
+alicia keys 
+elisa 
+elisa 
+kingdom come 
+glorious appearing 
+way of the master 
+nederlands 
+dutch 
+search files, youtube and liveleak 
+xxx 
+yahel 
+yahel 
+sex 
+sex 
+trance 
+house 
+10yo 
+10yo 
+kids 
+cosma 
+sex 
+ש×Â\9c×Â\9e×Â\94 ×�צ×Â\99 
+haitam 
+ש×Â\9c×Â\9e×Â\94 ×�צ×Â\99 
+tiesto 
+yahel 
+yahel 
+trance 
+yahel 
+yahel 
+sex 
+sex 
+rare 
+search files, youtube and liveleak 
+search all files 
+heros 
+heros 
+audiobook 
+science fiction 
+fedora 
+superbad 
+visual studio 
+xx 
+resident evil 
+italo disco 
+italo disco 
+italo disco 
+italo disco 
+nirvana 
+tits 
+radiohead 
+umbrella 
+marie digby 
+joy division 
+kayne west 
+closer 
+radiohead 
+radiohead 
+kayne 
+linkin park 
+search files, youtube and liveleak 
+radiohead 
+jay-z 
+black eyed peas 
+p.j. harvery 
+p.j. harvey 
+kayne west 
+lucinda williams 
+rhodesian 
+justin timberlake 
+white stripes 
+the killers 
+when you were young 
+the killers 
+film 
+horror 
+teen 
+girls 
+girls 
+weezer 
+60s 
+70s 
+britney 
+britney 
+britney 
+britney official 
+the full codec 
+nero 
+magix 
+magix 
+modest mouse 
+csi 4 
+stargate atlantis 4 
+csi 8 
+csi 4 
+csi 4e08 
+gay wrestling 
+sean cody 
+bgeast 
+bg east 
+bg 
+can-am 
+wrestling 
+ita 
+ita 
+muziek 
+muziek 
+muziek 
+80s 
+50s 
+70s 
+codec 
+mepg4 
+mepg 4 
+mpeg4 
+mpeg 4 
+pinnacle 
+magix 
+search files, youtube and liveleak 
+britney spears 
+0208 
+s02e09 
+s02e09 
+ratatuille 
+l'era glaciale 
+l'era glaciale 
+l'era glaciale 
+l'era glaciale 
+l'era glaciale 
+l'era glaciale 
+bittorrent 
+dvx ita 
+dvx ita 
+dvx ita 
+dvx ita 
+dvx ita 
+dvx ita 
+dvx 
+dvx 
+dvx 
+dvx 
+"corbin fisher 
+"corbin fisher" 
+corbin fisher 
+gay 
+economics 
+veritas 
+veritas 
+need speed pro street 
+nuance 
+crysis 
+sawariya 
+codec 
+call of duty modern 
+e are the night 
+pinnacle 
+gatto 
+pesaro 
+sorella 
+porca 
+spiderman 3 
+eden mor 
+bozena 
+sesso 
+boobs 
+www.vuze.com/syndication/browse/azhot/all/movie/x/x/x/_/_/x/x/feed.xml 
+www.legaltorrents.com/rss.xml 
+www.legaltorrents.com/rss.xml 
+www.legaltorrents.com/ 
+hetgesprek 
+lesbian 
+www.vuze.com 
+www.vuze.com 
+bbw 
+blowjob 
+search files, youtube and liveleak 
+http://thepiratebay.org/tor/3780716/in_a_lonely_place_(1950) 
+http://thepiratebay.org/ 
+xxx 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+csi 8 
+gay porn 
+japonesas 
+japonesas 
+japonesas 
+japonesas 
+chinas 
+chinas 
+humillacion 
+humillacion 
+japanese 
+japanese humiliation 
+resident evil apocalipse 
+resident evil 
+verve 
+sting 
+sting 
+disgorge 
+minnie riperton 
+verve 
+peru 
+dennis bergkamp 
+doe maar 
+verve 
+csi 8 
+csi 8 
+csi 8 
+strawberry 
+chmivcal 
+chemical 
+chemical brothers 
+traci 
+traci lords 
+david gilmour 
+kate bush 
+kate bush 
+kate bush 
+kate bush 
+kate bush 
+fedora 
+fedora 64 
+final cut 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+fedora 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ashcroft 
+ashcroft richard 
+verve 
+aplications 
+aplications 
+kwai 
+bridge on the river quai 
+bridge on the river kwai 
+palin 
+zbrush 
+plugins 
+acdsee 
+massada 
+bryce 
+bryce 
+bryce 
+zbrush 
+arumbai+massada 
+3d 
+massada 
+caligari\ 
+caligari 
+amapi 
+youp 
+ebook 
+audiobook 
+scarlatti 
+a 
+wende 
+bach 
+nero 
+mastercam 
+nero keygen 
+peitudas 
+peitos 
+acdc 
+rock 
+rock 
+os intocaveis 
+os intocaveis 
+os intocaveis 
+kees van kooten] 
+kees van kooten 
+peitos 
+peitos 
+acdc 
+gold 
+various 
+ever 
+sexyback 
+search files, youtube and liveleak 
+sex 
+oma 
+foxy lady 
+best 
+justin timberlake 
+mabonna 
+mr 18 
+world big cock 
+big cock 
+ligabue 
+prison break 
+prison break 
+prison break hdtv 
+playground 
+playground 
+salieri 
+salieri 
+salieri 
+salieri 
+salieri 
+salieri 
+reggea 
+reggae 
+old 
+jimi blue 
+us5 
+prince & me 
+bush 
+bochherini 
+mozart 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+resident 
+webmail hack 
+avg 
+apocalyptica 
+epica 
+club 
+dave matthews 
+firewall 
+spyware 
+adolescent 
+apocalyptica 
+jula 
+jula 
+jula 
+justin timberlake 
+norton anti viris 
+norton antivirus 
+american gangster ger 
+deep purple 
+eye tiger 
+superman 
+flood 
+parchute 
+parachute 
+night skies 
+chemical 
+chemical brothers 
+chemical brothers we are the night 
+videos 
+i wish i had an angel 
+videos 
+torrent 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+masturbation 
+julia hayes 
+met-art 
+house 
+club 
+tchtonic 
+techtonic 
+dance 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+youtube 
+avond boudewijn de groot 
+guus meeuwis live 
+my name is earl 
+eros ramazzotti 
+fuoco nel fuoco 
+eros cher 
+eminem 
+hamlet 
+nero 
+vista 
+my name is earl season 2 
+my name is earl season 2 
+my name is earl season 2 
+my name is earl 
+search files, youtube and liveleak 
+paris 
+squirt 
+zen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sg1 
+santana 
+crash test dummies 
+weezer 
+flight of the chonchords 
+flight of the choncords 
+depeche mode 
+touble rasa 
+aikido 
+suyl 
+love 
+banned 
+nina simone 
+bruce 
+sixties 
+classic country 
+country 
+country 
+gay 
+resident 
+alex jones 
+ufo 
+ho'oponopono 
+family guy 
+futurama 
+source 
+markus 
+stream 
+stream 
+stream 
+rock 
+visual 
+tv 
+webtv 
+php 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+judy garland 
+gigi soundtrack 
+britney spears 
+judy garland 
+pink 
+pink-delirium 
+judy garland 
+oddworld 
+the slim shady lp 
+the slim shady lp 
+the slim shady lp 
+the slim shady lp 
+eminem 
+science 
+science 
+animal 
+dirty mary crazy larry 
+dirty mary crazy larry torrent 
+dirty mary crazy larry torrent 
+dirty mary crazy larry avi 
+dirty mary crazy larry avi 
+search all files 
+christmas tale 
+moby 
+meat beat manifesto 
+search files, youtube and liveleak 
+tomtom 
+tomtom go 
+fat boy slim 
+csi 8 
+jordan capri 
+jordan capri 
+anne rice 
+dvdrip italian 2007 
+anne rice 
+anne rice 
+dvdrip italian 2007 
+creep 
+ann rice 
+men in trees 
+titties 
+red 
+live at the playboy mansion 
+soul heaven 
+kenny dope & karizma 
+diana krall 
+kenny dope 
+bob sinclair 
+baantjes 
+baantjer 
+baantjer 
+baantjer 
+baant 
+baan 
+baan 
+baan 
+baantjer 
+baantjer 
+amateur 
+om shanti om 
+font twister 
+font twister 
+neil young 
+fonttwister 
+kenny dope 
+kenny dope & karizma 
+studio 10 
+pinnacle 
+der dünne mann 
+xx 
+xxx 
+xxx thai 
+wishbone ash 
+greatful dead 
+greatful dead 
+king kong 
+30 days of night 
+drawn together season1 
+drawn together season 1 
+drawn together season 
+drawn together 
+ich will 
+godzilla 
+broadcast 
+u2 
+u2 single 
+country 
+search files, youtube and liveleak 
+reggie bush 
+ricky willaims 
+downloads 
+adobe creative 
+adobe creative 
+adobe creative 
+adobe creative suite 
+godtube 
+goldfilmmaker 
+indesign 
+solidgoldfilmmaker 
+j 
+dinner youtube 
+dinner 
+key gen 
+u2 singles 
+casusa 
+cazuza 
+cazuza 
+cazusa 
+cazuza 
+the mission 
+led zeppelin 
+search files, youtube and liveleak 
+silent service game pc 
+submarine pc 
+silent hunter pcgame 
+silent hunter 
+streaming 
+streaming 
+apocalyptica 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+candice night 
+acrobat 8 
+search files, youtube and liveleak 
+v-max 
+v-max 08 
+v-max 2008 
+scorpions 
+f1 
+jesus christ superstar 
+borat movie 
+borat the movie 
+search files, youtube and liveleak 
+katie melua 
+katie melua 
+sanne 
+sanne 
+anal 
+earth and fire 
+bangbros 
+bangbros 
+anal 
+ruby is the one 
+uriah heep 
+taxi 
+bisex 
+amimals 
+house of the rising sun 
+sanne wallis 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+il pacificatore 
+il pacificatore 
+il pacificatore 
+il pacificatore 
+surf 
+snowboard 
+surf 
+cerrone 
+guetta 
+sinclar 
+sinclair 
+paolini 
+il sergente della neve 
+la citta imperiale 
+la citta imperiale 
+carosello 
+carosello 
+alan parsons live 
+alan parsons 
+il capo dei capi 
+il capo dei capi 4 
+night skies 
+beowulf 
+compiz-fusion 
+* 
+scrubs 
+beowulf 
+feet 
+cum eater 
+hitman 
+fetish 
+borat the movie 
+borat the movie 
+borat the movie 
+borat the movie 
+borat the movie 
+dreamwotks 
+dreamworks 
+vista 
+rats 
+zucchero 
+zucchero 
+zucchero 
+vista x64 
+simlistisch verbond 
+beatbox 
+neale donald walsch 
+walsch 
+walsch 
+games 
+debate 
+debate rom 
+debate mit 
+debate 
+dylan 
+rhianna 
+jethro tull 
+jethro tull 
+green day 
+30 days of night 
+11th hour 
+11th hour 
+11th hour 
+11th hour 
+11th hour 
+11th hour 
+jab we 
+30 days of nightgreen day 
+green day 
+main agar 
+om shanti 
+om shanti 
+naslite 
+wa 
+dark crusader 
+real time with bill maher 
+dark crusade 
+how i met your mother 
+heroes 
+dexter 
+superbad 
+superbad 
+terminator 
+summer cumings 
+pthc 
+r@ygold 
+fight club 
+racquel darrian 
+frontline 
+pbs frontline 
+night gallery 
+newsworld 
+passionate eye 
+outer limits 
+pbs frontline 
+gta 
+windows vista 
+windows vista programs 
+driver detective key 
+driver detective 
+rough sex 
+shreck 
+shrek 
+prank 
+prank jap 
+rape 
+2007 comedy 
+gears of war 
+stargate atlantis s04 
+prank 
+3d marine fish 
+side winder 
+side winder 
+raindance 
+telepathy 
+ray keith 
+mc dett 
+back to the jungle 
+sidewinder raw vol 3 
+jungle 
+dj sy 
+drum and base 
+drum and base arena 
+hellgate 
+call of duty 4 
+metallica 
+achmed the dead terrrorist 
+achmed the dead terrrorist 
+call of duty 4 
+tiesto 
+brian adams 
+subsonica 
+hentai 
+teens 
+search files, youtube and liveleak 
+lavoura arcaica 
+lavoura arcaica 
+shrek3 
+halleluja jeff 
+xxx 
+il cane pompiere divx 
+il cane pompiere divx 
+il cane pompiere divx 
+gigi d'alessio 
+gigi d'alessio 
+celentno 
+celentano 
+mauro nardi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gianni morandi 
+gianni morandi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+curvead air 
+gianni morandi 
+curvead air 
+curvead air 
+genesis 
+gianni morandi 
+nardi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pink floid 
+teen 
+teen 
+sex 
+beatles 
+rolling stones 
+genesis 
+beatles 
+selen 
+eva 
+moana 
+xvid ita 
+300 hd 
+pulpfiction 
+vista x64 
+jenna jameson 
+eva 
+fuck 
+erotic 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+films 
+assassin 
+films 
+films 
+avanters 
+films nl 
+films nl 
+fotos 
+vista x64 
+software nl 
+software 
+gma 
+bleach 
+vista x64 
+bleach 4 
+bleach episode 4 
+bleach episodes 
+bleach 
+bleach4 
+anime 
+"duck dodgers" 
+"riders in the sky" 
+xxx japan 
+xxx japan 
+xxx japan 
+xxx japan 
+japan 
+fun with dick and jane 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+b eatles 
+search files, youtube and liveleak 
+pigeons 
+pigeons 
+pigeons 
+search files, youtube and liveleak 
+hadley 
+spandau 
+spandau 
+spandau 
+duran 
+rolling stones 
+barbie 
+doors 
+search files, youtube and liveleak 
+pogues 
+doors 
+tony hadley 
+ligabue 
+ligabue 
+rem 
+duran duran 
+spandau 
+springsteen 
+heroes s2e8 fr 
+heroes s2e8 fr 
+heroes s2e8 fr 
+tony hadley 
+heroes fr 
+ligabue 
+u2 
+arcadia 
+red carpet 
+amateurs 
+pavarotti 
+craig david 
+francesco renga 
+a ha 
+a-ha 
+promise land 
+gary kemp 
+hadley 
+celentano 
+ligabue 
+ligabue 
+burial: "near dark" 
+status quo 
+elvis presley 
+elvis presley 
+elvis presley 
+vervelen 
+ernie vervelen 
+tele tubbies 
+tubbies 
+winny the po 
+winny the 
+crysis 
+rockabilly 
+rockabilly 
+katie melua 
+katie melua 
+katie melua 
+prison break 08 
+prison break 08 
+katie melua call of the search 
+call of the search 
+cod4 
+vuurwerk 
+vuurwerk 
+vuurwerk 
+firework 
+firework 
+avril 
+jessica 
+jenna 
+julia 
+fuck 
+nude 
+kids fuck 
+children 
+children 
+kids 
+vervelen 
+foxy lady 
+300 
+orlowski 
+ice t 
+home invasion 
+shrek 3 
+orlowski 
+phil 
+windows xp 
+windows vista 
+discovery channel 
+beatles 
+litfiba 
+rolling stones 
+search files, youtube and liveleak 
+beatles 
+pink floid 
+pynk floyd 
+the exorsist 
+pogues 
+nomadi 
+the exorcist 
+mein neuer freund 
+helgate 
+123 
+biancaneve 
+helraiser 
+hellraiser 
+ita 
+evil death 
+evil dead 
+henger 
+brian adams 
+doors 
+toto 
+toto 
+articolo 31 
+styx 
+styx 
+styx 
+styx 
+articolo 31 
+articolo 31 
+articolo 31 
+elton jon 
+michael porter 
+queen 
+kansas 
+mandriva-linux-2007-spring-one-gnome-cdrom.i586.torrent 
+fica 
+peter druker 
+van kooten en de bie 
+spand 
+spand 
+rory gallagher 
+brian adams 
+brian adams 
+led zeppelin 
+u2 
+doors 
+cute goth chick painful anal 
+cute goth chick anal 
+led zeppelin 
+wrox 
+led zeppelin 
+moody blues 
+van der graf generator 
+hockey 
+ufo 
+procol harum 
+atomic rooster 
+rolling stones 
+game iso 
+deep purple 
+game iso 
+curvead air 
+barry ryan 
+steeleye span 
+blackmore's night 
+yes 
+king crimson 
+search files, youtube and liveleak 
+vanilla fudge 
+gentle giant 
+apocalyptica 
+credence clearvater revival 
+credence clearvater revival 
+credence clearvater revival 
+credence clearvater revival 
+credence clearvater revival 
+moana pozzi 
+gay 
+voorburg 
+heavy metal 
+search files, youtube and liveleak 
+hairsprai 
+hairspray 
+ho voglia di te 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+remove good4 
+remove good4 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+gay 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+voorburg 
+voorburg 
+metal gear solid 
+bdsm 
+voorburg 
+animal 
+queen 
+bullit 
+shocking blue 
+ufo 
+deep purple 
+taiji 
+taichi 
+tai chi 
+animal sex 
+michael porter 
+vba 
+film 
+nudist 
+girl fucking her dog 
+girl fucking her dog 
+fucking her dog 
+animals sex 
+scent of a woman 
+mardi grass 
+chinpo 
+phili 
+zelda 
+elektor 
+elektor 
+red hot 
+elektor 
+red hot 
+audioslave 
+ita 
+fight club 
+animalssex dog 
+animals sex dog 
+realplayer 
+dog porn 
+dog fuck sex 
+borat the movie 
+kayak 
+tinto brass 
+rammstein 
+orlowski 
+renato zero 
+renato zero 
+renato zero 
+laura pausini 
+enya 
+hitzone the best of 2007 
+hitzone the best of 2007 
+hitzone the best of 2007 
+hitzone 
+audio 
+foxy lady 
+casino royal 
+francesco totti 
+francesco totti 
+francesco totti 
+francesco totti 
+francesco totti 
+the invisible 
+simon and garfunkel 
+la leggenda di beowulf 
+matrimonio alle bahamas 
+search files, youtube and liveleak 
+xxx 
+abba 
+hollands 
+ita 
+nederlands 
+ita 
+porn 
+spongebob 
+spongebob 
+fred claus 
+2007 
+2007 
+2007 ita 
+2007 ita 
+2007 ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub-ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+naruto sub ita 
+pink floyd 
+italian 
+italian 
+italian 
+ita 
+album 
+laura pausini 
+laura pausini 
+laura pausini 
+let me think about it 
+lei 
+david amo 
+davida amo 
+david 
+amateur 
+david vendetta 
+thalia 
+search files, youtube and liveleak 
+thalia 
+thalia 
+thalia 
+raggae 
+raggae 
+search files, youtube and liveleak 
+fuck her dog 
+fuck her dog 
+animals sex 
+aria squuirts 
+weeds s01e10 
+woman warrior 
+survivor eye tiger 
+femal warrior 
+female warrior 
+eye tiger 
+fifa 08 
+fifa 08 wii 
+fifa 08 wii 
+amazon warrior 
+rape mpg 
+rape mpg 
+rape mpeg 
+mario e sonic wii 
+hentai 
+amazon warrioe 
+amazon warrior 
+rape mpeg 
+rape mpg 
+amazon 
+female warrior 
+slave nurses 
+"slave nurses" 
+bang bros 
+bang bros 
+monsters of the cock 
+mageweave 
+tsuyoi 
+chiisai 
+japan 
+small 
+dick 
+small dick 
+funny 
+skimpy 
+public 
+tiny 
+weenie 
+kuschelrock 
+animals sex 
+animals sex 
+autocad lt 
+autocad lt 
+lion cloth 
+lioncloth 
+elektor 
+bag on fire 
+eastpak 
+eastpak 
+eastpak 
+eastpak 
+eastpak 
+eastpak 
+rugtas 
+rugtzak 
+rugzak 
+eastpak 
+search files, youtube and liveleak 
+schooltas 
+miusic was my 
+jon miles miusic 
+jon miles miusic 
+underground 
+jon miles la sacrada familia 
+psx 
+anime 
+superbad axxo 
+kruistocht 
+state play 
+prank 
+harem 
+osx86 
+osx 
+tera patrick 
+jenna jameson 
+kardashian 
+caligula 
+kim kardashian sex 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+amazon britta 
+amazons 
+amazons 
+paintshop 
+microsoft 2000 
+manhunters 
+manhunters jessica drake 
+jessica drake 
+boob bangers 
+banger bros 
+pc games 
+pc games 
+bangerbros 
+banger bros 
+fantasy 
+search files, youtube and liveleak 
+windows 
+raemonn 
+fantasy 
+ÑÄ\90½Ð¼Ðº 
+tnmk 
+ray 
+search files, youtube and liveleak 
+american gangster 
+south park s11 
+south park s11e08 
+bi sex 
+animal 
+cavalo 
+horse 
+scarface 
+gay 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke - shine on 
+george duke 
+sex 
+pedo 
+15 
+teens 
+28 days 
+hidden cam 
+white stripes 
+anal 
+rush hour 3 
+microsoft office 2003 
+four-hour 
+four-hour work week 
+live daftpunk 
+live daft punk 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+g25 
+banger bros 
+bangerbros 
+kardashian 
+pizza blow 
+himuro 
+velvet rose 
+velvet rose himuro 
+himuro 
+tesla 
+master of lighting 
+tesla master of lighting 
+musica 
+cd 
+documentary 
+die legende von beowulf 
+vista ita 
+test 
+you tube 
+all that 
+songs 
+songs 
+trace 
+trace 
+trace 
+white ladies 
+winrar password 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+1st base 
+corno 
+corninho 
+corninho 
+corninho 
+corninho 
+corninho 
+marido 
+casais 
+orgia 
+suruba 
+swinger 
+1941hollywood movies 
+old hollywood movies 
+travesti 
+shemale 
+bi sex 
+big 
+dog 
+gay 
+brasil 
+brazil 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+deep purple 
+jovem guarda 
+gay, boy 
+gay 
+bisceglie 
+puglia 
+search files, youtube and liveleak 
+aerofly 
+aerofly 
+andre hazes 
+aerofly pro 
+aerofly pro 
+hardcore 
+charly lownois 
+aerofly pro 
+realflight 
+aeroflight 
+aeroflight 
+aeroflight 
+aeroflight 
+aeroflight 
+aeroflight 
+rape 
+kanaal2 
+lost 
+garfield 
+18 
+18 
+underworld 
+jump 
+aeroflight 
+realflight 
+wii 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx 
+brazil 
+duran 
+http://inferno.demonoid.com:3402/announce 
+indesign cs2 
+indesign cs2 
+asterix 
+search files, youtube and liveleak 
+project runway 
+gay 
+bi sex 
+tranny 
+pussy 
+seventeen 
+masturbate 
+finger 
+search files, youtube and liveleak 
+she male 
+http://fr.youtube.com/watch?v=fysmli570k8 
+http://fr.youtube.com/watch?v=fysmli570k8 
+http://fr.youtube.com/watch?v=fysmli570k8 
+finger 
+http://fr.youtube.com/watch?v=9khzwsytnde 
+sandwich 
+squirt 
+squirt 
+finger 
+milf 
+search files, youtube and liveleak 
+pov 
+milf 
+porn 
+alicia keyes 
+alicia keyes 
+alicia keyes 
+no one 
+cant tell me nothing 
+cant tell me nothing 
+can tell me nothing 
+p.o.v. 
+seventeen 
+masturbate 
+asian 
+winx studio 
+ls 
+winx studio 
+giorni e nuvole 
+crossover 
+cedega 
+codega 
+cedega 
+mark medlock dieter bohlen 
+seal 
+crossover 
+beowulf 
+search all files 
+search all files 
+search all files 
+swinger 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+film 
+the cure 
+sex 
+room 
+visio 
+microsoft 2007 
+microsoft 2007 
+database 
+php 
+search files, youtube and liveleak 
+clayton 
+office for mac 
+office for mac 
+lions lambs 
+anthems 
+ethoria 
+beowulf 
+outlook 
+adult 
+enchanted 
+ita 
+fucking 
+pussy 
+women 
+cartoonsmart 
+porn 
+leben 
+ejac 
+jima 
+vmware 
+diskeeper 
+question mark 
+that's the question 
+tribler 
+fido 
+blunt 
+fido 
+zwartboek 
+les 
+vmware keygen 
+stream 
+hopw i met your mother 
+how i met your mother 
+david guetta 
+benny benassi 
+dj selection 
+search files, youtube and liveleak 
+ass 
+porno 
+j 
+h 
+captivity movie 
+captivity 2007 
+captivity 2007 dvd rip 
+doctor who 
+blake seven 
+blake 7 
+blakes 7 
+blakes 
+blakes 
+search files, youtube and liveleak 
+zero 7 
+home design 
+heroes 9 
+toulouse 
+toulouse 
+joni mitchell 
+arab 
+tunisian 
+sexy 
+anime 
+tech 
+geek 
+chuck 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+http://www.craveonline.com/videos/nsfw/00005963/gangster_sesame_street.html 
+robert frost 
+porno 
+reggiani 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+teens 
+graupner taxie 
+taxie 3 
+john legend 
+marco borsato 
+marco borsato 
+brianna blaze 
+brianna 
+beast fucker 
+animal sex 
+animal sex 
+fifa 2008 
+music 
+animal sex 
+teens for cash 
+music 
+rc airplane 
+real madrid 
+miguel angel munoz 
+miguel angel munoz 
+cubase 4 
+fabio dimi 
+fabiodimi 
+miguel torres 
+contaminato 
+picotto 
+heathcliff and marmaduke 
+picotto 
+half life 
+salieri 
+salieri 
+mario salieri 
+search files, youtube and liveleak 
+spice girls 
+divx ita 
+spiderman 3 italian 
+all 
+albums italian 
+albums 
+single albums 
+albums single 
+2007 
+the simpsons 
+beastiality 
+hymen 
+masturbate 
+sepultura 
+teibalistas 
+ja' sei namorar 
+najib amhali 
+harry potter 
+salieri 
+liberty marian 
+liberty marian 
+marian 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+amore in chat 
+madonna 
+spyware doctor 
+madonna 
+dutch 
+dutch 
+sit 
+sit 
+south park s11e06 
+kiss 
+kiss 
+rock 
+axe 
+http://www.torrent.to/torrent/upload/xxx/398/215398/889-www.torrent.to...lacey%5b1%5d.duvalle.-.pounded.in.the.office.by.a.big.white.cock.torrent 
+sabrina salerno 
+south park s11e07 
+south park s11e07 
+hysteria 
+mark thomas 
+mark thomas 
+mark thomas 
+mark thomas 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dalla pelle al cuore 
+venditti 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+microsoft media 
+video 
+gay 
+bi sex 
+davide sounds 
+il signore dei piselli 
+kuskczak 
+kuszczak 
+kuszczak 
+manchester rules 
+nod 
+rc 
+rc airplane 
+yes 
+search files, youtube and liveleak 
+harry potter 
+dirty 
+evil dead 
+evil death 
+evil daed 
+evil dead 
+santa es miralda 
+war of the worlds 
+frankie gose to holliwood 
+frankie gose to hollywood 
+franky gose to hollywood 
+frankie gose to hollywood 
+hollywood 
+search files, youtube and liveleak 
+frankie goes to hollywood 
+search files, youtube and liveleak 
+boinx 
+fantastic 
+non siamo soli 
+non siamo soli eros 
+eros ramazzotti 
+search files, youtube and liveleak 
+eros ramazzotti non siamo soli 
+eros ramazzotti non siamo soli 
+search files, youtube and liveleak 
+black eyed peas 
+avril 
+rihanna 
+universal business adapter 
+gilmore 
+rape 
+search files, youtube and liveleak 
+ramazzotti 
+resident evil 
+alien vs predoator 
+avp 
+laundia 
+raand 
+randi 
+bitch 
+kylie minogue sex 
+kylie minogue sex 
+lindsay lohan sex 
+avril 
+sexy 
+sex 
+justin timberlake 
+pamela 
+ita 
+italian ld 
+spanish 
+intocht elst 2007 
+epc 
+elster 
+goutier 
+goûtier 
+aarsman 
+ponyclub 
+sinterklaas 
+harry potter 3 
+harry potter 3 
+search files, youtube and liveleak 
+french nudisy colony 
+french nudist colony 
+kokeshi 
+lesbian 
+pedo 
+hitman 
+asian 
+osx86 
+jas 
+jas osx86 
+reign 
+missy 
+deathnote 
+onmitsu 
+search files, youtube and liveleak 
+avril lavigne 
+yui 
+funny 
+funny 
+zrinka 
+amazon zrinka 
+axxo# 
+amazon rose 
+irina 
+amazon julia 
+tall megan 
+super amanda 
+skimpy 
+search files, youtube and liveleak 
+westlife 
+nirvana 
+nirvana 
+. 
+ninfeta 
+teen 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+rap 
+dmx 
+jordan capri 
+disco 
+photoshop 
+donna summers 
+falcon studios 
+lime wire pro 
+dexter avi 
+dexter 2 
+acdc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+axxo 
+search all files 
+masha 
+masha 
+heroes 
+anal 
+over hill far away 
+over hill far away 
+over hill 
+over hill 
+over hill 
+over hill 
+over hill 
+over hill 
+over hill 
+search files, youtube and liveleak 
+cabaret 
+cabaret 
+cabaret 
+the cure 
+the cure 
+film 
+cabaret 
+cabaret 
+cabaret 
+cabaret 
+cabaret 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+bb king 
+[rus] 
+[rus] 
+[rus] 
+[rus] 
+russian 
+nasha russia 
+kamedi klab 
+comedy klab 
+bombalurina 
+teen 
+pulp common people 
+sim city 2000 rush hour 
+leonard cohen 
+simcity 2000 rush hour 
+simcity 2000 rush hour 
+sim city rush hour 
+simcity rush hour 
+simcity rush hour 
+sim city 4 
+animal sex 
+interpol 
+legalizacja xp 
+legalizacja xp 
+prodigy 
+black and white 2 
+mp3 
+albums full 
+albums full 
+sex 
+maarten 
+animal sex 
+maarten 
+nouvelle vague 
+nouvelle vague 
+maarten 
+realflight 
+taggart 
+realflight 
+fms 
+taggart 
+taggart 
+sex 
+padre de familia 
+muchachada nui 
+black and white 2 
+chuck deely 
+chuck deely 
+chuck deely 
+chuck deely 
+chuck deely 
+chuck deely 
+black and white 2 
+maarten 
+black and white 2 
+fucking 
+russian 
+maarten 
+design 
+discovery 
+discovery 
+maya 
+maya tutorial 
+maya 
+digital tutors 
+digital tutors maya 
+digital tutors maya 
+digital tutors maya 
+digital tutors maya 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+russian teen 
+devil may cry 
+shark 
+disney 
+elton john 
+search files, youtube and liveleak 
+xxx 
+japanese 
+japanese 
+drama 
+japanese 
+utada 
+shrek 
+shrek ita 
+search files, youtube and liveleak 
+shrek ita 
+smoking 
+anti-smoking 
+anti-smoking tar 
+smoking 
+no country for old men 
+bruce almighty 
+hana yori dango 
+resident evil 
+teen 
+erotic witch project 
+nihon 
+cat fight 
+ecchi 
+pee 
+stripper 
+pee 
+caribbean 
+brazilian 
+in plain view 
+ghost rider 
+call girl cat fight 
+ipanema 
+girls from ipanema 
+la professoressa di lingue 
+la professoressa di lingue 
+puttane 
+puttane 
+sex 
+lavezzi 
+l 
+g 
+lavezzi 
+napoli 
+napoli 
+sex 
+dragonball 
+brazilian models 
+brazilian girls 
+brazilian carnival 
+captivity 
+juiced 2 
+games 
+games ps2 
+black girls 
+games ps2 ita 
+games ps2 
+games ps2 
+games ps2 
+games ps2 
+napoli 
+lavezzi 
+calcio 
+cakcio 
+calcio 
+c 
+tutte le puntate del capo dei capi 
+il capo dei capi 
+il capo dei capi 
+capo dei capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+capi 
+dragonball gt ita 
+dragonball gt 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt ita 
+dragonball gt 
+dragonball gt 
+dragonball 
+dragonball 
+amy whinehouse 
+amy winehouse 
+president bush 
+bush 
+call duty 
+julia paes 
+juliana paes 
+search all files 
+search all files 
+zrinka 
+tall julia 
+tall zrinka 
+amazon 
+goddess 
+party 
+party 
+amazon 
+amazon kitty 
+mikayla miles 
+beowulf 
+the golden compass 
+axxo 
+queen adrena 
+simpsons 
+bunny's jap 
+dido 
+naruto 
+avira 
+naruto shippuuden 35 
+enya 
+fm wrestling part 4 
+avira premium security suite 
+bbc david attenborough 
+bbc 
+tall kat 
+nathalie 
+nathalie kelley 
+strapon 
+skimpy 
+velvet 
+overalls 
+arrestmovies 
+femdom police 
+"manon thomas" 
+mistress francesca 
+jogos mortais 
+arrest movies 
+arrestmovies 
+jogos mortais 4 
+jogos mortais 
+swinger 
+search files, youtube and liveleak 
+2007 
+teen 
+flash 
+flash slideshow 
+cabaret 
+barely 
+i simpson 
+divx 
+divx ita 
+dvd 
+compagnie pericolose 
+2007 
+divx ita 
+divx ita 
+divx ita 
+divx ita 
+divx ita 
+jlo 
+lopez 
+tornado 
+club mix 
+mix 
+atb 
+atb 
+search files, youtube and liveleak 
+beyond the enemy lines 
+be dazzled 
+pink floyd 
+eagles 
+tarzan and the lost city 
+tarzan and the lost city 
+como ama una mujer 
+como ama una mujer 
+lopez 
+hawkwind 
+latin 
+beowulf 
+search files, youtube and liveleak 
+dvd ita 
+denzel washington 
+denzel washington 
+kevin kosner 
+donne di mafia 
+dvd ita - donne di mafia 
+dvx ita - donne di mafia 
+dvx ita - donne di mafia 
+50 cent 
+50 cent 
+modest mouse 
+beirut 
+afte effects cs3 
+premiere cs3 
+premiere cs3 
+ninja 
+american ninja 
+50 cent curtis 
+julia paes 
+filme 
+search files, youtube and liveleak 
+xxx 
+pbs frontline 
+codecs for wmv 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+jogos mortais 
+teachers 
+young teachers 
+sex teachers 
+blink 
+blink182 
+substitute teacher 
+search files, youtube and liveleak 
+lost 
+treasures 
+vivid 
+osx 
+jennifer biel 
+jessica biel 
+elvis presley 
+elvis presley 
+primus 
+primus 
+dire straits 
+dire straits 
+londoncafe 
+friso 
+luca jurgen 
+elvis presley 
+elvis presley 
+chuck 
+ratatouille 
+rails and ties 
+flight of the living dead outbreak on a plane 
+vampire 
+zombie 
+horror 
+cannibal 
+witch 
+search files, youtube and liveleak 
+dragon 
+gtk 
+reverse 
+exploit 
+windows 
+shellcode 
+virus 
+ghost rider 
+ghost rider ita 
+ita 
+duty 
+ita 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ita 
+ita 
+ita resident 
+ita resident 
+italian 
+ita resident 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+grey's anatomy 
+men in trees 
+men in trees 
+brothers and sisters 
+gillore girls 
+gillore girls 
+gilmore girls 
+scheepsjongens 
+bontekoe 
+bontekoe 
+bontekoe 
+scheepsjongens van bontekoe 
+scheepsjongens van bontekoe 
+playstation 
+pro street 
+divx ita 
+yamyam15 
+xvid ita 
+zelda 
+adobe 
+adobe mac 
+u2 
+anal 
+first anal 
+american gangsters 
+pro rugby 
+final cut 
+you tube 
+google video 
+search files, youtube and liveleak 
+oblivion 
+search files, youtube and liveleak 
+vista 
+oblivion 
+oblivion 
+divx ita 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+madonna 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+oblivion 
+canzoni bambini 
+oblivion 
+oblivion 
+oblivion sn 
+oblivion sn 
+tanto va la gatta al lardo 
+adobe cs premiun mac 
+oblivion sn 
+adobe mac 
+sn 
+photoshop cs3 mac 
+adobe cs3 mac 
+sn 
+sn 
+sn key 
+key sn 
+renato zero 
+keygen+key+sn 
+keygen+key+sn 
+keygen+key+sn 
+keygen+key+sn 
+walt disney 
+keygen+key+sn 
+keygen key sn 
+keygen key sn 
+keygen key sn 
+search files, youtube and liveleak 
+afee 
+cardigans 
+nirvana 
+cardigans 
+toast titanium 
+big fish 
+hidden 
+sanfransisko 
+sanfransisco 
+arie en bastiaan 
+samson en fred 
+search files, youtube and liveleak 
+amature 
+janossi 
+search files, youtube and liveleak 
+amature 
+nikelback 
+nickelback 
+nickelback 
+nickelback rockstar 
+nickelback 
+firefly 
+alicia kays bono 
+alicia kaeys bono 
+alicia keys bono 
+uk garage 
+ms dynamite 
+the hives 
+the smiths 
+black rebel 
+cardigans 
+janossi 
+janossi 
+peter licht 
+rio reisser 
+rio reisser 
+rio reisser 
+cardigans 
+peter 
+a 
+b 
+c 
+d 
+e 
+f 
+g 
+h 
+i 
+j 
+k 
+l 
+l 
+m 
+n 
+o 
+ü 
+p 
+q 
+r 
+s 
+t 
+u 
+para pente 
+high school musical 
+fredro starr 
+sp3 
+xp sp3 
+amature 
+teen 
+teen 
+teen 
+nice 
+almodovar 
+gay 
+stagger 
+stagger 
+boy 
+boy gay 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+goldfrapp 
+search files, youtube and liveleak 
+hellboy 
+search files, youtube and liveleak 
+hellboy 
+gambero 
+raped 
+wii 
+ernst en bobbie 
+ernst en bobbie 
+ernst en bobbie 
+emergency 4 
+heroes 
+i don't wanna be a murderer 
+50-50 
+50-50 
+50-50 
+search files, youtube and liveleak 
+scat sex 
+scat 
+50-50 
+50-50 
+veggie tales 
+bible 
+gina wild 
+bioshock 
+illuminati 
+audio books 
+audiobooks 
+audiobooks 
+appleseed 
+appleseed 2004 
+appleseed 2004 
+appleseed 2004 
+house of flying daggers 
+hero 
+hero jet li 
+endgame 
+italian 
+divx 
+anonymous 
+anonym 
+italian 
+honey bee king2 
+anonymous 
+anonymous christ 
+trex 450 
+divx italian 
+anonymous christ 
+divx ita 
+anonymous christ 
+anonymous christ 
+anonymous christ 
+anonymous christ 
+ita 
+jesus christ 
+scrubs season 7 
+grub girl 
+sex 
+pro street 
+portal 
+postal 
+scrubs season 7 episode 1 
+scrubs season 7 episode 1 
+naruto 
+brian setzer 
+ttc 
+ttc 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ttc 
+ruby 
+ruby 
+matrimonio alle bahamas 
+jonn holmes 
+jon holmes 
+jon holmes 
+michelle 
+sesso 
+sex 
+ebay song 
+cd70 
+bakoma 
+bakoma 
+bakoma 
+bakoma 
+bakoma 
+bakoma 
+goldfrapp 
+crov 
+drawn together 314 
+drawn together s3e14 
+naruto 
+hilton 
+anubis 
+vista 
+anubis 
+jump top 100 
+teen 
+search files, youtube and liveleak 
+loli 
+all 
+daddy 
+sister 
+sist 
+cous 
+cousin 
+coussin 
+ali 
+german 
+all 
+cartoon 
+3d 
+all 
+report 
+report 
+report 
+first 
+girl 
+allµ 
+all 
+media player nl 
+media player 
+bony m 
+mcafee 
+child in time 
+blackout 29 
+blackout 
+these tunes 
+maxizems 
+maxiems 
+pier 
+maxiems 
+dance tunes 
+ice age 
+ice age 
+ice age 
+ice age 
+ice age 2 
+ice age 2 
+ice age 2 
+search files, youtube and liveleak 
+bigtit 
+bigtits 
+e-boox kung-fu 
+e-boox kung-fu 
+e-books kung-fu 
+e-book kung-fu 
+earth and fire 
+e-book kung-fu 
+e-book kung-fu 
+kung-fu e-book 
+kung-fu e-book 
+gangbang 
+kung-fu e-book 
+kung-fu e-book 
+kung-fu e-book 
+kung-fu e-book 
+kung-fu e-book 
+e-book 
+e-book 
+e-book 
+maxiems 
+maxiems 
+e-book 
+maxiems 
+e-book 
+e-book 
+e-book 
+usenext 
+big cock 
+intervention 
+xp-sp3 
+call of duty 
+xp-sp3 
+xp-sp3 
+windows xp-sp3 
+windows xp-sp3 
+windows xp-sp3 
+windows xp-sp3 
+windows xp-sp3 
+beowulf 
+school tycoon 
+remo williams 
+remo williams 
+remo wiliams 
+remo 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sn 
+search files, youtube and liveleak 
+christmas 
+sn 
+s-n 
+s/n 
+s/n 
+vivaldi 
+s/n 
+s/n 
+sample 
+prison break season 3 mpg 
+sample 
+sample 
+sample 
+sample 
+sample 
+prison break season 3 mpg 
+prison break season 3 mpg 
+prison break 
+return to castle wolfenstein 
+xxx 
+return to castle wolfenstein 
+return to castle wolfenstein 
+return to castle wolfenstein 
+tunisia 
+saw 4 
+led zeppelin 
+south park 10 
+south park 
+djumbo 
+djumbo 
+djumbo 
+djumbo 
+djumbo 
+28 
+28 kinderen 
+kinderen voor kinderen 
+ministry of sound 2008 
+lancut 
+anouk 
+sunshine 
+clubland 12 
+lover 
+erotiek 
+kamasutra 
+cd70 
+dubbel d 
+kamasutra fuck fest 2 
+neuken 
+vingeren 
+wet 
+klit 
+dog 
+the nits 
+dance 
+dj jean 
+dj- jean 
+dj 
+crysis 
+xp sp2 
+deus ex 
+dopeman 
+rhinoceros sex 
+human sex 
+amateur sex 
+aikido 
+amateur porno 
+faces of death japanese torture 
+faces of death chinese electro torture 
+faces of death japanese electro torture 
+japanese electro torture 
+chinese electro torture 
+electro torture 
+lolita 
+jan smit 
+boondocks 6 
+worms 
+worms 
+spiderman3 
+worms portugues 
+saturno contro 
+saturno contro dvd 
+rem 
+enema 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+spiderman3 
+spiderman3 
+nederlands 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+** 
+stage 6 
+carmon electra 
+carmon electra 
+carmon electra 
+crysis 
+join hands 
+michael clayton 
+blackhawk down 
+black hawk down 
+cs3 kaygen 
+mac antivirus 
+antivirus mac 
+antivirus 
+sophos mac 
+gay 
+jeff dunham 
+nina conti 
+ventriloquists 
+itunes 
+jennefer lopez 
+bree olson 
+thugi thugi 
+bleach music video 
+manhunt 
+bbc survivors manhunt 
+bernadette 
+equations 
+equations 
+linear equations 
+linear equations 
+linear equations 
+antivirus 
+strip 
+phtoto shop 
+photo shop 
+office 07 
+mother 
+inzest 
+incest 
+mx3 
+mrs 
+hot mom 
+mutter 
+granny 
+. 
+. 
+. 
+nl subs 
+srt 
+nl subs 
+moeder 
+zoon 
+aunt 
+granny 
+grandmother 
+young 
+lesson 
+mariach 
+mariatchi 
+one piece episde 87 
+mariachi 
+mariachi 
+mariachi 
+one piece episde 87 
+one piece episde 87 
+one piece episde 87 
+mexico 
+latin 
+county 
+county 
+county 
+mariachi 
+mariachi 
+mr bean 
+movies 
+audio 
+top 
+mariachi 
+mom 
+country 
+nfs pro street maps 
+nfs pro street car 
+nfs pro street car tuner 
+spss 
+fireplace 
+openhaard 
+manon thomas 
+manon thomas 
+need for speed pro street 
+eminem 
+delifianneio parthenagogeio 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+il capo dei capi 
+sburro fisso 
+bamba 
+search files, youtube and liveleak 
+nomadi 
+lost passage 
+photoshop 
+roxio 
+flashcards 
+benders 
+kanji flashcards 
+jenni lee 
+jenna haze 
+roxio 
+harry potter 
+elton john 
+harry potter dutch 
+elton john 
+elton john duets 
+elton john duets 
+elton john duets 
+duets 
+dutes elton john 
+dutes elton john 
+elton john 
+harry potter dutch 
+88 minutes 
+city navigator 
+psyclon nine 
+spears 
+spears 
+spears 
+hentai 
+japan 
+lolita 
+hussyfan 
+f@ygold 
+r@ygold 
+*.mpg 
+pthc 
+ama10 
+c2joy 
+korean 
+*.* 
+video editing 
+a prova di morte 
+a prova di morte 
+nudes a poppin 
+hitman 
+vangelis 
+thai 
+chinese 
+bangkok 
+threesome 
+outdoor 
+fkk 
+nude 
+rape 
+hooker 
+russian 
+katie meluha 
+seventeen 
+beowulf 
+jeff dunham 
+search files, youtube and liveleak 
+elisabeth 
+13 
+12 
+14 
+15 
+14 
+16 
+deutsch 
+pre teen 
+tesseract 
+*.* 
+redhead 
+culcha candela 
+culcha candela 
+monty phyton 
+trigenics 
+bukkake 
+kraaykamp 
+kitaro 
+gung ho 
+stomu yamasta 
+sinatra tahamata 
+pipo 
+young dumb 
+young dumb cum 
+young cum 
+ratatouille 
+rube goldberg billiards! 
+peru 
+zodiac 
+kayak 
+ekseption 
+fredro starr 
+asian 
+trace 
+sister7 
+patrice pike 
+neil young 
+true colors 
+fredro starr 
+hertzfeldt 
+bitter 
+bitter films 
+everything will be ok 
+topstyle 
+darjeeling 
+il capo dei capi 
+f.e.a.r 
+win xp 
+opensuse 10.3 
+doom 3 
+fear 
+simpson 
+weather 
+xxx 
+xxx 
+xxx 
+call of duty 4 
+xxx 
+search files, youtube and liveleak 
+maps 
+campagin cartog 
+campagin cartographer 
+campagin 
+campaign 
+beowulf 
+dundjinni 
+game 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+audio 
+audio 
+ghost reacon 
+ghost reacon 
+ghost reacon 
+ghost reacon 
+search files, youtube and liveleak 
+preteen 
+arina 
+loli 
+loli 
+lolita 
+arina 
+ls magazine 
+ls land 
+r@ygold 
+pthc 
+ptsc 
+preteen 
+dvdr 
+dvdr pal 
+weihnacht 
+loli 
+pthc 
+arina 
+13yo 
+12yo 
+12yo 
+11yo 
+14yo 
+10yo 
+need for speed 
+le ragazze del coyote ugly 
+movies 
+xxx 
+dragon ball 
+die hard 4 - vivere o morire 
+porno 
+search files, youtube and liveleak 
+films 
+sex 
+nero 
+djavan 
+search files, youtube and liveleak 
+djavan - delirio dos mortais 
+phil collins 
+phil collins - i cannot believe is true 
+phil collins 
+mariah carey 
+fabio junior 
+lenine 
+pedro mariano 
+filmati 
+filmati 
+phil collins 
+djavan 
+phil collins 
+dragonball 
+naruto 
+film 
+distraction 
+mr been 
+search files, youtube and liveleak 
+1900 
+brian setzer 
+distraction 
+lightroom 
+ron paul 
+weeds 
+mr been 
+premiere pro 
+red 
+tribes: vengeance 
+tribes: vengeance 
+tribes: vengeance 
+tribes: vengeance 
+search files, youtube and liveleak 
+1701 a.d. 
+company of heroes 
+crysis 
+halo 2 
+gear of war 
+halo 2 
+jenna jameson 
+britney spears gimme more 
+jenna jameson 
+smash my pc 
+yahoo 
+yahoo 
+higurashi 
+higurashi 
+higurashi kai 
+higurashi kai 01 
+schooldays 
\9c°ç\90�¸ 
\9c°ç\90�¸ã\80\8001 
+æ�äºï¿½Â\88 
+ãŹã³ã\83\80ã  
+ï½Ä\8f½Ã¯½Â\86 
+be yourself 
+ear training 
+python 
+dolly buster 
+lolo ferrari 
+busen 
+quickbooks 
+call of duty 4 
+microsoft games 
+microsoft games 
+games 
+action games 
+action games 
+action games 
+photoshop 
+adobe flash 
+ana 
+osx 
+mac os 
+bionic 
+bionic woman 
+search files, youtube and liveleak 
+acdsee 
+acdsee photo editor 
+bb king 
+"shadows of war" 
+shadows of war 
+motionbuilder 
+motionbuilder 
+tnv 
+chloroform 
+chloroform 
+chloroform 
+john foggerty 
+tom tom 
+tomtom 
+tomtom win 
+erika eleniak 
+tomtom laptop 
+tomtom 
+eleniak 
+my guide 
+my guide 
+my guide 
+myguide 
+myguide 
+rigas sargi 
+rigas sargi 
+map italia 
+triiphlon 
+triiphlon 
+triphlon 
+tribler 
+tribler ita 
+hogtied 
+hogtied 
+tribler 
+hogtied 
+first time 
+sporty girs 
+sporty girls 
+porn 
+sport girls 
+gym girls 
+shemale 
+shemale 
+search files, youtube and liveleak 
+resident eveil 
+urusei 
+ninja 
+ninja scroll 
+caligula 
+bogart 
+humphry bogart 
+tokyo joe 
+bogart 
+rossella 
+james bond 
+godzilla 
+monkey island 
+avatar 
+saving grace 
+immediate music 
+dead or alive 
+ars arcana 
+takeshii 
+dein ex,mein albtraum,mvcd 
+dein ex mein albtraum,mvcd 
+solange du hier bist 
+solange du hier bist,mvcd 
+force majeure 
+american gangster 
+humphry bogart 
+razor 
+u2 
+rigas sargi 
+hitmen 
+il collezionista 
+perdere l'amore 
+perdere l'amore di massimo ranieri 
+shrek 3 
+xxx 
+search files, youtube and liveleak 
+films 
+search files, youtube and liveleak 
+die siedler 
+eros 
+leopard 
+djumbo 
+porno 
+little brunette fuck 
+interraccial porno 
+porno 
+brunette fuck 
+massive attack 
+hatebreed 
+kim 
+dutch 
+nintendo ds 
+the doors 
+nintendo ds 
+nintendo ds roms 
+nintendo ds roms e 
+nintendo ds roms (e) 
+blues brothers 
+dvd blues brothers 
+porno paris hilton 
+celentano 
+adriano celentano 
+adriano celentano 
+jessica may 
+ligabue 
+jessika may porno 
+ligabue 
+porno 
+al rangone 
+private spice 
+titty bianchi 
+titti bianchi 
+psilvia saint 
+silvia saint 
+silvia saint porno 
+titti bianchi 
+vasco rossi 
+vasco rossi 
+vasco rossi 
+company of heroes 
+dragon ball af 
+rigas sargi 
+vdeo 
+hellgate london 
+tom tom 
+tom tom mappe italia 
+tom tom maps 
+tom tom maps italy 
+tom tom maps 
+harry potter 
+harry potter 
+il gladiatore 
+manuale d'amore 2 
+manuale d'amore 2 
+hellgate london 
+empire earth 3 
+the witcher 
+supreme commander 
+supreme commander 
+supreme commander 
+silverfall 
+heroes s02 avd 
+heroes s02 ps3 
+unreal tournament 
+zapper 
+ulead 
+online tv 
+ulead studio 
+machtlos 
+machtlos 
+machtlos 
+machtlos 
+veen oosterhuis 
+oosterhuis 
+von löwen und lämmern 
+beowulf 
+beowulf ger 
+la leon 
+la leon 
+la leon 
+la leon 
+la leon 
+machtlos 
+machtlos 
+across the universe 
+across the universe ger 
+across the universe 
+camel 
+dropkick murphy 
+kelly bundy 
+lost 
+lost mpg 
+lost 
+privite spice 
+gouden kooi 
+gouden kooi stream 
+third man 
+tean steam 
+teen steam 
+teen steam 
+alyssa milano 
+alyssa milano 
+kournikova 
+kournikova 
+kournikova 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+1804 
+office 
+1408 
+stevie wonder 
+abba 
+tilde 
+tildes birojs 
+tildes birojs 
+tildes birojs 
+tildes birojs 
+tildes birojs 
+gladiator 
+saw 4 portugues 
+gladiator 
+gladiator 
+kavel57 
+kavel57 
+kavel57 
+kavel57 
+jodi mitchel 
+jodi mitchell 
+jodi mitchell 
+christy moore 
+christy moore 
+christy moore 
+xuxa 
+tori amos 
+abba 
+tori amos 
+tori amos 
+hbo 
+tori amos 
+dfhdghsgfjfg 
+tori amos rainbow 
+kont 
+keller 
+greg lake 
+sex 
+tori amos rainbow 
+sex 
+sex 
+sex 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porn 
+search all files 
+search all files 
+tori amos 
+counting crows 
+search files, youtube and liveleak 
+kaspersky 
+eset smart security 
+man at work 
+fibra 
+mega mindy 
+panda 
+50 cent 
+alarm zone 
+michael bubble 
+dora the explorer+diego 
+dora the explorer 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pilates 
+sahuleka 
+massada 
+lekcja 3 
+jon miles 
+japan 
+crowded house 
+seal 
+japanese 
+mary winsor 
+bellydance 
+danza del ventre 
+belly 
+belly dance 
+lessons 
+ventre 
+belly 
+pilates 
+pianoman 
+pearl jam 
+billy joel 
+john lennon 
+barletta 
+barletta 
+the orange box 
+portal 
+elton john 
+halloween 
+xbox 360 
+van helsing 
+need for speed pro street xbox 360 
+halloween 
+kontor 
+fetish 
+femdom 
+femdom 
+mistress 
+kavel57 
+beatles 
+child in time 
+kavel57 
+kavel57 
+matrix 
+universum 
+hush 
+deep purple 
+strapon 
+search files, youtube and liveleak 
+exception 
+.mpg animal sex 
+.mpg sex 
+bubble 
+exseption 
+exeption 
+the fifth of beethoven 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+piss 
+exception 
+u2 
+search files, youtube and liveleak 
+hazes 
+sony ericsson video converter 1.0 
+sony ericsson video converter 1.0 
+ccredo 
+search files, youtube and liveleak 
+xander b 
+xander de 
+oblivion 
+english dictionary 
+dictionary 
+house 
+no cd key oblivion 
+black sabbat 
+no cd key oblivion 
+cd key oblivion 
+cd key oblivion 
+oblivion 
+english-latvian 
+latvian 
+latvian-english 
+latvian-english 
+femdom 
+femdom 
+femdom 
+femdom 
+femdom 
+alice in chains 
+cartoon 
+alice in chains 
+beatles 
+alice in chains 
+rage against the maschine 
+rage against the machine 
+rage against the machine 
+dropkick murphy 
+dropkick murphys 
+femdom 
+rowan heze 
+rowan heze 
+rowan heze 
+rowan heze 
+latvian 
+u2 
+u2 
+u2 
+u2 
+windows vista 
+tildes birojs 
+salieri 
+u2 
+flogging molly 
+flogging molly 
+u2 
+u2 
+blof 
+blof 
+blof 
+porno 
+dictionary 
+dictionary 
+dictionary 
+firpo 
+tango 
+free english 
+english 
+eurotalk 
+eurotalk 
+rigas sargi 
+oxford 
+cnn 
+rigas sargi 
+rigas sargi 
+rigas sargi 
+search files, youtube and liveleak 
+matrix 
+overture 
+hitman 
+big 
+mewtwo 
+anal 
+computer cz 
+friends bloopers 
+after effect 
+photo shop for mac 
+friends bloopers 
+friends bloopers 
+friends bloopers 
+space 1999 
+photo shop plugin 
+pokemon the movie 
+queen 
+pokemon 
+i'll be there for you 
+mistress 
+mistress 
+pokemon 
+pokemon 
+pokemon 
+pokemon 
+pokemon 
+mewtwo 
+queen 
+friends 
+search files, youtube and liveleak 
+friends bloopers 
+friends serie 
+search files, youtube and liveleak 
+mario salieri 
+europe countdown 
+teresa orlowski 
+la terza madre 
+cumparsita 
+xzibit 
+ken lorela 
+matrix 
+la terza madre 
+300 
+ghost reacon 
+ghost reacon 
+garmin 
+osvaldo pugliese 
+la cumparsita 
+all 
+amalia rodriguez 
+london 
+englsh-latvian 
+english-latvin 
+english-latvin 
+english-latvin 
+english-latvin 
+search files, youtube and liveleak 
+english 
+cnn 
+antivirus 
+pervers 
+microsoft word 
+gladoator 
+gladiator 
+matrix 
+abba 
+abba 
+abba 
+vivaldi 
+vivaldi 
+mozart 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+manon thomas 
+mr been 
+spears 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+dvd 
+dvd films 
+dvd 
+call of duty 
+search files, youtube and liveleak 
+la terza madre 
+search files, youtube and liveleak 
+delphi 
+risk 
+la terza madre 
+arm7 
+arm7 
+delphi 
+il nascondiglio 
+acdc 
+gufi 
+il nascondiglio 
+il nascondiglio 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+pupi avati 
+pupi avati 
+pupi avati 
+pupi avati 
+anal 
+delphi 
+delphi 7 
+delphi 7 update 
+d programming 
+vista 
+upskirt 
+bisexsual 
+secret 
+crowded house 
+spirerman3 
+search files, youtube and liveleak 
+battlestar 
+battlestar 
+gladiator 
+young asians lesbian 
+young asians lesbian 
+asians lesbian 
+asians lesbian 
+gladiator 
+search files, youtube and liveleak 
+anima porn 
+anima 
+cold cases 
+hentia 
+cold case 
+cartoon 
+gladiator 
+sex 
+saw2 
+porno 
+cartoon 
+xxx 
+porno 
+xxx 
+xxx 
+xxx 
+xxx 
+bikini 
+bikini 
+search files, youtube and liveleak 
+2pac 
+pthc 
+chinese 
+chinese 
+star dust 
+lost 
+syx 
+syx action sports 
+resident evil 
+law and order 
+prokuplje 
+axxo 
+planet terror pal 
+planet terror pal 
+planet terror pal 
+planet terror 
+death proof 
+beck 
+youngest 
+teens 
+teens 
+teens 
+teens 
+teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+young teens 
+eddie murphy 
+rigas sargi 
+rigas sargi 
+rigas sargi 
+young teens 
+young teens 
+teens 
+search files, youtube and liveleak 
+shemale 
+shemale 
+smart dating 
+call of duty 4 ita 
+ass 
+pissing 
+uitzending gemist 
+sexparty 
+pee 
+amy whinehouse 
+amy winehouse 
+moana pozzi 
+luca damiano 
+photomagico 
+dieskau 
+schuberts lieder 
+search files, youtube and liveleak 
+stephen king 
+the stand 
+no country for old men 
+linkin park 
+cubase 
+search files, youtube and liveleak 
+keng leiden 
+kunst en genoegen leiden 
+kunst en genoegen leiden 
+ilse delange 
+microsoft word 
+string 
+ilse delange 
+wii rockband 
+spanked 
+spanking 
+sybian 
+sybian 
+sex 
+sex 
+sex 
+sex 
+sex 
+stephen king 
+tv 
+ratatui 
+majewski 
+rozmowy w tloku 
+eva 
+rozmowy w tÅÂ\82oku 
+bryce 
+cnn 
+lock stock barrels 
+rear window 
+vn helsing 
+van helsing 
+across the universe 
+across the universe 
+ratatouille 
+love actually 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+high scholl musical 2 
+high scholl musical 2 
+high scholl musical 2 
+high scholl musical 2 
+juiced 2 ps2 
+juiced 2 ps2 
+juiced 2 ps2 
+futurama bender 
+nero 8 
+zecchino 
+scat 
+strap on 
+exploited 
+exploited 
+poques 
+the dubliners 
+the dubliners 
+strap on 
+room 1408 
+the dubliners 
+flogging molly 
+intervista col vampiro 
+angel 
+tera patrick 
+jenna jameson 
+soldierboy 
+soja boy 
+nl subs 
+emmanuel jal 
+release therepy 
+ludacris 
+ludacris 
+irish 
+hilton 
+shrek terzo 
+shrek terzoita 
+mohaa 
+bound 
+kidnapped 
+kidnapped 
+forced] 
+forced 
+bondage 
+delphi 
+delphi 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+number 23 ita 
+number 23 divx 
+sonia eyes 
+search files, youtube and liveleak 
+eagels 
+wind surf 
+la terza madre 
+wind surf 
+music all 
+giochi 
+blood rayne 
+cats 
+rescue dawn 
+wind surf 
+wind surf 
+wind surf 
+bto 
+spiderman 3 ita 
+rescue dawn 
+alpinismo 
+alpinismo 
+superman returns ita 
+pirati dei caraibi ai confini del mondo 
+search files, youtube and liveleak 
+mp3 
+vasco 
+dancing the night away 
+dancing the night away 
+dancing the night away 
+dancing the night away 
+dancing the night away 
+dancing 
+vasco 
+dancing the night 
+rescue dawn 
+knocked up nl sub 
+knocked up nl sub 
+knocked up nl sub 
+knocked up nl sub 
+knocked up nl sub 
+knocked up 
+knocked up 
+arm7 
+rescue dawn 
+wristcutters 
+john denver 
+jannacci 
+jannacci 
+kate holmes 
+holmes 
+i gufi 
+rescue dawn 
+splash 
+splashid 
+saw 
+horror 
+prison break 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+sex 
+movie 
+movie dutch subs 
+movie dutch subs 
+movie dutch subs 
+movie dutch subs 
+movie dutch subs 
+movie dutch subs 
+hddvd 
+movie dutch 
+movie dutch 
+gaston en leo 
+schoolgirl 
+taxi driver 
+schindler's list 
+husker du 
+pink floyd 
+heroes 
+ashwin navin 
+ashwin navin 
+osx 
+osx10.4 
+osx 10.4 
+osx 10.4 
+osx 10.4 
+osx 
+mac tiger 
+nfl 
+boxing 
+boxing girls 
+bears 
+private gladiator 
+atmega 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+stanford 
+stanford 
+stanford 
+yes 
+300 
+daily show 
+bridge to ter 
+bridge to ter 
+picture 
+search files, youtube and liveleak 
+"afghan knights" 
+gay 
+tom waits 
+xxx 
+jungle 
+naked 
+nakednews 
+japan 
+rtvi 
+slut 
+search files, youtube and liveleak 
+pro evolution soccer 
+elisa 
+vasco 
+vasco 
+vasco 
+vasco rossi 
+vasco rossi 
+.mp3 
+gianni morandi 
+ad-aware 
+codegear borland 
+search files, youtube and liveleak 
+borland 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+ad-awarespybot 
+teenmodel 
+spybot 
+teenmodel 
+pornbot 
+emule 
+delphi 
+delphi 
+teenmodel 
+teenmodels 
+creampie 
+teenmodels 
+ella teenmodel 
+jan smit 
+nederlands talig 
+gta 
+nederlands talig 
+nederlands talig 
+genuine advantage crack 
+jazz 
+gossip 
+ella teenmodel 
+van allen 
+van hallen 
+van hallen 
+doors 
+cry 
+session 
+meet 
+models 
+ashampoo 
+ashampoo aio 
+aio 
+pamela 
+pim sleur 
+troia 
+whore 
+ass 
+assassin creed for pc 
+assassin creed 
+mp3 
+adobe all in one 
+adobe all in one 
+ashampoo all in one 
+search files, youtube and liveleak 
+davilex 
+davilex 
+ad aware 
+lavasoft 
+school 
+alien 
+scooter 
+scooter 
+school 
+anna kurnikov 
+search files, youtube and liveleak 
+delphi2005 
+delphi 
+poweriso 3.8 
+premiere pro cs3 crack 
+delphi 
+nijlpaard sex 
+shemal sex 
+dildo sex 
+music 
+trubble 
+trubble 
+truble 
+clear 
+fire 
+giochi pc 
+david bowie 
+trade 
+trade 
+queen's of the stone age 
+queen's of the stoneage 
+queen's 
+queen 
+slayer 
+metallica 
+drupal 
+*.mp3 
+negramaro 
+pulp 
+earlier 
+head 
+tortas 
+wendy 
+king singers 
+zarautz 
+james galway 
+macabre 
+vocal 
+a capella 
+montezuma 
+top5 
+search files, youtube and liveleak 
+harry potter 
+venditti 
+search all files 
+26.11.2007 
+26.11.2007 
+top 5 
+globe 2006 
+top 10 
+ricky rubio 
+olimpiade patinador 
+olimpiadas patinaje 
+olimpiade patinaggio 
+lazy town 
+top 5 
+nudes 
+milf 
+search files, youtube and liveleak 
+obrigado brazil 
+history channel 
+yo yo ma brazil 
+sex 
+tragedy 
+gossip 
+sonic youth 
+sonic 
+soul 
+bling 
+blues 
+gary moore 
+more 
+bio 
+windows vista nrg 
+windows vista 
+bling fling trick 
+sex 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+sinterklaas 
+wolfetones 
+the wolfe tones 
+tool 
+earth 
+zwarte piet 
+sinterklaasliedjes 
+schindler's list 
+sinterklaas 
+bling fling trick 
+game 
+sexy games 
+sexy game 
+xxx game 
+xxx game 
+xxx game 
+ita 
+sonic youth 
+anime ita 
+i griffin ita 
+heroes 
+heroes ita 
+phil collins 
+top 10 
+champions 
+messi gol 
+messi maradonna 
+divx ita 
+la terza madre 
+un matrimonio alle bahamas 
+rihanna 
+matrimonio alle bahamas 
+la terza madre 
+encatada 
+film ita 
+valeria marini 
+jenna 
+maria 
+maria 
+sap 
+search files, youtube and liveleak 
+hyderabad 
+hyderabad sex 
+hyderabad party 
+trace 
+win trace 
+mistress 
+domina 
+trampling 
+heels 
+femdom 
+female 
+female piss 
+golden shower 
+dragon 
+dragonball 
+halo 3 
+high heels 
+halo 3 
+halo 3 
+halo 3 
+halo 3 
+halo 3 
+high heels 
+halo 3 
+halo 3 
+pes 2008 
+pes 2008 
+stiletto 
+call of duty 4 
+call of duty 4 
+call of duty 4 
+call of duty 4 
+call of duty 4 
+call of duty 4+ 
+pes 2008 
+pes 2008 
+halo 3 
+halo 3 
+stiletto sex 
+halo 3 
+halo 3 
+halo 
+halo 1 
+halo 1 
+halo 1 
+halo 1 
+halo 1 
+halo 1 
+halo 1 
+halo 
+sexy heels 
+sexy heels 
+per un pugno di dollari 
+sum 41 
+sum 41 
+vasco 
+porno 
+sonic 
+sonic 
+sonic 
+sonic youth 
+sonic youth 
+queen 
+queen 
+*.cdt 
+alkohol120 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+porn 
+ero 
+la leggenda del lago maledetto 
+la leggenda del lago 
+non son degno di te 
+oblivion 
+per un pugno di dollari 
+per un pugno di dollari 
+per un pugno di dollari 
+oblivion 
+nicola di bari 
+dieci comandamenti 
+dieci comandamenti 
+mina 
+mina 
+mina 
+la guerra dei ross 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 3 
+spederman 
+spederman 
+spederman 
+spederman 
+spederman 
+oblivion 
+oblivion 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+gheorghe zafir 
+nel sole 
+massimo di cataldo 
+massimo di cataldo 
+massimo di cataldo 
+giacomo rondinella 
+giacomo rondinella 
+toto 
+toto 
+toto 
+toto 
+toto 
+gianni morandi 
+gianni morandi 
+gianni morandi 
+gianni morandi 
+*.avi 
+spanish 
+film ita 
+will smith 
+control freak 
+will smith 
+control freak 
+control freak 
+control freak 
+control freak 
+ready steady go 
+film ita 
+film 
+film 
+bowie 
+bowie 
+divx ita 2007 
+dj lory 
+film kim ki duk 
+film francis ford coppola 
+asian 
+film francis ford coppola 
+film ita 
+zombie nacion 
+mozart 
+zombie nacion 
+a 
+red zone 
+red zone 
+red zone 
+red zone 
+moko 
+film 
+hentai 
+adam proll – hummel 
+adam proll – hummel 
+hummel 
+hummel 
+afroman 
+fica 
+hentai 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+metallica 
+canzoni italiane 
+canzoni italiane 
+red zone 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+canzoni italiane mp3 
+mamma mp3 
+mamma mp3 
+mamma mp3 
+girl of next door 
+ufo 
+costantin 
+benny jnn 
+naruto 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+benny hinn 
+search files, youtube and liveleakcreepers 2 
+jeepers creepers 2 
+flauto 
+flauto 
+jeepers creepers 
+fisarmonica 
+fisarmonica 
+mini 
+erotic 
+sexy 
+l esorcista 
+l'esorcista 
+nude 
+l esorcista film 
+film drammatico 
+vertigo 
+vertigo alfred hitchcock 
+film 
+film 
+film ita 
+naked 
+film 
+search files, youtube and liveleak 
+la terra madre 
+televisione 
+televisione 
+televisione 
+search files, youtube and liveleak 
+a guide to the penis for men and women 
+programmi utili 
+search files, youtube and liveleak 
+pink floyd 
+programmi 
+antivirus 
+antivirus +crak 
+antivirus +crak 
+commedia 
+commedia 
+commedia 
+animali 
+animali 
+sex 
+sex 
+g-data 
+gdata 
+tinto brass 
+chavez 
+brigitta bulgari 
+chavez 
+eva henger 
+kaspersky 
+gdata 
+antiviruskit 
+avk 
+f-secure 
+masturbation 
+asian 
+solo 
+lesbians 
+search files, youtube and liveleak 
+os intocaveis dublado 
+tropa de elite 
+orgia 
+witches 
+witches 
+witches 
+witches 
+witches 
+witches 
+witches 
+witches 
+resident evil 
+orgy 
+kat 
+cradle of filth 
+cradle of filth 
+cradle of filth 
+avp2 
+avp2 
+avp2 
+enchanted 
+k9 animalsex 
+rocco sigfredi 
+film porno 
+er medici in prima linea 
+endgame 
+star trek 
+iphone 
+hgtv 
+leopard 
+foot 
+feet 
+nipples 
+legs 
+thigh 
+thighs 
+thighs 
+tongues 
+kissing 
+kissing 
+mature 
+over 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+video porno 
+video porno 
+video porno 
+j neto 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+search files, youtube and liveleak 
+xxx files 
+xxx 
+xxx 
+xxx 
+teens 
+teens 
+teens 
+preteens 
+schwester 
+schwester 
+schwester 
+sister 
+porn 
+teens 
+teens 
+teens porn 
+preteens 
+adult 
+teen sex 
+porn teens 
+fuking video 
+www.yahoo.com 
+teens picture gallery 
+adult teen movies 
+300 
+rigas sargi 
+the mist 
+i am legend 
+love my way 
+film italiani 
+film ita 
+template monster 
+van 
+hindi 
+hindi 
+hindi 
+hindi 
+hindi 
+hindi 
+english 
+english 
+english 
+english 
+methini 
+pat 
+jam session 
+session 
+playboy 
+jazz 
+planet earth 
+kombi 
+kombi 
+bergkamp 
+the string quartet linkin park 
+search files, youtube and liveleak 
+moggl 
+akira 
+u2 
+akira 
+akira 
+u2 
+u2 
+akira 
+ghost in the shell 
+paris 
+.mov 
+groningen 
+le prix a payer dvd 
+prostreet 
+nancy drew 
+nancy drew fr 
+tioz 
+nancy drew fr iso 
+tioz 
+tioz 
+tioz 
+tioz 
+nancy drew fr 
+los 
+tios 
+tioz 
+les enquetes de nancy drew 
+les enquetes de nancy 
+les enquetes de 
+tioz 
+semantic web 
+ratatuj 
+bdsm 
+ratatopuille 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+mckenzie lee 
+carmen luvana 
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast_msg.bat b/instrumentation/next-share/BaseLib/Test/test_buddycast_msg.bat
new file mode 100644 (file)
index 0000000..dcc45e9
--- /dev/null
@@ -0,0 +1,9 @@
+set PYTHONPATH=..\..\r
+\r
+python test_buddycast_msg.py singtest_good_buddycast3\r
+python test_buddycast_msg.py singtest_good_buddycast4\r
+python test_buddycast_msg.py singtest_good_buddycast6\r
+python test_buddycast_msg.py singtest_bad_all\r
+python test_buddycast_msg8plus.py singtest_all_olproto_ver_current\r
+python test_buddycast_msg8plus.py singtest_all_olproto_ver_11\r
+python test_buddycast_msg8plus.py singtest_all_olproto_ver_8\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast_msg.py b/instrumentation/next-share/BaseLib/Test/test_buddycast_msg.py
new file mode 100644 (file)
index 0000000..0049631
--- /dev/null
@@ -0,0 +1,496 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import unittest
+import os
+import sys
+import time
+import socket
+from BaseLib.Core.Utilities.Crypto import sha
+from traceback import print_exc
+from types import StringType, ListType, DictType, IntType
+import tempfile
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+import btconn
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.simpledefs import *
+
+DEBUG=True
+
+
+class TestBuddyCastMsg(TestAsServer):
+    """ 
+    Testing BUDDYCAST message of BuddyCast extension V1+2+3
+    
+    Note this is based on a reverse-engineering of the protocol.
+    Source code of the specific Tribler release is authoritative.
+    """
+    
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        # BuddyCast
+        self.config.set_buddycast(True)
+        self.config.set_start_recommender(True)
+        
+        fd,self.superpeerfilename = tempfile.mkstemp()
+        os.write(fd,'')
+        os.close(fd)
+        self.config.set_superpeer_file(self.superpeerfilename)
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())        
+        self.myhash = sha(self.mypermid).digest()
+
+        # Give Tribler some download history
+        print >>sys.stderr,"test: Populating MYPREFERENCES table"
+        self.myprefdb = self.session.open_dbhandler(NTFY_MYPREFERENCES)
+        data = {'destination_path':'.'}
+        infohashes = self.create_good_my_prefs(self,btconn.current_version)
+        for i in range(0,len(infohashes)):
+            commit = (i == len(infohashes)-1) 
+            self.myprefdb.addMyPreference(infohashes[i], data, commit=commit)
+
+        # Give Tribler some peers
+        print >>sys.stderr,"test: Populating PEERS table"
+        self.peerdb = self.session.open_dbhandler(NTFY_PEERS)
+        past = int(time.time())-1000000000
+        peers = self.create_good_random_peers(btconn.current_version,num=200)
+        
+        peers = []
+        
+        for i in range(0,len(peers)):
+            peer = peers[i]
+            peer.update({'last_seen':past, 'last_connected':past})
+            del peer['connect_time']
+            peer['num_torrents'] = peer['nfiles'] 
+            del peer['nfiles']
+            commit = (i == len(peers)-1)
+            self.peerdb.addPeer(peer['permid'], peer, update_dns=True, update_connected=True, commit=commit)
+
+
+    def tearDown(self):
+        """ override TestAsServer """
+        TestAsServer.tearDown(self)
+        try:
+            os.remove(self.superpeerfilename)
+        except:
+            print_exc()
+
+
+    #Arno, 2010-02-16: we now kick v2 peers, so can't test anymore.
+    #def singtest_good_buddycast2(self):
+    #    self.subtest_good_buddycast(2)
+        
+    def singtest_good_buddycast3(self):
+        """ I want a fresh Tribler for this """
+        self.subtest_good_buddycast(3)
+        
+    def singtest_good_buddycast4(self):
+        """ I want a fresh Tribler for this """
+        self.subtest_good_buddycast(4)
+        
+    def singtest_good_buddycast6(self):
+        """ I want a fresh Tribler for this """
+        self.subtest_good_buddycast(6)
+
+    def singtest_bad_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occured in the traceback it prints.
+        """
+        self.subtest_bad_not_bdecodable()
+        self.subtest_bad_not_dict1()
+        self.subtest_bad_not_dict2()
+        self.subtest_bad_empty_dict()
+        self.subtest_bad_wrong_dict_keys()
+        self.subtest_bad_buddycast_simple()
+        self.subtest_bad_taste_buddies()
+        self.subtest_bad_random_peers()
+
+    #
+    # Good BUDDYCAST
+    #
+    def subtest_good_buddycast(self,oversion):
+        """ 
+            test good BUDDYCAST messages
+        """
+        print >>sys.stderr,"test: good BUDDYCAST",oversion
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,myoversion=oversion)
+        msg = self.create_good_buddycast_payload(oversion)
+        s.send(msg)
+
+        s.b.s.settimeout(60.0)
+        try:
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: good BUDDYCAST: Got reply",getMessageName(resp[0])
+                if resp[0] == BUDDYCAST:
+                    break
+                elif resp[0] == GET_METADATA:
+                    self.check_get_metadata(resp[1:])
+                elif resp[0] == KEEP_ALIVE:
+                    if oversion >= 3:
+                        self.check_keep_alive(resp[1:])
+                    else:
+                        print >> sys.stderr,"test: Tribler sent KEEP_ALIVE, not allowed in olproto ver",oversion
+                        self.assert_(False)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply with BUDDYCAST message"
+            self.assert_(False)
+
+        self.check_buddycast(resp[1:],oversion)
+        time.sleep(10)
+        # the other side should not have closed the connection, as
+        # this is all valid, so this should not throw an exception:
+        s.send('bla')
+        s.close()
+
+    def create_good_buddycast_payload(self,oversion):
+        d = self.create_good_buddycast(oversion)
+        return self.create_payload(d)
+        
+    def create_good_buddycast(self,oversion):
+        self.myprefs = self.create_good_my_prefs(oversion)
+        tastebuddies = self.create_good_taste_buddies(oversion)
+        randompeers = self.create_good_random_peers(oversion)
+        recentcoll = self.create_good_recently_collected_torrents(oversion)
+        d = {}
+        d['ip'] = '127.0.0.1'
+        d['port'] = 481
+        d['name'] = 'Bud Spencer'
+        d['preferences'] = self.myprefs
+        d['taste buddies'] = tastebuddies 
+        d['random peers'] = randompeers
+        
+        if oversion >= 3:
+            d['connectable'] = True
+        
+        if oversion >= 4:
+            d['collected torrents'] = recentcoll
+        
+        if oversion >= 6:
+            d['npeers'] = 3904
+            d['nfiles'] = 4027
+            d['ndls'] = 4553
+
+        #print >>sys.stderr,"test: Sending",`d`
+        
+        return d
+
+    def create_good_my_prefs(self,oversion,num=50):
+        p = []
+        for i in range(0,num):
+            infohash = chr(ord('a')+i) * 20
+            p.append(infohash)
+        return p
+
+    create_good_recently_collected_torrents = create_good_my_prefs
+
+    def create_good_taste_buddies(self,oversion):
+        tbs = []
+        for i in range(0,10):
+            tb = self.create_good_peer(i,oversion)
+            tbs.append(tb)
+        return tbs 
+
+    def create_good_random_peers(self,oversion,num=10):
+        tbs = []
+        for i in range(0,num):
+            tb = self.create_good_peer(i,oversion)
+            tbs.append(tb)
+        return tbs 
+        
+    def create_good_peer(self,id,oversion):
+        d = {}
+        d['permid'] = 'peer '+str(id)
+        d['ip'] = '192.168.0.'+str(id)
+        d['port'] = 7762+id
+        d['connect_time'] = int(time.time())
+
+        if oversion <= 2:
+            d['age'] = 0
+            
+        if oversion <= 3:
+            d['preferences'] = self.create_good_my_prefs(oversion,num=10)
+        else:
+            d['similarity'] = 1
+
+        if oversion >= 6:
+            d['oversion'] = btconn.current_version
+            d['nfiles'] = 100+id
+        
+        return d
+
+    def create_payload(self,r):
+        return BUDDYCAST+bencode(r)
+
+    def check_buddycast(self,data,oversion):
+        d = bdecode(data)
+        
+        print >>sys.stderr,"test: Got BUDDYCAST",d.keys()
+        #print >>sys.stderr,"test: Got CONTENT",`d`
+        
+        self.assert_(type(d) == DictType)
+        self.assert_('ip' in d)
+        self.assert_(type(d['ip']) == StringType)
+        self.assert_('port' in d)
+        self.assert_(type(d['port']) == IntType)
+        self.assert_('name' in d)
+        self.assert_(type(d['name']) == StringType)
+        self.assert_('preferences' in d)
+        self.check_preferences(d['preferences'],oversion)
+        self.assert_('taste buddies' in d)
+        self.check_taste_buddies(d['taste buddies'],oversion)
+        self.assert_('random peers' in d)
+        self.check_random_peers(d['random peers'],oversion
+                                )
+        if oversion >= 3:
+            self.assert_('connectable' in d)
+            #print >>sys.stderr,"CONNECTABLE TYPE",type(d['connectable'])
+            self.assert_(type(d['connectable']) == IntType)
+        if oversion >= 4:
+            self.assert_('collected torrents' in d)
+            self.check_collected_torrents(d['collected torrents'],oversion)
+        if oversion >= 6:
+            self.assert_('npeers' in d)
+            self.assert_(type(d['npeers']) == IntType)
+            self.assert_('nfiles' in d)
+            self.assert_(type(d['nfiles']) == IntType)
+            self.assert_('ndls' in d)
+            self.assert_(type(d['ndls']) == IntType)
+
+    def check_preferences(self,p,oversion):
+        self.assert_(type(p) == ListType)
+        self.assert_(len(p) <= 50)
+        for infohash in p:
+            self.check_infohash(infohash)
+            
+    check_collected_torrents = check_preferences
+            
+    def check_infohash(self,infohash):
+        self.assert_(type(infohash) == StringType)
+        self.assert_(len(infohash) == 20)
+
+    def check_taste_buddies(self,peerlist,oversion):
+        return self.check_peer_list(peerlist,True,oversion)
+    
+    def check_random_peers(self,peerlist,oversion):
+        return self.check_peer_list(peerlist,False,oversion)
+
+    def check_peer_list(self,peerlist,taste,oversion):
+        self.assert_(type(peerlist) == ListType)
+        for p in peerlist:
+            self.check_peer(p,taste,oversion)
+
+    def check_peer(self,d,taste,oversion):
+        self.assert_(type(d) == DictType)
+        self.assert_('permid' in d)
+        self.assert_(type(d['permid']) == StringType)
+        self.assert_('ip' in d)
+        self.assert_(type(d['ip']) == StringType)
+        self.assert_('port' in d)
+        self.assert_(type(d['port']) == IntType)
+        self.assert_('connect_time' in d)
+        self.assert_(type(d['connect_time']) == IntType)
+
+        if oversion <= 3 and taste:
+            self.assert_('preferences' in d)
+            self.check_preferences(d['preferences'],oversion)
+        
+        if oversion >= 4:
+            self.assert_('similarity' in d)
+            self.assert_(type(d['similarity']) == IntType)
+
+        if oversion >= 6:
+            if 'oversion' in d:
+                # Jie made this optional, only if peer has enough collected files
+                # its record will contain these fields
+                self.assert_(type(d['oversion']) == IntType)
+                self.assert_('nfiles' in d)
+                self.assert_(type(d['nfiles']) == IntType)
+            
+
+    def check_get_metadata(self,data):
+        infohash = bdecode(data)
+        self.check_infohash(infohash)
+        
+        # Extra check: he can only ask us for metadata for an infohash we
+        # gave him.
+        self.assert_(infohash in self.myprefs)        
+
+    def check_keep_alive(self,data):
+        self.assert_(len(data) == 0)
+
+    #
+    # Bad buddycast
+    #    
+    def subtest_bad_not_bdecodable(self):
+        self._test_bad(self.create_not_bdecodable)
+
+    def subtest_bad_not_dict1(self):
+        self._test_bad(self.create_not_dict1)
+
+    def subtest_bad_not_dict2(self):
+        self._test_bad(self.create_not_dict2)
+
+    def subtest_bad_empty_dict(self):
+        self._test_bad(self.create_empty_dict)
+
+    def subtest_bad_wrong_dict_keys(self):
+        self._test_bad(self.create_wrong_dict_keys)
+
+    def subtest_bad_buddycast_simple(self):
+        methods = [
+            self.make_bad_ip,
+            self.make_bad_port,
+            self.make_bad_name,
+            self.make_bad_preferences,
+            self.make_bad_collected_torrents]
+        for method in methods:
+            print >> sys.stderr,"\ntest: ",method,
+            self._test_bad(method)
+        
+        
+    def make_bad_ip(self):
+        d = self.create_good_buddycast(btconn.current_version)
+        d['ip'] = 481
+        return self.create_payload(d)
+
+    def make_bad_port(self):
+        d = self.create_good_buddycast(btconn.current_version)
+        d['port'] = '127.0.0.1'
+        return self.create_payload(d)
+
+    def make_bad_name(self):
+        d = self.create_good_buddycast(btconn.current_version)
+        d['name'] = 481
+        return self.create_payload(d)
+    
+    def make_bad_preferences(self):
+        d = self.create_good_buddycast(btconn.current_version)
+        d['preferences'] = 481
+        return self.create_payload(d)
+
+    def make_bad_collected_torrents(self):
+        d = self.create_good_buddycast(btconn.current_version)
+        d['collected torrents'] = 481
+        return self.create_payload(d)
+
+        
+    def subtest_bad_taste_buddies(self):
+        methods = [
+            self.make_bad_tb_not_list,
+            self.make_bad_tb_list_not_dictelems,
+            self.make_bad_tb_list_bad_peer]
+        for method in methods:
+            d = self.create_good_buddycast(btconn.current_version)
+            d['taste buddies'] = method()
+            func = lambda:self.create_payload(d)
+            
+            print >> sys.stderr,"\ntest: ",method,
+            self._test_bad(func)
+
+    def make_bad_tb_not_list(self):
+        tbs = 481
+        return tbs
+        
+    def make_bad_tb_list_not_dictelems(self):
+        tbs = []
+        for i in range(0,50):
+            tbs.append(i)
+        return tbs
+        
+    def make_bad_tb_list_bad_peer(self):
+        tbs = []
+        for i in range(0,50):
+            tbs.append(self.make_bad_peer())
+        return tbs
+
+    def make_bad_peer(self):
+        d = {}
+        d['permid'] = 'peer 481'
+        # Error is too little fields. 
+        # TODO: test all possible bad peers
+        
+        return d
+
+
+    def subtest_bad_random_peers(self):
+        methods = [
+            self.make_bad_ip,
+            self.make_bad_port,
+            self.make_bad_name,
+            self.make_bad_preferences,
+            self.make_bad_collected_torrents]
+        for method in methods:
+            d = self.create_good_buddycast(btconn.current_version)
+            d['taste buddies'] = method()
+            func = lambda:self.create_payload(d)
+            
+            print >> sys.stderr,"\ntest: ",method,
+            self._test_bad(func)
+    
+    def _test_bad(self,gen_buddycast_func):
+        print >>sys.stderr,"test: bad BUDDYCAST",gen_buddycast_func
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = gen_buddycast_func()
+        s.send(msg)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        self.assert_(len(s.recv())==0)
+        s.close()
+
+    def create_not_bdecodable(self):
+        return BUDDYCAST+"bla"
+
+    def create_not_dict1(self):
+        buddycast = 481
+        return self.create_payload(buddycast)
+
+    def create_not_dict2(self):
+        buddycast = []
+        return self.create_payload(buddycast)
+
+    def create_empty_dict(self):
+        buddycast = {}
+        return self.create_payload(buddycast)
+
+    def create_wrong_dict_keys(self):
+        buddycast = {}
+        buddycast['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        buddycast['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return self.create_payload(buddycast)
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_buddycast_msg.py <method name>"
+    else:
+        suite.addTest(TestBuddyCastMsg(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+# Allow running this module directly with a single singtest method name;
+# the accompanying .sh/.bat scripts invoke it once per test method.
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast_msg.sh b/instrumentation/next-share/BaseLib/Test/test_buddycast_msg.sh
new file mode 100644 (file)
index 0000000..0161637
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_buddycast_msg.py singtest_good_buddycast3
+python test_buddycast_msg.py singtest_good_buddycast4
+python test_buddycast_msg.py singtest_good_buddycast6
+python test_buddycast_msg.py singtest_bad_all
+python test_buddycast_msg8plus.py singtest_all_olproto_ver_current
+python test_buddycast_msg8plus.py singtest_all_olproto_ver_11
+python test_buddycast_msg8plus.py singtest_all_olproto_ver_8
diff --git a/instrumentation/next-share/BaseLib/Test/test_buddycast_msg8plus.py b/instrumentation/next-share/BaseLib/Test/test_buddycast_msg8plus.py
new file mode 100644 (file)
index 0000000..aae6a6d
--- /dev/null
@@ -0,0 +1,432 @@
+# Written by Nicolas Neubauer, Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# Test case for BuddyCast overlay version 12 (and 8). To be integrated into\r
+# test_buddycast_msg.py\r
+#\r
+# Very sensitive to the order in which things are put into DB,\r
+# so not a robust test\r
+\r
+\r
+import unittest\r
+import os\r
+import sys\r
+import time\r
+import tempfile\r
+import shutil\r
+from sha import sha\r
+from random import randint,shuffle\r
+from traceback import print_exc\r
+from types import StringType, ListType, DictType\r
+from threading import Thread\r
+from time import sleep\r
+from M2Crypto import Rand,EC\r
+\r
+\r
+from BaseLib.Test.test_as_server import TestAsServer\r
+from olconn import OLConnection\r
+from BaseLib.__init__ import LIBRARYNAME\r
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode\r
+from BaseLib.Core.BitTornado.BT1.MessageID import *\r
+\r
+from BaseLib.Core.CacheDB.CacheDBHandler import BarterCastDBHandler\r
+\r
+from BaseLib.Core.BuddyCast.buddycast import BuddyCastFactory, BuddyCastCore\r
+\r
+from BaseLib.Core.Overlay.SecureOverlay import OLPROTO_VER_FIRST, OLPROTO_VER_SECOND, OLPROTO_VER_THIRD, OLPROTO_VER_FOURTH, OLPROTO_VER_FIFTH, OLPROTO_VER_SIXTH, OLPROTO_VER_SEVENTH, OLPROTO_VER_EIGHTH, OLPROTO_VER_ELEVENTH, OLPROTO_VER_CURRENT, OLPROTO_VER_LOWEST\r
+from BaseLib.Core.simpledefs import *\r
+\r
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import *\r
+from BaseLib.Core.CacheDB.sqlitecachedb import CURRENT_MAIN_DB_VERSION\r
+\r
+DEBUG=True\r
+\r
+    \r
+\r
+class TestBuddyCastMsg8Plus(TestAsServer):\r
+    """ \r
+    Testing BuddyCast 5 / overlay protocol v12+v8 interactions:\r
+    swarm size info exchange.\r
+    """\r
+    \r
+    def setUp(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUp(self)\r
+        Rand.load_file('randpool.dat', -1)\r
+\r
+    def setUpPreSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPreSession(self)\r
+        # Enable buddycast\r
+        self.config.set_buddycast(True)\r
+        BuddyCastCore.TESTASSERVER = True\r
+        self.config.set_start_recommender(True)\r
+        self.config.set_bartercast(True)\r
+        \r
+        # Arno, 2010-02-02: Install empty superpeers.txt so no interference from \r
+        # real BuddyCast.\r
+        self.config.set_crawler(False)\r
+        \r
+        # Write superpeers.txt\r
+        self.install_path = tempfile.mkdtemp()\r
+        spdir = os.path.join(self.install_path, LIBRARYNAME, 'Core')\r
+        os.makedirs(spdir)\r
+\r
+        statsdir = os.path.join(self.install_path, LIBRARYNAME, 'Core', 'Statistics')\r
+        os.makedirs(statsdir)\r
+        \r
+        superpeerfilename = os.path.join(spdir, 'superpeer.txt')\r
+        print >> sys.stderr,"test: writing empty superpeers to",superpeerfilename\r
+        f = open(superpeerfilename, "w")\r
+        f.write('# Leeg')\r
+        f.close()\r
+\r
+        self.config.set_install_dir(self.install_path)\r
+        \r
+        srcfiles = []\r
+        srcfiles.append(os.path.join(LIBRARYNAME,"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql"))\r
+        for srcfile in srcfiles:\r
+            sfn = os.path.join('..','..',srcfile)\r
+            dfn = os.path.join(self.install_path,srcfile)\r
+            print >>sys.stderr,"test: copying",sfn,dfn\r
+            shutil.copyfile(sfn,dfn)\r
+\r
+        \r
+\r
+    def setUpPostSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPostSession(self)\r
+\r
+        self.mypermid = str(self.my_keypair.pub().get_der())\r
+        self.hispermid = str(self.his_keypair.pub().get_der())        \r
+        self.myhash = sha(self.mypermid).digest()\r
+        \r
+        self.buddycast = BuddyCastFactory.getInstance(superpeer=True)\r
+        self.buddycast.olthread_register(True)\r
+        \r
+#        arg0 = sys.argv[0].lower()\r
+#        if arg0.endswith('.exe'):\r
+#            installdir = os.path.abspath(os.path.dirname(sys.argv[0]))\r
+#        else:\r
+#           installdir = os.getcwd()          \r
+#       self.utility = Utility(installdir)        \r
+\r
+        \r
+        # wait for buddycast to have completed on run cycle,\r
+        # seems to create problems otherwise\r
+        while not self.buddycast.ranonce:\r
+            pass\r
+            \r
+    def tearDown(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.tearDown(self)\r
+        try:\r
+            os.remove('randpool.dat')\r
+        except:\r
+            pass\r
+\r
+\r
+    def singtest_all_olproto_ver_current(self):\r
+        self._test_all(OLPROTO_VER_CURRENT)\r
+\r
+    def singtest_all_olproto_ver_11(self):\r
+        self._test_all(11)\r
+\r
+    def singtest_all_olproto_ver_8(self):\r
+        self._test_all(8)\r
+\r
+    def _test_all(self,myoversion):\r
+        """ \r
+            I want to start a Tribler client once and then connect to\r
+            it many times. So there must be only one test method\r
+            to prevent setUp() from creating a new client every time.\r
+\r
+            The code is constructed so unittest will show the name of the\r
+            (sub)test where the error occured in the traceback it prints.\r
+        """\r
+        # Arno, 2010-02-03: clicklog 1,2,3 must be run consecutively\r
+        # create_mypref() must be called after clicklog 1,2,3\r
+        self.subtest_good_buddycast_clicklog(1,myoversion)\r
+        self.subtest_good_buddycast_clicklog(2,myoversion)\r
+        self.subtest_good_buddycast_clicklog(3,myoversion)\r
+        self.subtest_terms(myoversion)\r
+        self.subtest_create_mypref()\r
+        self.subtest_create_bc(myoversion)\r
+\r
+    \r
+    def get_good_clicklog_msg(self,n,myoversion=8):\r
+        if n==1:\r
+            # OLv8:\r
+            # infohash\r
+            # search terms\r
+            # click position\r
+            # reranking strategy\r
+            # OLv11:\r
+            # number of seeders\r
+            # number of leechers\r
+            # age of checking\r
+            # number of sources seen'\r
+            prec = ["hash1hash1hash1hash1", ["linux","ubuntu"], 1, 2]\r
+            if myoversion >= 11:\r
+                prec += [400, 500, 1000, 50]\r
+            preferences = [prec]\r
+            if myoversion >= 11:\r
+                prec = ['hash0hash0hash0hash0', 300, 800, 5000, 30]\r
+                collected_torrents = [prec]\r
+            else:\r
+                collected_torrents = ['hash0hash0hash0hash0'] \r
+\r
+        elif n==2:\r
+            prec = ["hash2hash2hash2hash2", ["linux", "ubuntu"], 2, 2]\r
+            if myoversion >= 11:\r
+                prec += [600, 700,20000,60]\r
+            preferences = [prec]\r
+            if myoversion >= 11:\r
+                prec = ['hash2hash2hash2hash2', 500, 200, 70000, 8000]\r
+                collected_torrents = [prec]\r
+            else:\r
+                collected_torrents = ["hash2hash2hash2hash2"]            \r
+        elif n==3:\r
+            prec = ["hash3hash3hash3hash3", ["linux","redhat"], 5 ,2 ]\r
+            if myoversion >= 11:\r
+                prec += [800, 900, 30000, 70]\r
+            preferences = [prec]\r
+            if myoversion >= 11:\r
+                prec = ['hash3hash3hash3hash3', 700, 200, 45000, 75]\r
+                collected_torrents = [prec]\r
+            else:\r
+                collected_torrents = ['hash3hash3hash3hash3'] \r
+\r
+            \r
+        return {\r
+                'preferences': preferences, \r
+                'ndls': 1, \r
+                'permid': self.mypermid,\r
+                'ip': '127.0.0.1', #'130.149.146.117', \r
+                'taste buddies': [], \r
+                'name': 'nic', \r
+                'random peers': [], \r
+                'collected torrents': collected_torrents, \r
+                'nfiles': 0, \r
+                'npeers': 0, \r
+                'port': self.hisport, \r
+                'connectable': 1}\r
+            \r
+\r
+            \r
+            \r
+    def subtest_good_buddycast_clicklog(self, i, myoversion):\r
+        """sends two buddy cast messages containing clicklog data,\r
+           then checks in the DB to find out whether the correct\r
+           data was stored.\r
+           \r
+           This in fact checks quite a lot of things.\r
+           For example, the messages always contain terms [1,2]\r
+        """\r
+           \r
+        print >>sys.stderr,"\ntest: subtest_good_buddycast_clicklog",i,"selversion",myoversion    \r
+           \r
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,myoversion=myoversion)\r
+        \r
+        prefmsg = self.get_good_clicklog_msg(i,myoversion)\r
+        \r
+        print >>sys.stderr,myoversion,`prefmsg`\r
+        \r
+        msg = self.create_payload(prefmsg)\r
+        s.send(msg)\r
+        resp = s.recv()\r
+        if len(resp)>0:\r
+            print >>sys.stderr,"test: reply message %s:%s" % (getMessageName(resp[0]), resp[1:])\r
+        else:\r
+            print >>sys.stderr,"no reply message"\r
+        self.assert_(len(resp) > 0)\r
+            \r
+        #if we have survived this, check if the content of the remote database is correct\r
+        search_db = self.session.open_dbhandler(NTFY_SEARCH)\r
+        term_db = self.session.open_dbhandler(NTFY_TERM)\r
+        pref_db = self.session.open_dbhandler(NTFY_PREFERENCES)\r
+        torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)\r
+\r
+        torrent_id = None\r
+        while not torrent_id:\r
+            hash = prefmsg['preferences'][0][0]\r
+            print >> sys.stderr, "hash: %s, bin2str: %s" % (hash, bin2str(hash))\r
+            torrent_data =  torrent_db.getTorrentID(hash)\r
+            print >> sys.stderr, "Torrent data for torrent %s: %s" % (prefmsg['preferences'][0][0], torrent_data)\r
+            torrent_id = torrent_data\r
+            if not torrent_id:\r
+                print >> sys.stderr, "torrent not yet saved, waiting..."\r
+                sleep(1)\r
+        \r
+\r
+        # self.getAll("rowid, peer_id, torrent_id, click_position,reranking_strategy", order_by="peer_id, torrent_id")\r
+        real_prefs = pref_db.getAllEntries()\r
+        print >>sys.stderr,"test: getAllEntries returned",real_prefs\r
+        \r
+        my_peer_id = real_prefs[0][1] \r
+        real_terms = term_db.getAllEntries()\r
+        real_search = search_db.getAllEntries()\r
+        \r
+\r
+        if i==1:\r
+            wanted_prefs = [[1,my_peer_id,1,1,2]]\r
+            wanted_terms = [[1,u'linux'], [2,u'ubuntu']]\r
+            wanted_search = [[1,my_peer_id,'?',1,0],\r
+                             [2,my_peer_id,'?',2,1]]\r
+        elif i==2:\r
+            # Arno, 2010-02-04: Nicolas assumed the collected torrent for i=1\r
+            # wouldn't be stored in DB?\r
+            wanted_prefs = [[1,my_peer_id,'?',1,2],[2,my_peer_id,torrent_id,2,2]]\r
+            wanted_terms = [[1,u'linux'], [2,u'ubuntu']]\r
+            wanted_search = [[1,my_peer_id,'?',1,0],\r
+                             [2,my_peer_id,'?',2,1],\r
+                             [3,my_peer_id,'?',1,0],\r
+                             [4,my_peer_id,'?',2,1]]\r
+            \r
+        elif i==3:\r
+            wanted_prefs = [[1,my_peer_id,'?',1,2],[2,my_peer_id,'?',2,2],[3,my_peer_id,torrent_id,5,2]]\r
+            wanted_terms = [[1,u'linux'], [2,u'ubuntu'], [3, u'redhat']]\r
+            wanted_search = [[1,my_peer_id,'?',1,0],\r
+                             [2,my_peer_id,'?',2,1],\r
+                             [3,my_peer_id,'?',1,0],\r
+                             [4,my_peer_id,'?',2,1],\r
+                             [5,my_peer_id,'?',1,0],\r
+                             [6,my_peer_id,'?',3,1]]\r
+            \r
+                \r
+        \r
+        print >> sys.stderr, "real_prefs: %s" % real_prefs\r
+        print >> sys.stderr, "real_terms: %s" % real_terms\r
+        print >> sys.stderr, "real_search: %s " % real_search\r
+\r
+        print >> sys.stderr, "wanted_prefs: %s" % wanted_prefs\r
+        print >> sys.stderr, "wanted_terms: %s" % wanted_terms\r
+        print >> sys.stderr, "wanted_search: %s " % wanted_search\r
+\r
+        self.assert_(self.lol_equals(real_search, wanted_search, "good buddycast %d: search" % i))\r
+        self.assert_(self.lol_equals(real_terms, wanted_terms, "good buddycast %d: terms" % i))\r
+        self.assert_(self.lol_equals(real_prefs, wanted_prefs, "good buddycast %d: prefs" % i))\r
+        \r
    def subtest_terms(self,myoversion):
        """Verify that clicklog keywords end up in the term database.

        Assumes clicklog message 1 and 2 have been sent and digested;
        sends clicklog message 3 over a fresh overlay connection, then
        queries the term DB handler directly.
        """
        
        print >>sys.stderr,"\ntest: subtest_terms"
        
        term_db = self.session.open_dbhandler(NTFY_TERM)
        
        # Deliver clicklog message 3 to the peer under test.
        s = OLConnection(self.my_keypair,'localhost',self.hisport,myoversion=myoversion)        
        msg = self.get_good_clicklog_msg(3,myoversion)
        msg = self.create_payload(msg)
        s.send(msg)
        resp = s.recv()
        self.assert_(len(resp) > 0)
        
        termid = term_db.getTermID(u"linux")
        print >>sys.stderr, "TermID for Linux: %s" % termid
        #self.assert_(termid == 1)
        
        #self.assert_(term_db.getTerm(1)==bin2str(str(u"linux")))
        
        # Prefix completion should find exactly the one stored term.
        completedTerms = term_db.getTermsStartingWith("li")
        print >> sys.stderr, "terms starting with l: %s" % completedTerms  
        self.assert_(len(completedTerms)==1)
        self.assert_(u'linux' in completedTerms)
        
        # Terms are stored verbatim: the trailing '#' is not stripped.
        term_db.insertTerm("asd#")
        completedTerms = term_db.getTermsStartingWith("asd")
        print >> sys.stderr, "terms starting with asd: %s" % completedTerms  
        self.assert_(len(completedTerms)==1)
        # Arno, 2010-02-03: Nicolas had 'asd' here, but I don't see any place
        # where the # should have been stripped.
        #
        self.assert_(u'asd#' in completedTerms)
+        \r
+\r
+\r
+\r
    def subtest_create_mypref(self):
        """Insert one MyPreference with clicklog data and verify the DB rows."""
        print >>sys.stderr,"\ntest: creating test MyPreference data"
        
        torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
        torrent_db.addInfohash('mhashmhashmhashmhash')
        torrent_id = torrent_db.getTorrentID('mhashmhashmhashmhash')
        mypref_db = self.session.open_dbhandler(NTFY_MYPREFERENCES)
        search_db = self.session.open_dbhandler(NTFY_SEARCH)
        
        mypref_db.addMyPreference('mhashmhashmhashmhash', {'destination_path':''}, commit=True)
        clicklog_data = {
                            'click_position': 1,
                            'reranking_strategy': 2,
                            'keywords': ['linux', 'fedora']
                        }
        mypref_db.addClicklogToMyPreference('mhashmhashmhashmhash', clicklog_data, commit=True)
        
        # self.getAll("torrent_id, click_position, reranking_strategy", order_by="torrent_id")
        allEntries = mypref_db.getAllEntries()
        print >> sys.stderr, "all mypref entries: %s" % allEntries
        self.assert_(len(allEntries)==1)
        # (torrent_id, click_pos, rerank_strategy); '?' is a wildcard for lol_equals
        mypref_wanted = [['?',1,2]]
        self.assert_(self.lol_equals(allEntries, mypref_wanted, "create mypref all"))
        
        # self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", order_by="rowid")
        real_search = search_db.getAllOwnEntries()
        wanted_search = [[7,0,torrent_id,1,0],
                         [8,0,torrent_id,5,1]] # is now 5 for some reason
        self.assert_(self.lol_equals(real_search, wanted_search, "create mypref allown"))        
+        \r
+        \r
    def subtest_create_bc(self,myoversion):
        """Build a BuddyCast message locally and check its 'preferences' field."""
        print >>sys.stderr,"\ntest: creating test create_bc"
        
        torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
        # Mark all torrents as 'good' so they are eligible for the message.
        torrent_db._db.update("Torrent", status_id=1)
        pref_db = self.session.open_dbhandler(NTFY_MYPREFERENCES)
        pref_db.loadData()
        msg = self.buddycast.buddycast_core.createBuddyCastMessage(0, myoversion, target_ip="127.0.0.1", target_port=80)
        print >> sys.stderr, "created bc pref: %s" % msg
        
        # Expected entry: (infohash, keywords, click_position, reranking_strategy)
        wantpref = ['mhashmhashmhashmhash',['linux','fedora'],1,2]
        if myoversion >= OLPROTO_VER_ELEVENTH:
            # Protocol v11+ appends four extra fields, -1 meaning 'unknown'.
            wantpref += [-1,-1,-1,-1]  
        wantprefs = [wantpref]
                
        self.assert_(msg['preferences']==wantprefs)
+        \r
+\r
+                \r
+    def lol_equals(self, lol1, lol2, msg):\r
+        ok = True\r
+        for (l1, l2) in zip(lol1, lol2):\r
+            for (e1, e2) in zip(l1, l2):\r
+                if e1=='?' or e2=='?':\r
+                    continue\r
+                if not e1==e2:\r
+                    print >> sys.stderr, "%s != %s!" % (e1, e2)\r
+                    ok = False\r
+                    break\r
+        if not ok:\r
+            print >> sys.stderr, "%s: lol != lol:\nreal   %s\nwanted %s" % (msg, lol1, lol2)\r
+        return ok\r
+        \r
+\r
+    def create_payload(self,r):\r
+        return BUDDYCAST+bencode(r)\r
+\r
+\r
def test_suite():
    """Build a suite holding the single test method named on the command line."""
    suite = unittest.TestSuite()
    # We should run the tests in a separate Python interpreter to prevent 
    # problems with our singleton classes, e.g. PeerDB, etc.
    if len(sys.argv) != 2:
        print "Usage: python test_buddycast_msg8plus.py <method name>"
    else:
        suite.addTest(TestBuddyCastMsg8Plus(sys.argv[1]))
    
    return suite
+\r
def main():
    # Strip the method-name argument so unittest does not try to parse it.
    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])

if __name__ == "__main__":
    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_cachingstream.py b/instrumentation/next-share/BaseLib/Test/test_cachingstream.py
new file mode 100644 (file)
index 0000000..6ef9774
--- /dev/null
@@ -0,0 +1,145 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+#\r
+\r
+import unittest\r
+\r
+import sys\r
+import os\r
+import tempfile\r
+import shutil\r
+from traceback import print_exc\r
+\r
+from BaseLib.Video.CachingStream import SmartCachingStream\r
+\r
+\r
class TestCachingStream(unittest.TestCase):
    
    """ Note: CachingStream currently actually reads 4x the blocksize into
    the buffer, so the tests are slightly different than suggested, but
    in any case all should pass.
    """ 
    
    def setUp(self):
        # Create a 10 MiB + 34235 byte source file of '*' bytes and wrap it
        # in a SmartCachingStream with a 64 KiB block size.  The odd-sized
        # tail guarantees the file is not block-aligned.
        self.tempdir = tempfile.mkdtemp()

        # Generate source file
        self.srcfilename = os.path.join(self.tempdir,"src.mkv")
        f = open(self.srcfilename,"wb")
        d = '*' * (1024*1024)
        for i in range(0,10):
            f.write(d)
        f.write(d[:34235])
        f.close()
        
        self.f = open(self.srcfilename,"rb")
        self.destfilename = os.path.join(self.tempdir,"dest.mkv")
        self.g = open(self.destfilename,"wb")
        self.c = SmartCachingStream(self.f,blocksize=65536)

    def tearDown(self):
        # Best-effort cleanup of the temp dir; never fail the test run here.
        try:
            shutil.rmtree(self.tempdir,ignore_errors=True)
        except:
            print_exc()

    def test_sequential_2xblocksize(self):
        # Copy src to dest with read requests of twice the cache block size.
        while True:
            data = self.c.read(65536*2)
            if len(data) == 0:
                break
            self.g.write(data)
            print >>sys.stderr,".",
            
        self.g.close()
        self.cmp_files()
        
    def test_sequential_halfxblocksize(self):
        # Copy src to dest with read requests of half the cache block size.
        while True:
            data = self.c.read(32768)
            print >>sys.stderr,"test: Got bytes",len(data)
            if len(data) == 0:
                break
            self.g.write(data)
            print >>sys.stderr,".",
            
        self.g.close()
        self.cmp_files()

    def test_sequential_bs32767(self):
        # Same, with a request size that is deliberately not a power of two.
        while True:
            data = self.c.read(32767)
            print >>sys.stderr,"test: Got bytes",len(data)
            if len(data) == 0:
                break
            self.g.write(data)
            print >>sys.stderr,".",
            
        self.g.close()
        self.cmp_files()


    def test_sequential_readnseek(self):
        # Interleave reads with explicit seeks to the current position; the
        # result must be identical to a plain sequential copy.
        pos = 0
        while True:
            data = self.c.read(32767)
            print >>sys.stderr,"test: Got bytes",len(data)
            if len(data) == 0:
                break
            self.g.write(data)
            
            pos += len(data)
            self.c.seek(pos)
            print >>sys.stderr,".",
            
        self.g.close()
        self.cmp_files()


    def test_read1sttwice(self):
        # Re-reading the first block after seek(0) must return identical data.
        data1 = self.c.read(32768)
        print >>sys.stderr,"test: Got bytes",len(data1)
        self.c.seek(0)
        data2 = self.c.read(32768)
        print >>sys.stderr,"test: Got bytes",len(data2)
        self.assert_(data1 == data2)


    def test_inside_1stblock(self):
        # Seeks to offsets inside an already-cached block must be served
        # consistently with the data returned by the first big read.
        data1 = self.c.read(32768)
        print >>sys.stderr,"test: Got bytes",len(data1)
        self.c.seek(16384)
        data2 = self.c.read(16384)
        print >>sys.stderr,"test: Got bytes",len(data2)
        self.assert_(data1[16384:] == data2)

        self.c.seek(10000)
        data3 = self.c.read(20000)
        print >>sys.stderr,"test: Got bytes",len(data3)
        self.assert_(data1[10000:10000+20000] == data3)


    def cmp_files(self):
        # Compare src and dest chunk-wise until the source is exhausted.
        # NOTE(review): extra trailing data in dest would go unnoticed here;
        # acceptable because dest is only ever written from src reads above.
        f1 = open(self.srcfilename,"rb")
        f2 = open(self.destfilename,"rb")
        while True:
            data1 = f1.read(65536)
            data2 = f2.read(65536)
            if len(data1) == 0:
                break
            self.assert_(data1 == data2)
        f1.close()
        f2.close()
+\r
+\r
def test_suite():
    """Collect every TestCachingStream test method into a single suite."""
    caching_tests = unittest.makeSuite(TestCachingStream)
    return unittest.TestSuite([caching_tests])

if __name__ == "__main__":
    unittest.main()
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_channelcast.bat b/instrumentation/next-share/BaseLib/Test/test_channelcast.bat
new file mode 100644 (file)
index 0000000..701cd90
--- /dev/null
@@ -0,0 +1,5 @@
@rem Make the BaseLib tree importable from this test directory.
set PYTHONPATH=..\..

@rem Each singtest_* must run in its own interpreter (singleton classes, e.g. PeerDB).
python test_channelcast.py singtest_plain_nickname 
python test_channelcast.py singtest_unicode_nickname 
diff --git a/instrumentation/next-share/BaseLib/Test/test_channelcast.py b/instrumentation/next-share/BaseLib/Test/test_channelcast.py
new file mode 100644 (file)
index 0000000..1245dde
--- /dev/null
@@ -0,0 +1,373 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+\r
+# TODO: let one hit to SIMPLE+METADATA be P2PURL\r
+import unittest\r
+import os\r
+import sys\r
+import time\r
+import tempfile\r
+import shutil\r
+from BaseLib.Core.Utilities.Crypto import sha\r
+from types import StringType, DictType, IntType\r
+from M2Crypto import EC\r
+from copy import deepcopy\r
+from BaseLib.Test.test_as_server import TestAsServer\r
+from olconn import OLConnection\r
+from BaseLib.Core.API import *\r
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode\r
+from BaseLib.Core.BitTornado.BT1.MessageID import *\r
+from BaseLib.Core.BuddyCast.moderationcast_util import validChannelCastMsg, validVoteCastMsg\r
+from BaseLib.Core.BuddyCast.channelcast import ChannelCastCore\r
+from BaseLib.Core.BuddyCast.buddycast import BuddyCastCore\r
+from BaseLib.Core.BuddyCast.votecast import VoteCastCore\r
+from BaseLib.Core.CacheDB.sqlitecachedb import str2bin,bin2str\r
+\r
+DEBUG=True\r
+\r
class TestChannels(TestAsServer):
    """ 
    Testing QUERY message of Social Network extension V1
    """
    
    def setUpPreSession(self):
        """ override TestAsServer: enable buddycast/channelcast/votecast and
        prepare a fake install dir with an empty superpeer list and the
        DB schema. """
        TestAsServer.setUpPreSession(self)
        self.config.set_buddycast(True)
        BuddyCastCore.TESTASSERVER = True
        ChannelCastCore.TESTASSERVER = True
        VoteCastCore.TESTASSERVER = True
        self.config.set_start_recommender(True)
        self.config.set_bartercast(True) 
        self.config.set_remote_query(True)
        self.config.set_crawler(False)       
        self.config.set_torrent_collecting_dir(os.path.join(self.config_path, "tmp_torrent_collecting"))

        # Write superpeers.txt and DB schema
        self.install_path = tempfile.mkdtemp()
        spdir = os.path.join(self.install_path, LIBRARYNAME, 'Core')
        os.makedirs(spdir)

        statsdir = os.path.join(self.install_path, LIBRARYNAME, 'Core', 'Statistics')
        os.makedirs(statsdir)
        
        superpeerfilename = os.path.join(spdir, 'superpeer.txt')
        print >> sys.stderr,"test: writing empty superpeers to",superpeerfilename
        f = open(superpeerfilename, "w")
        f.write('# Leeg')
        f.close()

        self.config.set_install_dir(self.install_path)
        
        # Copy the SQLite schema into the fake install dir so the session
        # can create its database.
        srcfiles = []
        srcfiles.append(os.path.join(LIBRARYNAME,"schema_sdb_v5.sql"))
        for srcfile in srcfiles:
            sfn = os.path.join('..','..',srcfile)
            dfn = os.path.join(self.install_path,srcfile)
            print >>sys.stderr,"test: copying",sfn,dfn
            shutil.copyfile(sfn,dfn)


    def setUpPostSession(self):
        """ override TestAsServer: cache DER-encoded permids for both sides. """
        TestAsServer.setUpPostSession(self)

        self.mypermid = str(self.my_keypair.pub().get_der())
        self.hispermid = str(self.his_keypair.pub().get_der())

        
    def setupDB(self,nickname):
        """Populate the session DBs with an own channel (3 torrents) and votes."""
        # Change at runtime. Must be set before DB inserts
        self.session.set_nickname(nickname)
        
        self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
        self.channelcast_db = self.session.open_dbhandler(NTFY_CHANNELCAST)
        self.votecast_db = self.session.open_dbhandler(NTFY_VOTECAST)
        try:
            # Add some torrents belonging to own channel
            tdef1, self.bmetainfo1 = self.get_default_torrent('sumfilename1','Hallo S01E10')
            dbrec= self.torrent_db.addExternalTorrent(tdef1, extra_info={"filename":"sumfilename1"})
            self.infohash1 = tdef1.get_infohash()
            self.channelcast_db.addOwnTorrent(tdef1)
            
            tdef2, self.bmetainfo2 = self.get_default_torrent('sumfilename2','Hallo S02E01')
            dbrec = self.torrent_db.addExternalTorrent(tdef2, extra_info={"filename":"sumfilename2"})
            self.infohash2 = tdef2.get_infohash()
            self.torrenthash2 = sha(self.bmetainfo2).digest()
            self.channelcast_db.addOwnTorrent(tdef2)
    
            tdef3, self.bmetainfo3 = self.get_default_torrent('sumfilename3','Halo Demo')
            self.torrent_db.addExternalTorrent(tdef3, extra_info={"filename":"sumfilename3"})
            self.infohash3 = tdef3.get_infohash()
            self.torrenthash3 = sha(self.bmetainfo3).digest()
            self.channelcast_db.addOwnTorrent(tdef3)
            
            # Now, add some votes
            self.votecast_db.subscribe("MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU")
            self.votecast_db.spam("MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTX")
            vote = {'mod_id':"MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU", 'voter_id':"MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTX",'vote':1, 'time_stamp':132314}
            self.votecast_db.addVote(vote)
        except:
            # NOTE(review): print_exc is not imported explicitly in this file;
            # presumably it arrives via one of the wildcard imports -- verify.
            print_exc()
        

    def tearDown(self):
        """ override TestAsServer """
        TestAsServer.tearDown(self)
        # NOTE(review): only torrent_db is closed here; the channelcast_db and
        # votecast_db handlers opened in setupDB stay open -- confirm intent.
        self.session.close_dbhandler(self.torrent_db)
      

    def get_default_torrent(self,filename,title,paths=None):
        """Create a minimal single- or two-file torrent, save it under the
        torrent-collecting dir, and return (TorrentDef, bencoded metainfo)."""
        metainfo = {}
        metainfo['announce'] = 'http://localhost:0/announce'
        metainfo['announce-list'] = []
        metainfo['creation date'] = int(time.time())
        metainfo['encoding'] = 'UTF-8'
        info = {}
        info['name'] = title.encode("UTF-8")
        info['piece length'] = 2 ** 16
        info['pieces'] = '*' * 20
        if paths is None:
            info['length'] = 481
        else:
            d1 = {}
            d1['path'] = [paths[0].encode("UTF-8")]
            d1['length'] = 201
            d2 = {}
            d2['path'] = [paths[1].encode("UTF-8")]
            d2['length'] = 280
            info['files'] = [d1,d2]
            
        metainfo['info'] = info
        path = os.path.join(self.config.get_torrent_collecting_dir(),filename)
        tdef = TorrentDef.load_from_dict(metainfo)
        tdef.save(path)
        return tdef, bencode(metainfo)


    def singtest_plain_nickname(self):
        self._test_all("nick")
        
    def singtest_unicode_nickname(self):
        self._test_all(u"nick\u00f3")


    def _test_all(self,nickname):
        """ 
            I want to start a Tribler client once and then connect to
            it many times. So there must be only one test method
            to prevent setUp() from creating a new client every time.

            The code is constructed so unittest will show the name of the
            (sub)test where the error occurred in the traceback it prints.
        """
        
        self.setupDB(nickname)
        
        # test ChannelCast
        self.subtest_channelcast()
        
        # test VoteCast
        self.subtest_votecast()
        
        # test ChannelQuery-keyword
        self.subtest_channel_keyword_query(nickname)
        
        # test ChannelQuery-permid
        self.subtest_channel_permid_query(nickname)
        
        #test voting
        self.subtest_voting()

    def subtest_voting(self):
        """Exercise unsubscribe/spam/subscribe transitions in the votecast DB.

        Vote encoding (per the asserts below): subscribe == 2, spam == -1,
        no vote == None.
        """
        self.votecast_db.unsubscribe(bin2str(self.mypermid))
        self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),None)
        #print >> sys.stderr, self.votecast_db.getAll()

        self.votecast_db.spam(bin2str(self.mypermid))
        self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),-1)
        #print >> sys.stderr, self.votecast_db.getAll()
                
        self.votecast_db.subscribe(bin2str(self.mypermid))
        self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),2)
        #print >> sys.stderr, self.votecast_db.getAll()
        
        self.votecast_db.unsubscribe(bin2str(self.mypermid))
        self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),None)
        #print >> sys.stderr, self.votecast_db.getAll()
        
        self.votecast_db.spam(bin2str(self.mypermid))
        self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),-1)
        #print >> sys.stderr, self.votecast_db.getAll()
        
    def check_chquery_reply(self, data, nickname):
        """Validate a bdecoded QUERY_REPLY: well-formed channelcast payload,
        non-empty, and every entry published by us under the given nickname."""
        d = bdecode(data)
        self.assert_(type(d) == DictType)
        self.assert_(d.has_key('a'))
        self.assert_(d.has_key('id'))
        id = d['id']
        self.assert_(type(id) == StringType)
        self.assert_(validChannelCastMsg(d['a'])==True)
        self.assert_(len(d['a']) > 0)
        for key,val in d['a'].iteritems():
            self.assert_(val['publisher_name'] == nickname.encode("UTF-8"))
            self.assert_(val['publisher_id'] == self.hispermid)

    def subtest_channel_permid_query(self,nickname):
        """Send a 'CHANNEL p <permid>' remote query and validate the reply."""
        print >>sys.stderr,"test: chquery permid-----------------------------"
        s = OLConnection(self.my_keypair,'localhost',self.hisport)
        data = {}
        uq = u'CHANNEL p '+ bin2str(self.hispermid)
        data['q'] = uq.encode("UTF-8")
        data['id'] = 'b' * 20
        msg = QUERY + bencode(data)
        s.send(msg)
        resp = s.recv()
        #print >> sys.stderr, "printing resp", resp
        if len(resp) > 0:
            print >>sys.stderr,"test: chquery: got",getMessageName(resp[0])
        self.assert_(resp[0]==QUERY_REPLY)
        self.check_chquery_reply(resp[1:],nickname)
        print >>sys.stderr,"test:",`bdecode(resp[1:])`
        s.close()
        
    def subtest_channel_keyword_query(self,nickname):
        """Send a 'CHANNEL k <keyword>' remote query and validate the reply."""
        print >>sys.stderr,"test: chquery keyword-----------------------------"
        s = OLConnection(self.my_keypair,'localhost',self.hisport)
        data = {}
        uq = u'CHANNEL k '+nickname
        data['q'] = uq.encode("UTF-8")
        data['id'] = 'b' * 20
        msg = QUERY + bencode(data)
        s.send(msg)
        resp = s.recv()
        #print >> sys.stderr, "printing resp", resp
        if len(resp) > 0:
            print >>sys.stderr,"test: chquery: got",getMessageName(resp[0])
        self.assert_(resp[0]==QUERY_REPLY)
        self.check_chquery_reply(resp[1:],nickname)
        print >>sys.stderr,"test:",`bdecode(resp[1:])`
        s.close()
        
    def subtest_votecast(self):
        """Send a good VOTECAST message and several malformed ones."""
        print >>sys.stderr,"test: votecast-----------------------------"
        s = OLConnection(self.my_keypair,'localhost',self.hisport)
        vcast = VoteCastCore(None, s, self.session, None, log = '', dnsindb = None)
        
        #Send Good VoteCast message
        vdata = {self.hispermid:{'vote':-1,'time_stamp':12345345}}
        print >> sys.stderr, "Test Good VoteCast", `vdata`
        msg = VOTECAST+bencode(vdata)
        s.send(msg)
        resp = s.recv()
        #print >> sys.stderr, "printing resp", resp
        if len(resp) > 0:
            print >>sys.stderr,"test: votecast: got",getMessageName(resp[0])
        self.assert_(resp[0]==VOTECAST)
        print >>sys.stderr, "test: votecast: got msg", `bdecode(resp[1:])`
        vdata_rcvd = bdecode(resp[1:])
        self.assert_(validVoteCastMsg(vdata_rcvd)==True)
        s.close()
        
        #Now, send bad VoteCast messages
        # The other side should close the connection
        
        #Bad time_stamp: it can only be an int
        vdata = {bin2str(self.hispermid):{'vote':-1,'time_stamp':'halo'}}
        self.subtest_bad_votecast(vdata)
        
        #Bad Vote: Vote can only be -1 or 2
        vdata = {bin2str(self.hispermid):{'vote':-15,'time_stamp':12345345}}
        self.subtest_bad_votecast(vdata)
        
        # Bad Message format ... Correct format is 'time_stamp'
        vdata = {bin2str(self.hispermid):{'vote':-15,'timestamp':12345345}}
        self.subtest_bad_votecast(vdata)
        
        print>>sys.stderr, "End of votecast test"
    
    def subtest_bad_votecast(self, vdata):
        """Send one malformed VOTECAST; the peer must close the connection."""
        s = OLConnection(self.my_keypair,'localhost',self.hisport)
        vcast = VoteCastCore(None, s, self.session, None, log = '', dnsindb = None)
        print >> sys.stderr, "Test Bad VoteCast", `vdata`
        msg = VOTECAST+bencode(vdata)
        s.send(msg)
        self.assert_(len(s.recv())==0)
        s.close()
                    
    def subtest_channelcast(self):
        """Send a good CHANNELCAST message, then corrupted variants of the reply."""
        print >>sys.stderr,"test: channelcast----------------------"
        s = OLConnection(self.my_keypair,'localhost',self.hisport)
        chcast = ChannelCastCore(None, s, self.session, None, log = '', dnsindb = None)
        
        #Send Empty ChannelCast message
        chdata = {}
        print >> sys.stderr, "Test Good ChannelCast", `chdata`
        msg = CHANNELCAST+bencode(chdata)
        s.send(msg)
        resp = s.recv()
        if len(resp) > 0:
            print >>sys.stderr,"test: channelcast: got",getMessageName(resp[0])
        self.assert_(resp[0]==CHANNELCAST)
        print >>sys.stderr, "test: channelcast: got msg", `bdecode(resp[1:])`
        chdata_rcvd = bdecode(resp[1:])
        self.assert_(validChannelCastMsg(chdata_rcvd)==True)
        s.close() 
        
        #Now, send a bad ChannelCast message.
        # The other side should close the connection
        # Create bad message by manipulating a good one
        #bad infohash
        chdata = deepcopy(chdata_rcvd)
        for k,v in chdata.items():
            v['infohash'] = 234
        self.subtest_bad_channelcast(chdata)
        
        #bad torrentname
        chdata = deepcopy(chdata_rcvd)
        for k,v in chdata.items():
            v['torrentname'] = 1231
        self.subtest_bad_channelcast(chdata)
        
        #bad signature.. temporarily disabled. 
        # Got to enable when signature validation in validChannelCastMsg are enabled
#        chdata = deepcopy(chdata_rcvd)
#        value_list = chdata.values()
#        if len(value_list)>0:
#            chdata['sdfg234sadf'] = value_list[0]
#            self.subtest_bad_channelcast(chdata)
                
        #Bad message format
        chdata = {'2343ww34':''}
        self.subtest_bad_channelcast(chdata)
        
        #Bad 
        print>>sys.stderr, "End of channelcast test---------------------------"
               
    
    def subtest_bad_channelcast(self, chdata):
        """Send one malformed CHANNELCAST; the peer must close the connection."""
        s = OLConnection(self.my_keypair,'localhost',self.hisport)
        chcast = ChannelCastCore(None, s, self.session, None, log = '', dnsindb = None)
        print >> sys.stderr, "Test Bad ChannelCast", `chdata`
        msg = CHANNELCAST+bencode(chdata)
        s.send(msg)
        self.assert_(len(s.recv())==0)
        s.close()
+            \r
+\r
+\r
+def test_suite():\r
+    suite = unittest.TestSuite()\r
+    # We should run the tests in a separate Python interpreter to prevent \r
+    # problems with our singleton classes, e.g. PeerDB, etc.\r
+    if len(sys.argv) != 2:\r
+        print "Usage: python test_channelcasst.py <method name>"\r
+    else:\r
+        suite.addTest(TestChannels(sys.argv[1]))\r
+    \r
+    return suite\r
+\r
+def main():\r
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])\r
+\r
+if __name__ == "__main__":\r
+    main()\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_channelcast.sh b/instrumentation/next-share/BaseLib/Test/test_channelcast.sh
new file mode 100644 (file)
index 0000000..c59c2ae
--- /dev/null
@@ -0,0 +1,5 @@
+
+export PYTHONPATH=../..:"$PYTHONPATH"
+
+python test_channelcast.py singtest_plain_nickname 
+python test_channelcast.py singtest_unicode_nickname 
diff --git a/instrumentation/next-share/BaseLib/Test/test_channelcast_plus_subtitles.py b/instrumentation/next-share/BaseLib/Test/test_channelcast_plus_subtitles.py
new file mode 100644 (file)
index 0000000..f0ed64c
--- /dev/null
@@ -0,0 +1,154 @@
+# Written by Andrea Reale\r
+# see LICENSE.txt for license information\r
+\r
+\r
+import unittest\r
+from copy import deepcopy\r
+from olconn import OLConnection\r
+from BaseLib.Core.API import *\r
+from BaseLib.Core.BitTornado.BT1.MessageID import *\r
+from BaseLib.Core.BuddyCast.moderationcast_util import validChannelCastMsg\r
+from BaseLib.Core.BuddyCast.channelcast import ChannelCastCore\r
+from BaseLib.Test.test_channelcast import TestChannels\r
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO\r
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo\r
+import os.path\r
+\r
+DEBUG=True\r
+\r
+RES_DIR = "subtitles_test_res"\r
+\r
class TestChannelsPlusSubtitles(TestChannels):
    """ 
    Testing the rich metadata extension of channelcast.
    
    The test suite defined in this module executes all the old 
    channelcast tests, plus a test to validate that the rich metadata
    (currently subtitles) extension works properly
    """


        
    def setupDB(self,nickname):
        """Extend TestChannels.setupDB with subtitle metadata for torrent 1."""
        TestChannels.setupDB(self,nickname)
        try:
            self.richMetadata_db = self.session.open_dbhandler(NTFY_RICH_METADATA)
            #add some metadata for torrents (they are defined in TestChannels.setupDB())
            self.mdto = MetadataDTO(self.hispermid, self.infohash1)
            subtitle1 = SubtitleInfo("nld", os.path.join(RES_DIR,"fake.srt"))
            subtitle1.computeChecksum()
            
            subtitle2 = SubtitleInfo("eng", os.path.join(RES_DIR, "fake0.srt"))
            subtitle2.computeChecksum()
            self.mdto.addSubtitle(subtitle1)
            self.mdto.addSubtitle(subtitle2)
            
            # Sign with the peer's own key so the metadata validates.
            self.mdto.sign(self.his_keypair)
            
            self.richMetadata_db.insertMetadata(self.mdto)
        except:
            # NOTE(review): print_exc is not imported explicitly in this file;
            # presumably it arrives via one of the wildcard imports -- verify.
            print_exc()
        
        
    def tearDown(self):
        TestChannels.tearDown(self)
        self.session.close_dbhandler(self.richMetadata_db)
        
    def _test_all(self,nickname):
        """ 
            I want to start a Tribler client once and then connect to
            it many times. So there must be only one test method
            to prevent setUp() from creating a new client every time.

            The code is constructed so unittest will show the name of the
            (sub)test where the error occurred in the traceback it prints.
        """
        
        TestChannels._test_all(self,nickname)
        self.subtest_channelcastPlusMetadata()
        
      
    def subtest_channelcastPlusMetadata(self):
        '''
        Extends the channelcast test to channelcast messages enriched with
        metadata (subtitles) information
        '''
        print >>sys.stderr,"test: channelcast_subtitles ---------------------------"
        s = OLConnection(self.my_keypair,'localhost',self.hisport)
        chcast = ChannelCastCore(None, s, self.session, None, log = '', dnsindb = None)
        
        #test send standard channelcast
        chdata = {}
        print >> sys.stderr, "Test Good ChannelCast Plus Subtitles", `chdata`
        msg = CHANNELCAST+bencode(chdata)
        s.send(msg)
        resp = s.recv()
        if len(resp) > 0:
            print >>sys.stderr,"test: channelcast_subtitles: got",getMessageName(resp[0])
        self.assert_(resp[0]==CHANNELCAST)
        print >>sys.stderr, "test: channelcast_subtitles: got msg", `bdecode(resp[1:])`
        chdata_rcvd = bdecode(resp[1:])
        self.assertTrue(validChannelCastMsg(chdata_rcvd))
        
        # Only the torrent that has subtitles may carry 'rich_metadata'.
        for entry in chdata_rcvd.itervalues():
            if entry['infohash'] == self.infohash1: #the torrent for which two subtitles exist
                self.assertTrue('rich_metadata' in entry.keys())
                richMetadata = entry['rich_metadata']
                print >> sys.stderr, "test: channelcast_subtitles: richMetadata entry is ", richMetadata
                self.assertEquals(6, len(richMetadata))
                self.assertEquals(self.mdto.description, richMetadata[0])
                self.assertEquals(4, len(richMetadata[2])) #the subtitles mask 4 bytes
                self.assertTrue(isinstance(richMetadata[3],list)) #the subtitles checksums
                for checksum in richMetadata[3]:
                    self.assertEquals(20,len(checksum)) #160 bit sha1 checksum
                self.assertEquals(self.mdto.signature, richMetadata[4])
                self.assertEquals(4,len(richMetadata[5])) #the subtitles have mask 32 bit
                #also must (in this case) be equal to the subtitles mask
                self.assertEquals(richMetadata[2], richMetadata[5])
                
                print >> sys.stderr, "test: channelcast_subtitles; richMetadata entry is valid and correct"
            else:
                self.assertFalse('rich_metadata' in entry.keys())
                
        s.close()
        
        #Now, send a bad ChannelCast message.
        # The other side should close the connection
        # Create bad message by manipulating a good one
        #bad bitmask
        chdata = deepcopy(chdata_rcvd)
        for k,v in chdata.items():
            if 'rich_metadata' in v:
                v['rich_metadata'][2] = 44 #an integer instead of a 4bytes bitmask
        self.subtest_bad_channelcast(chdata)
    
                
        #Bad message format
        chdata = deepcopy(chdata_rcvd)
        for k,v in chdata.items():
            if 'rich_metadata' in v:
                v['rich_metadata'].insert(0, u"asdfafa22")
        self.subtest_bad_channelcast(chdata)
        
        #Bad 
        print>>sys.stderr, "End of channelcast_subtitles test ---------------------------"
+    \r
+            \r
+\r
+\r
def test_suite():
    """Build a suite holding the single test method named on the command line."""
    suite = unittest.TestSuite()
    # We should run the tests in a separate Python interpreter to prevent 
    # problems with our singleton classes, e.g. PeerDB, etc.
    if len(sys.argv) != 2:
        print "Usage: python test_channelcast_plus_subtitles.py <method name>"
    else:
        suite.addTest(TestChannelsPlusSubtitles(sys.argv[1]))
    
    return suite

def main():
    # Strip the method-name argument so unittest does not try to parse it.
    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])

if __name__ == "__main__":
    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_closedswarm.py b/instrumentation/next-share/BaseLib/Test/test_closedswarm.py
new file mode 100644 (file)
index 0000000..757b1da
--- /dev/null
@@ -0,0 +1,280 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+#
+import time
+from base64 import encodestring,decodestring
+
+import unittest
+
+import os.path
+from BaseLib.Core.Overlay import permid
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.ClosedSwarm import ClosedSwarm
+
+class ClosedSwarmTest(unittest.TestCase):
+
+    def setUp(self):
+        self.keyfiles = [".node_a_keypair",".node_b_keypair",".torrent_keypair"]
+        for filename in self.keyfiles:
+            if not os.path.exists(filename):
+                keypair = permid.generate_keypair()
+                permid.save_keypair(keypair, filename)
+                
+        self.node_a_keypair = permid.read_keypair(".node_a_keypair")
+        self.node_b_keypair = permid.read_keypair(".node_b_keypair")
+        self.torrent_keypair = permid.read_keypair(".torrent_keypair")
+
+        self.torrent_id = "1234"
+
+        # Shortcuts
+        self.node_a_pub_permid = str(self.node_a_keypair.pub().get_der())
+        self.node_b_pub_permid = str(self.node_b_keypair.pub().get_der())
+        self.torrent_pubkeys = [encodestring(str(self.torrent_keypair.pub().get_der())).replace("\n","")]
+        
+        # Create the certificate for this torrent ("proof of access")
+        self.poa_a = ClosedSwarm.create_poa(self.torrent_id,
+                                            self.torrent_keypair,
+                                            self.node_a_pub_permid)
+
+        self.poa_b = ClosedSwarm.create_poa(self.torrent_id,
+                                            self.torrent_keypair,
+                                            self.node_b_pub_permid)
+        
+        self.cs_a = ClosedSwarm.ClosedSwarm(self.node_a_keypair,
+                                            self.torrent_id,
+                                            self.torrent_pubkeys,
+                                            self.poa_a)
+        
+        self.cs_b = ClosedSwarm.ClosedSwarm(self.node_b_keypair,
+                                            self.torrent_id,
+                                            self.torrent_pubkeys,
+                                            self.poa_b)
+
+
+    def tearDown(self):
+        for filename in self.keyfiles:
+            try:
+                os.remove(filename)
+            except:
+                pass
+
+    def _verify_poas(self, poa_a, poa_b):
+        self.assertEquals(poa_a.torrent_id, poa_b.torrent_id)
+        self.assertEquals(poa_a.torrent_pub_key, poa_b.torrent_pub_key)
+        self.assertEquals(poa_a.node_pub_key, poa_b.node_pub_key)
+        self.assertEquals(poa_a.signature, poa_b.signature)
+        self.assertEquals(poa_a.expire_time, poa_b.expire_time)
+        
+    def test_poa_serialization(self):
+
+
+        serialized = self.poa_a.serialize()
+        deserialized = ClosedSwarm.POA.deserialize(serialized)
+        self._verify_poas(self.poa_a, deserialized)
+        deserialized.verify()
+
+        self.poa_a.save("poa.tmp")
+        new_poa = ClosedSwarm.POA.load("poa.tmp")
+        new_poa.verify()
+        
+        # Also serialize/deserialize using lists
+        serialized = self.poa_a.serialize_to_list()
+        deserialized = self.poa_a.deserialize_from_list(serialized)
+        self._verify_poas(self.poa_a, deserialized)
+        deserialized.verify()
+        
+        
+    def test_poa(self):
+        self.poa_a.verify()
+        self.poa_b.verify()
+
+
+        # Test poa expiretime
+        expire_time = time.mktime(time.gmtime())+60 # Expire in one minute
+        
+        self.poa_a = ClosedSwarm.create_poa(self.torrent_id,
+                                            self.torrent_keypair,
+                                            self.node_a_pub_permid,
+                                            expire_time=expire_time)
+        try:
+            self.poa_a.verify()
+        except ClosedSwarm.POAExpiredException:
+            self.fail("POA verify means expired, but it is not")
+
+        expire_time = time.mktime(time.gmtime())-1 # Expire one second ago
+        
+        self.poa_a = ClosedSwarm.create_poa(self.torrent_id,
+                                            self.torrent_keypair,
+                                            self.node_a_pub_permid,
+                                            expire_time=expire_time)
+        try:
+            self.poa_a.verify()
+            self.fail("POA verify does not honor expire time")
+        except ClosedSwarm.POAExpiredException:
+            pass
+
+
+    def test_basic(self):
+        self.assertFalse(self.cs_a.remote_node_authorized)
+        self.assertFalse(self.cs_b.remote_node_authorized)
+
+    def test_node_a_valid(self):
+        """
+        Test that the protocol works if only node A wants to be authorized
+        """
+        msg_1 = self.cs_a.a_create_challenge()
+        msg_2 = self.cs_b.b_create_challenge(msg_1)
+        msg_3 = self.cs_a.a_provide_poa_message(msg_2)
+        msg_4 = self.cs_b.b_provide_poa_message(msg_3, i_am_seeding=True)
+        if msg_4:
+            self.fail("Made POA message for node B even though it is seeding")
+
+        self.assertFalse(self.cs_a.is_remote_node_authorized())
+        self.assertTrue(self.cs_b.is_remote_node_authorized())
+
+
+    def test_poa_message_creation(self):
+        
+        msg_1 = self.cs_a.a_create_challenge()
+        msg_2 = self.cs_b.b_create_challenge(msg_1)
+
+        
+        msg = self.cs_a._create_poa_message(CS_POA_EXCHANGE_A, self.cs_a.my_nonce, self.cs_b.my_nonce)
+        try:
+            self.cs_a._validate_poa_message(msg, self.cs_a.my_nonce, self.cs_b.my_nonce)
+        except Exception,e:
+            self.fail("_create_poa_message and _validate_poa_message do not agree: %s"%e)
+
+
+    def test_both_valid(self):
+        """
+        Test that the protocol works if both nodes wants to be authorized
+        """
+        msg_1 = self.cs_a.a_create_challenge()
+        nonce_a = self.cs_a.my_nonce
+        
+        msg_2 = self.cs_b.b_create_challenge(msg_1)
+        nonce_b = self.cs_b.my_nonce
+
+        msg_3 = self.cs_a.a_provide_poa_message(msg_2)
+
+        self.assertEquals(self.cs_a.remote_nonce, nonce_b, "A's remote nonce is wrong")
+            
+        msg_4 = self.cs_b.b_provide_poa_message(msg_3)
+        self.assertEquals(self.cs_b.remote_nonce, nonce_a, "B's remote nonce is wrong")
+
+        self.assertEquals(self.cs_a.my_nonce, self.cs_b.remote_nonce, "B's remote nonce is not A's nonce")
+        self.assertEquals(self.cs_a.remote_nonce, self.cs_b.my_nonce, "A's remote nonce is not B's nonce")
+
+        self.cs_a.a_check_poa_message(msg_4)
+        
+        
+        self.assertTrue(self.cs_a.is_remote_node_authorized())
+        self.assertTrue(self.cs_b.is_remote_node_authorized())
+
+        
+    def test_not_fresh_node_a(self):
+
+        msg_1 = self.cs_a.a_create_challenge()
+        bad_msg_1 = [CS_CHALLENGE_A,
+                     self.torrent_id,
+                     "badchallenge_a"]
+        msg_2 = self.cs_b.b_create_challenge(bad_msg_1)
+        msg_3 = self.cs_a.a_provide_poa_message(msg_2)
+        msg_4 = self.cs_b.b_provide_poa_message(msg_3)
+        try:
+            self.cs_a.a_check_poa_message(msg_4)
+            self.fail("Did not discover bad signature")
+        except ClosedSwarm.InvalidSignatureException,e:
+            pass
+
+        # Nobody can succeed now, the challenges are bad
+        self.assertFalse(self.cs_a.is_remote_node_authorized())
+        self.assertFalse(self.cs_b.is_remote_node_authorized())
+        
+
+    def test_not_fresh_node_b(self):
+
+        msg_1 = self.cs_a.a_create_challenge()
+        msg_2 = self.cs_b.b_create_challenge(msg_1)
+        bad_msg_2 = [CS_CHALLENGE_B,
+                     self.torrent_id,
+                     "badchallenge_b"]
+        msg_3 = self.cs_a.a_provide_poa_message(bad_msg_2)
+        msg_4 = self.cs_b.b_provide_poa_message(msg_3)
+        try:
+            self.cs_a.a_check_poa_message(msg_4)
+            self.fail("Failed to discover bad POA from B")
+        except:
+            pass
+
+        # Nobody can succeed now, the challenges are bad
+        self.assertFalse(self.cs_a.is_remote_node_authorized())
+        self.assertFalse(self.cs_b.is_remote_node_authorized())
+
+
+    def test_invalid_poa_node_a(self):
+
+        self.cs_a.poa = ClosedSwarm.POA("bad_poa_a", "stuff", "stuff2")
+
+        # Update to a bad POA
+        msg_1 = self.cs_a.a_create_challenge()
+        msg_2 = self.cs_b.b_create_challenge(msg_1)
+        msg_3 = self.cs_a.a_provide_poa_message(msg_2)
+        msg_4 = self.cs_b.b_provide_poa_message(msg_3)
+        self.cs_a.a_check_poa_message(msg_4)
+        
+        self.assertTrue(self.cs_a.is_remote_node_authorized())
+        self.assertFalse(self.cs_b.is_remote_node_authorized())
+        
+
+    def test_very_invalid_poa_node_a(self):
+
+        # Update to a bad POA
+        try:
+            self.cs_a.set_poa("Wrong class!")
+            self.fail("Allows a string as POA!")
+        except:
+            pass
+
+    def test_invalid_swarm_node_b(self):
+
+        # Update to a bad POA
+        self.cs_b.poa = ClosedSwarm.POA("bad_poa_b", "stuff", "stuff2")
+
+        # Update to a bad POA
+        msg_1 = self.cs_a.a_create_challenge()
+        msg_2 = self.cs_b.b_create_challenge(msg_1)
+        msg_3 = self.cs_a.a_provide_poa_message(msg_2)
+        msg_4 = self.cs_b.b_provide_poa_message(msg_3)
+        try:
+            self.cs_a.a_check_poa_message(msg_4)
+            self.fail("Node B failed to discover bad POA")
+        except ClosedSwarm.WrongSwarmException,e:
+            pass
+
+    def test_invalid_poa_node_b(self):
+        self.cs_b.poa = ClosedSwarm.POA(self.torrent_id, "stuff", "stuff2")
+
+        # Update to a bad POA
+        msg_1 = self.cs_a.a_create_challenge()
+        msg_2 = self.cs_b.b_create_challenge(msg_1)
+        msg_3 = self.cs_a.a_provide_poa_message(msg_2)
+        msg_4 = self.cs_b.b_provide_poa_message(msg_3)
+        try:
+            self.cs_a.a_check_poa_message(msg_4)
+            self.fail("Node B failed to discover bad POA")
+        except ClosedSwarm.InvalidPOAException,e:
+            pass
+
+    
+if __name__ == "__main__":
+
+    print "Performing ClosedSwarm unit tests"
+
+
+    unittest.main()
+
+    print "All done"
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_connect_overlay.py b/instrumentation/next-share/BaseLib/Test/test_connect_overlay.py
new file mode 100644 (file)
index 0000000..e8d9486
--- /dev/null
@@ -0,0 +1,197 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# Test whether Tribler tries to establish an overlay connection when it meets
+# another Tribler peer in a swarm.
+#
+# Like test_secure_overlay, we start a new python interpreter for each test. 
+# Although we don't have the singleton problem here, we do need to do this as the
+# HTTPServer that MyTracker uses won't relinquish the listen socket, causing 
+# "address in use" errors in the next test. This is probably due to the fact that
+# MyTracker has a thread mixed in, as a listensocket.close() normally releases it
+# (according to lsof).
+#
+
+import unittest
+import os
+import sys
+import time
+from traceback import print_exc
+import socket
+import thread
+import BaseHTTPServer
+from SocketServer import ThreadingMixIn
+
+from BaseLib.Test.test_as_server import TestAsServer
+from btconn import BTConnection
+from olconn import OLConnection
+from BaseLib.Core.TorrentDef import *
+from BaseLib.Core.DownloadConfig import *
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+DEBUG=True
+
+
+class MyTracker(ThreadingMixIn,BaseHTTPServer.HTTPServer):
+    
+    def __init__(self,trackport,myid,myip,myport):
+        self.myid = myid
+        self.myip = myip
+        self.myport = myport
+        BaseHTTPServer.HTTPServer.__init__( self, ("",trackport), SimpleServer )
+        self.daemon_threads = True
+        
+    def background_serve( self ):
+        thread.start_new_thread( self.serve_forever, () )
+
+    def shutdown(self):
+        self.socket.close()
+
+
+class SimpleServer(BaseHTTPServer.BaseHTTPRequestHandler):
+
+    def do_GET(self):
+        
+        print >>sys.stderr,"test: tracker: Got GET request",self.path
+
+        p = []
+        p1 = {'peer id':self.server.myid,'ip':self.server.myip,'port':self.server.myport}
+        p.append(p1)
+        d = {}
+        d['interval'] = 1800
+        d['peers'] = p
+        bd = bencode(d)
+        size = len(bd)
+
+        self.send_response(200)
+        self.send_header("Content-Type", "application/octet-stream")
+        self.send_header("Content-Length", size)
+        self.end_headers()
+        
+        try:
+            self.wfile.write(bd)
+        except Exception,e:
+            print_exc()
+
+
+
+class TestConnectOverlay(TestAsServer):
+    """ 
+    Testing download helping
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+
+        # Start our server side, to which Tribler will try to connect
+        self.mylistenport = 4810
+        self.myss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss.bind(('', self.mylistenport))
+        self.myss.listen(1)
+        
+        # Must be changed in test/extend_hs_dir/dummydata.merkle.torrent as well
+        self.mytrackerport = 4901
+        self.myid = 'R410-----HgUyPu56789'
+        self.mytracker = MyTracker(self.mytrackerport,self.myid,'127.0.0.1',self.mylistenport)
+        self.mytracker.background_serve()
+
+        self.myid2 = 'R410-----56789HuGyx0'
+
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())  
+        
+        # This is the infohash of the torrent in test/extend_hs_dir
+        self.infohash = '\xccg\x07\xe2\x9e!]\x16\xae{\xb8\x10?\xf9\xa5\xf9\x07\xfdBk'
+        self.torrentfile = os.path.join('extend_hs_dir','dummydata.merkle.torrent')
+
+        # Let Tribler start downloading a non-functioning torrent, so
+        # we can talk to a normal download engine.
+        
+        tdef = TorrentDef.load(self.torrentfile)
+
+        dscfg = DownloadStartupConfig()
+        dscfg.set_dest_dir(self.config_path)
+        
+        self.session.start_download(tdef,dscfg)
+
+
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+        self.mytracker.shutdown()
+        self.myss.close()
+
+
+    #
+    #
+    def singtest_connect_overlay(self):
+        """ 
+        """
+        # 1. Accept the data connection Tribler wants to establish with us
+        self.myss.settimeout(10.0)
+        conn, addr = self.myss.accept()
+        s = BTConnection('',0,conn,user_infohash=self.infohash,myid=self.myid)
+        s.read_handshake_medium_rare()
+
+        extmsg = self.create_good_tribler_extend_hs()
+        s.send(extmsg)
+        resp = s.recv()
+        self.assert_(len(resp) > 0)
+        print >> sys.stderr,"test: Data conn replies",getMessageName(resp[0])
+        
+        # 2. Tribler should now try to establish an overlay connection with us
+        self.myss.settimeout(10.0)
+        conn, addr = self.myss.accept()
+        options = '\x00\x00\x00\x00\x00\x00\x00\x00'
+        s2 = OLConnection(self.my_keypair,'',0,conn,mylistenport=self.mylistenport)
+
+        # Desired behaviour is that the accept() succeeds. If not it will time
+        # out, and throw an exception, causing this test to fail.
+        time.sleep(3)
+        
+        s.close()
+        s2.close()
+
+
+    def create_good_tribler_extend_hs(self,pex_id=1):
+        d = {}
+        d['m'] = {'Tr_OVERLAYSWARM':253,'ut_pex':pex_id}
+        d['p'] = self.mylistenport
+        d['v'] = 'Tribler 3.5.1'
+        d['e'] = 0
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_connect_overlay.py <method name>"
+    else:
+        suite.addTest(TestConnectOverlay(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_crawler.py b/instrumentation/next-share/BaseLib/Test/test_crawler.py
new file mode 100644 (file)
index 0000000..1195a7c
--- /dev/null
@@ -0,0 +1,312 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+import socket
+import unittest
+import os
+import sys
+import time
+from BaseLib.Core.Utilities.Crypto import sha
+from M2Crypto import Rand
+import cPickle
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_REQUEST, CRAWLER_REPLY, CRAWLER_DATABASE_QUERY, getMessageName
+
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import CrawlerDBHandler
+
+DEBUG=True
+
+class TestCrawler(TestAsServer):
+    """ 
+    Testing the user side of the crawler
+    """
+    
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        Rand.load_file('randpool.dat', -1)
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+
+        # Enable buddycast and crawler handling
+        self.config.set_buddycast(True)
+        self.config.set_crawler(True)
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.my_permid = str(self.my_keypair.pub().get_der())
+        self.my_hash = sha(self.my_permid).digest()
+        self.his_permid = str(self.his_keypair.pub().get_der())        
+
+        # Start our server side, to which Tribler will try to connect
+        self.listen_port = 4123
+        self.listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.listen_socket.bind(("", self.listen_port))
+        self.listen_socket.listen(10)
+        self.listen_socket.settimeout(10)
+        
+    def tearDown(self):
+        """ override TestAsServer """
+        TestAsServer.tearDown(self)
+        try:
+            os.remove('randpool.dat')
+        except:
+            pass
+
+    def test_all(self):
+        """
+        I want to start a Tribler client once and then connect to it
+        many times. So there must be only one test method to prevent
+        setUp() from creating a new client every time.
+
+        The code is constructed so unittest will show the name of the
+        (sub)test where the error occurred in the traceback it prints.
+        """
+        self.subtest_invalid_permid()
+        self.subtest_invalid_messageid()
+        self.subtest_invalid_sql_query()
+        self.subtest_invalid_frequency()
+        self.subtest_invalid_tablename()
+        self.subtest_valid_messageid()
+        self.subtest_dialback()
+
+    def subtest_invalid_permid(self):
+        """
+        Send crawler messages from a non-crawler peer
+        """
+        print >>sys.stderr, "-"*80, "\ntest: invalid_permid"
+
+        # make sure that the OLConnection is NOT in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        assert not self.my_permid in crawler_db.getCrawlers()
+
+        # We are not a registered crawler, any request from us should
+        # be denied
+        messages = [CRAWLER_REQUEST,
+                    CRAWLER_REQUEST + CRAWLER_DATABASE_QUERY,
+                    CRAWLER_REQUEST + CRAWLER_DATABASE_QUERY,
+                    CRAWLER_REQUEST + chr(0)]
+        for msg in messages:
+            s = OLConnection(self.my_keypair, "localhost", self.hisport)
+            s.send(msg)
+            response  = s.recv()
+            assert response == "", "response type is %s" % getMessageName(response[0])
+
+        time.sleep(1)
+        s.close()
+
+    def subtest_invalid_messageid(self):
+        """
+        Send an invalid message-id from a registered crawler peer
+        """
+        print >>sys.stderr, "-"*80, "\ntest: invalid_messageid"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        # We are a registered crawler, start sending invalid messages
+        messages = [CRAWLER_REQUEST,
+                    CRAWLER_REQUEST + chr(0),
+                    CRAWLER_REPLY,
+                    CRAWLER_REPLY + chr(0)]
+        for msg in messages:
+            s = OLConnection(self.my_keypair, "localhost", self.hisport)
+            s.send(msg)
+            response  = s.recv()
+            assert response == "", "response type is %s" % getMessageName(response[0])
+
+        time.sleep(1)
+        s.close()
+
+    def subtest_invalid_sql_query(self):
+        """
+        Send an invalid sql query from a registered crawler peer
+        """
+        print >>sys.stderr, "-"*80, "\ntest: invalid_sql_query"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        s = OLConnection(self.my_keypair, "localhost", self.hisport)
+
+        queries = ["FOO BAR"]
+        for query in queries:
+            self.send_crawler_request(s, CRAWLER_DATABASE_QUERY, 0, 0, query)
+
+            error, payload = self.receive_crawler_reply(s, CRAWLER_DATABASE_QUERY, 0)
+            
+            
+            
+            assert error == 1
+            if DEBUG:
+                print >>sys.stderr, payload
+
+        time.sleep(1)
+        s.close()
+
+    def subtest_invalid_frequency(self):
+        """
+        Send two valid requests shortly after each other. However,
+        indicate that the frequency should be large. This should
+        result in a frequency error
+        """
+        print >>sys.stderr, "-"*80, "\ntest: invalid_invalid_frequency"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        s = OLConnection(self.my_keypair, "localhost", self.hisport)
+        self.send_crawler_request(s, CRAWLER_DATABASE_QUERY, 42, 0, "SELECT * FROM peer")
+        error, payload = self.receive_crawler_reply(s, CRAWLER_DATABASE_QUERY, 42)
+        assert error == 0
+
+        # try on the same connection
+        self.send_crawler_request(s, CRAWLER_DATABASE_QUERY, 42, 1000, "SELECT * FROM peer")
+        error, payload = self.receive_crawler_reply(s, CRAWLER_DATABASE_QUERY, 42)
+        assert error == 254 # should give a frequency error
+        s.close()
+        
+        # try on a new connection
+        s = OLConnection(self.my_keypair, "localhost", self.hisport)
+        self.send_crawler_request(s, CRAWLER_DATABASE_QUERY, 42, 1000, "SELECT * FROM peer")
+        error, payload = self.receive_crawler_reply(s, CRAWLER_DATABASE_QUERY, 42)
+        assert error == 254 # should give a frequency error
+        time.sleep(1)
+        s.close()
+        
+
+    def subtest_invalid_tablename(self):
+        """
+        Send an invalid query and check that we get the actual sql
+        exception back
+        """
+        print >>sys.stderr, "-"*80, "\ntest: invalid_tablename"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        s = OLConnection(self.my_keypair, "localhost", self.hisport)
+        self.send_crawler_request(s, CRAWLER_DATABASE_QUERY, 42, 0, "SELECT * FROM nofoobar")
+        error, payload = self.receive_crawler_reply(s, CRAWLER_DATABASE_QUERY, 42)
+        assert error != 0
+        assert payload == "SQLError: no such table: nofoobar", payload
+
+    def subtest_valid_messageid(self):
+        """
+        Send a valid message-id from a registered crawler peer
+        """
+        print >>sys.stderr, "-"*80, "\ntest: valid_messageid"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        s = OLConnection(self.my_keypair, "localhost", self.hisport)
+
+        queries = ["SELECT name FROM category", "SELECT * FROM peer", "SELECT * FROM torrent"]
+        for query in queries:
+            self.send_crawler_request(s, CRAWLER_DATABASE_QUERY, 0, 0, query)
+
+            error, payload = self.receive_crawler_reply(s, CRAWLER_DATABASE_QUERY, 0)
+            assert error == 0
+            if DEBUG:
+                print >>sys.stderr, cPickle.loads(payload)
+
+        time.sleep(1)
+        s.close()
+
+    def subtest_dialback(self):
+        """
+        Send a valid request, disconnect, and wait for an incoming
+        connection with the reply
+        """
+        print >>sys.stderr, "-"*80, "\ntest: dialback"
+        
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        s = OLConnection(self.my_keypair, "localhost", self.hisport, mylistenport=self.listen_port)
+        self.send_crawler_request(s, CRAWLER_DATABASE_QUERY, 42, 0, "SELECT * FROM peer")
+        s.close()
+
+        # wait for reply
+        try:
+            conn, addr = self.listen_socket.accept()
+        except socket.timeout:
+            if DEBUG: print >> sys.stderr,"test_crawler: timeout, bad, peer didn't connect to send the crawler reply"
+            assert False, "test_crawler: timeout, bad, peer didn't connect to send the crawler reply"
+        s = OLConnection(self.my_keypair, "", 0, conn, mylistenport=self.listen_port)
+
+        # read reply
+        error, payload = self.receive_crawler_reply(s, CRAWLER_DATABASE_QUERY, 42)
+        assert error == 0
+        if DEBUG: print >>sys.stderr, cPickle.loads(payload)
+
+        time.sleep(1)
+
+    def send_crawler_request(self, sock, message_id, channel_id, frequency, payload):
+        # Sending a request from a Crawler to a Tribler peer
+        #     SIZE    INDEX
+        #     1 byte: 0      CRAWLER_REQUEST (from BaseLib.Core.BitTornado.BT1.MessageID)
+        #     1 byte: 1      --MESSAGE-SPECIFIC-ID--
+        #     1 byte: 2      Channel id
+        #     2 byte: 3+4    Frequency
+        #     n byte: 5...   Request payload
+        sock.send("".join((CRAWLER_REQUEST,
+                           message_id,
+                           chr(channel_id & 0xFF),
+                           chr((frequency >> 8) & 0xFF) + chr(frequency & 0xFF),
+                           payload)))
+
+    def receive_crawler_reply(self, sock, message_id, channel_id):
+        # Sending a reply from a Tribler peer to a Crawler
+        #     SIZE    INDEX
+        #     1 byte: 0      CRAWLER_REPLY (from BaseLib.Core.BitTornado.BT1.MessageID)
+        #     1 byte: 1      --MESSAGE-SPECIFIC-ID--
+        #     1 byte: 2      Channel id
+        #     1 byte: 3      Parts left
+        #     1 byte: 4      Indicating success (0) or failure (non 0)
+        #     n byte: 5...   Reply payload
+
+        if DEBUG:
+            print >>sys.stderr, "test_crawler: receive_crawler_reply: waiting for channel",channel_id
+
+        parts = []
+        while True:
+            response  = sock.recv()
+            if response:
+                if response[0] == CRAWLER_REPLY and response[1] == message_id and ord(response[2]) == channel_id:
+                    parts.append(response[5:])
+                    if DEBUG:
+                        print >>sys.stderr, "test_crawler: received", getMessageName(response[0:2]), "channel", channel_id, "length", sum([len(part) for part in parts]), "parts left", ord(response[3])
+
+                    if ord(response[3]):
+                        # there are parts left
+                        continue
+
+                    return ord(response[4]), "".join(parts)
+
+            return -1, ""
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestCrawler))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.bat b/instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.bat
new file mode 100755 (executable)
index 0000000..b009644
--- /dev/null
@@ -0,0 +1,15 @@
+set PYTHONPATH=..\..\r
+\r
+python test_dialback_conn_handler.py singtest_connect_dns_to_dead_peer\r
+python test_dialback_conn_handler.py singtest_connect_dns_to_live_peer\r
+python test_dialback_conn_handler.py singtest_send_unopenedA\r
+python test_dialback_conn_handler.py singtest_send_local_close\r
+python test_dialback_conn_handler.py singtest_send_remote_close\r
+python test_dialback_conn_handler.py singtest_send_opened\r
+python test_dialback_conn_handler.py singtest_close_unopened\r
+python test_dialback_conn_handler.py singtest_close_opened\r
+python test_dialback_conn_handler.py singtest_receive\r
+python test_dialback_conn_handler.py singtest_got_conn_incoming\r
+python test_dialback_conn_handler.py singtest_got_conn_outgoing\r
+python test_dialback_conn_handler.py singtest_got_conn_local_close\r
+python test_dialback_conn_handler.py singtest_got_conn_remote_close\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.py b/instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.py
new file mode 100644 (file)
index 0000000..117f993
--- /dev/null
@@ -0,0 +1,492 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# This test checks the new ReturnConnHandler class created in Fall 2006
+#
+# Note that we start a new Python interpreter for each test case.
+# Also note we create 2 peers and thus two networking stacks. In principle, 
+# they should use two different ReturnConnHandler instances (not a singleton), but 
+# there may be some interference.
+# 
+# To properly follow the test, enable debugging on BitTornado/SocketHandler,
+# BitTornado/ServerPortHandler and BitTornado/Rawserver in addition to
+# Tribler/NATFirewall/ReturnConnHandler
+#
+#
+
+import sys
+import unittest
+from threading import Event, Thread, currentThread
+from socket import error as socketerror
+from time import sleep
+
+from BaseLib.Core.BitTornado.RawServer import RawServer
+from BaseLib.Core.BitTornado.ServerPortHandler import MultiHandler
+from BaseLib.Core.BitTornado.BT1.MessageID import DIALBACK_REQUEST
+
+from BaseLib.Core.NATFirewall.ReturnConnHandler import ReturnConnHandler
+
+# Thread must come as first parent class!
+class Peer(Thread):
+    def __init__(self,testcase,port):
+        Thread.__init__(self)
+        self.setDaemon(True)
+
+        self.testcase = testcase
+
+        self.doneflag = Event()
+        config = {}
+        config['timeout_check_interval'] = 100000
+        config['timeout'] = 100000
+        config['ipv6_enabled'] = 0
+        config['minport'] = port
+        config['maxport'] = port+5
+        config['random_port'] = 0
+        config['bind'] = ''
+        config['ipv6_binds_v4'] = 0
+        config['max_message_length'] = 2 ** 23
+
+        self.rawserver = RawServer(self.doneflag,
+                                   config['timeout_check_interval'],
+                                   config['timeout'],
+                                   ipv6_enable = config['ipv6_enabled'],
+                                   failfunc = self.report_failure,
+                                   errorfunc = self.report_error)
+        while 1:
+            try:
+                self.listen_port = self.rawserver.find_and_bind(0, 
+                                config['minport'], config['maxport'], config['bind'], 
+                                reuse = True,
+                                ipv6_socket_style = config['ipv6_binds_v4'], 
+                                randomizer = config['random_port'])
+                print >> sys.stderr,"test: Got listen port", self.listen_port
+                break
+            except socketerror, e:
+                self.report_failure(str(e))
+                msg = "Couldn't not bind to listen port - " + str(e)
+                self.report_failure(msg)
+                return
+
+        self.multihandler = MultiHandler(self.rawserver, self.doneflag)
+        # Note: We don't want a singleton, we want
+        # two different instances for peer1 and peer2
+        self.dialback_connhand = ReturnConnHandler.getInstance()
+        self.dialback_connhand.resetSingleton()
+
+        self.dialback_connhand.register(self.rawserver,self.multihandler,self.listen_port,config['max_message_length'])
+        self.rawserver.sockethandler.set_handler(self.dialback_connhand)
+        self.dialback_connhand.start_listening()
+
+        # Stupid rawserver goes into very long wait if there are no short
+        # term tasks. Emulate this
+        self.rawserver.add_task(self.dummy_task,0)
+
+    def run(self):
+        print >> sys.stderr,"test: MyServer: run called by",currentThread().getName()
+        self.multihandler.listen_forever()
+
+    def report_failure(self,msg):
+        self.testcase.assertRaises(Exception, self.report_failure)
+
+    def report_error(self,msg):
+        self.testcase.assertRaises(Exception, self.report_error)
+
+    def dummy_task(self):
+        self.rawserver.add_task(self.dummy_task,1)
+
+    def shutdown(self):
+        self.doneflag.set()
+        self.rawserver.shutdown()
+
+
+class TestReturnConnHandler(unittest.TestCase):
+    
+    def setUp(self):
+        self.peer1 = Peer(self,1234)
+        self.peer2 = Peer(self,5678)
+        self.peer1.start()
+        self.peer2.start()
+        self.wanted = False
+        self.wanted2 = False
+        self.got = False
+        self.got2 = False
+        self.first = True
+
+        sleep(2) # let server threads start
+
+    def tearDown(self):
+        print >> sys.stderr,"test: tearDown: waiting 10 secs"
+        sleep(10)
+        if self.wanted and not self.got:
+            self.assert_(False,"callback was not called")
+        if self.wanted2 and not self.got2:
+            self.assert_(False,"other callback was not called")
+        self.peer1.shutdown()
+        self.peer2.shutdown()
+
+    #
+    # connect_dns() to an address that no one responds at
+    #
+    def singtest_connect_dns_to_dead_peer(self):
+        print >> sys.stderr,"test: test_connect_dns_to_dead_peer"
+        self.wanted = True
+        self.peer1.dialback_connhand.connect_dns(("127.0.0.1", 22220),self.connect_dns_to_dead_peer_callback)
+        # Arno, 2009-04-23: was 2 secs, somehow the failed event comes in real slow now.
+        sleep(4) # let rawserver thread establish connection, which should fail
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+    def connect_dns_to_dead_peer_callback(self,exc,dns):
+        print >> sys.stderr,"test: connect_dns_to_dead_peer_callback"
+        self.assert_(exc is not None)
+        self.assert_(dns == ("127.0.0.1", 22220))
+        self.got = True
+
+
+    #
+    # connect_dns() to an address that peer2 responds
+    #
+    def singtest_connect_dns_to_live_peer(self):
+        print >> sys.stderr,"test: test_connect_dns_to_live_peer"
+        self.wanted = True
+        self.peer1.dialback_connhand.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 1)
+        self.assert_(self.peer1.dialback_connhand.iplport2oc.has_key('127.0.0.1:5678'))
+
+    def connect_dns_to_live_peer_callback(self,exc,dns):
+        print >> sys.stderr,"test: connect_dns_to_live_peer_callback",exc
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.got = True
+
+
+    #
+    # send() over a non-existing connection to peer2
+    #
+    def singtest_send_unopenedA(self):
+        print >> sys.stderr,"test: test_send_unopenedA"
+        self.wanted = True
+        hisdns = ("127.0.0.1", 5678)
+        self.peer1.dialback_connhand.send(hisdns,'msg=bla',self.send_unopenedA_send_callback)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+    def send_unopenedA_send_callback(self,exc,dns):
+        print >> sys.stderr,"test: send_unopenedA_send_callback",exc
+        self.assert_(exc is not None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.got = True
+
+    #
+    # send() over a connection to peer2 that peer1 closed
+    #
+    def singtest_send_local_close(self):
+        print >> sys.stderr,"test: test_send_local_close"
+        self.wanted = True
+
+        self.peer1.dialback_connhand.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 1)
+        self.assert_(self.peer1.dialback_connhand.iplport2oc.has_key('127.0.0.1:5678'))
+
+        hisdns = ("127.0.0.1", 5678)
+        self.peer1.dialback_connhand.close(hisdns)
+        self.peer1.dialback_connhand.send(hisdns,'msg=bla',self.send_local_close_send_callback)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+    def send_local_close_send_callback(self,exc,dns):
+        print >> sys.stderr,"test: send_local_close_send_callback",exc
+        self.assert_(exc is not None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.got = True
+
+    #
+    # send() over a connection to peer2 that peer2 closed
+    #
+    def singtest_send_remote_close(self):
+        print >> sys.stderr,"test: test_send_remote_close"
+
+        self.wanted = True
+        self.wanted2 = True
+
+        # register handler for connections
+        self.peer2.dialback_connhand.register_conns_callback(self.send_remote_close_conns_callback)
+
+        hisdns = ("127.0.0.1", 5678)
+        # peer2 will immediately close the connection
+        # (for SecureOverlay there are message exchanges, so behaviour is different)
+        self.peer1.dialback_connhand.connect_dns(hisdns,self.send_remote_close_connect_callback)
+        sleep(2)
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+    def send_remote_close_conns_callback(self,exc,dns,locally_initiated):
+        print  >> sys.stderr,"test: send_remote_close_conns_callback",exc,dns
+        hisdns = ("127.0.0.1", 1234)
+        if self.first:
+            print >>sys.stderr,"test: send_remote_close_conns_callback: FIRST:"
+            self.assert_(exc is None)
+            self.assert_(dns == hisdns)
+            self.assert_(not locally_initiated)
+            self.first = False
+            self.got2 = True
+
+            print >>sys.stderr,"test: send_remote_close_conns_callback: FIRST: BEFORE CLOSE"
+            self.peer2.dialback_connhand.close(hisdns)
+        else:
+            print >>sys.stderr,"test: send_remote_close_conns_callback: SECOND"
+            self.assert_(exc is not None)
+            self.assert_(dns == hisdns)
+            self.assert_(not locally_initiated)
+
+    def send_remote_close_connect_callback(self,exc,dns):
+        print >> sys.stderr,"test: send_remote_close_connect_callback"
+        self.assert_(exc is not None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.got = True
+
+
+    #
+    # send() over an open connection to peer2
+    #
+    def singtest_send_opened(self):
+        print >> sys.stderr,"test: test_send_opened"
+        self.wanted = True
+        self.wanted2 = True
+        hisdns = ("127.0.0.1", 5678)
+        msg = DIALBACK_REQUEST+'12345678901234567890'
+        self.peer1.dialback_connhand.connect_dns(hisdns,lambda e,d: self.send_opened_connect_callback(e,d,msg))
+
+    def send_opened_connect_callback(self,exc,dns,msg):
+        print >> sys.stderr,"test: send_opened_connect_callback"
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.peer1.dialback_connhand.send(dns,msg,self.send_opened_send_callback)
+        self.got = True
+
+    def send_opened_send_callback(self,exc,dns):
+        print >> sys.stderr,"test: send_opened_send_callback"
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.got2 = True
+
+
+    #
+    # close() a non-existing to peer2
+    #
+    def singtest_close_unopened(self):
+        print >> sys.stderr,"test: test_close_unopened"
+        hisdns = ("127.0.0.1", 5678)
+        self.peer1.dialback_connhand.close(hisdns)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+
+    #
+    # close() an open connection to peer2
+    #
+    def singtest_close_opened(self):
+        print >> sys.stderr,"test: test_close_opened"
+        hisdns = ("127.0.0.1", 5678)
+        self.peer1.dialback_connhand.connect_dns(hisdns,self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 1)
+        self.assert_(self.peer1.dialback_connhand.iplport2oc.has_key('127.0.0.1:5678'))
+
+        self.peer1.dialback_connhand.close(hisdns)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+
+    #
+    # Let peer2 register a receive callback and let peer1 send a message
+    #
+    def singtest_receive(self):
+        print >> sys.stderr,"test: test_receive"
+        self.wanted = True
+        self.wanted2 = True
+        # register handler for messages
+        self.peer2.dialback_connhand.register_recv_callback(self.receive_msg_callback)
+
+        hisdns = ("127.0.0.1", 5678)
+        msg = DIALBACK_REQUEST+'12345678901234567890'
+        self.peer1.dialback_connhand.connect_dns(hisdns,lambda e,d: self.receive_connect_callback(e,d,msg))
+
+    def receive_connect_callback(self,exc,dns,msg):
+        print >> sys.stderr,"test: receive_connect_callback"
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.peer1.dialback_connhand.send(dns,msg,self.receive_send_callback)
+        print >> sys.stderr,"test: test_receive exiting"
+
+    def receive_send_callback(self,exc,dns):
+        print >> sys.stderr,"test: receive_send_callback"
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.got2 = True
+
+    def receive_msg_callback(self,dns,message):
+        print  >> sys.stderr,"test: testcase succesfully received message"
+        self.got = True
+        self.assert_(message[0] == DIALBACK_REQUEST)
+        self.assert_(dns == ("127.0.0.1", 1234))
+        return True
+
+    #
+    # Let peer2 register a connection callback and let peer1 send a message
+    # which implies setting up a connection
+    #
+    def singtest_got_conn_incoming(self):
+        print >> sys.stderr,"test: test_got_conn_incoming"
+        self.wanted = True
+        self.wanted2 = True
+        # register handler for messages
+        self.peer2.dialback_connhand.register_recv_callback(self.receive_msg_callback)
+        # register handler for connections
+        self.peer2.dialback_connhand.register_conns_callback(self.got_conn_incoming_conns_callback)
+
+
+        hisdns = ("127.0.0.1", 5678)
+        msg = DIALBACK_REQUEST+'12345678901234567890'
+        self.peer1.dialback_connhand.connect_dns(hisdns,lambda e,d:self.got_conn_incoming_connect_callback(e,d,msg))
+
+
+    def got_conn_incoming_connect_callback(self,exc,dns,msg):
+        print >> sys.stderr,"test: got_conn_incoming_connect_callback",exc
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.peer1.dialback_connhand.send(dns,msg,self.receive_send_callback)
+        print >> sys.stderr,"test: test_got_conn_incoming exiting"
+
+    def got_conn_incoming_conns_callback(self,exc,dns,locally_initiated):
+        print  >> sys.stderr,"test: got_conn_incoming_conns_callback",dns
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 1234))
+        self.assert_(not locally_initiated)
+        self.got = True
+
+
+    #
+    # Let peer1 register a connection callback and let peer1 send a message
+    # which implies setting up a connection
+    #
+    def singtest_got_conn_outgoing(self):
+        print >> sys.stderr,"test: test_got_conn_outgoing"
+        self.wanted = True
+        self.wanted2 = True
+        # register handler for connections
+        self.peer1.dialback_connhand.register_conns_callback(self.got_conn_outgoing_conns_callback)
+
+        hisdns = ("127.0.0.1", 5678)
+        msg = DIALBACK_REQUEST+'12345678901234567890'
+        self.peer1.dialback_connhand.connect_dns(hisdns,lambda e,d:self.got_conn_outgoing_connect_callback(e,d,msg))
+
+
+    def got_conn_outgoing_connect_callback(self,exc,dns,msg):
+        print >> sys.stderr,"test: got_conn_outgoing_connect_callback",exc
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.got2 = True
+
+    def got_conn_outgoing_conns_callback(self,exc,dns,locally_initiated):
+        print  >> sys.stderr,"test: got_conn_outgoing_conns_callback",exc,dns
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(locally_initiated)
+        self.got = True
+
+
+
+    #
+    # Let peer2 register a connection callback and let peer1 close the connection
+    # after successful setup.
+    #
+    def singtest_got_conn_local_close(self):
+        print >> sys.stderr,"test: test_got_conn_local_close"
+
+        self.wanted = True
+        self.wanted2 = True
+
+        # register handler for connections
+        self.peer2.dialback_connhand.register_conns_callback(self.got_conn_local_close_conns_callback)
+
+        self.peer1.dialback_connhand.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 1)
+        self.assert_(self.peer1.dialback_connhand.iplport2oc.has_key('127.0.0.1:5678'))
+
+        hisdns = ("127.0.0.1", 5678)
+        self.peer1.dialback_connhand.close(hisdns)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+
+    def got_conn_local_close_conns_callback(self,exc,dns,locally_initiated):
+        print  >> sys.stderr,"test: got_conn_local_close_conns_callback",exc,dns
+        if self.first:
+            self.assert_(exc is None)
+            self.assert_(dns == ("127.0.0.1", 1234))
+            self.assert_(not locally_initiated)
+            self.first = False
+            self.got2 = True
+        else:
+            self.assert_(exc is not None)
+            self.assert_(dns == ("127.0.0.1", 1234))
+            self.assert_(not locally_initiated)
+            self.got = True
+
+    #
+    # Let peer2 register a connection callback and let peer2 close the connection
+    # after successful setup.
+    #
+    def singtest_got_conn_remote_close(self):
+        print >> sys.stderr,"test: test_got_conn_remote_close"
+
+        self.wanted = True
+        self.wanted2 = True
+
+        # register handler for connections
+        self.peer2.dialback_connhand.register_conns_callback(self.got_conn_remote_close_conns_callback)
+
+        # peer2 will immediately close the connection
+        # (for SecureOverlay there are message exchanges, so behaviour is different)
+        self.peer1.dialback_connhand.connect_dns(("127.0.0.1", 5678),self.send_remote_close_connect_callback)
+        sleep(2)
+        self.assert_(len(self.peer1.dialback_connhand.iplport2oc) == 0)
+
+
+    def got_conn_remote_close_conns_callback(self,exc,dns,locally_initiated):
+        print  >> sys.stderr,"test: got_conn_remote_close_conns_callback",exc,dns
+        if self.first:
+            self.assert_(exc is None)
+            self.assert_(dns == ("127.0.0.1", 1234))
+            self.assert_(not locally_initiated)
+            self.first = False
+            self.got2 = True
+
+            hisdns = ("127.0.0.1", 1234)
+            self.peer2.dialback_connhand.close(hisdns)
+        else:
+            self.assert_(exc is not None)
+            self.assert_(dns == ("127.0.0.1", 1234))
+            self.assert_(not locally_initiated)
+            self.got = True
+
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_so.py <method name>"
+    else:
+        suite.addTest(TestReturnConnHandler(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.sh b/instrumentation/next-share/BaseLib/Test/test_dialback_conn_handler.sh
new file mode 100755 (executable)
index 0000000..a325421
--- /dev/null
@@ -0,0 +1,25 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_dialback_conn_handler.py singtest_connect_dns_to_dead_peer
+python test_dialback_conn_handler.py singtest_connect_dns_to_live_peer
+python test_dialback_conn_handler.py singtest_send_unopenedA
+python test_dialback_conn_handler.py singtest_send_local_close
+python test_dialback_conn_handler.py singtest_send_remote_close
+python test_dialback_conn_handler.py singtest_send_opened
+python test_dialback_conn_handler.py singtest_close_unopened
+python test_dialback_conn_handler.py singtest_close_opened
+python test_dialback_conn_handler.py singtest_receive
+python test_dialback_conn_handler.py singtest_got_conn_incoming
+python test_dialback_conn_handler.py singtest_got_conn_outgoing
+python test_dialback_conn_handler.py singtest_got_conn_local_close
+python test_dialback_conn_handler.py singtest_got_conn_remote_close
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.bat b/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.bat
new file mode 100755 (executable)
index 0000000..328509b
--- /dev/null
@@ -0,0 +1,12 @@
+set PYTHONPATH=..\..\r
+\r
+python test_dialback_reply_active.py singtest_good_dreply\r
+python test_dialback_reply_active.py singtest_bad_not_bdecodable\r
+python test_dialback_reply_active.py singtest_bad_not_string\r
+python test_dialback_reply_active.py singtest_bad_not_validip\r
+python test_dialback_reply_active.py singtest_bad_diff_ips\r
+python test_dialback_reply_active2.py singtest_good_dreply\r
+python test_dialback_reply_active2.py singtest_bad_not_bdecodable\r
+python test_dialback_reply_active2.py singtest_bad_not_string\r
+python test_dialback_reply_active2.py singtest_bad_not_validip\r
+python test_dialback_reply_active2.py singtest_bad_diff_ips\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.py b/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.py
new file mode 100644 (file)
index 0000000..f31c99c
--- /dev/null
@@ -0,0 +1,252 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import unittest
+import os
+import sys
+import time
+import socket
+import tempfile
+import shutil
+
+from M2Crypto import EC
+from BaseLib.__init__ import LIBRARYNAME
+from BaseLib.Core.BitTornado.bencode import bencode
+from BaseLib.Core.BitTornado.BT1.MessageID import DIALBACK_REQUEST, DIALBACK_REPLY, getMessageName
+from BaseLib.Core.Utilities.utilities import show_permid
+from BaseLib.Core.NATFirewall.ReturnConnHandler import dialback_infohash
+from BaseLib.Core.CacheDB.sqlitecachedb import CURRENT_MAIN_DB_VERSION
+
+from btconn import BTConnection
+from olconn import OLConnection
+from BaseLib.Test.test_as_server import TestAsServer
+
+DEBUG=True
+
+REPLY_IP='127.0.0.10'
+
+
+class TestDialbackReplyActive(TestAsServer):
+
+    """  
+    Testing DIALBACK_REPLY message of Dialback extension V1 
+
+    This test checks how the Tribler code responds to good and bad 
+    DIALBACK_REPLY messages. I.e. the Tribler client initiates
+    the dialback by connecting to us and sending a DIALBACK_REQUEST and we
+    reply with good and bad messages.
+
+    This test allows authoritative answers from superpeers.
+
+    WARNING: Each of the test_ methods should be tested by running the TestCase 
+    in a separate Python interpreter to prevent problems with our singleton 
+    classes, e.g. SuperPeerDB, etc.
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: Setup"
+        self.NLISTENERS=1
+        TestAsServer.setUp(self)
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: Pre Tribler Init"
+        TestAsServer.setUpPreSession(self)
+        print >> sys.stderr,"test: Pre Tribler Init: config_path",self.config_path
+        # Enable dialback support
+        self.config.set_dialback(True)
+        self.config.set_buddycast(True) # make sure overlay connections are being made
+        self.config.set_start_recommender(True)
+
+        # Write superpeers.txt
+        self.install_path = tempfile.mkdtemp()
+        spdir = os.path.join(self.install_path, LIBRARYNAME, 'Core')
+        os.makedirs(spdir)
+
+        statsdir = os.path.join(self.install_path, LIBRARYNAME, 'Core', 'Statistics')
+        os.makedirs(statsdir)
+        
+        superpeerfilename = os.path.join(spdir, 'superpeer.txt')
+        print >> sys.stderr,"test: writing",self.NLISTENERS,"superpeers to",superpeerfilename
+        f = open(superpeerfilename, "w")
+
+        self.mylistenport = []
+        self.myss = []
+        self.mykeypairs = []
+        self.mypermids = []
+        for i in range(self.NLISTENERS):
+            # Start our server side, to with Tribler will try to connect
+            self.mylistenport.append(4810+i)
+            self.myss.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
+            self.myss[i].setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            self.myss[i].bind(('', self.mylistenport[i]))
+            self.myss[i].listen(1)
+
+            self.mykeypairs.append(EC.gen_params(EC.NID_sect233k1))
+            self.mykeypairs[i].gen_key()
+            self.mypermids.append(str(self.mykeypairs[i].pub().get_der()))
+
+            content = '127.0.0.1, '+str(self.mylistenport[i])+', '+show_permid(self.mypermids[i])+', FakeSuperPeer\n'
+            f.write(content)
+        f.close()
+        
+        self.config.set_install_dir(self.install_path)
+        
+        srcfiles = []
+        srcfiles.append(os.path.join(LIBRARYNAME,"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql"))
+        srcfiles.append(os.path.join(LIBRARYNAME,"Core","Statistics","tribler_seedingstats_sdb.sql"))
+        srcfiles.append(os.path.join(LIBRARYNAME,"Core","Statistics","tribler_friendship_stats_sdb.sql"))
+        srcfiles.append(os.path.join(LIBRARYNAME,"Core","Statistics","tribler_videoplayback_stats.sql"))
+        for srcfile in srcfiles:
+            sfn = os.path.join('..','..',srcfile)
+            dfn = os.path.join(self.install_path,srcfile)
+            print >>sys.stderr,"test: copying",sfn,dfn
+            shutil.copyfile(sfn,dfn)
+
+        """
+        # To avoid errors
+        cfilename = os.path.join(self.install_path, 'category.conf')
+        f = open(cfilename, "wb")
+        f.write('')
+        f.close()
+        """
+        
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+        
+        self.myoriginalip = self.session.get_external_ip()
+
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+
+        for i in range(self.NLISTENERS):
+            self.myss[i].close()
+        # risky: remove self.install_path which was overridden
+
+    #
+    # Good DIALBACK_REQUEST, builds on TestDialbackReply code
+    #    
+    def singtest_good_dreply(self):
+        self._test_dreply(self.create_good_dreply,True)
+
+    #
+    # Bad DIALBACK_REQUEST, builds on TestDialbackReply code
+    #    
+    def singtest_bad_not_bdecodable(self):
+        print >>sys.stderr,"test: *** NOT DECODABLE TEST"
+        self._test_dreply(self.create_not_bdecodable,False)
+
+    def singtest_bad_not_string(self):
+        self._test_dreply(self.create_not_string,False)
+
+    def singtest_bad_not_validip(self):
+        self._test_dreply(self.create_not_validip,False)
+
+    def singtest_bad_diff_ips(self):
+        self._test_dreply(self.create_diff_ips,False,diff_ips_test=True) # just semantically bad
+
+
+    ### TODO: send different valid answers so consensus not reached
+
+    #
+    # Main test code
+    #
+    def _test_dreply(self,gen_dreply,good,diff_ips_test=False):
+        for i in range(self.NLISTENERS):
+            print >> sys.stderr,"test: waiting for #",i,"listenport",self.mylistenport[i]
+            conn, addr = self.myss[i].accept()
+            s = OLConnection(self.mykeypairs[i],'',0,conn,self.mylistenport[i])
+            while True:
+                msg = s.recv()
+                self.assert_(len(msg) > 0)
+                print >> sys.stderr,"test: Received overlay message",getMessageName(msg[0])
+                if msg[0] == DIALBACK_REQUEST:
+                    break
+            self.assert_(msg[0] == DIALBACK_REQUEST)
+            self.check_drequest(msg[1:])
+            
+            # Proper behaviour is to try to send a reply using a new return connection
+            s2 = BTConnection('localhost',self.hisport,mylistenport=self.mylistenport[i],user_infohash=dialback_infohash)
+            s2.read_handshake_medium_rare(close_ok = True)
+            if gen_dreply is not None:
+                resp = gen_dreply(i)
+                print >> sys.stderr,"test: sending DIALBACK_REPLY #",i
+                s2.send(resp)
+            time.sleep(2)
+            # the other side should always close the 
+            # connection, either because we're done or he didn't like our
+            # bad DIALBACK_REPLY message
+            msg = s2.recv()
+            if len(msg) > 0:
+                print >> sys.stderr,"test: Received unexpected data",getMessageName(msg[0])
+            self.assert_(len(msg)==0)
+            s2.close()
+
+            # Not really necessary, but helps with test_dialback_active2
+            s.close()
+
+
+        ext_ip = self.session.get_external_ip()
+        print >>sys.stderr,"test: External IP address after test is",ext_ip
+        
+        if diff_ips_test:
+            if self.config.sessconfig['dialback_trust_superpeers'] == 1:
+                good = True
+            else:
+                good = False
+                
+        if good:
+            self.assert_(ext_ip == REPLY_IP)
+        else:
+            self.assert_(ext_ip == self.myoriginalip)
+
+    def create_good_dreply(self,i):
+        s = REPLY_IP
+        b = bencode(s)
+        return DIALBACK_REPLY+b
+
+    def create_not_bdecodable(self,i):
+        return DIALBACK_REPLY+"bla"
+
+    def create_not_string(self,i):
+        s = 481
+        b = bencode(s)
+        return DIALBACK_REPLY+b
+
+    def create_not_validip(self,i):
+        s = '127..0.0.1'
+        b = bencode(s)
+        return DIALBACK_REPLY+b
+
+    def create_diff_ips(self,i):
+        if self.NLISTENERS==1:
+            s = REPLY_IP
+        else:
+            s = '127.0.0.'+str(i)
+        b = bencode(s)
+        return DIALBACK_REPLY+b
+
+    def check_drequest(self,data):
+        self.assert_(len(data)==0)
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. SuperPeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_dra.py <method name>"
+    else:
+        suite.addTest(TestDialbackReplyActive(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+    
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.sh b/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active.sh
new file mode 100755 (executable)
index 0000000..386ffda
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_dialback_reply_active.py singtest_good_dreply
+python test_dialback_reply_active.py singtest_bad_not_bdecodable
+python test_dialback_reply_active.py singtest_bad_not_string
+python test_dialback_reply_active.py singtest_bad_not_validip
+python test_dialback_reply_active.py singtest_bad_diff_ips
+python test_dialback_reply_active2.py singtest_good_dreply
+python test_dialback_reply_active2.py singtest_bad_not_bdecodable
+python test_dialback_reply_active2.py singtest_bad_not_string
+python test_dialback_reply_active2.py singtest_bad_not_validip
+python test_dialback_reply_active2.py singtest_bad_diff_ips
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active2.py b/instrumentation/next-share/BaseLib/Test/test_dialback_reply_active2.py
new file mode 100644 (file)
index 0000000..e6d82e9
--- /dev/null
@@ -0,0 +1,46 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import unittest
+import sys
+
+from BaseLib.Test.test_dialback_reply_active import TestDialbackReplyActive
+
+DEBUG=True
+
+class TestDialbackReplyActive2(TestDialbackReplyActive):
+    """  
+    Testing DIALBACK_REPLY message of Dialback extension V1 
+
+    This test checks how the Tribler code responds to good and bad 
+    DIALBACK_REPLY messages. I.e. the Tribler client initiates
+    the dialback by connecting to us and sending a DIALBACK_REQUEST and we
+    reply with good and bad messages.
+
+    This test does NOT allow authoritative answers from superpeers.
+    """
+
+    def setUpPreSession(self):
+        """ override TestDialbackReplyActive """
+        self.NLISTENERS=4 # Must be same as DialbackMsgHandler PEERS_TO_AGREE
+        TestDialbackReplyActive.setUpPreSession(self)
+        # H4X0R, testing only
+        self.config.sessconfig['dialback_trust_superpeers'] = 0
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. SuperPeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_dra2.py <method name>"
+    else:
+        suite.addTest(TestDialbackReplyActive2(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+    
+# Without this guard the singtest_* invocations from
+# test_dialback_reply_active.sh would silently run nothing.
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_dialback_request.py b/instrumentation/next-share/BaseLib/Test/test_dialback_request.py
new file mode 100644 (file)
index 0000000..214bc23
--- /dev/null
@@ -0,0 +1,162 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import unittest
+import sys
+import time
+import socket
+from traceback import print_exc
+from types import StringType
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from btconn import BTConnection
+from BaseLib.Core.BitTornado.bencode import bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.NATFirewall.ReturnConnHandler import dialback_infohash
+from BaseLib.Core.Utilities.utilities import isValidIP
+
+DEBUG=True
+
+class TestDialbackRequest(TestAsServer):
+    """ 
+    Testing DIALBACK_REQUEST message of Dialback extension V1
+    """
+    
+    #def setUp(self):
+    #    """ override TestAsServer """
+    #    TestAsServer.setUp(self)
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+
+        # Enable dialback
+        self.config.set_dialback(True)
+        # H4X0R: testing only
+        self.config.sessconfig['dialback_active'] = 0
+
+        self.setUpMyListenSocket()
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+        self.myip = '127.0.0.1'
+
+    def setUpMyListenSocket(self):
+        # Start our server side, to which Tribler will try to connect
+        self.mylistenport = 4810
+        self.myss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss.bind(('', self.mylistenport))
+        self.myss.listen(1)
+
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+        self.tearDownMyListenSocket()
+
+    def tearDownMyListenSocket(self):
+        self.myss.close()
+
+
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        # 1. test good DIALBACK_REQUEST
+        self.subtest_good_drequest()
+        
+        # 2. test various bad DIALBACK_REQUEST messages
+        self.subtest_bad_not_empty()
+
+    #
+    # Good DIALBACK_REQUEST
+    #
+    def subtest_good_drequest(self):
+        """ 
+            test good DIALBACK_REQUEST messages
+        """
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,mylistenport=self.mylistenport)
+        msg = self.create_good_drequest()
+        s.send(msg)
+        time.sleep(5)
+
+        # And connect back to us
+        conn, addr = self.myss.accept()
+        s2 = BTConnection('',0,conn,mylistenport=self.mylistenport,user_infohash=dialback_infohash)
+        s2.read_handshake_medium_rare()
+        resp = s2.recv()
+        print >> sys.stderr,"test: Me got DIALBACK_REPLY from him, len",len(resp)
+        self.assert_(resp[0] == DIALBACK_REPLY)
+        self.check_drequest(resp[1:])
+
+    def create_good_drequest(self):
+        return str(DIALBACK_REQUEST)
+
+    def check_drequest(self,data):
+        s = bdecode(data)
+        self.assert_(type(s) == StringType)
+        self.assert_(isValidIP(s))
+        self.assert_(s == self.myip)
+
+    #
+    # Bad DIALBACK_REQUEST
+    #    
+    def subtest_bad_not_empty(self):
+        self._test_bad(self.create_not_empty)
+
+    #
+    # Main test code for bad DIALBACK_REQUEST messages
+    #
+    def _test_bad(self,gen_drequest_func):
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        print >> sys.stderr,"\ntest: ",gen_drequest_func
+        msg = gen_drequest_func()
+        s.send(msg)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        resp = s.recv()
+        self.assert_(len(resp)==0)
+        s.close()
+
+        # However, closing the connection is the specified behaviour, check
+        # that he doesn't connect back
+        try:
+            self.myss.settimeout(10.0)
+            print >> sys.stderr,"test: See if peer connects back (would be bad)"
+            conn, addr = self.myss.accept()
+            s = BTConnection('',0,conn,mylistenport=self.mylistenport,user_infohash=dialback_infohash)
+            s.read_handshake_medium_rare()
+            resp = s.recv()
+            print >> sys.stderr,"test: Got reply back, len",len(resp),"see if expected"
+            self.assert_(len(resp) > 0)
+            self.assert_(resp[0] != DIALBACK_REPLY)
+            print >> sys.stderr,"test: Reply was acceptable",getMessageName(resp[0])
+        except socket.timeout:
+            self.assert_(True)
+            print >> sys.stderr,"test: Good, accept() timed out"
+
+    #
+    # Bad message creators
+    # 
+    def create_not_empty(self):
+        return DIALBACK_REQUEST+"bla"
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestDialbackRequest))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_dlhelp.bat b/instrumentation/next-share/BaseLib/Test/test_dlhelp.bat
new file mode 100755 (executable)
index 0000000..8a0d8af
--- /dev/null
@@ -0,0 +1,12 @@
+set PYTHONPATH=..\..\r
+\r
+python test_dlhelp.py singtest_good_2fast\r
+python test_dlhelp.py singtest_bad_2fast_dlhelp\r
+python test_dlhelp.py singtest_bad_2fast_metadata_not_bdecodable\r
+python test_dlhelp.py singtest_bad_2fast_metadata_not_dict1\r
+python test_dlhelp.py singtest_bad_2fast_metadata_not_dict2\r
+python test_dlhelp.py singtest_bad_2fast_metadata_empty_dict\r
+python test_dlhelp.py singtest_bad_2fast_metadata_wrong_dict_keys\r
+python test_dlhelp.py singtest_bad_2fast_metadata_bad_torrent1\r
+python test_dlhelp.py singtest_bad_2fast_metadata_bad_torrent2\r
+python test_dlhelp.py singtest_bad_2fast_metadata_bad_torrent3\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_dlhelp.py b/instrumentation/next-share/BaseLib/Test/test_dlhelp.py
new file mode 100644 (file)
index 0000000..ab80da0
--- /dev/null
@@ -0,0 +1,426 @@
+# Written by Arno Bakker, George Milescu 
+# see LICENSE.txt for license information
+#
+# Like test_secure_overlay, we start a new python interpreter for each test. 
+# Although we don't have the singleton problem here, we do need to do this as the
+# HTTPServer that MyTracker uses won't relinquish the listen socket, causing 
+# "address in use" errors in the next test. This is probably due to the fact that
+# MyTracker has a thread mixed in, as a listensocket.close() normally releases it
+# (according to lsof).
+#
+
+import unittest
+import os
+import sys
+import time
+from traceback import print_exc
+import socket
+from types import ListType
+import tempfile
+
+from BaseLib.Test.test_as_server import TestAsServer
+from btconn import BTConnection
+from olconn import OLConnection
+from BaseLib.Core.RequestPolicy import AllowAllRequestPolicy
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BitTornado.BT1.convert import toint
+from BaseLib.Core.CacheDB.CacheDBHandler import FriendDBHandler
+from BaseLib.Test.test_connect_overlay import MyTracker
+
+DEBUG=True
+
+class TestDownloadHelp(TestAsServer):
+    """ 
+    Testing download helping
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+
+        self.setUpMyListenSockets()
+        
+        # Must be changed in test/extend_hs_dir/dummydata.merkle.torrent as well
+        self.mytrackerport = 4901
+        self.myid = 'R410-----HgUyPu56789'
+        self.mytracker = MyTracker(self.mytrackerport,self.myid,'127.0.0.1',self.mylistenport)
+        self.mytracker.background_serve()
+
+        self.myid2 = 'R410-----56789HuGyx0'
+        
+        # Arno, 2009-12-15: Make sure coop downloads have their own destdir
+        destdir = tempfile.mkdtemp()
+        self.config.set_download_help_dir(destdir)
+        
+    def setUpMyListenSockets(self):
+        # Start our server side, to which Tribler will try to connect
+        self.mylistenport = 4810
+        self.myss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss.bind(('', self.mylistenport))
+        self.myss.listen(1)
+
+        self.mylistenport2 = 3726
+        self.myss2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss2.bind(('', self.mylistenport2))
+        self.myss2.listen(1)
+
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())  
+        
+        # This is the infohash of the torrent in test/extend_hs_dir
+        self.infohash = '\xccg\x07\xe2\x9e!]\x16\xae{\xb8\x10?\xf9\xa5\xf9\x07\xfdBk'
+        self.torrentfile = os.path.join('extend_hs_dir','dummydata.merkle.torrent')
+
+        # Add us as friend, so he will accept the ASK_FOR_HELP
+        if False:  # TEMP: friendsdb doesn't have an addFriend method
+            # friendsdb = FriendDBHandler.getInstance()
+            # friendsdb.addFriend(self.mypermid)
+            pass
+        else:
+            self.session.set_overlay_request_policy(AllowAllRequestPolicy())
+          
+        self.session.set_download_states_callback(self.states_callback)
+
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+        self.mytracker.shutdown()
+        self.tearDownMyListenSockets()
+
+
+    def tearDownMyListenSockets(self):
+        self.myss.close()
+        self.myss2.close()
+
+
+    def states_callback(self,dslist):
+        print >>sys.stderr,"stats: dslist",len(dslist)
+        for ds in dslist:
+            print >>sys.stderr,"stats: coordinator",`ds.get_coopdl_coordinator()`
+            print >>sys.stderr,"stats: helpers",`ds.get_coopdl_helpers()`
+        return (0.5,False)
+
+    #
+    # Good 2fast
+    #
+    def singtest_good_2fast(self):
+        genresdict = self.get_genresdict()
+        print >>sys.stderr,"test: good ASK_FOR_HELP"
+        self._test_2fast(genresdict)
+    
+
+    def get_genresdict(self):
+        genresdict = {}
+        genresdict[ASK_FOR_HELP] = (self.create_good_dlhelp,True)
+        genresdict[METADATA] = (self.create_good_metadata,True)
+        genresdict[PIECES_RESERVED] = (self.create_good_pieces_reserved,True)
+        genresdict[STOP_DOWNLOAD_HELP] = (self.create_good_stop_dlhelp,True)
+        return genresdict
+
+    #
+    # Bad 2fast
+    #
+    def singtest_bad_2fast_dlhelp(self):
+        genresdict = self.get_genresdict()
+        genresdict[ASK_FOR_HELP] = (self.create_bad_dlhelp_not_infohash,False)
+        print >>sys.stderr,"test: bad dlhelp"
+        self._test_2fast(genresdict)
+        
+    def singtest_bad_2fast_metadata_not_bdecodable(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_bdecodable,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_not_dict1(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict1,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_not_dict2(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict2,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    def singtest_bad_2fast_metadata_empty_dict(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_empty_dict,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_wrong_dict_keys(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_wrong_dict_keys,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_bad_torrent1(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent1,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    def singtest_bad_2fast_metadata_bad_torrent2(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent2,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_bad_torrent3(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent3,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    
+    def _test_2fast(self,genresdict):
+        """ 
+            test ASK_FOR_HELP, METADATA, PIECES_RESERVED and STOP_DOWNLOAD_HELP sequence
+        """
+        # 1. Establish overlay connection to Tribler
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,mylistenport=self.mylistenport2)
+        
+        (func,good) = genresdict[ASK_FOR_HELP]
+        msg = func()
+        s.send(msg)
+        if good:
+            resp = s.recv()
+            self.assert_(resp[0] == GET_METADATA)
+            self.check_get_metadata(resp[1:])
+            print >>sys.stderr,"test: Got GET_METADATA for torrent, good"
+        else:
+            resp = s.recv()
+            self.assert_(len(resp)==0)
+            s.close()
+            return
+
+        (func,good) = genresdict[METADATA]
+        msg = func()
+        s.send(msg)
+
+        if good:
+            # 2. Accept the data connection Tribler wants to establish with us, the coordinator
+            self.myss2.settimeout(10.0)
+            conn, addr = self.myss2.accept()
+            s3 = BTConnection('',0,conn,user_infohash=self.infohash,myid=self.myid2)
+            s3.read_handshake_medium_rare()
+            
+            msg = UNCHOKE
+            s3.send(msg)
+            print >>sys.stderr,"test: Got data connection to us, as coordinator, good"
+        else:
+            resp = s.recv()
+            self.assert_(len(resp)==0)
+            s.close()
+            return
+
+        # 3. Our tracker says there is another peer (also us) on port 4810
+        # Now accept a connection on that port and pretend we're a seeder
+        self.myss.settimeout(10.0)
+        conn, addr = self.myss.accept()
+        options = '\x00\x00\x00\x00\x00\x00\x00\x00'
+        s2 = BTConnection('',0,conn,user_option_pattern=options,user_infohash=self.infohash,myid=self.myid)
+        s2.read_handshake_medium_rare()
+        
+        numpieces = 10 # must correspond to the torrent in test/extend_hs_dir
+        b = Bitfield(numpieces)
+        for i in range(numpieces):
+            b[i] = True
+        self.assert_(b.complete())
+        msg = BITFIELD+b.tostring()
+        s2.send(msg)
+        msg = UNCHOKE
+        s2.send(msg)
+        print >>sys.stderr,"test: Got BT connection to us, as fake seeder, good"
+
+        # 4. Await a RESERVE_PIECES message on the overlay connection
+        resp = s.recv()
+        self.assert_(resp[0] == RESERVE_PIECES)
+        pieces = self.check_reserve_pieces(resp[1:])
+        print >>sys.stderr,"test: Got RESERVE_PIECES, good"
+
+        (func,good) = genresdict[PIECES_RESERVED]
+        
+        # 5. Reply with PIECES_RESERVED
+        msg = func(pieces)
+        s.send(msg)
+        
+        if good:
+            # 6. Await REQUEST on fake seeder
+            try:
+                while True:
+                    s2.s.settimeout(10.0)
+                    resp = s2.recv()
+                    self.assert_(len(resp) > 0)
+                    print "test: Fake seeder got message",getMessageName(resp[0])
+                    if resp[0] == REQUEST:
+                        self.check_request(resp[1:],pieces)
+                        print >>sys.stderr,"test: Fake seeder got REQUEST for reserved piece, good"
+                        break
+                    
+            except socket.timeout:
+                print >> sys.stderr,"test: Timeout, bad, fake seeder didn't reply with message"
+                self.assert_(False)
+        else:
+            resp = s.recv()
+            self.assert_(len(resp)==0)
+            s.close()
+            return
+
+        (func,good) = genresdict[STOP_DOWNLOAD_HELP]
+        # 5. Reply with STOP_DOWNLOAD_HELP
+        msg = func()
+        s.send(msg)
+
+        # the other side should close the connection, whether the msg was good or bad
+        resp = s.recv()
+        self.assert_(len(resp)==0)
+        s.close()
+        
+
+    def create_good_dlhelp(self):
+        return ASK_FOR_HELP+self.infohash
+
+    def check_get_metadata(self,data):
+        infohash = bdecode(data) # is bencoded for unknown reason, can't change it
+        self.assert_(infohash == self.infohash)
+
+    def create_good_metadata(self):
+        f = open(self.torrentfile,"rb")
+        data = f.read()
+        f.close() 
+        
+        d = self.create_good_metadata_dict(data)
+        bd = bencode(d)
+        return METADATA+bd
+
+    def create_good_metadata_dict(self,data):
+        d = {}
+        d['torrent_hash'] = self.infohash 
+        d['metadata'] = data
+        d['leecher'] = 1
+        d['seeder'] = 1
+        d['last_check_time'] = int(time.time())
+        d['status'] = 'good'
+        return d
+
+    def check_reserve_pieces(self,data):
+        # torrent_hash + 1-byte all_or_nothing + bencode([piece num,...])
+        self.assert_(len(data) > 21)
+        infohash = data[0:20]
+        allflag = data[20]
+        plist = bdecode(data[21:])
+        
+        self.assert_(infohash == self.infohash)
+        self.assert_(type(plist) == ListType)
+        return plist
+
+    def create_good_pieces_reserved(self,pieces):
+        payload = self.infohash + bencode(pieces)
+        return PIECES_RESERVED + payload
+
+    def check_request(self,data,pieces):
+        piece = toint(data[0:4])
+        self.assert_(piece in pieces)
+
+    def create_good_stop_dlhelp(self):
+        return STOP_DOWNLOAD_HELP+self.infohash
+
+
+    #
+    # Bad ASK_FOR_HELP
+    #    
+
+    def create_bad_dlhelp_not_infohash(self):
+        return ASK_FOR_HELP+"481"
+
+    #
+    # Bad METADATA
+    #
+
+    def create_bad_metadata_not_bdecodable(self):
+        return METADATA+"bla"
+
+    def create_bad_metadata_not_dict1(self):
+        d  = 481
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_not_dict2(self):
+        d  = []
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_empty_dict(self):
+        d = {}
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_wrong_dict_keys(self):
+        d = {}
+        d['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        d['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_bad_torrent1(self):
+        d = self.create_good_metadata_dict(None)
+        d['metadata'] = '\x12\x34' * 100 # random data
+        bd = bencode(d)
+        return METADATA+bd
+
+    def create_bad_metadata_bad_torrent2(self):
+        torrent = {}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+    def create_bad_metadata_bad_torrent3(self):
+        torrent = {'info':481}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_dl.py <method name>"
+    else:
+        suite.addTest(TestDownloadHelp(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_dlhelp.sh b/instrumentation/next-share/BaseLib/Test/test_dlhelp.sh
new file mode 100755 (executable)
index 0000000..6cc3480
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_dlhelp.py singtest_good_2fast
+python test_dlhelp.py singtest_bad_2fast_dlhelp
+python test_dlhelp.py singtest_bad_2fast_metadata_not_bdecodable
+python test_dlhelp.py singtest_bad_2fast_metadata_not_dict1
+python test_dlhelp.py singtest_bad_2fast_metadata_not_dict2
+python test_dlhelp.py singtest_bad_2fast_metadata_empty_dict
+python test_dlhelp.py singtest_bad_2fast_metadata_wrong_dict_keys
+python test_dlhelp.py singtest_bad_2fast_metadata_bad_torrent1
+python test_dlhelp.py singtest_bad_2fast_metadata_bad_torrent2
+python test_dlhelp.py singtest_bad_2fast_metadata_bad_torrent3
diff --git a/instrumentation/next-share/BaseLib/Test/test_extend_hs.py b/instrumentation/next-share/BaseLib/Test/test_extend_hs.py
new file mode 100644 (file)
index 0000000..9b43624
--- /dev/null
@@ -0,0 +1,318 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# TODO: Let Tribler initiate a BT connection to us. We then pretend to be an
+# old client and then he should initiate an OL connection to us.
+#
+
+import unittest
+import os
+import sys
+import time
+import socket
+from traceback import print_exc
+from types import DictType,IntType
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from btconn import BTConnection
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+DEBUG=True
+
+class TestExtendHandshake(TestAsServer):
+    """ 
+    Testing EXTEND handshake message: uTorrent and Bram's BitTorrent now support 
+    an extension to the protocol, documented on 
+    http://www.rasterbar.com/products/libtorrent/extension_protocol.html
+
+    The problem is that the bit they use in the options field of the BT handshake
+    is the same as we use to indicate a peer supports the overlay-swarm connection.
+    The new clients will send an EXTEND message with ID 20 after the handshake to
+    inform the other side what new messages it supports.
+
+    See BitTornado/BT1/Connecter.py
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+    
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        # Let Tribler start downloading a non-functioning torrent, so
+        # we can talk to a normal download engine.
+        
+        self.torrentfn = os.path.join('extend_hs_dir','dummydata.merkle.torrent')
+        tdef = TorrentDef.load(self.torrentfn)
+
+        dscfg = self.setUpDownloadConfig()
+        
+        self.session.start_download(tdef,dscfg)
+
+        # This is the infohash of the torrent in test/extend_hs_dir
+        self.infohash = '\xccg\x07\xe2\x9e!]\x16\xae{\xb8\x10?\xf9\xa5\xf9\x07\xfdBk'
+        self.mylistenport = 4810
+
+    def setUpDownloadConfig(self):
+        dscfg = DownloadStartupConfig()
+        dscfg.set_dest_dir(self.config_path)
+        return dscfg        
+        
+        
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        self.subtest_good_nontribler_extend_hs()
+        self.subtest_good_tribler_extend_hs()
+
+        self.subtest_bad_empty()
+        self.subtest_bad_ext_id_not_byte()
+        self.subtest_bad_not_hs()
+        self.subtest_bad_not_bdecodable()
+        self.subtest_bad_not_dict1()
+        self.subtest_bad_not_dict2()
+        self.subtest_bad_m_not_dict1()
+        self.subtest_bad_m_not_dict2()
+        # bencode doesn't permit this:
+        #self.subtest_bad_m_key_not_str()
+        self.subtest_bad_m_val_not_int()
+        
+        # Tribler doesn't check for these
+        ##self.subtest_bad_p_not_int()
+        ##self.subtest_bad_v_not_utf8str()
+
+
+    #
+    # Good EXTEND handshake message
+    #
+    def subtest_good_nontribler_extend_hs(self):
+        options = '\x00\x00\x00\x00\x00\x10\x00\x00'
+        self._test_good(self.create_good_nontribler_extend_hs,options=options,infohash=self.infohash)
+
+    def subtest_good_tribler_extend_hs(self):
+        self._test_good(self.create_good_tribler_extend_hs,infohash=self.infohash)
+        
+        # We've said we're a Tribler peer, and we initiated the connection, so 
+        # now *we* should try to establish an overlay-swarm connection.
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,mylistenport=self.mylistenport)
+        # the connection should be intact, so this should not throw an
+        # exception:
+        time.sleep(5)
+        s.send('bla')
+        s.close()
+
+    def _test_good(self,msg_gen_func,options=None,infohash=None):
+        if options is None and infohash is None:
+            s = BTConnection('localhost',self.hisport)
+        elif options is None:
+            s = BTConnection('localhost',self.hisport,user_infohash=infohash)
+        elif infohash is None:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options)
+        else:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=infohash)
+        msg = msg_gen_func()
+        s.send(msg)
+        s.read_handshake_medium_rare()
+        time.sleep(5)
+
+        # Tribler should send an EXTEND message back
+        try:
+            s.s.settimeout(10.0)
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            print >>sys.stderr,"test: Got reply",getMessageName(resp[0])
+            self.assert_(resp[0] == EXTEND)
+            self.check_tribler_extend_hs(resp[1:])
+            #s.close()
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply with EXTEND message"
+            self.assert_(False)
+        
+
+    def create_good_nontribler_extend_hs(self):
+        d = {}
+        d['m'] = {'hallo':12, 'dag':255 }
+        d['p'] = self.mylistenport
+        d['v'] = 'TestSweet 1.2.3.4'
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_good_tribler_extend_hs(self):
+        d = {}
+        d['m'] = {'Tr_OVERLAYSWARM':253}
+        d['p'] = self.mylistenport
+        d['v'] = 'Tribler 3.5.1'
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+
+    def check_tribler_extend_hs(self,data):
+        self.assert_(data[0] == chr(0))
+        d = bdecode(data[1:])
+        self.assert_(type(d) == DictType)
+        self.assert_('m' in d.keys())
+        m = d['m']
+        self.assert_(type(m) == DictType)
+        self.assert_('Tr_OVERLAYSWARM' in m.keys())
+        val = m['Tr_OVERLAYSWARM']
+        self.assert_(type(val) == IntType)
+        self.assert_(val == 253)
+
+    #
+    # Bad EXTEND handshake messages: each subtest sends one malformed
+    # handshake (built by the matching create_* method further below) and
+    # expects the peer to drop the connection.  See _test_bad for the
+    # shared driver logic.
+    #
+    def subtest_bad_empty(self):
+        self._test_bad(self.create_empty)
+
+    def subtest_bad_ext_id_not_byte(self):
+        self._test_bad(self.create_ext_id_not_byte)
+    
+    def subtest_bad_not_hs(self):
+        self._test_bad(self.create_not_hs)
+    
+    def subtest_bad_not_bdecodable(self):
+        self._test_bad(self.create_not_bdecodable)
+
+    def subtest_bad_not_dict1(self):
+        self._test_bad(self.create_not_dict1)
+
+    def subtest_bad_not_dict2(self):
+        self._test_bad(self.create_not_dict2)
+
+    def subtest_bad_m_not_dict1(self):
+        self._test_bad(self.create_m_not_dict1)
+
+    def subtest_bad_m_not_dict2(self):
+        self._test_bad(self.create_m_not_dict2)
+
+    def subtest_bad_m_key_not_str(self):
+        self._test_bad(self.create_m_key_not_str)
+
+    def subtest_bad_m_val_not_int(self):
+        self._test_bad(self.create_m_val_not_int)
+
+    def subtest_bad_p_not_int(self):
+        self._test_bad(self.create_p_not_int)
+
+    def subtest_bad_v_not_utf8str(self):
+        self._test_bad(self.create_v_not_utf8str)
+
+    #
+    # Main test code for bad EXTEND handshake messages
+    #
+    def _test_bad(self,gen_drequest_func):
+        """ Send the malformed EXTEND handshake produced by
+            gen_drequest_func and verify that the peer closes the
+            connection within the timeout. """
+        # Reserved-bits pattern advertising extension-protocol support.
+        options = '\x00\x00\x00\x00\x00\x10\x00\x00'
+        s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=self.infohash)
+        print >> sys.stderr,"\ntest: ",gen_drequest_func
+        msg = gen_drequest_func()
+        s.send(msg)
+        time.sleep(5)
+        
+        # the other side should not like this and close the connection
+        try:
+            s.s.settimeout(10.0)
+            s.read_handshake_medium_rare(close_ok = True)
+            while True:
+                resp = s.recv()
+                if len(resp) > 0:
+                    # A few benign messages may still arrive before close.
+                    print >>sys.stderr,"test: Got",getMessageName(resp[0]),"from peer"
+                    self.assert_(resp[0] == EXTEND or resp[0]==UNCHOKE)
+                else:
+                    # Zero-length read means the peer closed the connection: good.
+                    self.assert_(len(resp)==0)
+                    s.close()
+                    break
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't close connection"
+            self.assert_(False)
+
+    #
+    # Bad message creators
+    # 
+    def create_empty(self):
+        return EXTEND
+
+    def create_ext_id_not_byte(self):
+        return EXTEND+'Hallo kijkbuiskinderen'
+    
+    def create_not_hs(self):
+        d = {}
+        bd = bencode(d)
+        return EXTEND+chr(231)+bd
+
+    def create_not_bdecodable(self):
+        return EXTEND+chr(0)+"bla"
+
+    def create_not_dict1(self):
+        d = 481
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_not_dict2(self):
+        d = []
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_m_not_dict1(self):
+        m = 481
+        d={'m':m}
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_m_not_dict2(self):
+        m = []
+        d={'m':m}
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_m_key_not_str(self):
+        m = {481:123}
+        d={'m':m}
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_m_val_not_int(self):
+        m = {'Message for ya, sir':[]}
+        d={'m':m}
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_p_not_int(self):
+        p = []
+        d={'p':p}
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_v_not_utf8str(self):
+        v = []
+        d={'v':v}
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+
+def test_suite():
+    
+    print >>sys.stderr,"test: test_suite #######################################3"
+    
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestExtendHandshake))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_friend.py b/instrumentation/next-share/BaseLib/Test/test_friend.py
new file mode 100644 (file)
index 0000000..f91c771
--- /dev/null
@@ -0,0 +1,136 @@
+# Written by Jie Yang, Arno Bakker
+# see LICENSE.txt for license information
+import os
+import tempfile
+import unittest
+from sets import Set
+import base64
+
+from BaseLib.Core.CacheDB.friends import ExternalFriendList
+from BaseLib.Core.CacheDB.sqlitecachedb import PeerDB
+
+# Test fixture for the external friends file.  Several entries are
+# deliberately malformed so readFriendList() must skip them: 'Jie Yang 2'
+# has an extra field, 'Pawel' has a non-numeric port ('6882a'), '#Johan'
+# is a comment line, and 'Arno Bakker 2' has a malformed IP.
+# NOTE(review): the last three entries are missing separating commas, so
+# Python concatenates them into a single string; since each fragment ends
+# in '\n' the file written by writelines() is identical either way --
+# presumably intentional, but confirm before "fixing".
+lines = [
+'Jie Yang 2, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc6ebdH+dmvvgKiE7oOZuQba5I4msyuTJmVpJQVPAT+R9Pg8zsLsuJPV6RjU30RKHnCiaJvjtFW6pLXo, 130.161.158.51, 3966, 23623\n',
+'Pawel, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJ114tMJ6C8TkLkSv8QlVFlj/RpF2ibbar1P8GbzASpMDb1kSUBnmldfMFsNTNSK5cJGsTgAGFjYEJ78, 130.37.198.247, 6882a\n',
+'#Johan, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAUo6nahUzz+NtYWfabmtkvBryqX3ToxgdBKIllVtADv1Et+W0OyT9J0F8VPqSeBZVA1TPuLUpt3I9QHP, 130.37.193.64, 6883\n',
+'Arno Bakker 2, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWAiRwei5Kw9b2he6qmwh5Hr5fNR3FlgHQ1WhXY0AC4w8RQD59rp4Jbo2NdjyXUGb5y1BCeMCGoRCaFy, 130.37.193.64a, 6881\n'
+'Arno Bakker, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWAiRwei5Kw9b2he6qmwh5Hr5fNR3FlgHQ1WhXY0AC4w8RQD59rp4Jbo2NdjyXUGb5y1BCeMCGoRCaFy, 130.37.193.64, 6881\n'
+'Jie Yang, MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc6ebdH+dmvvgKiE7oOZuQba5I4msyuTJmVpJQVPAT+R9Pg8zsLsuJPV6RjU30RKHnCiaJvjtFW6pLXo, 130.161.158.51, 3966, 23623\n',
+]
+
+class TestFriendList(unittest.TestCase):
+    
+    def setUp(self):
+        self.tmpfilepath = tempfile.mktemp()
+        self.tmpdirpath = os.path.join(tempfile.gettempdir(), 'testdb')
+        self.flist = ExternalFriendList(friend_file=self.tmpfilepath, db_dir=self.tmpdirpath)
+        
+    def tearDown(self):
+        self.flist.clear()
+        try:
+            os.remove(self.tmpfilepath)
+        except Exception, msg:
+            pass
+
+    def writeFriends(self):
+        tf = open(self.tmpfilepath, "w")
+        tf.writelines(lines)
+        tf.close()
+            
+    def test_readFriendList(self):
+        self.writeFriends()
+        res = self.flist.readFriendList(self.tmpfilepath)
+        assert len(res) == 3, res
+        assert res[1]['name'] == 'Arno Bakker', res[0]
+        
+    def test_updateDB(self):
+        self.writeFriends()
+        res = self.flist.readFriendList()
+        self.flist.updateDB(res)
+        self.db_is_ok()
+        
+    def test_updateFriendList(self):
+        self.writeFriends()
+        self.flist.updateFriendList()
+        self.db_is_ok()
+        
+    def db_is_ok(self):
+        self.my_db = MyDB.getInstance()
+        self.peer_db = PeerDB.getInstance()
+        assert Set(self.my_db._get('friends')) == Set([
+        base64.decodestring('MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc6ebdH+dmvvgKiE7oOZuQba5I4msyuTJmVpJQVPAT+R9Pg8zsLsuJPV6RjU30RKHnCiaJvjtFW6pLXo\n'),
+        base64.decodestring('MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWAiRwei5Kw9b2he6qmwh5Hr5fNR3FlgHQ1WhXY0AC4w8RQD59rp4Jbo2NdjyXUGb5y1BCeMCGoRCaFy\n')
+        ]), self.my_db._get('friends')
+        assert self.peer_db._size() == 2
+        
+    def test_getFriends(self):
+        self.writeFriends()
+        self.flist.updateFriendList()
+        friends = self.flist.getFriends()
+        answer = [
+                   {'permid': base64.decodestring('MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAWAiRwei5Kw9b2he6qmwh5Hr5fNR3FlgHQ1WhXY0AC4w8RQD59rp4Jbo2NdjyXUGb5y1BCeMCGoRCaFy\n'),
+                   'name':'Arno Bakker',
+                   'ip':'130.37.193.64', 
+                   'port':6881,
+                   'similarity':0,
+                   'last_seen':0,
+                   'buddycast_times':0,
+                   'last_buddycast_time':0,
+                   'oversion':0,
+                   'connected_times':0,
+                   'npeers':0,
+                   'ntorrents':0,
+                   'nprefs':0,
+                   'nqueries':0
+                   },
+                   {'permid':base64.decodestring('MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAc6ebdH+dmvvgKiE7oOZuQba5I4msyuTJmVpJQVPAT+R9Pg8zsLsuJPV6RjU30RKHnCiaJvjtFW6pLXo\n'),
+                   'name':'Jie Yang',
+                   'ip':'130.161.158.51',
+                   'port':3966,
+                   'similarity':0,
+                   'last_seen':0,
+                   'buddycast_times':0,
+                   'last_buddycast_time':0,
+                   'oversion':0,                   
+                   'connected_times':0,
+                   'npeers':0,
+                   'ntorrents':0,
+                   'nprefs':0,
+                   'nqueries':0
+                   },
+                   ]
+        assert len(friends) == 2, len(friends)
+        # Arno: last_seen is set automatically these days :-(
+        for friend in friends:
+            friend['last_seen'] = 0
+
+        """
+        for f in friends:
+            k = f.keys()[:]
+            k.sort()
+            print "FRIEND",k
+        for a in answer:
+            k = a.keys()[:]
+            k.sort()
+            print "ANSWER",k
+        """
+        
+        assert friends == answer or (friends[0] == answer[1] and friends[1] == answer[0]), friends
+        #self.flist.writeFriendList('tmp.txt')
+        self.flist.deleteFriend(answer[0]['permid'])
+        friends = self.flist.getFriends()
+        assert len(friends) == 1, len(friends)
+        assert friends[0]['permid'] == answer[1]['permid']
+
+    def xxtest_normal(self):
+        flist = ExternalFriendList()
+        flist.updateFriendList()
+        friends = flist.getFriends()
+        print friends
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestFriendList))
+    
+    return suite
+    
diff --git a/instrumentation/next-share/BaseLib/Test/test_friendship.bat b/instrumentation/next-share/BaseLib/Test/test_friendship.bat
new file mode 100644 (file)
index 0000000..1559782
--- /dev/null
@@ -0,0 +1,16 @@
+REM Run each friendship singtest in its own process so every test gets a\r
+REM fresh Tribler instance (see singtest_bad_all in test_friendship.py).\r
+set PYTHONPATH=..\..;%PYTHONPATH%\r
+\r
+python test_friendship.py singtest_good_friendship_req0\r
+python test_friendship.py singtest_good_friendship_req1\r
+python test_friendship.py singtest_good_friendship_he_invites\r
+python test_friendship.py singtest_good_friendship_he_already_invited\r
+python test_friendship.py singtest_good_resp_he_already_at_mutual\r
+python test_friendship.py singtest_good_friendship_fwd_req_dest3rdp\r
+python test_friendship.py singtest_good_friendship_fwd_resp0_dest3rdp\r
+python test_friendship.py singtest_good_friendship_fwd_resp1_dest3rdp\r
+python test_friendship.py singtest_good_friendship_fwd_req_desthim\r
+python test_friendship.py singtest_good_friendship_fwd_resp0_desthim\r
+python test_friendship.py singtest_good_friendship_fwd_resp1_desthim\r
+python test_friendship.py singtest_good_friendship_delegate_req\r
+python test_friendship.py singtest_good_friendship_delegate_shutdown\r
+python test_friendship.py singtest_bad_all\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_friendship.py b/instrumentation/next-share/BaseLib/Test/test_friendship.py
new file mode 100644 (file)
index 0000000..6e7d935
--- /dev/null
@@ -0,0 +1,763 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+#
+# WARNING:
+#
+# To run this test, please set
+# RESCHEDULE_INTERVAL = 6
+# RESEND_INTERVAL = 6
+#
+# In Tribler/Core/SocialNetwork/FriendshipMsgHandler.py
+#
+
+
+import unittest
+import sys
+import time
+import socket
+from traceback import print_exc
+from types import StringType, DictType, IntType
+from M2Crypto import EC
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+import btconn
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from BaseLib.Test.test_social_overlap import TestSocialOverlap
+
+DEBUG=True
+
+REQ='REQ'
+RESP='RESP'
+FWD='FWD'
+
+class TestFriendship(TestAsServer):
+    """ 
+    Testing FRIENDSHIP message of FRIENDSHIP extension V1
+    """
+    
+    def setUp(self):
+        """ override TestAsServer """
+        print >>sys.stderr,"test: *** setup friendship"
+        TestAsServer.setUp(self)
+
+        # tearDown asserts that the user callback fired whenever it was
+        # expected; individual tests flip these flags.
+        self.usercallbackexpected = True
+        self.usercallbackreceived = False
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        # FRIENDSHIP extension must be enabled for these tests.
+        self.config.set_social_networking(True)
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())        
+
+        self.setUpMyListenSocket()
+
+    def setUpMyListenSocket(self):
+        """ Create a third identity ('dest') with its own listen socket,
+            used as the destination/forwarder in the FWD tests. """
+        self.dest_keypair = EC.gen_params(EC.NID_sect233k1)
+        self.dest_keypair.gen_key()
+        self.destpermid = str(self.dest_keypair.pub().get_der())
+        self.destport = 4810
+        
+        # Start our server side, to which Tribler will try to connect
+        self.destss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.destss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.destss.bind(('', self.destport))
+        self.destss.listen(1)
+
+        print >>sys.stderr,"test: my   permid",show_permid_short(self.mypermid)
+        print >>sys.stderr,"test: his  permid",show_permid_short(self.hispermid)
+        print >>sys.stderr,"test: dest permid",show_permid_short(self.destpermid)
+
+    def tearDown(self):
+        """ override TestAsServer """
+        print >>sys.stderr,"test: *** tear down friendship"
+        TestAsServer.tearDown(self)
+        self.assert_((not self.usercallbackexpected) or (self.usercallbackreceived))
+        # Give the session time to shut down before the next test starts.
+        time.sleep(10)
+
+    #
+    # Good FRIENDSHIP REQ
+    # 
+    def singtest_good_friendship_req1(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            We send a REQ, and let the usercallback send a positive response 
+        """
+        self.session.set_friendship_callback(self.approve_usercallback)
+        self.subtest_good_friendship_req(REQ,mresp=1)
+        
+        # Positive reply on both sides must leave the friendship mutual.
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.getFriendState(self.mypermid)
+        self.assert_(fs == FS_MUTUAL)
+
+    def approve_usercallback(self,permid,params):
+        # Session callback: approve the incoming friendship request.
+        print >>sys.stderr,"test: Got user callback"
+        self.usercallbackreceived = True
+        self.session.send_friendship_message(permid,RESP,approved=True)
+
+    def singtest_good_friendship_req0(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            We send a REQ, and let the usercallback send a negative response 
+        """
+        self.session.set_friendship_callback(self.deny_usercallback)
+        self.subtest_good_friendship_req(REQ,mresp=0)
+
+        # A denial must be recorded as FS_I_DENIED on Tribler's side.
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.getFriendState(self.mypermid)
+        self.assert_(fs == FS_I_DENIED)
+
+    def deny_usercallback(self,permid,params):
+        # Session callback: deny the incoming friendship request.
+        print >>sys.stderr,"test: Got user callback"
+        self.usercallbackreceived = True
+        self.session.send_friendship_message(permid,RESP,approved=False)
+
+
+    def singtest_good_friendship_he_already_invited(self):
+        """ 
+            Test good FRIENDSHIP REQ message:
+            We set the friendDB as if Tribler already sent an invite, 
+            we then send a REQ, which should give an automatic reply. 
+        """
+        # Register ourselves as a known peer so the friend state can attach.
+        peerdb = self.session.open_dbhandler(NTFY_PEERS)
+        peer = {}
+        peer['permid'] = self.mypermid
+        peer['ip'] = '127.0.0.2'
+        peer['port'] = 5000
+        peer['last_seen'] = 0
+        peerdb.addPeer(peer['permid'],peer,update_dns=True,commit=True)
+
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        frienddb.setFriendState(self.mypermid,FS_I_INVITED)
+        print >>sys.stderr,"test: already invited, setting",show_permid_short(self.mypermid)
+
+        # No user callback: the reply is automatic in this state.
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_req(REQ,mresp=1)
+
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.getFriendState(self.mypermid)
+        self.assert_(fs == FS_MUTUAL)
+
+
+
+    def singtest_good_resp_he_already_at_mutual(self):
+        """ 
+            Test good FRIENDSHIP RESP message:
+            We set the friendDB as if the friendship is already mutual,
+            then send a RESP; no reply is expected and the state must
+            remain FS_MUTUAL.
+        """
+        peerdb = self.session.open_dbhandler(NTFY_PEERS)
+        peer = {}
+        peer['permid'] = self.mypermid
+        peer['ip'] = '127.0.0.2'
+        peer['port'] = 5000
+        peer['last_seen'] = 0
+        peerdb.addPeer(peer['permid'],peer,update_dns=True,commit=True)
+
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        frienddb.setFriendState(self.mypermid,FS_MUTUAL)
+        print >>sys.stderr,"test: already invited, setting",show_permid_short(self.mypermid)
+
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_req(RESP,mresp=1,expectreply=False)
+
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.getFriendState(self.mypermid)
+        self.assert_(fs == FS_MUTUAL)
+
+
+
+    def singtest_good_friendship_req1_send_social_overlap(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            We send a SOCIAL_OVERLAP, then a REQ, and let the usercallback 
+            send a positive response, and check if he recorded our image. 
+        """
+        self.session.set_friendship_callback(self.approve_check_icon_usercallback)
+        self.subtest_good_friendship_req(REQ,mresp=1,socover=True)
+        
+    def approve_check_icon_usercallback(self,permid,params):
+        # Like approve_usercallback, but first verify that the icon sent
+        # in the preceding SOCIAL_OVERLAP message was stored in the peer db.
+        print >>sys.stderr,"test: Got user callback"
+        
+        peerdb = self.session.open_dbhandler(NTFY_PEERS)
+        img = peerdb.getPeerIcon(self.mypermid)
+        print >>sys.stderr,"test: My img is",`img`
+        self.assert_(img[0] is not None)
+        
+        self.usercallbackreceived = True
+        self.session.send_friendship_message(permid,RESP,approved=True)
+
+
+    def subtest_good_friendship_req(self,mtype,fwd=None,mresp=None,socover=False,expectreply=True):
+        """ Shared driver: open an overlay connection, optionally send a
+            SOCIAL_OVERLAP first, send a FRIENDSHIP message of the given
+            type, and (if expectreply) verify the FRIENDSHIP RESP.
+
+            mtype       -- REQ, RESP or FWD
+            fwd         -- inner message type when mtype is FWD
+            mresp       -- expected 'response' value (0 or 1)
+            socover     -- send a SOCIAL_OVERLAP message first
+            expectreply -- whether a FRIENDSHIP reply must arrive
+        """
+        print >>sys.stderr,"test: good FRIENDSHIP",mtype,fwd
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        
+        if socover:
+            tso = TestSocialOverlap("test_all")
+            msg = tso.create_good_soverlap()
+            s.send(msg)
+        
+        msg = self.create_good_friendship_payload(mtype,fwd,mresp)
+        s.send(msg)
+
+        s.b.s.settimeout(10.0)
+        try:
+            # Skip over any SOCIAL_OVERLAP messages until FRIENDSHIP arrives.
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: good FRIENDSHIP: Got reply",getMessageName(resp[0])
+                if resp[0] == FRIENDSHIP:
+                    break
+                elif resp[0] == SOCIAL_OVERLAP:
+                    pass
+                else:
+                    self.assert_(False)
+        except socket.timeout:
+            if expectreply:
+                print >> sys.stderr,"test: Timeout, bad, peer didn't reply with FRIENDSHIP message"
+                self.assert_(False)
+            else:
+                print >> sys.stderr,"test: Timeout, good, wasn't expecting a reply"
+                self.assert_(True)
+
+        if expectreply:
+            self.check_friendship(resp[1:],RESP,None,mresp)
+            time.sleep(10)
+            # the other side should not have closed the connection, as
+            # this is all valid, so this should not throw an exception:
+            s.send('bla')
+            s.close()
+
+
+
+    #
+    # Good FRIENDSHIP FWD destined for 3rd party (also us, as dest)
+    # 
+    def singtest_good_friendship_fwd_req_dest3rdp(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            We send a FWD containing a REQ and see if Tribler tries to
+            deliver it to the 3rd party. 
+        """
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_dest3rdp(FWD,fwd=REQ)
+
+    def singtest_good_friendship_fwd_resp0_dest3rdp(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            We send a FWD containing a negative RESP and see if Tribler tries to
+            deliver it to the specified dest (also us on diff listen port) 
+        """
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_dest3rdp(FWD,fwd=RESP,mresp=0)
+        
+    def singtest_good_friendship_fwd_resp1_dest3rdp(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            We send a FWD containing a positive RESP and see if Tribler tries to
+            deliver it to the specified dest (also us on diff listen port) 
+        """
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_dest3rdp(FWD,fwd=RESP,mresp=1)
+
+    def subtest_good_friendship_fwd_dest3rdp(self,mtype,fwd=None,mresp=None):
+        """ Send a FWD whose dest is our third 'dest' identity and verify
+            Tribler connects to dest's listen socket and delivers it. """
+        print >>sys.stderr,"test: good FRIENDSHIP dest = 3rd party",mtype,fwd
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = self.create_good_friendship_payload(mtype,fwd,mresp,source=self.mypermid,dest=self.destpermid)
+        s.send(msg)
+
+        # He should try to forward the request to us
+        try:
+            self.destss.settimeout(10.0)
+            conn, addr = self.destss.accept()
+            s = OLConnection(self.dest_keypair,'',0,conn,self.destport)
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: good FRIENDSHIP fwd: Dest got reply",getMessageName(resp[0])
+                if resp[0] == FRIENDSHIP:
+                    break
+                elif resp[0] == SOCIAL_OVERLAP:
+                    pass
+                else:
+                    self.assert_(False)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't connect to FWD dest"
+            self.assert_(False)
+
+        # The delivered message must still be the original FWD, unchanged.
+        self.check_friendship(resp[1:],FWD,fwd,mresp,source=self.mypermid,dest=self.destpermid)
+
+
+    #
+    # Good FRIENDSHIP FWD destined for him
+    #
+
+    def singtest_good_friendship_fwd_req_desthim(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            We send a FWD containing a REQ meant for Tribler, and see if it
+            sends a reply to dest (now source)
+        """
+        self.session.set_friendship_callback(self.approve_usercallback)
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_req_desthim(FWD,fwd=REQ,mresp=1)
+        
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.getFriendState(self.destpermid)
+        self.assert_(fs == FS_MUTUAL)
+
+
+    def subtest_good_friendship_fwd_req_desthim(self,mtype,fwd=None,mresp=None):
+        """ Send a FWD addressed to Tribler itself and verify it answers
+            the inner REQ by connecting back to the source (our dest id). """
+        print >>sys.stderr,"test: good FRIENDSHIP dest = him",mtype,fwd
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = self.create_good_friendship_payload(mtype,fwd,mresp,source=self.destpermid,dest=self.hispermid)
+        s.send(msg)
+
+        # He should try to reply to dest's request, forwarded through me
+        try:
+            self.destss.settimeout(10.0)
+            conn, addr = self.destss.accept()
+            s = OLConnection(self.dest_keypair,'',0,conn,self.destport)
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: good FRIENDSHIP fwd: Dest got reply",getMessageName(resp[0])
+                if resp[0] == FRIENDSHIP:
+                    break
+                elif resp[0] == SOCIAL_OVERLAP:
+                    pass
+                else:
+                    self.assert_(False)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't connect to FWD dest"
+            self.assert_(False)
+
+        self.check_friendship(resp[1:],RESP,fwd,mresp)
+
+
+    def singtest_good_friendship_fwd_resp0_desthim(self):
+        """ 
+            Test good FRIENDSHIP RESP message: 
+            We send a FWD containing a negative RESP and see if Tribler
+            registers our denial. 
+        """
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_resp_desthim(FWD,fwd=RESP,mresp=0)
+
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.getFriendState(self.destpermid)
+        self.assert_(fs == FS_HE_DENIED)
+
+
+    def singtest_good_friendship_fwd_resp1_desthim(self):
+        """ 
+            Test good FRIENDSHIP RESP message: 
+            We send a FWD containing a positive RESP and see if Tribler
+            registers our confirmation. 
+        """
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_resp_desthim(FWD,fwd=RESP,mresp=1)
+
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.getFriendState(self.destpermid)
+        print >>sys.stderr,"FS AFTER IS",fs
+        self.assert_(fs == FS_HE_INVITED)
+
+
+    def subtest_good_friendship_fwd_resp_desthim(self,mtype,fwd=None,mresp=None):
+        """ Send a FWD carrying a RESP addressed to Tribler itself;
+            it should record the state change and send no reply. """
+        print >>sys.stderr,"test: good FRIENDSHIP dest = him",mtype,fwd
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = self.create_good_friendship_payload(mtype,fwd,mresp,source=self.destpermid,dest=self.hispermid)
+        s.send(msg)
+
+        # He should not reply.
+        try:
+            self.destss.settimeout(10.0)
+            conn, addr = self.destss.accept()
+            s = OLConnection(self.dest_keypair,'',0,conn,self.destport)
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: good FRIENDSHIP fwd: Dest got reply",getMessageName(resp[0])
+                if resp[0] == FRIENDSHIP:
+                    # A FRIENDSHIP reply here would be a protocol violation.
+                    self.assert_(False)
+                    break
+                elif resp[0] == SOCIAL_OVERLAP:
+                    pass
+                else:
+                    self.assert_(False)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, good"
+            self.assert_(True)
+
+
+
+    #
+    # Delegate
+    #
+    #
+    # Good FRIENDSHIP REQ
+    # 
+    def singtest_good_friendship_delegate_req(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            Let Tribler send a REQ to a non-responding peer. Then it should
+            send a FWD to us as friend or buddy. 
+        """
+        self.config_db()
+
+        # Send request to offline peer
+        self.session.send_friendship_message(self.mypermid,REQ)
+        
+        # See if he forwards to us
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_fromhim(FWD,fwd=REQ)
+
+
+    def singtest_good_friendship_delegate_shutdown(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            Let Tribler send a REQ to a non-responding peer, then shut the
+            session down; shutdown should provoke forwarding the pending
+            REQ to us as friend or buddy. 
+        """
+        self.config_db()
+
+        # Send request to offline peer
+        print >>sys.stderr,"test: SESSION send msg"
+        self.session.send_friendship_message(self.mypermid,REQ)
+        time.sleep(1) # make sure message is saved
+
+        # Shutdown session, to provoke forwarding
+        print >>sys.stderr,"test: SESSION Shutdown"
+        self.session.shutdown()
+        
+        # See if he forwards to us
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_fwd_fromhim(FWD,fwd=REQ)
+
+        # Session is gone; prevent tearDown from touching it again.
+        self.session = None
+
+    def config_db(self):
+        """ Prime the peer/friend databases: our dest identity is a mutual
+            friend (the delegate), and our own permid is an offline peer. """
+        peerdb = self.session.open_dbhandler(NTFY_PEERS)
+        # Add friend
+        peer = {}
+        peer['permid'] = self.destpermid
+        peer['ip'] = '127.0.0.1'
+        peer['port'] = self.destport
+        peer['last_seen'] = 0
+        peerdb.addPeer(peer['permid'],peer,update_dns=True,commit=True)
+        
+        # Make us as dest his friend
+        frienddb = self.session.open_dbhandler(NTFY_FRIENDS)
+        fs = frienddb.setFriendState(self.destpermid,state=FS_MUTUAL)
+
+        # Add offline peer
+        peer = {}
+        peer['permid'] = self.mypermid
+        peer['ip'] = '127.0.0.2'
+        peer['port'] = 5000
+        peer['last_seen'] = 0
+        peerdb.addPeer(peer['permid'],peer,update_dns=True,commit=True)
+        
+        
+
+    def subtest_good_friendship_fwd_fromhim(self,mtype,fwd=None,mresp=None):
+        """ Wait (long timeout: resend interval) for Tribler to connect to
+            our dest socket and deliver a FWD, then validate it. """
+        print >>sys.stderr,"test: Expecting good FRIENDSHIP fwd from him",mtype,fwd
+
+        # He should try to forward the request to us, his friend
+        try:
+            self.destss.settimeout(330.0)
+            conn, addr = self.destss.accept()
+            s = OLConnection(self.dest_keypair,'',0,conn,self.destport)
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: good FRIENDSHIP fwd: Dest got reply",getMessageName(resp[0])
+                if resp[0] == FRIENDSHIP:
+                    break
+                elif resp[0] == SOCIAL_OVERLAP:
+                    pass
+                else:
+                    self.assert_(False)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't connect to FWD dest"
+            self.assert_(False)
+
+        self.check_friendship(resp[1:],mtype,fwd,mresp,source=self.hispermid,dest=self.mypermid)
+
+
+
+    def singtest_good_friendship_he_invites(self):
+        """ 
+            Test good FRIENDSHIP REQ message: 
+            Let Tribler send a REQ to a good peer.
+        """
+        self.config_db()
+
+        # NOTE(review): icontype is unused; set_mugshot is called with the
+        # data only -- presumably the type defaults to JPEG. TODO confirm.
+        icontype = 'image/jpeg'
+        icondata = self.read_usericon_ok()
+        self.session.set_mugshot(icondata)
+
+        # Send request to offline peer
+        self.session.send_friendship_message(self.destpermid,REQ)
+        
+        # See if he forwards to us
+        self.usercallbackexpected = False
+        self.subtest_good_friendship_req_fromhim(REQ)
+
+    def read_usericon_ok(self):
+        # Fixture icon shipped next to the test script.
+        return self.read_file('usericon-ok.jpg')
+
+    def read_file(self,filename):
+        """ Return the full binary contents of filename. """
+        f = open( filename, 'rb')
+        data = f.read()
+        f.close()
+        return data
+
+
+
+    def subtest_good_friendship_req_fromhim(self,mtype,fwd=None,mresp=None):
+        """ Wait for Tribler to connect to our dest socket and send us a
+            direct FRIENDSHIP message of the given type, then validate it.
+            SOCIAL_OVERLAP messages received first are logged and skipped. """
+        print >>sys.stderr,"test: good FRIENDSHIP req from him",mtype,fwd
+
+        # He should try to forward the request to us, his friend
+        try:
+            self.destss.settimeout(330.0)
+            conn, addr = self.destss.accept()
+            s = OLConnection(self.dest_keypair,'',0,conn,self.destport)
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: good FRIENDSHIP fwd: Dest got reply",getMessageName(resp[0])
+                if resp[0] == FRIENDSHIP:
+                    break
+                elif resp[0] == SOCIAL_OVERLAP:
+                    d = bdecode(resp[1:])
+                    print >>sys.stderr,"test: SOCIAL OVERLAP",`d`
+                    pass
+                else:
+                    self.assert_(False)
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't connect to FWD dest"
+            self.assert_(False)
+
+        self.check_friendship(resp[1:],mtype,fwd,mresp,source=self.hispermid,dest=self.mypermid)
+
+        
+
+
+    #
+    # Bad FRIENDSHIP messages
+    #
+    def singtest_bad_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        
+        self.usercallbackexpected = False
+        
+        self.subtest_bad_not_bdecodable()
+        self.subtest_bad_not_dict1()
+        self.subtest_bad_not_dict2()
+        self.subtest_bad_empty_dict()
+        self.subtest_bad_wrong_dict_keys()
+        self.subtest_bad_friendship_response()
+        self.subtest_bad_friendship_fwd()
+
+
+    def create_good_friendship_payload(self,mtype,fwd,resp,source=None,dest=None):
+        # Build a valid friendship dict and wrap it in a FRIENDSHIP wire message.
+        d = self.create_good_friendship(mtype,fwd,resp,source=source,dest=dest)
+        return self.create_payload(d)
+        
+    def create_good_friendship(self,mtype,fwd,resp,source=None,dest=None):
+        # Build a valid friendship message dict:
+        #   REQ  -> only 'msg type';
+        #   RESP -> adds 'response';
+        #   otherwise (FWD) -> wraps an inner message plus 'source'/'dest' peers.
+        d = {}
+        d['msg type'] = mtype
+        if mtype == REQ:
+            pass
+        elif mtype == RESP:
+            d['response'] = resp 
+        else: # forward
+            d['msg'] = self.create_good_friendship(fwd,None,resp)
+            d['source'] = self.create_good_peer(source)
+            d['dest'] = self.create_good_peer(dest)
+        return d
+            
+    def create_good_peer(self,permid):
+        # Peer record with all fields the receiver validates: permid, ip, port.
+        d = {}
+        d['permid'] = permid
+        d['ip'] = '127.0.0.1'
+        d['port'] = self.destport
+        
+        return d
+
+    def create_payload(self,r):
+        # Prefix the bencoded body with the FRIENDSHIP message id byte.
+        return FRIENDSHIP+bencode(r)
+
+    def check_friendship(self,data,mtype,fwd,resp,dobdecode=True,source=None,dest=None):
+        # Validate a received FRIENDSHIP payload: 'msg type' must match mtype;
+        # RESP carries an int 'response'; FWD wraps an inner message (checked
+        # recursively, already decoded) plus 'source' and 'dest' peer records.
+        if dobdecode:
+            d = bdecode(data)
+        else:
+            d = data
+        
+        print >>sys.stderr,"test: Got FRIENDSHIP",`d`,type(d)
+        
+        self.assert_(type(d) == DictType)
+        self.assert_('msg type' in d)
+        self.assert_(type(d['msg type']) == StringType)
+        self.assert_(d['msg type'] == mtype)
+        
+        if mtype == RESP:
+            self.assert_('response' in d)
+            self.assert_(type(d['response']) == IntType)
+            
+            print >>sys.stderr,"test: COMPARE",`d['response']`,`resp`
+            
+            self.assert_(d['response'] == resp)
+        elif mtype == FWD:
+            self.assert_('source' in d)
+            self.check_peer(d['source'],permid=source)
+            self.assert_('dest' in d)
+            self.check_peer(d['dest'],permid=dest)
+            self.assert_('msg' in d)
+            self.check_friendship(d['msg'],fwd,None,resp,dobdecode=False)
+
+    def check_peer(self,d,permid=None):
+        # Validate a peer record: permid (string, matching expected), ip
+        # (string) and port (int) must all be present.
+        self.assert_('permid' in d)
+        self.assert_(type(d['permid']) == StringType)
+        self.assert_(d['permid'] == permid)
+        self.assert_('ip' in d)
+        self.assert_(type(d['ip']) == StringType)
+        self.assert_('port' in d)
+        self.assert_(type(d['port']) == IntType)
+
+
+
+    def singtest_checkpoint(self):
+        """ Unused """
+        # Exercise checkpoint save/load of the friendship handler state.
+        self.session.send_friendship_message(self.destpermid,RESP,approved=True)
+        self.session.lm.overlay_apps.friendship_handler.checkpoint()
+        self.session.lm.overlay_apps.friendship_handler.load_checkpoint()
+
+
+    #
+    # Bad FRIENDSHIP
+    #    
+    def subtest_bad_not_bdecodable(self):
+        # Payload is not valid bencoding; peer must drop the connection.
+        self._test_bad(self.create_not_bdecodable)
+
+    def subtest_bad_not_dict1(self):
+        # Payload bdecodes to an int, not a dict.
+        self._test_bad(self.create_not_dict1)
+
+    def subtest_bad_not_dict2(self):
+        # Payload bdecodes to a list, not a dict.
+        self._test_bad(self.create_not_dict2)
+
+    def subtest_bad_empty_dict(self):
+        # Payload is a dict with no keys at all.
+        self._test_bad(self.create_empty_dict)
+
+    def subtest_bad_wrong_dict_keys(self):
+        # Payload is a dict with only unknown keys.
+        self._test_bad(self.create_wrong_dict_keys)
+
+    def create_not_bdecodable(self):
+        # FRIENDSHIP id followed by garbage that bdecode() rejects.
+        return FRIENDSHIP+"bla"
+
+    def create_not_dict1(self):
+        # Bencoded int instead of the expected dict.
+        friendship = 481
+        return self.create_payload(friendship)
+
+    def create_not_dict2(self):
+        # Bencoded list instead of the expected dict.
+        friendship = []
+        return self.create_payload(friendship)
+
+    def create_empty_dict(self):
+        # Dict missing the mandatory 'msg type' key.
+        friendship = {}
+        return self.create_payload(friendship)
+
+    def create_wrong_dict_keys(self):
+        # Dict with only bogus keys; no 'msg type' present.
+        friendship = {}
+        friendship['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        friendship['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return self.create_payload(friendship)
+
+    def subtest_bad_friendship_response(self):
+        # RESP message whose 'response' field has the wrong type.
+        self._test_bad(self.create_bad_response)
+
+    def create_bad_response(self):
+        # 'response' must be an int; send a binary string instead.
+        friendship = {}
+        friendship['msg type'] = RESP
+        friendship['response'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return self.create_payload(friendship)
+
+
+    def subtest_bad_friendship_fwd(self):
+        # Run every malformed-FWD generator through the bad-message check.
+        methods = [
+            self.make_bad_msg_forwarding_forward,
+            self.make_bad_source,
+            self.make_bad_dest]
+        for method in methods:
+            print >> sys.stderr,"\ntest: ",method,
+            self._test_bad(method)
+        
+
+    def make_bad_msg_forwarding_forward(self):
+        # Illegal nesting: a FWD message whose inner 'msg' is itself a FWD.
+        d = self.create_good_friendship(FWD,fwd=REQ,resp=None,source=self.destpermid,dest=self.hispermid)
+        d['msg'] = self.create_good_friendship(FWD,fwd=REQ,resp=None,source=self.destpermid,dest=self.hispermid)
+        return self.create_payload(d)
+        
+    def make_bad_source(self):
+        # Valid FWD but with a malformed 'source' peer record.
+        d = self.create_good_friendship(FWD,fwd=REQ,resp=None,source=self.destpermid,dest=self.hispermid)
+        d['source'] = self.make_bad_peer()
+        return self.create_payload(d)
+
+    def make_bad_dest(self):
+        # Valid FWD but with a malformed 'dest' peer record.
+        d = self.create_good_friendship(FWD,fwd=REQ,resp=None,source=self.destpermid,dest=self.hispermid)
+        d['dest'] = self.make_bad_peer()
+        return self.create_payload(d)
+
+    def make_bad_peer(self):
+        # Peer record with only 'permid' — missing the required ip/port fields.
+        d = {}
+        d['permid'] = 'peer 481'
+        # Error is too little fields. 
+        # TODO: test all possible bad peers
+        
+        return d
+
+    def _test_bad(self,gen_friendship_func):
+        # Send one malformed FRIENDSHIP message and verify the peer closes
+        # the connection (recv() returning b'' means EOF).
+        print >>sys.stderr,"test: bad friendship",gen_friendship_func
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = gen_friendship_func()
+        s.send(msg)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        self.assert_(len(s.recv())==0)
+        s.close()
+
+def test_suite():
+    # Build a suite containing the single test named on the command line.
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_friendship.py <method name>"
+    else:
+        suite.addTest(TestFriendship(sys.argv[1]))
+    
+    return suite
+
+def main():
+    # Strip the test-name argument so unittest does not try to parse it.
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_friendship.sh b/instrumentation/next-share/BaseLib/Test/test_friendship.sh
new file mode 100755 (executable)
index 0000000..853a289
--- /dev/null
@@ -0,0 +1,25 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+# python test_friendship.py singtest_good_friendship_req0
+# python test_friendship.py singtest_good_friendship_req1
+# python test_friendship.py singtest_good_friendship_he_invites
+python test_friendship.py singtest_good_friendship_req1_send_social_overlap
+# python test_friendship.py singtest_good_friendship_he_already_invited
+# python test_friendship.py singtest_good_friendship_fwd_req_dest3rdp
+# python test_friendship.py singtest_good_friendship_fwd_resp0_dest3rdp
+# python test_friendship.py singtest_good_friendship_fwd_resp1_dest3rdp
+# python test_friendship.py singtest_good_friendship_fwd_req_desthim
+# python test_friendship.py singtest_good_friendship_fwd_resp0_desthim
+# python test_friendship.py singtest_good_friendship_fwd_resp1_desthim
+# python test_friendship.py singtest_good_friendship_delegate_req
+# python test_friendship.py singtest_good_friendship_delegate_shutdown
+# python test_friendship.py singtest_bad_all
diff --git a/instrumentation/next-share/BaseLib/Test/test_friendship_crawler.py b/instrumentation/next-share/BaseLib/Test/test_friendship_crawler.py
new file mode 100644 (file)
index 0000000..c74b63e
--- /dev/null
@@ -0,0 +1,99 @@
+# Written by Boudewijn Schoon, Arno Bakker\r
+# see LICENSE.txt for license information\r
+\r
+import unittest\r
+import sys\r
+import time\r
+from traceback import print_exc\r
+from M2Crypto import EC\r
+\r
+from BaseLib.Test.test_crawler import TestCrawler\r
+from olconn import OLConnection\r
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode\r
+\r
+from BaseLib.Core.BitTornado.BT1.MessageID import CRAWLER_FRIENDSHIP_STATS\r
+from BaseLib.Core.CacheDB.sqlitecachedb import bin2str\r
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import CrawlerDBHandler\r
+from BaseLib.Core.CacheDB.SqliteFriendshipStatsCacheDB import FriendshipStatisticsDBHandler\r
+\r
+# Toggle for verbose test output.\r
+DEBUG=True\r
+\r
+class TestFriendshipCrawler(TestCrawler):\r
+    """ \r
+    Testing the user side of the crawler\r
+    """\r
+\r
+    def setUpPreSession(self):\r
+        # Enable the social-networking subsystem before the session starts.\r
+        TestCrawler.setUpPreSession(self)\r
+        self.config.set_social_networking(True)\r
+\r
+    def setUpPostSession(self):\r
+        """ override TestAsServer """\r
+        TestCrawler.setUpPostSession(self)\r
+\r
+        # Generate a throwaway keypair for a third "some" peer.\r
+        self.some_keypair = EC.gen_params(EC.NID_sect233k1)\r
+        self.some_keypair.gen_key()\r
+        self.some_permid = str(self.some_keypair.pub().get_der())\r
+\r
+        # Seed two friendship records so the stats query has data to return.\r
+        self.friendshipStatistics_db = FriendshipStatisticsDBHandler.getInstance()\r
+        self.friendshipStatistics_db.insertFriendshipStatistics( bin2str(self.his_permid), bin2str(self.some_permid), int(time.time()), 0, commit=True)        \r
+        self.friendshipStatistics_db.insertFriendshipStatistics( bin2str(self.my_permid), bin2str(self.some_permid), int(time.time()), 0, commit=True)\r
+\r
+        # make sure that the OLConnection IS in the crawler_db\r
+        crawler_db = CrawlerDBHandler.getInstance()\r
+        crawler_db.temporarilyAddCrawler(self.my_permid)\r
+\r
+    \r
+    def test_all(self):\r
+        """\r
+        I want to start a Tribler client once and then connect to it\r
+        many times. So there must be only one test method to prevent\r
+        setUp() from creating a new client every time.\r
+\r
+        The code is constructed so unittest will show the name of the\r
+        (sub)test where the error occurred in the traceback it prints.\r
+        """\r
+        self.subtest_good_friendship_stats()\r
+\r
+    def subtest_good_friendship_stats(self):\r
+        """\r
+        Send a valid message-id from a registered crawler peer\r
+        """\r
+        print >>sys.stderr, "-"*80, "\ntest: good friendship stats"\r
+\r
+        s = OLConnection(self.my_keypair, "localhost", self.hisport)\r
+\r
+        # Ask for stats newer than 100 seconds ago.\r
+        t = time.time() - 100.0\r
+        msg_dict = {'current time':int(t)}\r
+        payload = bencode(msg_dict)\r
+        self.send_crawler_request(s, CRAWLER_FRIENDSHIP_STATS, 0, 0, payload)\r
+\r
+        error, payload = self.receive_crawler_reply(s, CRAWLER_FRIENDSHIP_STATS, 0)\r
+        assert error == 0\r
+        \r
+        # Only the record inserted for his_permid should come back;\r
+        # verify source, target and forwarder flag.\r
+        d = bdecode(payload)\r
+        if DEBUG:\r
+            print >>sys.stderr, "test: Got FRIENDSHIPSTATISTICS",`d`\r
+        stats = d['stats']\r
+        self.assert_(len(stats) == 1)\r
+        record = d['stats'][0]\r
+        self.assert_(record[0] == bin2str(self.his_permid))  # source_permid\r
+        self.assert_(record[1] == bin2str(self.some_permid)) # target_permid\r
+        self.assert_(record[2] == 0) # isForwarder\r
+\r
+        time.sleep(1)\r
+        s.close()\r
+\r
+    # def send_crawler_request(self, sock, message_id, channel_id, frequency, payload):\r
+    # def receive_crawler_reply(self, sock, message_id, channel_id):\r
+\r
+def test_suite():\r
+    # Suite containing all TestFriendshipCrawler tests.\r
+    suite = unittest.TestSuite()\r
+    suite.addTest(unittest.makeSuite(TestFriendshipCrawler))\r
+    \r
+    return suite\r
+\r
+if __name__ == "__main__":\r
+    unittest.main(defaultTest="test_suite")\r
+\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_g2g.py b/instrumentation/next-share/BaseLib/Test/test_g2g.py
new file mode 100644 (file)
index 0000000..89c92a7
--- /dev/null
@@ -0,0 +1,283 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import unittest
+import os
+import sys
+import time
+import socket
+from traceback import print_exc
+from types import DictType,StringType,IntType
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from btconn import BTConnection
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.DownloadConfig import *
+
+# Toggle for verbose test output.
+DEBUG=True
+# EXTEND message id advertised/expected for 'Tr_G2G_v2' in the handshake.
+G2G_ID = 235
+
+class TestG2G(TestAsServer):
+    """ 
+    Testing EXTEND G2G message V2:
+
+    See BitTornado/BT1/Connecter.py
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(3)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+    
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        # Let Tribler start downloading an non-functioning torrent, so
+        # we can talk to a normal download engine.
+        
+        self.torrentfn = os.path.join('extend_hs_dir','dummydata.merkle.torrent')
+        tdef = TorrentDef.load(self.torrentfn)
+
+        dscfg = DownloadStartupConfig()
+        dscfg.set_dest_dir(self.config_path)
+        dscfg.set_video_event_callback(self.vod_ready_callback)
+        
+        self.d = self.session.start_download(tdef,dscfg)
+        
+        # This is the infohash of the torrent in test/extend_hs_dir
+        self.infohash = '\xccg\x07\xe2\x9e!]\x16\xae{\xb8\x10?\xf9\xa5\xf9\x07\xfdBk'
+        self.mylistenport = 4810
+
+    def vod_ready_callback(self,d,event,params):
+        # No-op: VOD events are irrelevant for this test.
+        pass
+
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        self.subtest_good_tribler_g2g_v2()
+        self.subtest_bad_g2g_v2()
+
+    #
+    # Good g2g_v2 message
+    #
+    def subtest_good_tribler_g2g_v2(self):
+        self._test_good(self.create_good_tribler_extend_hs_v2,infohash=self.infohash)
+        
+        # We've said we're a Tribler peer, and we initiated the connection, so 
+        # now *we* should now try to establish an overlay-swarm connection.
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,mylistenport=self.mylistenport)
+        # the connection should be intact, so this should not throw an
+        # exception:
+        time.sleep(5)
+        s.send('bla')
+        s.close()
+
+    def _test_good(self,msg_gen_func,options=None,infohash=None,g2g_id=G2G_ID):
+        # Open a BT connection with the requested handshake variant
+        # (plain / custom infohash / custom option bits / both).
+        if options is None and infohash is None:
+            s = BTConnection('localhost',self.hisport)
+        elif options is None:
+            s = BTConnection('localhost',self.hisport,user_infohash=infohash)
+        elif infohash is None:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options)
+        else:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=infohash)
+            
+        if DEBUG:
+            print "test: Creating test HS message",msg_gen_func,"g2g_id",g2g_id
+        msg = msg_gen_func(g2g_id=g2g_id)
+        s.send(msg)
+        s.read_handshake_medium_rare()
+
+        # Send our g2g_v2 message to Tribler
+        msg = self.create_good_g2g_v2(g2g_id=g2g_id)
+        s.send(msg)
+        
+        time.sleep(5)
+
+        # Tribler should send an EXTEND HS message back
+        try:
+            s.s.settimeout(10.0)
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            self.assert_(resp[0] == EXTEND)
+            self.check_tribler_extend_hs_v2(resp[1:])
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply with EXTEND HS message"
+            self.assert_(False)
+
+        # Tribler should send an g2g_v2 message after a while
+        print "test: Setting 60 second timeout to see if Tribler sends periodic g2g_v2"
+        
+        # Extreme h4xor
+        # Inject a fake piece transfer into every live connection so the
+        # periodic g2g message has something to report.
+        connlist = self.d.sd.dow.connecter.connections.values()[:]
+        piece = '\xab' * (2 ** 14)
+        for conn in connlist:
+            conn.queue_g2g_piece_xfer(0,0,piece)
+        
+        try:
+            s.s.settimeout(70.0)
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print "test: Tribler returns",getMessageName(resp[0])
+                if resp[0] == EXTEND:
+                    self.check_g2g_v2(resp[1:],g2g_id=g2g_id)
+                    s.close()
+                    break
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply with EXTEND g2g_v2 message"
+            self.assert_(False)
+
+        
+    def create_good_tribler_extend_hs_v2(self,g2g_id=G2G_ID):
+        # EXTEND handshake advertising overlay-swarm and G2G v2 support.
+        d = {}
+        d['m'] = {'Tr_OVERLAYSWARM':253,'Tr_G2G_v2':g2g_id}
+        d['p'] = self.mylistenport
+        d['v'] = 'Tribler 4.2.0'
+        d['e'] = 0
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def check_tribler_extend_hs_v2(self,data):
+        # Verify Tribler's EXTEND handshake advertises both extensions.
+        self.assert_(data[0] == chr(0))
+        d = bdecode(data[1:])
+        self.assert_(type(d) == DictType)
+        self.assert_('m' in d.keys())
+        m = d['m']
+        self.assert_(type(m) == DictType)
+        self.assert_('Tr_OVERLAYSWARM' in m.keys())
+        val = m['Tr_OVERLAYSWARM']
+        self.assert_(type(val) == IntType)
+        self.assert_(val == 253)
+        self.assert_('Tr_G2G_v2' in m.keys())
+        val = m['Tr_G2G_v2']
+        self.assert_(type(val) == IntType)
+        self.assert_(val == G2G_ID)
+
+    def create_good_g2g_v2(self,g2g_id=G2G_ID):
+        # Minimal valid g2g_v2 body: string keys mapped to string values.
+        d = {'0':'d','1':'b'}
+        bd = bencode(d)
+        return EXTEND+chr(g2g_id)+bd
+
+    def check_g2g_v2(self,data,g2g_id):
+        # Keys/values must be single-char strings; value <= 100 (percentage?
+        # NOTE(review): semantics inferred from the range check — confirm
+        # against Connecter.py).
+        self.assert_(data[0] == chr(g2g_id))
+        d = bdecode(data[1:])
+        
+        print >>sys.stderr,"test: l is",`d`
+        
+        self.assert_(type(d) == DictType)
+        for k,v in d.iteritems():
+            self.assert_(type(k) == StringType)
+            self.assert_(type(v) == StringType)
+            self.assert_(ord(k) > 0)
+            self.assert_(ord(v) <= 100)
+            
+    #
+    # Bad EXTEND handshake message
+    #    
+    def subtest_bad_g2g_v2(self):
+        # Run every malformed-message generator through the bad-message check.
+        methods = [self.create_empty,
+            self.create_ext_id_not_byte,
+            self.create_not_bdecodable,
+            self.create_not_dict1,
+            self.create_not_dict2,
+            self.create_key_not_int,
+            self.create_val_not_str,
+            self.create_val_too_big]
+
+        for m in methods:
+            self._test_bad(m)
+
+    #
+    # Main test code for bad EXTEND g2g_v2 messages
+    #
+    def _test_bad(self,gen_drequest_func):
+        # Send one malformed g2g message and verify the peer closes the
+        # connection (only EXTEND/UNCHOKE allowed before EOF).
+        options = '\x00\x00\x00\x00\x00\x10\x00\x00'
+        s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=self.infohash)
+        print >> sys.stderr,"\ntest: ",gen_drequest_func
+        
+        hsmsg = self.create_good_tribler_extend_hs_v2()
+        s.send(hsmsg)
+        
+        msg = gen_drequest_func()
+        s.send(msg)
+        time.sleep(5)
+        
+        # the other side should not like this and close the connection
+        try:
+            s.s.settimeout(10.0)
+            s.read_handshake_medium_rare(close_ok = True)
+            while True:
+                resp = s.recv()
+                if len(resp) > 0:
+                    print >>sys.stderr,"test: Got",getMessageName(resp[0]),"from peer"
+                    self.assert_(resp[0] == EXTEND or resp[0]==UNCHOKE)
+                else:
+                    self.assert_(len(resp)==0)
+                    s.close()
+                    break
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't close connection"
+            self.assert_(False)
+
+    #
+    # Bad message creators
+    # 
+    def create_empty(self):
+        # EXTEND id byte with no payload at all.
+        return EXTEND+chr(G2G_ID)
+
+    def create_ext_id_not_byte(self):
+        # Extension id position holds arbitrary text, not a single id byte.
+        return EXTEND+'Hallo kijkbuiskinderen'
+    
+    def create_not_bdecodable(self):
+        # Payload that bdecode() rejects.
+        return EXTEND+chr(G2G_ID)+"bla"
+
+    def create_not_dict1(self):
+        # Bencoded int instead of the expected dict.
+        d = 481
+        bd = bencode(d)
+        return EXTEND+chr(G2G_ID)+bd
+
+    def create_not_dict2(self):
+        # Bencoded list instead of the expected dict.
+        d = []
+        bd = bencode(d)
+        return EXTEND+chr(G2G_ID)+bd
+
+    def create_key_not_int(self):
+        # Key is a multi-char word, not a single-char numeric string.
+        d = {'hallo':'d'}
+        bd = bencode(d)
+        return EXTEND+chr(G2G_ID)+bd
+        
+    def create_val_not_str(self):
+        # Value is an int where a string is required.
+        d = {'481':481}
+        bd = bencode(d)
+        return EXTEND+chr(G2G_ID)+bd
+
+    def create_val_too_big(self):
+        # Value byte exceeds the allowed maximum of 100.
+        d = {'481':chr(129)}
+        bd = bencode(d)
+        return EXTEND+chr(G2G_ID)+bd
+
+
+def test_suite():
+    # Suite containing all TestG2G tests.
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestG2G))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_gui_server.py b/instrumentation/next-share/BaseLib/Test/test_gui_server.py
new file mode 100644 (file)
index 0000000..3cd638b
--- /dev/null
@@ -0,0 +1,67 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# TODO: integrate with test_TimedTaskQueue
+
+import unittest
+from time import sleep
+
+from BaseLib.Utilities.TimedTaskQueue import TimedTaskQueue
+
+class TestGUITaskQueue(unittest.TestCase):
+    
+    def setUp(self):
+        # ntasks: how many tasks the current test schedules;
+        # completed: task numbers recorded as they run.
+        self.ntasks = 0
+        self.completed = []
+        self.guiserver = TimedTaskQueue()
+        
+    def tearDown(self):
+        # Give queued tasks time to run, then check that every scheduled
+        # task number 0..ntasks-1 completed exactly once.
+        sleep(2)
+        self.completed.sort()
+        if self.completed != range(self.ntasks):
+            print "test failed",self.completed
+            self.assert_(False)
+
+    def test_simple(self):
+        # One task, executed immediately.
+        self.ntasks = 1
+        
+        self.guiserver.add_task(lambda:self.task(0),0)
+
+    def test_more(self):
+        # Ten tasks, executed immediately.
+        self.ntasks = 10
+        
+        for i in range(self.ntasks):
+            # lambda functions are evil, this is not the same as lambda:task(i)
+            self.guiserver.add_task(self.define_task(i),0)
+
+    def test_delay(self):
+        # One task delayed by 3 seconds.
+        self.ntasks = 1
+        
+        self.guiserver.add_task(lambda:self.task(0),3)
+        print "test: sleeping 5 secs so tasks gets executed"
+        sleep(5)
+
+    def test_delay2(self):
+        # Two delayed tasks scheduled out of order; both must run.
+        self.ntasks = 2
+        
+        self.guiserver.add_task(lambda:self.task(1),3)
+        self.guiserver.add_task(lambda:self.task(0),1)
+        print "test: sleeping 5 secs so tasks gets executed"
+        sleep(5)
+
+    def define_task(self,num):
+        # Bind num by value (avoids the late-binding lambda pitfall above).
+        return lambda:self.task(num)
+
+    def task(self,num):
+        # Record that task 'num' ran.
+        print "Running task",num
+        self.completed.append(num)
+        
+        
+
+def test_suite():
+    # Suite containing all TestGUITaskQueue tests.
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestGUITaskQueue))
+    
+    return suite
+    
+if __name__ == '__main__':
+    unittest.main(defaultTest='test_suite')
diff --git a/instrumentation/next-share/BaseLib/Test/test_magnetlink.py b/instrumentation/next-share/BaseLib/Test/test_magnetlink.py
new file mode 100644 (file)
index 0000000..f88ad4f
--- /dev/null
@@ -0,0 +1,345 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+from binascii import hexlify
+import socket
+import unittest
+import os
+import sys
+import time
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from btconn import BTConnection
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import getMessageName, protocol_name, EXTEND
+from BaseLib.Core.simpledefs import dlstatus_strings, DLSTATUS_DOWNLOADING
+from BaseLib.Core.DecentralizedTracking.MagnetLink.MagnetLink import MagnetHandler
+
+# Port on which the MiniBitTorrent stand-in listens for incoming connections.
+LISTEN_PORT = 12345
+# Toggle for verbose test output.
+DEBUG=True
+
+class MagnetHelpers:
+    # Shared helpers for building and checking ut_metadata EXTEND messages
+    # (metadata is exchanged in 16 KiB pieces; msg_type 0=request, 1=data,
+    # 2=reject, as used below).
+    def __init__(self, tdef):
+        # the metadata that we will transfer, split into 16 KiB pieces
+        infodata = bencode(tdef.get_metainfo()["info"])
+        self.metadata_list = [infodata[index:index+16*1024] for index in xrange(0, len(infodata), 16*1024)]
+        assert len(self.metadata_list) > 100, "We need multiple pieces to test!"
+        self.metadata_size = len(infodata)
+
+    def create_good_extend_handshake(self):
+        # EXTEND handshake advertising ut_metadata under local id 42.
+        payload = {"m":{"ut_metadata":42}, "metadata_size":self.metadata_size}
+        return EXTEND + chr(0) + bencode(payload)
+
+    def create_good_extend_metadata_request(self, metadata_id, piece):
+        # msg_type 0: request the given metadata piece.
+        payload = {"msg_type":0, "piece":piece}
+        return EXTEND + chr(metadata_id) + bencode(payload)
+
+    def create_good_extend_metadata_reply(self, metadata_id, piece):
+        # msg_type 1: piece data follows the bencoded header.
+        payload = {"msg_type":1, "piece":piece, "total_size":len(self.metadata_list[piece])}
+        return EXTEND + chr(metadata_id) + bencode(payload) + self.metadata_list[piece]
+
+    def metadata_id_from_extend_handshake(self, data):
+        # Extract the peer's ut_metadata extension id from its EXTEND handshake.
+        assert data[0] == chr(0)
+        d = bdecode(data[1:])
+        assert isinstance(d, dict)
+        assert 'm' in d.keys()
+        m = d['m']
+        assert isinstance(m, dict)
+        assert "ut_metadata" in m.keys()
+        val = m["ut_metadata"]
+        assert isinstance(val, int)
+        return val
+
+    def read_extend_handshake(self, conn):
+        # Wait for the peer's EXTEND handshake and return its ut_metadata id.
+        conn.s.settimeout(10.0)
+        responce = conn.recv()
+        self.assert_(len(responce) > 0)
+        # print >>sys.stderr,"test: Got reply", getMessageName(responce[0])
+        self.assert_(responce[0] == EXTEND)
+        return self.metadata_id_from_extend_handshake(responce[1:])
+
+    def read_extend_metadata_request(self, conn):
+        # Skip non-EXTEND traffic until a metadata request (msg_type 0)
+        # arrives; return the requested piece number.
+        conn.s.settimeout(10.0)
+        while True:
+            responce = conn.recv()
+            assert len(responce) > 0
+            # print >>sys.stderr,"test: Got data", getMessageName(responce[0])
+            if responce[0] == EXTEND:
+                break
+
+        assert responce[0] == EXTEND
+        assert ord(responce[1]) == 42
+
+        payload = bdecode(responce[2:])
+        assert "msg_type" in payload
+        assert payload["msg_type"] == 0
+        assert "piece" in payload
+        assert isinstance(payload["piece"], int)
+
+        return payload["piece"]
+
+    def read_extend_metadata_reply(self, conn, piece):
+        # Expect a metadata data message (msg_type 1) carrying exactly the
+        # given piece.
+        conn.s.settimeout(10.0)
+        while True:
+            responce = conn.recv()
+            assert len(responce) > 0
+            # print >>sys.stderr,"test: Got data", getMessageName(responce[0])
+            if responce[0] == EXTEND:
+                break
+
+        assert responce[0] == EXTEND
+        assert ord(responce[1]) == 42
+
+        payload = bdecode(responce[2:])
+        assert payload["msg_type"] == 1
+        assert payload["piece"] == piece
+        assert payload["data"] == self.metadata_list[piece]
+
+    def read_extend_metadata_reject(self, conn, piece):
+        # Expect a metadata reject message (msg_type 2) for the given piece.
+        conn.s.settimeout(10.0)
+        while True:
+            responce = conn.recv()
+            assert len(responce) > 0
+            # print >>sys.stderr,"test: Got reject", getMessageName(responce[0])
+            if responce[0] == EXTEND:
+                break
+
+        assert responce[0] == EXTEND
+        assert ord(responce[1]) == 42
+
+        payload = bdecode(responce[2:])
+        assert payload["msg_type"] == 2
+        assert payload["piece"] == piece
+
+    def read_extend_metadata_close(self, conn):
+        """
+        No extend metadata messages may be send and the connection
+        needs to close.
+        """
+        conn.s.settimeout(10.0)
+        while True:
+            responce = conn.recv()
+            if len(responce) == 0:
+                break
+            assert not (responce[0] == EXTEND and responce[1] == 42)
+
+class TestMagnetMiniBitTorrent(TestAsServer, MagnetHelpers):
+    """
+    A MiniBitTorrent instance is used to connect to BitTorrent clients
+    and download the info part from the metadata. 
+    """
+    def setUp(self):
+        """ override TestAsServer """
+        # listener for incoming connections from MiniBitTorrent
+        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.server.bind(("localhost", LISTEN_PORT))
+        self.server.listen(1)
+
+        # the metadata that we want to transfer
+        self.tdef = TorrentDef()
+        self.tdef.add_content(os.path.join(os.getcwd(), "API", "file.wmv"))
+        self.tdef.set_tracker("http://fake.net/announce")
+        # we use a small piece length to obtain multiple pieces
+        self.tdef.set_piece_length(1) 
+        self.tdef.finalize()
+
+        MagnetHelpers.__init__(self, self.tdef)
+
+        # startup the client
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+
+    def create_good_url(self, infohash=None, title=None, tracker=None):
+        # Build a magnet URI; defaults to this test's torrent infohash.
+        url = "magnet:?xt=urn:btih:"
+        if infohash:
+            assert isinstance(infohash, str)
+            url += hexlify(infohash)
+        else:
+            url += hexlify(self.tdef.get_infohash())
+        if title:
+            assert isinstance(title, str)
+            url += "&dn=" + title
+        if tracker:
+            assert isinstance(tracker, str)
+            url += "&tr=" + tracker
+        return url
+
+    def test_good_transfer(self):
+        # End-to-end: client resolves our magnet link by fetching all
+        # metadata pieces from this fake peer, then must stop and close.
+        def torrentdef_retrieved(tdef):
+            tags["retrieved"] = True
+            tags["metainfo"] = tdef.get_metainfo()
+
+        tags = {"retrieved":False}
+
+        assert TorrentDef.retrieve_from_magnet(self.create_good_url(), torrentdef_retrieved)
+
+        # supply fake addresses (regular dht obviously wont work here)
+        for magnetlink in MagnetHandler.get_instance().get_magnets():
+            magnetlink._swarm.add_potential_peers([("localhost", LISTEN_PORT)])
+
+        # accept incoming connection
+        self.server.settimeout(10.0)
+        sock, address = self.server.accept()
+        assert sock, "No incoming connection"
+
+        # handshakes
+        conn = BTConnection(address[0], address[1], opensock=sock, user_infohash=self.tdef.get_infohash())
+        conn.send(self.create_good_extend_handshake())
+        conn.read_handshake_medium_rare()
+        metadata_id = self.read_extend_handshake(conn)
+
+        # serve pieces
+        for counter in xrange(len(self.metadata_list)):
+            piece = self.read_extend_metadata_request(conn)
+            assert 0 <= piece < len(self.metadata_list)
+            conn.send(self.create_good_extend_metadata_reply(metadata_id, piece))
+
+        # no more metadata request may be send and the connection must
+        # be closed
+        self.read_extend_metadata_close(conn)
+
+        time.sleep(5)
+        assert tags["retrieved"]
+        assert tags["metainfo"]["info"] == self.tdef.get_metainfo()["info"]
+
+class TestMetadata(TestAsServer, MagnetHelpers):
+    """
+    Once we are downloading a torrent, our client should respond to
+    the ut_metadata extention message.  This allows other clients to
+    obtain the info part of the metadata from us.
+    """
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+    
+        # the metadata that we want to transfer
+        self.tdef = TorrentDef()
+        self.tdef.add_content(os.path.join(os.getcwd(), "API", "file.wmv"))
+        self.tdef.set_tracker(self.session.get_internal_tracker_url())
+        # we use a small piece length to obtain multiple pieces
+        self.tdef.set_piece_length(1) 
+        self.tdef.finalize()
+        # self.tdef.save(os.path.join(self.session.get_state_dir(), "gen.torrent"))
+        
+        MagnetHelpers.__init__(self, self.tdef)
+
+    def setup_seeder(self):
+        self.seeder_setup_complete = False
+        self.seeder_teardown_complete = False
+        self.seeder_teardown = False
+
+        self.dscfg = DownloadStartupConfig()
+        self.dscfg.set_dest_dir(os.getcwd())
+        self.download = self.session.start_download(self.tdef, self.dscfg)
+        self.download.set_state_callback(self.seeder_state_callback)
+
+        counter = 0
+        while not self.seeder_setup_complete:
+            counter += 1
+            time.sleep(1)
+            assert counter < 30, "timeout"
+
+        print >> sys.stderr, "test: setup_seeder() complete"
+
+    def teardown_seeder(self):
+        self.seeder_teardown_complete = False
+        self.session.remove_download(self.download)
+
+        counter = 0
+        while not self.seeder_setup_complete:
+            counter += 1
+            time.sleep(1)
+            assert counter < 30, "timeout"
+
+        print >> sys.stderr, "test: teardown_seeder() complete"
+
+    def seeder_state_callback(self,ds):
+        assert not self.seeder_teardown_complete
+        self.seeder_setup_complete = (ds.get_status() == DLSTATUS_DOWNLOADING)
+        d = ds.get_download()
+        print >> sys.stderr, "test: seeder:", `d.get_def().get_name()`, dlstatus_strings[ds.get_status()], ds.get_progress()
+        if self.seeder_teardown:
+            self.seeder_teardown_complete = True
+        else:
+            return (1.0, False)
+
+    def test_all(self):
+        self.setup_seeder()
+        try:
+            self.subtest_good_flood()
+        finally:
+            self.teardown_seeder()
+
+        self.setup_seeder()
+        try:
+            self.subtest_good_request()
+            self.subtest_bad_request()
+        finally:
+            self.teardown_seeder()
+
+    def subtest_good_request(self):
+        conn = BTConnection("localhost", self.hisport, user_infohash=self.tdef.get_infohash())
+        conn.send(self.create_good_extend_handshake())
+        conn.read_handshake_medium_rare()
+        metadata_id = self.read_extend_handshake(conn)
+
+        # request metadata block 0, 2, 3, and the last
+        conn.send(self.create_good_extend_metadata_request(metadata_id, 0))
+        conn.send(self.create_good_extend_metadata_request(metadata_id, 2))
+        conn.send(self.create_good_extend_metadata_request(metadata_id, 3))
+        conn.send(self.create_good_extend_metadata_request(metadata_id, len(self.metadata_list) - 1))
+
+        self.read_extend_metadata_reply(conn, 0)
+        self.read_extend_metadata_reply(conn, 2)
+        self.read_extend_metadata_reply(conn, 3)
+        self.read_extend_metadata_reply(conn, len(self.metadata_list) - 1)
+
+    def subtest_good_flood(self):
+        conn = BTConnection("localhost", self.hisport, user_infohash=self.tdef.get_infohash())
+        conn.send(self.create_good_extend_handshake())
+        conn.read_handshake_medium_rare()
+        metadata_id = self.read_extend_handshake(conn)
+
+        for counter in xrange(len(self.metadata_list) * 2):
+            piece = counter % len(self.metadata_list)
+            conn.send(self.create_good_extend_metadata_request(metadata_id, piece))
+
+            if counter > len(self.metadata_list):
+                self.read_extend_metadata_reject(conn, piece)
+            else:
+                self.read_extend_metadata_reply(conn, piece)
+
+    def subtest_bad_request(self):
+        self.bad_request_and_disconnect({"msg_type":0, "piece":len(self.metadata_list)})
+        self.bad_request_and_disconnect({"msg_type":0, "piece":-1})
+        self.bad_request_and_disconnect({"msg_type":0, "piece":"1"})
+        self.bad_request_and_disconnect({"msg_type":0, "piece":[1,2]})
+        self.bad_request_and_disconnect({"msg_type":0, "PIECE":1})
+        
+    def bad_request_and_disconnect(self, payload):
+        conn = BTConnection("localhost", self.hisport, user_infohash=self.tdef.get_infohash())
+        conn.send(self.create_good_extend_handshake())
+        conn.read_handshake_medium_rare()
+        metadata_id = self.read_extend_handshake(conn)
+
+        conn.send(EXTEND + chr(metadata_id) + bencode(payload))
+        self.read_extend_metadata_close(conn)
+
+if __name__ == "__main__":
+    tests = [TestMetadata, TestMagnetMiniBitTorrent]
+    test_dict = dict([(test.__name__, test) for test in tests])
+    if len(sys.argv) == 2 and sys.argv[1] in test_dict:
+        unittest.main(defaultTest=test_dict[sys.argv[1]])
+    else:
+        print >> sys.stderr, "What test do you want to run? "
+        print >> sys.stderr, "Available:", test_dict.keys()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_merkle.py b/instrumentation/next-share/BaseLib/Test/test_merkle.py
new file mode 100644 (file)
index 0000000..0d8026e
--- /dev/null
@@ -0,0 +1,318 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import unittest
+
+from tempfile import mkstemp
+import os
+from types import StringType, DictType
+from math import ceil
+
+from BaseLib.Core.API import *
+from BaseLib.Core.Merkle.merkle import *
+from BaseLib.Core.BitTornado.bencode import bdecode
+
+from traceback import print_exc
+
+DEBUG=False
+
+class TestMerkleHashes(unittest.TestCase):
+    """ 
+    Testing Simple Merkle Hashes extension version 0, in particular:
+    * The algorithmic part
+    * The .torrent file part
+    See test_merkle_msg.py for protocol testing.
+    """
+    
+    def setUp(self):
+        pass
+        
+    def tearDown(self):
+        pass
+
+    def test_get_hashes_for_piece(self):
+        """ 
+            test MerkleTree.get_hashes_for_piece() method 
+        """
+        self._test_123pieces_tree_get_hashes()
+        self._test_8piece_tree_uncle_calc()
+
+    def _test_123pieces_tree_get_hashes(self):
+        for n in range(1,64):
+            piece_size = 2 ** n
+            self._test_1piece_tree_get_hashes(piece_size,piece_size)
+            for add in [1,piece_size-1]:
+                self._test_1piece_tree_get_hashes(piece_size,add)
+                self._test_2piece_tree_get_hashes(piece_size,add)
+                self._test_3piece_tree_get_hashes(piece_size,add)
+
+    def _test_1piece_tree_get_hashes(self,piece_size,length_add):
+        """ testing get_hashes_for_piece on tree with 1 piece """
+        msg = "1piece_get_hashes("+str(piece_size)+","+str(length_add)+") failed"
+        npieces = 1
+        total_length = length_add
+
+        piece_hashes = ['\x01\x02\x03\x04\x05\x06\x07\x08\x07\x06\x05\x04\x03\x02\x01\x00\x01\x02\x03\x04' ] * npieces
+        tree = MerkleTree(piece_size,total_length,None,piece_hashes)
+        for p in range(npieces):
+            ohlist = tree.get_hashes_for_piece(p)
+            self.assert_(len(ohlist)==1,msg)
+            self.assert_(ohlist[0][0] == 0,msg)
+            self.assertEquals(ohlist[0][1],piece_hashes[0],msg)
+
+    def _test_2piece_tree_get_hashes(self,piece_size,length_add):
+        """testing get_hashes_for_piece on tree with 2 pieces """
+        msg = "2piece_get_hashes("+str(piece_size)+","+str(length_add)+") failed"
+        npieces = 2
+        total_length = piece_size+length_add
+
+        piece_hashes = ['\x01\x02\x03\x04\x05\x06\x07\x08\x07\x06\x05\x04\x03\x02\x01\x00\x01\x02\x03\x04' ] * npieces
+        tree = MerkleTree(piece_size,total_length,None,piece_hashes)
+        for p in range(npieces):
+            ohlist = tree.get_hashes_for_piece(p)
+            self.assert_(len(ohlist)==3)
+            ohlist.sort()
+            self.assert_(ohlist[0][0] == 0,msg)
+            self.assert_(ohlist[1][0] == 1,msg)
+            self.assert_(ohlist[2][0] == 2,msg)
+            self.assertDigestEquals(ohlist[1][1]+ohlist[2][1], ohlist[0][1],msg)
+
+    def _test_3piece_tree_get_hashes(self,piece_size,length_add):
+        """ testing get_hashes_for_piece on tree with 3 pieces """
+        msg = "3piece_get_hashes("+str(piece_size)+","+str(length_add)+") failed"
+        npieces = 3
+        total_length = 2*piece_size+length_add
+
+        piece_hashes = ['\x01\x02\x03\x04\x05\x06\x07\x08\x07\x06\x05\x04\x03\x02\x01\x00\x01\x02\x03\x04' ] * npieces
+        tree = MerkleTree(piece_size,total_length,None,piece_hashes)
+        for p in range(npieces):
+            ohlist = tree.get_hashes_for_piece(p)
+            self.assert_(len(ohlist)==4,msg)
+            ohlist.sort()
+            if p == 0 or p == 1:
+                self.assert_(ohlist[0][0] == 0,msg)
+                self.assert_(ohlist[1][0] == 2,msg)
+                self.assert_(ohlist[2][0] == 3,msg)
+                self.assert_(ohlist[3][0] == 4,msg)
+                digest34 = self.calc_digest(ohlist[2][1]+ohlist[3][1])
+                self.assertDigestEquals(digest34+ohlist[1][1],ohlist[0][1],msg)
+            else:
+                self.assert_(ohlist[0][0] == 0,msg)
+                self.assert_(ohlist[1][0] == 1,msg)
+                self.assert_(ohlist[2][0] == 5,msg)
+                self.assert_(ohlist[3][0] == 6,msg)
+                digest56 = self.calc_digest(ohlist[2][1]+ohlist[3][1])
+                self.assertDigestEquals(ohlist[1][1]+digest56,ohlist[0][1],msg)
+
+    def assertDigestEquals(self,data,digest,msg=None):
+        self.assertEquals(self.calc_digest(data),digest,msg)
+
+    def calc_digest(self,data):
+        digester = sha()
+        digester.update(data)
+        return digester.digest()
+
+    def _test_8piece_tree_uncle_calc(self):
+        npieces = 8
+        hashlist = self.get_indices_for_piece(0,npieces)
+        assert hashlist == [7, 8, 4, 2, 0]
+
+        hashlist = self.get_indices_for_piece(1,npieces)
+        assert hashlist == [8, 7, 4, 2, 0]
+
+        hashlist = self.get_indices_for_piece(2,npieces)
+        assert hashlist == [9, 10, 3, 2, 0]
+
+        hashlist = self.get_indices_for_piece(3,npieces)
+        assert hashlist == [10, 9, 3, 2, 0]
+
+        hashlist = self.get_indices_for_piece(4,npieces)
+        assert hashlist == [11, 12, 6, 1, 0]
+
+        hashlist = self.get_indices_for_piece(5,npieces)
+        assert hashlist == [12, 11, 6, 1, 0]
+
+        hashlist = self.get_indices_for_piece(6,npieces)
+        assert hashlist == [13, 14, 5, 1, 0]
+
+        hashlist = self.get_indices_for_piece(7,npieces)
+        assert hashlist == [14, 13, 5, 1, 0]
+
+    def get_indices_for_piece(self,index,npieces):
+        height = get_tree_height(npieces)
+        tree = create_tree(height)
+        ohlist = get_hashes_for_piece(tree,height,index)
+        list = []
+        for oh in ohlist:
+            list.append(oh[0])
+        return list
+
+    def test_check_hashes_update_hash_admin(self):
+        """ 
+            test MerkleTree.check_hashes() and update_hash_admin() methods
+        """
+        for n in range(1,64):
+            piece_size = 2 ** n
+            for add in [1,piece_size-1]:
+                self._test_3piece_tree_check_hashes_update_hash_admin(piece_size, add)
+
+    def _test_3piece_tree_check_hashes_update_hash_admin(self,piece_size,length_add):
+        """ testing check_hashes and update_hash_admin tree with 3 pieces """
+        msg = "3piece_check_hashes("+str(piece_size)+","+str(length_add)+") failed"
+        npieces = 3
+        total_length = 2*piece_size+length_add
+
+        piece_hashes = ['\x01\x02\x03\x04\x05\x06\x07\x08\x07\x06\x05\x04\x03\x02\x01\x00\x01\x02\x03\x04' ] * npieces
+        fulltree = MerkleTree(piece_size,total_length,None,piece_hashes)
+        root_hash = fulltree.get_root_hash()
+        emptytree = MerkleTree(piece_size,total_length,root_hash,None)
+        empty_piece_hashes = [0] * npieces
+
+        for p in range(npieces):
+            ohlist = fulltree.get_hashes_for_piece(p)
+            self.assert_(emptytree.check_hashes(ohlist),msg)
+
+        for p in range(npieces):
+            ohlist = fulltree.get_hashes_for_piece(p)
+            self.assert_(emptytree.check_hashes(ohlist),msg)
+            emptytree.update_hash_admin(ohlist,empty_piece_hashes)
+
+        for p in range(npieces):
+            self.assert_(piece_hashes[p] == empty_piece_hashes[p],msg)
+
+    def test_merkle_torrent(self):
+        """
+            test the creation of Merkle torrent files via TorrentMaker/btmakemetafile.py
+        """
+        piece_size = 2 ** 18
+        for file_size in [1,piece_size-1,piece_size,piece_size+1,2*piece_size,(2*piece_size)+1]:
+            self.create_merkle_torrent(file_size,piece_size)
+
+    def create_merkle_torrent(self,file_size,piece_size):
+        try:
+            # 1. create file
+            [handle,datafilename]= mkstemp()
+            os.close(handle)
+            block = "".zfill(file_size)
+            fp = open(datafilename,"wb")
+            fp.write(block)
+            fp.close()
+            torrentfilename = datafilename+'.tribe'
+
+            # 2. Set torrent args
+            tdef = TorrentDef()
+            tdef.set_tracker("http://localhost:6969/announce")
+            tdef.set_create_merkle_torrent(True)
+            tdef.set_piece_length(int(log(piece_size,2)))
+
+            # 3. create Merkle torrent
+            #make_meta_file(datafilename,url,params,flag,dummy_progress,1,dummy_filecallback)
+            tdef.add_content(datafilename)
+            tdef.finalize()
+            tdef.save(torrentfilename)
+
+            # 4. read Merkle torrent
+            fp = open(torrentfilename,"rb")
+            data = fp.read(10000)
+            fp.close()
+
+            # 5. test Merkle torrent
+            # basic tests
+            dict = bdecode(data)
+            self.assert_(type(dict) == DictType)
+            self.assert_(dict.has_key('info'))
+            info = dict['info']
+            self.assert_(type(info) == DictType)
+            self.assert_(not info.has_key('pieces'))
+            self.assert_(info.has_key('root hash'))
+            roothash = info['root hash']
+            self.assert_(type(roothash) == StringType)
+            self.assert_(len(roothash)== 20)
+
+            # create hash tree
+            hashes = self.read_and_calc_hashes(datafilename,piece_size)
+            npieces = len(hashes)
+            if DEBUG:
+                print "npieces is",npieces
+            height = log(npieces,2)+1
+            if height > int(height):
+                height += 1
+            height = int(height)
+            if DEBUG:
+                print "height is",height
+
+            starto = (2 ** (height-1))-1
+
+            if DEBUG:
+                print "starto is",starto
+            tree = [0] * ((2 ** (height))-1)
+            if DEBUG:
+                print "len tree is",len(tree)
+            # put hashes in tree
+            for i in range(len(hashes)):
+                o = starto + i
+                tree[o] = hashes[i]
+
+            # fill unused
+            nplaces = (2 ** height)-(2 ** (height-1))        
+            xso = starto+npieces
+            xeo = starto+nplaces
+            for o in range(xso,xeo):
+                tree[o] = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+
+            # calc higher level ones
+            if height > 1:
+                for o in range(len(tree)-starto-2,-1,-1):
+                    co = self.get_child_offset(o,height)
+                    if DEBUG:
+                        print "offset is",o,"co is",co
+                    data = tree[co]+tree[co+1]
+                    digest = self.calc_digest(data)
+                    tree[o] = digest
+            self.assert_(tree[0],roothash)
+        except Exception,e:
+            print_exc()
+        #finally:
+        #    os.remove(datafilename)
+        #    os.remove(torrentfilename)
+
+    def read_and_calc_hashes(self,filename,piece_size):
+        hashes = []
+        fp = open(filename,"rb")
+        while True:
+            block = fp.read(piece_size)
+            if len(block) == 0:
+                break
+            digest = self.calc_digest(block)
+            hashes.append(digest)
+            if len(block) != piece_size:
+                break
+        fp.close()
+        return hashes
+
+    def get_child_offset(self,offset,height):
+        if DEBUG:
+            print "get_child(",offset,",",height,")"
+        if offset == 0:
+            level = 1
+        else:
+            level = log(offset,2)
+            if level == int(level):
+                level += 1
+            else:
+                level = ceil(level)
+            level = int(level)
+        starto = (2 ** (level-1))-1
+        diffo = offset-starto
+        diffo *= 2
+        cstarto = (2 ** level)-1
+        return cstarto+diffo
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestMerkleHashes))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_merkle_msg.bat b/instrumentation/next-share/BaseLib/Test/test_merkle_msg.bat
new file mode 100644 (file)
index 0000000..f3386f1
--- /dev/null
@@ -0,0 +1,7 @@
+set PYTHONPATH=..\..;%PYTHONPATH%\r
+\r
+python test_merkle_msg.py singtest_good_hashpiece_bepstyle\r
+python test_merkle_msg.py singtest_good_hashpiece_oldstyle\r
+python test_merkle_msg.py singtest_good_request_bepstyle\r
+python test_merkle_msg.py singtest_bad_hashpiece_bepstyle\r
+python test_merkle_msg.py singtest_bad_hashpiece_oldstyle\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_merkle_msg.py b/instrumentation/next-share/BaseLib/Test/test_merkle_msg.py
new file mode 100644 (file)
index 0000000..79c0ce6
--- /dev/null
@@ -0,0 +1,628 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# TODO: we download from Tribler \r
+#\r
+\r
+import unittest\r
+import os\r
+import sys\r
+import time\r
+import socket\r
+from sha import sha\r
+from traceback import print_exc\r
+from types import DictType,StringType,IntType,ListType\r
+from M2Crypto import Rand\r
+\r
+from BaseLib.Test.test_as_server import TestAsServer\r
+from olconn import OLConnection\r
+from btconn import BTConnection\r
+from BaseLib.Core.TorrentDef import TorrentDef\r
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig\r
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode\r
+from BaseLib.Core.BitTornado.BT1.convert import tobinary,toint\r
+from BaseLib.Core.BitTornado.bitfield import Bitfield\r
+from BaseLib.Core.BitTornado.BT1.MessageID import *\r
+from BaseLib.Core.Merkle.merkle import MerkleTree\r
+\r
+from BaseLib.Core.Utilities.utilities import isValidIP\r
+\r
+DEBUG=True\r
+\r
+class TestMerkleMessage(TestAsServer):\r
+    """ \r
+    Testing Merkle hashpiece messages for both:\r
+    * Merkle BEP style\r
+    * old Tribler <= 4.5.2 that did not use the Extention protocol (BEP 10).\r
+     \r
+    See BitTornado/BT1/Connecter.py\r
+    """\r
+\r
+    def setUp(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUp(self)\r
+        print >>sys.stderr,"test: Giving Session time to startup"\r
+        time.sleep(5)\r
+        print >>sys.stderr,"test: Session should have started up"\r
+\r
+    def setUpPreSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPreSession(self)\r
+        self.config.set_overlay(False)\r
+        self.config.set_megacache(False)\r
+\r
+    \r
+    def setUpPostSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPostSession(self)\r
+\r
+        # Let Tribler start downloading an non-functioning torrent, so\r
+        # we can talk to a normal download engine.\r
+        self.tdef = TorrentDef()\r
+        self.sourcefn = os.path.join(os.getcwd(),"API","file2.wmv")\r
+        self.tdef.add_content(self.sourcefn)\r
+        self.tdef.set_create_merkle_torrent(True)\r
+        self.tdef.set_tracker("http://127.0.0.1:12/announce")\r
+        self.tdef.finalize()\r
+\r
+        self.torrentfn = os.path.join(self.session.get_state_dir(),"gen.torrent")\r
+        self.tdef.save(self.torrentfn)\r
+        \r
+        dscfg = self.setUpDownloadConfig()\r
+        \r
+        self.session.start_download(self.tdef,dscfg)\r
+\r
+        self.infohash = self.tdef.get_infohash()\r
+        self.mylistenport = 4810\r
+        \r
+        self.numpieces = (self.tdef.get_length()+self.tdef.get_piece_length()-1) / self.tdef.get_piece_length()\r
+        b = Bitfield(self.numpieces)\r
+        for i in range(self.numpieces):\r
+            b[i] = True\r
+        self.assert_(b.complete())\r
+        self.seederbitfieldstr = b.tostring()\r
+\r
+        #piece_hashes = ['\x01\x02\x03\x04\x05\x06\x07\x08\x07\x06\x05\x04\x03\x02\x01\x00\x01\x02\x03\x04' ] * npieces\r
+        # Construct Merkle tree\r
+        tdef2 = TorrentDef()\r
+        tdef2.add_content(self.sourcefn)\r
+        tdef2.set_create_merkle_torrent(False)\r
+        tdef2.set_tracker("http://127.0.0.1:12/announce")\r
+        tdef2.set_piece_length(self.tdef.get_piece_length())\r
+        tdef2.finalize()\r
+        metainfo = tdef2.get_metainfo()\r
+        \r
+        piecesstr = metainfo['info']['pieces']\r
+        print >>sys.stderr,"test: pieces has len",len(piecesstr)\r
+        piece_hashes = []\r
+        for i in range(0,len(piecesstr),20):\r
+            hash = piecesstr[i:i+20]\r
+            print >>sys.stderr,"test: piece",i/20,"hash",`hash`\r
+            piece_hashes.append(hash)\r
+            \r
+        print >>sys.stderr,"test: Putting",len(piece_hashes),"into MerkleTree, size",self.tdef.get_piece_length(),tdef2.get_piece_length()\r
+        \r
+        self.tree = MerkleTree(self.tdef.get_piece_length(),self.tdef.get_length(),None,piece_hashes)\r
+        \r
+        \r
+        f = open(self.sourcefn,"rb")\r
+        piece1 = f.read(2 ** 18)\r
+        piece2 = f.read(2 ** 18)\r
+        print >>sys.stderr,"read piece1",len(piece1)\r
+        print >>sys.stderr,"read piece2",len(piece2)\r
+        f.close()\r
+        hash1 = sha(piece1).digest()\r
+        hash2 = sha(piece2).digest()\r
+        print >>sys.stderr,"hash piece1",`hash1`\r
+        print >>sys.stderr,"hash piece2",`hash2`\r
+        f2 = open("piece1.bin","wb")\r
+        f2.write(piece2)\r
+        f2.close()\r
+        \r
+\r
+    def setUpDownloadConfig(self):\r
+        dscfg = DownloadStartupConfig()\r
+        print >>sys.stderr,"test: Downloading to",self.config_path\r
+        dscfg.set_dest_dir(self.config_path)\r
+        dscfg.set_breakup_seed_bitfield(False)\r
+        \r
+        return dscfg        \r
+        \r
+    def tearDown(self):\r
+        TestAsServer.tearDown(self)\r
+        try:\r
+            os.remove('piece1.bin')\r
+        except:\r
+            pass\r
+\r
+        \r
+    def singtest_good_hashpiece_bepstyle(self):\r
+        self.subtest_good_hashpiece(False)\r
+\r
+    def singtest_good_hashpiece_oldstyle(self):\r
+        self.subtest_good_hashpiece(True)\r
+\r
+    def singtest_good_request_bepstyle(self):\r
+        # Let Session download file first\r
+        self.subtest_good_hashpiece(False)\r
+        # Now connect as different peer and download\r
+        print >>sys.stderr,"\n\ntest: test_good_request: STARTING"\r
+        self._test_good_request()\r
+\r
+    def singtest_bad_hashpiece_bepstyle(self):\r
+        self.subtest_bad_hashpiece(False)\r
+\r
+    def singtest_bad_hashpiece_oldstyle(self):\r
+        self.subtest_bad_hashpiece(True)\r
+\r
+    #\r
+    # Good hashpiece message\r
+    #\r
+    def subtest_good_hashpiece(self,oldstyle):\r
+        print >>sys.stderr,"test: Testing good hashpiece, oldstyle",oldstyle\r
+        if oldstyle:\r
+            self._test_good(self.create_good_hashpiece,oldstyle,self.create_good_tribler_extend_hs,infohash=self.infohash)\r
+        else:\r
+            options = '\x00\x00\x00\x00\x00\x10\x00\x00'\r
+            self._test_good(self.create_good_hashpiece,oldstyle,self.create_good_nontribler_extend_hs,options=options,infohash=self.infohash)\r
+\r
+    def _test_good(self,msg_gen_func,oldstyle,extend_hs_gen_func,options=None,infohash=None):\r
+        if options is None and infohash is None:\r
+            s = BTConnection('localhost',self.hisport)\r
+        elif options is None:\r
+            s = BTConnection('localhost',self.hisport,user_infohash=infohash)\r
+        elif infohash is None:\r
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options)\r
+        else:\r
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=infohash)\r
+        print >>sys.stderr,"test: test_good: Create EXTEND HS"\r
+        msg = extend_hs_gen_func()\r
+        print >>sys.stderr,"test: test_good: Sending EXTEND HS",`msg`\r
+        s.send(msg)\r
+        print >>sys.stderr,"test: test_good: Waiting for BT HS"\r
+        s.read_handshake_medium_rare()\r
+\r
+        # Tribler should send an EXTEND message back\r
+        try:\r
+            print >>sys.stderr,"test: Waiting for reply"\r
+            s.s.settimeout(10.0)\r
+            resp = s.recv()\r
+            self.assert_(len(resp) > 0)\r
+            print >>sys.stderr,"test: Got reply",getMessageName(resp[0])\r
+            self.assert_(resp[0] == EXTEND)\r
+            self.check_tribler_extend_hs(resp[1:])\r
+            \r
+            # 1. Pretend we're seeder: send BITFIELD and UNCHOKE\r
+            msg = BITFIELD+self.seederbitfieldstr\r
+            s.send(msg)\r
+            msg = UNCHOKE\r
+            s.send(msg)\r
+            print >>sys.stderr,"test: Pretend we are seeder"\r
+            while True:\r
+                resp = s.recv()\r
+                self.assert_(len(resp) > 0)\r
+                print >>sys.stderr,"test: Got reply2",getMessageName(resp[0])\r
+                self.assert_(resp[0] == REQUEST or resp[0] == INTERESTED or resp[0] == UNCHOKE or resp[0] == HAVE or resp[0] == NOT_INTERESTED)\r
+                if resp[0] == REQUEST:\r
+                    chunkid = self.check_request(resp)\r
+            \r
+                    # 2. Reply to REQUEST with HASHPIECE (oldstyle) or Tr_hashpiece\r
+                    msg = msg_gen_func(oldstyle,chunkid)\r
+                    s.send(msg)\r
+                elif resp[0] == NOT_INTERESTED:\r
+                    break\r
+            \r
+            #s.close()\r
+        except socket.timeout:\r
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply in time"\r
+            self.assert_(False)\r
+        \r
+        destfn = os.path.join(self.config_path,"file2.wmv")\r
+        sf = open(self.sourcefn,"rb")\r
+        df = open(destfn,"rb")\r
+        n = self.tdef.get_piece_length()\r
+        while True:\r
+            sdata = sf.read(n)\r
+            if len(sdata) == 0:\r
+                break\r
+            ddata = df.read(n)\r
+            self.assert_(sdata == ddata)\r
+\r
+        time.sleep(3)\r
+        s.close()\r
+\r
+    def create_good_nontribler_extend_hs(self):\r
+        """ Merkle BEP style """\r
+        d = {}\r
+        d['m'] = {'Tr_hashpiece':250}\r
+        d['p'] = self.mylistenport\r
+        d['v'] = 'TestSweet 1.2.3.4'\r
+        bd = bencode(d)\r
+        return EXTEND+chr(0)+bd\r
+\r
+    def create_good_tribler_extend_hs(self):\r
+        """ old Tribler style """\r
+        d = {}\r
+        d['m'] = {'Tr_OVERLAYSWARM':253}\r
+        d['p'] = self.mylistenport\r
+        d['v'] = 'Tribler 3.5.1'\r
+        bd = bencode(d)\r
+        return EXTEND+chr(0)+bd\r
+\r
+\r
+    def check_tribler_extend_hs(self,data):\r
+        self.assert_(data[0] == chr(0))\r
+        d = bdecode(data[1:])\r
+        self.assert_(type(d) == DictType)\r
+        self.assert_('m' in d.keys())\r
+        m = d['m']\r
+        self.assert_(type(m) == DictType)\r
+        self.assert_('Tr_hashpiece' in m.keys())\r
+        val = m['Tr_hashpiece']\r
+        self.assert_(type(val) == IntType)\r
+        self.assert_(val == 250)\r
+\r
+    def check_request(self,data):\r
+        index = toint(data[1:5])\r
+        begin = toint(data[5:9])\r
+        length = toint(data[9:])\r
+        return (index,begin,length)\r
+\r
+    def create_good_hashpiece(self,oldstyle,chunkid):\r
+        index, begin, length = chunkid\r
+        if begin == 0:\r
+            ohlist = self.tree.get_hashes_for_piece(index)\r
+        else:\r
+            ohlist = []\r
+\r
+        chunk = self.read_chunk(index,begin,length)\r
+        bohlist = bencode(ohlist)\r
+        \r
+        print >>sys.stderr,"test: create_good_hashpiece:",index,begin,length,"==len",len(chunk)\r
+\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        if oldstyle:\r
+            msg = HASHPIECE+payload\r
+        else:\r
+            # Offical: use the msg ID he defined in his handshake\r
+            msg = EXTEND+HASHPIECE+payload\r
+        return msg\r
+\r
+    def read_chunk(self,index,begin,length):\r
+        offset = index*self.tdef.get_piece_length() + begin\r
+        f = open(self.sourcefn,"rb")\r
+        f.seek(offset)\r
+        chunk = f.read(length)\r
+        f.close()\r
+        return chunk\r
+\r
+\r
+\r
+    #\r
+    # Test whether Tribler sends good Tr_hashpiece on our requests\r
+    #\r
+    def _test_good_request(self):\r
+        options = '\x00\x00\x00\x00\x00\x10\x00\x00'\r
+        myid = Rand.rand_bytes(20)\r
+         \r
+        s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=self.infohash,myid=myid)\r
+        msg = self.create_good_nontribler_extend_hs()\r
+        s.send(msg)\r
+        s.read_handshake_medium_rare()\r
+\r
+        # Tribler should send an EXTEND message back\r
+        try:\r
+            print >>sys.stderr,"test: Waiting for reply"\r
+            s.s.settimeout(10.0)\r
+            resp = s.recv()\r
+            self.assert_(len(resp) > 0)\r
+            print >>sys.stderr,"test: Got reply",getMessageName(resp[0])\r
+            self.assert_(resp[0] == EXTEND)\r
+            self.check_tribler_extend_hs(resp[1:])\r
+            \r
+            # 1. Pretend we're leecher: send INTERESTED\r
+            msg = INTERESTED\r
+            s.send(msg)\r
+            print >>sys.stderr,"test: Pretend we are leecher"\r
+            while True:\r
+                resp = s.recv()\r
+                self.assert_(len(resp) > 0)\r
+                print >>sys.stderr,"test: Got reply2",getMessageName(resp[0])\r
+                if resp[0] == EXTEND:\r
+                    print >>sys.stderr,"test: Got EXTEND type",getMessageName(resp[1])\r
+                self.assert_(resp[0] == UNCHOKE or resp[0] == BITFIELD or resp[0] == EXTEND or resp[0] == HAVE)\r
+                if resp[0] == UNCHOKE:\r
+                    # 2. Reply with REQUESTs\r
+                    for index in range(0,self.numpieces):\r
+                        plen = self.get_piece_length(index)\r
+                            \r
+                        for begin in range(0,plen,2 ** 14):\r
+                            length = self.get_chunk_length(index,begin)\r
+                            print >>sys.stderr,"RETRIEVE",index,begin,length\r
+                            chunkid = (index,begin,length)\r
+                            msg = self.create_request(chunkid)\r
+                            s.send(msg)\r
+                            \r
+                    #s.send(NOT_INTERESTED)\r
+                        \r
+                elif resp[0] == EXTEND and resp[1] == HASHPIECE:\r
+                    done = self.check_hashpiece(resp)\r
+                    if done:\r
+                        break\r
+                elif resp[0] == BITFIELD:\r
+                    self.check_bitfield(resp)\r
+            \r
+            #s.close()\r
+        except socket.timeout:\r
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply in time"\r
+            self.assert_(False)\r
+\r
+        time.sleep(3)\r
+        s.close()\r
+\r
+\r
+    def get_piece_length(self,index):\r
+        if index == (self.numpieces-1):\r
+            plen = self.tdef.get_length() % self.tdef.get_piece_length()\r
+        else:\r
+            plen = self.tdef.get_piece_length()\r
+        return plen\r
+    \r
+    def get_chunk_length(self,index,begin):\r
+        plen = self.get_piece_length(index)\r
+        length = 2 ** 14\r
+        if index == (self.numpieces-1):\r
+            if (begin+2 ** 14) > plen:\r
+                length = plen - begin\r
+        return length\r
+                \r
+        \r
+    def create_request(self,chunkid):\r
+        index,begin,length = chunkid\r
+        return REQUEST+tobinary(index)+tobinary(begin)+tobinary(length)\r
+\r
+\r
+    def check_hashpiece(self,resp):\r
+        """ Merkle BEP style """\r
+        print >>sys.stderr,"test: good_request: check_hashpiece"\r
+        self.assert_(resp[0] == EXTEND)\r
+        self.assert_(resp[1] == HASHPIECE)\r
+        index = toint(resp[2:2+4])\r
+        begin = toint(resp[6:6+4])\r
+        ohlen = toint(resp[10:10+4])\r
+        print >>sys.stderr,"test: good_request: check_hashpiece",index,begin,ohlen\r
+        bohlist = resp[14:14+ohlen]\r
+        hisohlist = bdecode(bohlist)\r
+        hischunk = resp[14+ohlen:]\r
+        \r
+        if begin == 0:\r
+            self.assert_(type(hisohlist) == ListType)\r
+            for oh in hisohlist:\r
+                self.assert_(type(oh) == ListType)\r
+                self.assert_(len(oh) == 2)\r
+                self.assert_(type(oh[0]) == IntType)\r
+                self.assert_(type(oh[1]) == StringType)\r
+            \r
+            hisohlist.sort()\r
+            print >>sys.stderr,"test: good_request: check_hashpiece",`hisohlist`\r
+            myohlist = self.tree.get_hashes_for_piece(index)\r
+            myohlist.sort()\r
+            \r
+            self.assert_(len(hisohlist) == len(myohlist))\r
+            for i in range(0,len(hisohlist)):\r
+                hisoh = hisohlist[i]\r
+                myoh = myohlist[i]\r
+                self.assert_(hisoh == myoh)\r
+        else:\r
+            self.assert_(len(hisohlist) == 0)\r
+        \r
+        mylength = self.get_chunk_length(index,begin)\r
+        mychunk = self.read_chunk(index,begin,mylength)\r
+        \r
+        self.assert_(hischunk == mychunk)\r
+\r
+        return index == self.numpieces-1 and mylength != 2 ** 14\r
+\r
+    def check_bitfield(self,data):\r
+        self.assert_(data[0] == BITFIELD)\r
+        bitmap = data[1:]\r
+        self.assert_(len(bitmap) == 1)\r
+        # Must have set_breakup_seed_bitfield() set to False\r
+        self.assert_(bitmap == '\xc0')\r
+\r
+\r
+    #\r
+    # Bad EXTEND handshake message\r
+    #    \r
+    def subtest_bad_hashpiece(self,oldstyle):\r
+        if not oldstyle:\r
+            # Test becomes equivalent to BT keep alive message (len 0, payload '')\r
+            self._test_bad(self.create_empty,oldstyle)\r
+        self._test_bad(self.create_ext_id_not_byte,oldstyle)\r
+        self._test_bad(self.create_not_hashpiece,oldstyle)\r
+        self._test_bad(self.create_not_index,oldstyle)\r
+        self._test_bad(self.create_not_begin,oldstyle)\r
+        self._test_bad(self.create_not_len_bohlist,oldstyle)\r
+        self._test_bad(self.create_ohlist_not_bdecodable,oldstyle)\r
+        self._test_bad(self.create_ohlist_wrong_no_hashes,oldstyle)\r
+        self._test_bad(self.create_ohlist_wrong_no_root_hash,oldstyle)\r
+        self._test_bad(self.create_ohlist_wrong_bad_offset,oldstyle)\r
+        self._test_bad(self.create_ohlist_wrong_bad_hash,oldstyle)\r
+        # TODO: need working peer kicking for that\r
+        ##self._test_bad(self.create_bad_chunk,oldstyle)\r
+    \r
+    #\r
+    # Main test code for bad EXTEND handshake messages\r
+    #\r
+    def _test_bad(self,msg_gen_func,oldstyle):\r
+        print >>sys.stderr,"test: test_BAD: Create EXTEND HS",`msg_gen_func`,oldstyle\r
+        if oldstyle:\r
+            options = None\r
+            exthsmsg = self.create_good_tribler_extend_hs()\r
+        else:\r
+            options = '\x00\x00\x00\x00\x00\x10\x00\x00'\r
+            exthsmsg = self.create_good_nontribler_extend_hs()\r
+        \r
+        \r
+        s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=self.infohash)\r
+        s.send(exthsmsg)\r
+        s.read_handshake_medium_rare()\r
+\r
+        # Tribler should send an EXTEND message back\r
+        try:\r
+            print >>sys.stderr,"test: Waiting for reply"\r
+            s.s.settimeout(10.0)\r
+            resp = s.recv()\r
+            self.assert_(len(resp) > 0)\r
+            print >>sys.stderr,"test: Got reply",getMessageName(resp[0])\r
+            self.assert_(resp[0] == EXTEND)\r
+            self.check_tribler_extend_hs(resp[1:])\r
+            \r
+            # 1. Pretend we're seeder: send BITFIELD and UNCHOKE\r
+            msg = BITFIELD+self.seederbitfieldstr\r
+            s.send(msg)\r
+            msg = UNCHOKE\r
+            s.send(msg)\r
+            print >>sys.stderr,"test: Pretend we are seeder"\r
+            while True:\r
+                resp = s.recv()\r
+                self.assert_(len(resp) > 0)\r
+                print >>sys.stderr,"test: Got reply 2",getMessageName(resp[0])\r
+                self.assert_(resp[0] == REQUEST or resp[0] == INTERESTED or resp[0] == UNCHOKE or resp[0] == HAVE or resp[0] == NOT_INTERESTED)\r
+                if resp[0] == REQUEST:\r
+                    chunkid = self.check_request(resp)\r
+            \r
+                    # 2. Reply to REQUEST with *bad* HASHPIECE\r
+                    msg = msg_gen_func(chunkid)\r
+                    if oldstyle:\r
+                        if len(msg) == 1:\r
+                            msg = ''\r
+                        else:\r
+                            msg = msg[1:] # Strip EXTEND byte\r
+                    s.send(msg)\r
+                    break\r
+                    \r
+            \r
+            #s.close()\r
+        except socket.timeout:\r
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply in time"\r
+            self.assert_(False)\r
+\r
+\r
+        time.sleep(3)\r
+        # Should have closed the connection\r
+        try:\r
+            s.send(UNCHOKE)\r
+            self.assert_(False)\r
+        except:\r
+            print_exc()\r
+\r
+        s.close()\r
+            \r
+\r
+    #\r
+    # Bad message creators (all create Merkle BEP style, I strip first byte\r
+    # later for oldstyle)\r
+    # \r
+    def create_empty(self,chunkid):\r
+        return EXTEND\r
+\r
+    def create_ext_id_not_byte(self,chunkid):\r
+        return EXTEND+'Hallo kijkbuiskinderen'\r
+\r
+    def create_not_hashpiece(self,chunkid):\r
+        index,begin,length = chunkid\r
+        ohlist = []\r
+        bohlist = bencode(ohlist)\r
+        chunk = self.read_chunk(index,begin,length)\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        return EXTEND+chr(231)+payload\r
+\r
+    def create_not_index(self,chunkid):\r
+        payload = 'bla'\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+    def create_not_begin(self,chunkid):\r
+        index,begin,length = chunkid\r
+        payload = tobinary(index)+'bla'\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+    def create_not_len_bohlist(self,chunkid):\r
+        index,begin,length = chunkid\r
+        payload = tobinary(index)+tobinary(begin)+'bla'\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+    def create_ohlist_not_bdecodable(self,chunkid):\r
+        index,begin,length = chunkid\r
+        bohlist = 'bla'\r
+        chunk = '*' * (2 ** 14)\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+    def create_ohlist_wrong_no_hashes(self,chunkid):\r
+        index,begin,length = chunkid\r
+        ohlist = [ (0,'#' * 20),(1,'$' * 20)]  # should contain 3 for file2.wmv: own, sibling and root\r
+        bohlist = bencode(ohlist)\r
+        chunk = '*' * (2 ** 14)\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+    def create_ohlist_wrong_no_root_hash(self,chunkid):\r
+        index,begin,length = chunkid\r
+        ohlist = self.tree.get_hashes_for_piece(index)\r
+        newohlist = []\r
+        # Remove root hash\r
+        for oh in ohlist:\r
+            if oh[0] != 0:\r
+                newohlist.append(oh)\r
+        ohlist = newohlist\r
+        bohlist = bencode(ohlist)\r
+        chunk = self.read_chunk(index,begin,length)\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+\r
+    def create_ohlist_wrong_bad_offset(self,chunkid):\r
+        index,begin,length = chunkid\r
+        ohlist = self.tree.get_hashes_for_piece(index)\r
+        ohlist[1][0] = 481\r
+        bohlist = bencode(ohlist)\r
+        chunk = self.read_chunk(index,begin,length)\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+    def create_ohlist_wrong_bad_hash(self,chunkid):\r
+        index,begin,length = chunkid\r
+        ohlist = self.tree.get_hashes_for_piece(index)\r
+        ohlist[1][1] = '$' * 20\r
+        bohlist = bencode(ohlist)\r
+        chunk = self.read_chunk(index,begin,length)\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+    def create_bad_chunk(self,chunkid):\r
+        index,begin,length = chunkid\r
+        ohlist = self.tree.get_hashes_for_piece(index)\r
+        bohlist = bencode(ohlist)\r
+        chunk = '*' * length\r
+        payload = tobinary(index)+tobinary(begin)+tobinary(len(bohlist))+bohlist+chunk\r
+        return EXTEND+HASHPIECE+payload\r
+\r
+\r
+def test_suite():\r
+    suite = unittest.TestSuite()\r
+    # We should run the tests in a separate Python interpreter to prevent \r
+    # problems with our singleton classes, e.g. PeerDB, etc.\r
+    if len(sys.argv) != 2:\r
+        print "Usage: python test_merkle_msg.py <method name>"\r
+    else:\r
+        suite.addTest(TestMerkleMessage(sys.argv[1]))\r
+    \r
+    return suite\r
+\r
+def main():\r
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])\r
+\r
+if __name__ == "__main__":\r
+    main()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_merkle_msg.sh b/instrumentation/next-share/BaseLib/Test/test_merkle_msg.sh
new file mode 100755 (executable)
index 0000000..fae4067
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_merkle_msg.py singtest_good_hashpiece_bepstyle
+python test_merkle_msg.py singtest_good_hashpiece_oldstyle
+python test_merkle_msg.py singtest_good_request_bepstyle
+python test_merkle_msg.py singtest_bad_hashpiece_bepstyle
+python test_merkle_msg.py singtest_bad_hashpiece_oldstyle
diff --git a/instrumentation/next-share/BaseLib/Test/test_miscutils.py b/instrumentation/next-share/BaseLib/Test/test_miscutils.py
new file mode 100644 (file)
index 0000000..081c830
--- /dev/null
@@ -0,0 +1,10 @@
+
+from BaseLib.Core.APIImplementation.miscutils import parse_playtime_to_secs
+
+assert parse_playtime_to_secs("0") == 0.0
+assert parse_playtime_to_secs("0.1") == 0.1
+assert parse_playtime_to_secs("1:00") == 60.0
+assert parse_playtime_to_secs("1:0.3") == 60.3
+assert parse_playtime_to_secs("10:00") == 600.0
+assert parse_playtime_to_secs("10:56:11") == 39371.00
+assert parse_playtime_to_secs("10:56:11.77") == 39371.77
diff --git a/instrumentation/next-share/BaseLib/Test/test_multicast.py b/instrumentation/next-share/BaseLib/Test/test_multicast.py
new file mode 100644 (file)
index 0000000..48b0fd7
--- /dev/null
@@ -0,0 +1,286 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+#
+
+# Arno, 2009-04-16: 
+# - You should also test whether Tribler responds correctly to multicast 
+#   messages sent directly from a socket, and not via your code.
+#
+# - Some IPv6 tests will fail on Win32 if IPv6 is not installed.
+#
+#
+
+import unittest
+import tempfile
+import select
+
+from BaseLib.Core.Multicast import *
+
+
+# class MyLoggerTest(unittest.TestCase):
+
+#     """
+#     Test the MyLogger class
+   
+#     """
+
+#     def setUp(self):
+#         self.log = MyLogger()
+#         self.conn = BTConnection('localhost',self.hisport)
+
+#     def testLog(self):
+
+#         self.log.debug("DEBUG message")
+
+#         self.log.info("INFO message")
+
+#         self.log.warning("WARNING message")
+
+#         self.log.fatal("FATAL message")
+
+#         try:
+#             raise Exception("Exception text")
+#         except:
+#             self.log.exception("Should have a traceback below here:")
+
+
+class FakeOverlayBridge:
+    def add_task(self, function, data):
+        function()
+
+class FakePeerDBHandler:
+    
+    def addPeer(self, *args, **kargs):
+        pass
+    
+    def setPeerLocalFlag(self, *args):
+        pass
+    
+
+class TestUDPServer(threading.Thread):
+
+    def __init__(self, socket, mc_channel):
+        threading.Thread.__init__(self)
+        
+        self.socket = socket
+        self.mc_channel = mc_channel
+        self.running = True
+        
+    def run(self):
+        
+        while self.running:
+            try:
+                if select.select([self.socket],[],[], 1)[0]:
+                    (data, addr) = self.socket.recvfrom(1500)
+                    self.mc_channel.data_came_in(addr, data)
+            except Exception,e:
+                print e
+                
+
+    def stop(self):
+        self.running = False
+    
+
+class MulticastTest(unittest.TestCase):
+
+    """
+    Test multicast class
+
+    """
+    
+    def __init__(self, param):
+        unittest.TestCase.__init__(self, param)
+        
+        #TestAsServer.__init__(self, param)
+        self.test_server = None
+        self.overlay_bridge = FakeOverlayBridge()
+        self.peer_db = FakePeerDBHandler()
+        
+    def prepare_test(self, config, capabilitites=None):
+        """
+        Cannot be done by setUp as we need special config
+        """
+
+        self.multicast = Multicast(config, self.overlay_bridge, 1234, 1, self.peer_db,
+                                   capabilities=capabilitites)
+        
+        self.test_server = TestUDPServer(self.multicast.getSocket(),
+                                         self.multicast)
+        self.test_server.start()
+        
+    def tearDown(self):
+        if self.test_server is not None:
+            self.test_server.stop()
+        self.multicast = None
+        
+        
+    def testIPv4(self):
+
+        # Dummy config
+        config = {'permid':'123',
+                  'hostname':'myhostname',
+                  'port':'1234',
+                  'multicast_ipv4_address':'224.0.1.43',
+                  'multicast_ipv6_address':'ff02::4124:1261:ffef',
+                  'multicast_port':'6124',
+                  'multicast_enabled':True,
+                  'multicast_ipv4_enabled':True,
+                  'multicast_ipv6_enabled':False,
+                  'multicast_announce':True}
+        
+        self.prepare_test(config)
+        
+        failed = True
+        seen = 0
+        for (permid, addr, capabilities) in self.multicast.discoverNodes():
+            if permid == '123':
+                failed = False
+        if failed:
+            raise Exception("Didn't discover myself using IPv4")
+
+    
+    def testIPv6(self):
+
+        # Dummy config
+        config = {'permid':'123',
+                  'multicast_ipv4_address':'224.0.1.43',
+                  'multicast_ipv6_address':'ff02::4124:1261:ffef',
+                  'multicast_port':'6124',
+                  'multicast_enabled':True,
+                  'multicast_ipv4_enabled':False,
+                  'multicast_ipv6_enabled':True,
+                  'multicast_announce':True}
+        
+        self.prepare_test(config)
+        failed = True
+        for (permid, addr, capabilities) in self.multicast.discoverNodes():
+            if permid == '123':
+                failed = False
+        if failed:
+            raise Exception("Didn't discover myself using IPv6")
+
+    def testBoth(self):
+
+        # Dummy config
+        config = {'permid':'123',
+                  'multicast_ipv4_address':'224.0.1.43',
+                  'multicast_ipv6_address':'ff02::4124:1261:ffef',
+                  'multicast_port':'6124',
+                  'multicast_enabled':True,
+                  'multicast_ipv4_enabled':True,
+                  'multicast_ipv6_enabled':True,
+                  'multicast_announce':True}
+        
+        self.prepare_test(config)
+
+        seen = 0
+        for (permid, addr, capabilities) in self.multicast.discoverNodes():
+            if permid == '123':
+                seen += 1
+        if seen < 2:
+            raise Exception("Didn't discover myself enough using both (saw me %d times, expected 2)"%seen)
+
+    def testAllDisabled(self):
+
+        # Dummy config
+        config = {'permid':'123',
+                  'multicast_ipv4_address':'224.0.1.43',
+                  'multicast_ipv6_address':'ff02::4124:1261:ffef',
+                  'multicast_port':'6124',
+                  'multicast_ipv4_enabled':False,
+                  'multicast_ipv6_enabled':False,
+                  'multicast_announce':True}
+        
+        self.prepare_test(config)
+
+        try:
+            if len(self.multicast.discoverNodes()) > 0:
+                raise Exception("Discovered nodes even though multicast is not allowed")
+        except:
+            # Expected
+            pass
+
+
+    def testAnnounce(self):
+
+        # Dummy config
+        config = {'permid':'123',
+                  'multicast_ipv4_address':'224.0.1.43',
+                  'multicast_ipv6_address':'ff02::4124:1261:ffef',
+                  'multicast_port':'6124',
+                  'multicast_enabled':True,
+                  'multicast_ipv4_enabled':True,
+                  'multicast_ipv6_enabled':True,
+                  'multicast_announce':True}
+        
+        self.prepare_test(config)
+
+        # Handle the announce
+        self.announces = []
+        self.multicast.addAnnounceHandler(self.handleAnnounce)
+        self.multicast.sendAnnounce(['elem1','elem2'])
+
+        # Wait for asynchronous handling
+        time.sleep(2.0)
+
+        for announce in self.announces:
+            if announce == ['123', 'elem1', 'elem2']:
+                return # Got it
+
+        raise Exception("Failed to get announce")
+    def handleAnnounce(self, permid, addr, list):
+
+        """
+        Handle announce callback function
+        """
+        self.announces.append([permid] + list)
+
+    def testCapabilities(self):
+        """
+        Test capabilities thingy
+        """
+
+        myCapabilities = ["Something", "something else", "something totally different"]
+        
+        # Dummy config
+        config = {'permid':'testCapabilities',
+                  'multicast_ipv4_address':'224.0.1.43',
+                  'multicast_ipv6_address':'ff02::4124:1261:ffef',
+                  'multicast_port':'6124',
+                  'multicast_enabled':True,
+                  'multicast_ipv4_enabled':False,
+                  'multicast_ipv6_enabled':True,
+                  'multicast_announce':True}
+        
+        self.prepare_test(config, myCapabilities)
+
+        failed = True
+        for (permid, addr, capabilities) in self.multicast.discoverNodes():
+            if permid == config['permid']:
+                failed = False
+                if capabilities != myCapabilities:
+                    raise Exception("Got bad capabilities, got %s, expected %s"%(str(capabilities), str(myCapabilities)))
+                
+        if failed:
+            raise Exception("Didn't discover myself using IPv6")
+        
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(MulticastTest))
+    
+    return suite
+
+
+        
+if __name__ == "__main__":
+
+    # TODO: Multicast gives us multiple hits for ourselves, is that ok?
+
+    print "Testing the Multicast classes"
+
+    unittest.main()
+
+    print "All done"
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_na_extend_hs.py b/instrumentation/next-share/BaseLib/Test/test_na_extend_hs.py
new file mode 100644 (file)
index 0000000..41ca457
--- /dev/null
@@ -0,0 +1,184 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# TODO: Let Tribler initiate a BT connection to us. We then pretend to be old client
+# and then he should initiate an OL connection to us.
+#
+
+import unittest
+import sys
+import time
+import socket
+from traceback import print_exc
+from types import DictType,IntType
+
+from BaseLib.Test.test_extend_hs import TestExtendHandshake
+from btconn import BTConnection
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BitTornado.BT1.track import compact_ip
+from BaseLib.Core.Utilities.Crypto import sha
+
+DEBUG=True
+
+class TestNetworkAwareExtendHandshake(TestExtendHandshake):
+    """ 
+    Test our network awareness code that tries to detect if we're behind
+    the same NAT and if so, connect via the internal network.
+    
+    See BitTornado/BT1/Connecter.py
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestExtendHandshake.setUp(self)
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestExtendHandshake.setUpPreSession(self)
+    
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestExtendHandshake.setUpPostSession(self)
+
+        # Create a fake "internal network interface"
+        self.setUpMyListenSocket()
+
+        self.myid = "R------andomPeer4811"
+        
+    def setUpDownloadConfig(self):
+        dscfg = TestExtendHandshake.setUpDownloadConfig(self)
+        
+        dscfg.set_same_nat_try_internal(True)
+        dscfg.set_unchoke_bias_for_internal(481)
+        return dscfg
+
+    def setUpMyListenSocket(self):
+        self.destport = 4811
+        
+        # Start our server side, to which Tribler will try to connect
+        self.destss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.destss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.destss.bind(('', self.destport))
+        self.destss.listen(1)
+
+    def singtest_ext_ip_unknown(self):
+        """ Send EXTEND hs to Tribler with yourip set to 127.0.0.1, so it
+        appears we are using the same IP address. Tribler doesn't know its
+        own external IP address, so it will do a loopback test to yourip
+        to check. If this check succeeds it will connect to us via the 
+        internal network, i.e., our internal interface self.destss.
+        """
+        self.session.lm.upnp_ext_ip = None
+        self.subtest_good_tribler_extend_hs()
+       
+        
+    def singtest_ext_ip_known(self):
+        """ Same as singtest_ext_ip_unknown() except no loopback test is needed
+        as Tribler knows its external IP and it will be the same as the sent yourip.
+        """
+        self.session.lm.upnp_ext_ip = '127.0.0.1'
+        self.subtest_good_tribler_extend_hs()
+        
+        
+    #
+    # Good EXTEND handshake message
+    #
+    def subtest_good_tribler_extend_hs(self):
+        self._test_good(self.create_good_tribler_extend_hs,infohash=self.infohash)
+        
+    def _test_good(self,msg_gen_func,options=None,infohash=None):
+        
+        print >>sys.stderr,"test: test good, gen_func",msg_gen_func
+        
+        if options is None and infohash is None:
+            s = BTConnection('localhost',self.hisport,myid=self.myid)
+        elif options is None:
+            s = BTConnection('localhost',self.hisport,user_infohash=infohash,myid=self.myid)
+        elif infohash is None:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options,myid=self.myid)
+        else:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=infohash,myid=self.myid)
+        msg = msg_gen_func()
+        s.send(msg)
+        s.read_handshake_medium_rare()
+        time.sleep(5)
+
+        # Tribler should send an EXTEND message back
+        try:
+            s.s.settimeout(10.0)
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            print >>sys.stderr,"test: Got reply",getMessageName(resp[0])
+            self.assert_(resp[0] == EXTEND)
+            self.check_tribler_extend_hs(resp[1:])
+            #s.close()
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply with EXTEND message"
+            self.assert_(False)
+
+        # Tribler should try to connect to our internal interface
+        self.destss.settimeout(10.0)
+        conn, addr = self.destss.accept()
+        s2 = BTConnection('',0,conn,user_infohash=self.infohash,myid=self.myid)
+        s2.send(INTERESTED)
+        s2.read_handshake_medium_rare()
+        
+        # Is it him?
+        self.assert_(s.hisid == s2.hisid)
+
+        # He should close original conn
+        try:
+            while True:
+                resp = s.recv()
+                if len(resp) > 0:
+                    print >>sys.stderr,"test: Got data on internal conn",getMessageName(resp[0])
+                else:
+                    break
+        except socket.timeout:
+            self.assert_(False)
+                
+        self.assert_(True)
+        
+
+    def create_good_tribler_extend_hs(self):
+        d = {}
+        d['m'] = {'Tr_OVERLAYSWARM':253}
+        d['p'] = self.mylistenport
+        d['v'] = 'Tribler 4.5.0'
+        d['yourip'] = compact_ip('127.0.0.1')
+        d['ipv4'] = compact_ip('224.4.8.1')
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+
+    def check_tribler_extend_hs(self,data):
+        self.assert_(data[0] == chr(0))
+        d = bdecode(data[1:])
+        self.assert_(type(d) == DictType)
+        self.assert_('m' in d.keys())
+        m = d['m']
+        self.assert_(type(m) == DictType)
+        self.assert_('Tr_OVERLAYSWARM' in m.keys())
+        val = m['Tr_OVERLAYSWARM']
+        self.assert_(type(val) == IntType)
+        self.assert_(val == 253)
+        
+        print >>sys.stderr,"test: Reply is",`d`
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_na_extend_hs.py <method name>"
+    else:
+        suite.addTest(TestNetworkAwareExtendHandshake(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_na_extend_hs.sh b/instrumentation/next-share/BaseLib/Test/test_na_extend_hs.sh
new file mode 100644 (file)
index 0000000..b55d0bb
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_na_extend_hs.py singtest_ext_ip_known
+python test_na_extend_hs.py singtest_ext_ip_unknown
diff --git a/instrumentation/next-share/BaseLib/Test/test_natcheck.py b/instrumentation/next-share/BaseLib/Test/test_natcheck.py
new file mode 100644 (file)
index 0000000..6df5670
--- /dev/null
@@ -0,0 +1,74 @@
+# Written by Arno Bakker, Boudewijn Schoon
+# see LICENSE.txt for license information
+
+import sys
+import time
+import unittest
+import socket
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BitTornado.bencode import bdecode
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import CrawlerDBHandler
+
+from olconn import OLConnection
+from test_crawler import TestCrawler
+
+DEBUG=True
+
+class TestNatCheck(TestCrawler):
+    """ 
+    Testing Nat-Check statistics gathering using the Crawler framework
+    """
+
+    def test_all(self):
+        """
+        I want to start a Tribler client once and then connect to it
+        many times. So there must be only one test method to prevent
+        setUp() from creating a new client every time.
+
+        The code is constructed so unittest will show the name of the
+        (sub)test where the error occurred in the traceback it prints.
+        """
+        self.subtest_valid_nat_check()
+        
+    def subtest_valid_nat_check(self):
+        """
+        Send a CRAWLER_NATCHECK message to the Tribler instance. A
+        reply containing a nat type should be returned.
+        """
+        print >>sys.stderr, "-"*80, "\ntest: subtest_valid_nat_check"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        s = OLConnection(self.my_keypair, "localhost", self.hisport, mylistenport=self.listen_port)
+        self.send_crawler_request(s, CRAWLER_NATCHECK, 42, 0, "")
+        s.close()
+
+        if DEBUG: print >>sys.stderr, "test_natcheck: the nat-check code allows for a 10 minute delay in reporting the nat stats"
+        self.listen_socket.settimeout(11 * 60)
+
+        # wait for reply
+        try:
+            conn, addr = self.listen_socket.accept()
+        except socket.timeout:
+            if DEBUG: print >> sys.stderr,"test_natcheck: timeout, bad, peer didn't connect to send the crawler reply"
+            assert False, "test_natcheck: timeout, bad, peer didn't connect to send the crawler reply"
+        s = OLConnection(self.my_keypair, "", 0, conn, mylistenport=self.listen_port)
+
+        # read reply
+        error, payload = self.receive_crawler_reply(s, CRAWLER_NATCHECK, 42)
+        assert error == 0
+        if DEBUG: print >>sys.stderr, "test_natcheck:", bdecode(payload)
+
+        time.sleep(1)
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestNatCheck))
+    return suite
+
+if __name__ == "__main__":
+    unittest.main(defaultTest="test_suite")
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_osutils.py b/instrumentation/next-share/BaseLib/Test/test_osutils.py
new file mode 100644 (file)
index 0000000..d6f7f6a
--- /dev/null
@@ -0,0 +1,75 @@
+import os
+import sys
+import unittest
+
+if os.path.exists('test_osutils.py'):
+    BASE_DIR = '..'
+    sys.path.insert(1, os.path.abspath('..'))
+elif os.path.exists('LICENSE.txt'):
+    BASE_DIR = '.'
+    
+from BaseLib.Core.osutils import fix_filebasename
+
+fix_filebasename
+
+class Test_OsUtils(unittest.TestCase):
+    def test_fix_filebasename(self):
+        default_name = '_'
+        win_name_table = {
+          'abcdef':'abcdef',
+          '.':default_name,
+          '..':default_name,
+          '':default_name,
+          ' ':default_name,
+          '   ':default_name,
+          os.path.join('a','b'):'a_b',
+          '\x5c\x61':'_a',    # \x5c = '\\'
+          '\x92\x97':'\x92\x97',
+          '\x5c\x5c':'__',
+          '\x5c\x61\x5c':'_a_',
+          '\x2f\x61':'_a',    # \x2f = '/'
+          '\x92\x97':'\x92\x97',
+          '\x2f\x2f':'__',
+          '\x2f\x61\x2f':'_a_',
+          'a'*300:'a'*255
+        }
+        for c in '"*/:<>?\\|':
+            win_name_table[c] = default_name
+            
+        linux_name_table = {
+          'abcdef':'abcdef',
+          '.':default_name,
+          '..':default_name,
+          '':default_name,
+          ' ':default_name,
+          '   ':default_name,
+          os.path.join('a','b'):'a_b',
+          '\x2f\x61':'_a',    # \x2f = '/'
+          '\x92\x97':'\x92\x97',
+          '\x2f\x2f':'__',
+          '\x2f\x61\x2f':'_a_',
+          'a'*300:'a'*255
+        }
+        
+        if sys.platform.startswith('win'):
+            name_table = win_name_table
+        else:
+            name_table = linux_name_table
+        
+        for name in name_table:
+            fixedname = fix_filebasename(name)
+            assert fixedname == name_table[name], (fixedname,name_table[name])
+            
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(Test_OsUtils))
+    
+    return suite
+        
+def main():
+    unittest.main(defaultTest='test_suite')
+
+    
+if __name__ == '__main__':
+    main()    
+                       
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/test_overlay_bridge.py b/instrumentation/next-share/BaseLib/Test/test_overlay_bridge.py
new file mode 100644 (file)
index 0000000..eb9e523
--- /dev/null
@@ -0,0 +1,77 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import sys
+import unittest
+import time
+import tempfile
+import os
+
+import BaseLib.Core.CacheDB.sqlitecachedb as sqlitecachedb  
+from BaseLib.Test.test_secure_overlay import TestSecureOverlay,Peer 
+from BaseLib.Core.Overlay.SecureOverlay import SecureOverlay
+from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+from BaseLib.Core.Utilities.utilities import show_permid_short
+
+class TestOverlayThreadingBridge(TestSecureOverlay):
+    
+    def setUp(self):
+        
+        print >>sys.stderr,"test: TestOverlayThreadingBridge.setUp()"
+
+        self.config_path = tempfile.mkdtemp()
+        config = {}
+        config['state_dir'] = self.config_path
+        config['torrent_collecting_dir'] = self.config_path
+        config['install_dir'] = os.path.join('..','..')
+        config['peer_icon_path'] = os.path.join(self.config_path,'peer_icons')
+        config['superpeer'] = False
+        sqlitecachedb.init(config, self.rawserver_fatalerrorfunc)
+        
+        secover1 = SecureOverlay.getInstance()
+        secover1.resetSingleton()
+        secover2 = SecureOverlay.getInstance()
+        secover2.resetSingleton()
+        
+        overbridge1 = OverlayThreadingBridge()
+        overbridge1.register_bridge(secover1,None)
+        overbridge1.resetSingleton()
+
+        overbridge2 = OverlayThreadingBridge()
+        overbridge2.register_bridge(secover2,None)
+        overbridge2.resetSingleton()
+
+        
+        self.peer1 = Peer(self,1234,overbridge1)
+        self.peer2 = Peer(self,5678,overbridge2)
+        self.peer1.start()
+        self.peer2.start()
+        self.wanted = False
+        self.wanted2 = False
+        self.got = False
+        self.got2 = False
+        self.first = True
+
+        print >>sys.stderr,"test: setUp: peer1 permid is",show_permid_short(self.peer1.my_permid)
+        print >>sys.stderr,"test: setUp: peer2 permid is",show_permid_short(self.peer2.my_permid)
+
+        time.sleep(2) # let server threads start
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_overlay_bridge.py <method name>"
+    else:
+        suite.addTest(TestOverlayThreadingBridge(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_overlay_bridge.sh b/instrumentation/next-share/BaseLib/Test/test_overlay_bridge.sh
new file mode 100755 (executable)
index 0000000..e241833
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+python test_overlay_bridge.py singtest_connect_dns_to_dead_peer
+python test_overlay_bridge.py singtest_connect_dns_to_live_peer
+python test_overlay_bridge.py singtest_connect_to_dead_peerA
+python test_overlay_bridge.py singtest_connect_to_dead_peerB
+python test_overlay_bridge.py singtest_connect_to_live_peer
+python test_overlay_bridge.py singtest_connect_twice_to_live_peer
+python test_overlay_bridge.py singtest_send_unopenedA
+python test_overlay_bridge.py singtest_send_unopenedB
+python test_overlay_bridge.py singtest_send_local_close
+python test_overlay_bridge.py singtest_send_remote_close
+python test_overlay_bridge.py singtest_send_opened
+python test_overlay_bridge.py singtest_close_unopened
+python test_overlay_bridge.py singtest_close_opened
+python test_overlay_bridge.py singtest_receive
+python test_overlay_bridge.py singtest_got_conn_incoming
+python test_overlay_bridge.py singtest_got_conn_outgoing
+python test_overlay_bridge.py singtest_got_conn_local_close
+python test_overlay_bridge.py singtest_got_conn_remote_close
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_permid.py b/instrumentation/next-share/BaseLib/Test/test_permid.py
new file mode 100644 (file)
index 0000000..93e0451
--- /dev/null
@@ -0,0 +1,348 @@
+# Written by Arno Bakker, Jie Yang, Bram Cohen
+# see LICENSE.txt for license information
+
+import unittest
+
+import sys
+import time
+from types import DictType, StringType
+
+from BaseLib.Test.test_as_server import TestAsServer
+from btconn import BTConnection
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import CHALLENGE,RESPONSE1,RESPONSE2
+from BaseLib.Core.Utilities.Crypto import sha
+from M2Crypto import EC
+
+DEBUG=False
+
+random_size = 1024
+
+class TestPermIDs(TestAsServer):
+    """ 
+    Testing PermID extension version 1
+    """
+    
+    #def setUp(self):
+        # """ inherited from TestAsServer """
+
+    #def tearDown(self):
+        # """ inherited from TestAsServer """
+
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        # 1. test good challenge/response
+        self.subtest_good_challenge_response2()
+
+        # 2. test various bad challenge messages
+        self.subtest_bad_chal_not_bdecodable()
+        self.subtest_bad_chal_too_short()
+        self.subtest_bad_chal_too_big()
+
+        # 3. test various bad response2 messages
+        self.subtest_bad_resp2_not_bdecodable()
+        self.subtest_bad_resp2_not_dict1()
+        self.subtest_bad_resp2_not_dict2()
+        self.subtest_bad_resp2_empty_dict()
+        self.subtest_bad_resp2_wrong_dict_keys()
+        self.subtest_bad_resp2_bad_cert()
+        self.subtest_bad_resp2_bad_peerid()
+        self.subtest_bad_resp2_bad_sig_input()
+        self.subtest_bad_resp2_too_short_randomB()
+        self.subtest_bad_resp2_too_short_randomA()
+        self.subtest_bad_resp2_wrong_randomB()
+        self.subtest_bad_resp2_wrong_randomA()
+        self.subtest_bad_resp2_sig_by_other_keypair()
+
+    #
+    # Good challenge/response
+    #
+    def subtest_good_challenge_response2(self):
+        """ 
+            test good challenge and response2 messages
+        """
+        print >> sys.stderr,"test: good challenge/response"
+        s = BTConnection('localhost',self.hisport)
+        s.read_handshake()
+        [rB,chal_data] = self.create_good_challenge()
+        s.send(chal_data)
+        resp1_data = s.recv()
+        self.assert_(resp1_data[0] == RESPONSE1)
+        resp1_dict = self.check_response1(resp1_data[1:],rB,s.get_my_id())
+        resp2_data = self.create_good_response2(rB,resp1_dict,s.get_his_id())
+        s.send(resp2_data)
+        time.sleep(10)
+        # the other side should not have closed the connection, as
+        # this is all valid, so this should not throw an exception:
+        s.send('bla')
+        s.close()
+
+    def create_good_challenge(self):
+        r = "".zfill(random_size)
+        return [r,self.create_challenge_payload(r)]
+
+    def create_good_response2(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_list = [rB,resp1_dict['rA'],hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_challenge_payload(self,r):
+        return CHALLENGE+bencode(r)
+
+    def create_response2_payload(self,dict):
+        return RESPONSE2+bencode(dict)
+    
+    #
+    # Bad challenges
+    #    
+    def subtest_bad_chal_not_bdecodable(self):
+        self._test_bad_challenge(self.create_not_bdecodable_challenge)
+    
+    def subtest_bad_chal_too_short(self):
+        self._test_bad_challenge(self.create_too_short_challenge)
+
+    def subtest_bad_chal_too_big(self):
+        self._test_bad_challenge(self.create_too_big_challenge)
+
+    def _test_bad_challenge(self,gen_chal_func):
+        s = BTConnection('localhost',self.hisport)
+        s.read_handshake()
+        [rB,chal_data] = gen_chal_func()
+        s.send(chal_data)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        msg = s.recv()
+        self.assert_(len(msg)==0)
+        s.close()
+
+    def create_not_bdecodable_challenge(self):
+        r = "".zfill(random_size)
+        return [r,CHALLENGE+"hallo"]
+
+    def create_too_short_challenge(self):
+        r = "".zfill(random_size-1)  # too short
+        return [r,self.create_challenge_payload(r)]
+
+    def create_too_big_challenge(self):
+        r = "".zfill(random_size+1)  # too big
+        return [r,self.create_challenge_payload(r)]
+
+    #
+    # Bad response2
+    #    
+    def subtest_bad_resp2_not_bdecodable(self):
+        self._test_bad_response2(self.create_resp2_not_bdecodable)
+
+    def subtest_bad_resp2_not_dict1(self):
+        self._test_bad_response2(self.create_resp2_not_dict1)
+
+    def subtest_bad_resp2_not_dict2(self):
+        self._test_bad_response2(self.create_resp2_not_dict2)
+
+    def subtest_bad_resp2_empty_dict(self):
+        self._test_bad_response2(self.create_resp2_empty_dict)
+
+    def subtest_bad_resp2_wrong_dict_keys(self):
+        self._test_bad_response2(self.create_resp2_wrong_dict_keys)
+
+    def subtest_bad_resp2_bad_cert(self):
+        self._test_bad_response2(self.create_resp2_bad_cert)
+
+    def subtest_bad_resp2_bad_peerid(self):
+        self._test_bad_response2(self.create_resp2_bad_peerid)
+
+    def subtest_bad_resp2_bad_sig_input(self):
+        self._test_bad_response2(self.create_resp2_bad_sig_input)
+
+    def subtest_bad_resp2_too_short_randomB(self):
+        self._test_bad_response2(self.create_resp2_too_short_randomB)
+
+    def subtest_bad_resp2_too_short_randomA(self):
+        self._test_bad_response2(self.create_resp2_too_short_randomA)
+
+    def subtest_bad_resp2_wrong_randomB(self):
+        self._test_bad_response2(self.create_resp2_wrong_randomB)
+
+    def subtest_bad_resp2_wrong_randomA(self):
+        self._test_bad_response2(self.create_resp2_wrong_randomA)
+
+    def subtest_bad_resp2_sig_by_other_keypair(self):
+        self._test_bad_response2(self.create_resp2_sig_by_other_keypair)
+
+    def _test_bad_response2(self,gen_resp2_func):
+        print >> sys.stderr,"test: bad response2",gen_resp2_func
+        s = BTConnection('localhost',self.hisport)
+        s.read_handshake()
+        [rB,chal_data] = self.create_good_challenge()
+        s.send(chal_data)
+        resp1_data = s.recv()
+        self.assert_(resp1_data[0] == RESPONSE1)
+        resp1_dict = self.check_response1(resp1_data[1:],rB,s.get_my_id())
+        resp2_data = gen_resp2_func(rB,resp1_dict,s.get_his_id())
+        s.send(resp2_data)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        msg = s.recv()
+        self.assert_(len(msg)==0)
+        s.close()
+
+    def create_resp2_not_bdecodable(self,rB,resp1_dict,hisid):
+        return RESPONSE2+"bla"
+
+    def create_resp2_not_dict1(self,rB,resp1_dict,hisid):
+        resp2 = 481
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_not_dict2(self,rB,resp1_dict,hisid):
+        resp2 = []
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_empty_dict(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_wrong_dict_keys(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        resp2['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_bad_cert(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        resp2['A'] = hisid
+        sig_list = [rB,resp1_dict['rA'],hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_bad_peerid(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        sig_list = [rB,resp1_dict['rA'],hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_bad_sig_input(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_data = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_too_short_randomB(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_list = ['\x00\x00\x00\x00\x00\x30\x00\x00',resp1_dict['rA'],hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_too_short_randomA(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_list = [rB,'\x00\x00\x00\x00\x00\x30\x00\x00',hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_wrong_randomB(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_list = ["wrong".zfill(random_size),resp1_dict['rA'],hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    def create_resp2_wrong_randomA(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_list = [rB,"wrong".zfill(random_size),hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+
+    def create_resp2_sig_by_other_keypair(self,rB,resp1_dict,hisid):
+        resp2 = {}
+        resp2['certB'] = str(self.my_keypair.pub().get_der())
+        resp2['A'] = hisid
+        sig_list = [rB,resp1_dict['rA'],hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.his_keypair.sign_dsa_asn1(sig_hash))
+        resp2['SB'] = sig_asn1
+        return self.create_response2_payload(resp2)
+
+    #
+    # Utils
+    #
+    def check_response1(self,resp1_data,rB,myid):
+        resp1 = bdecode(resp1_data)
+        self.assert_(type(resp1) == DictType)
+        self.assert_(resp1.has_key('certA'))
+        self.assert_(resp1.has_key('rA'))
+        self.assert_(resp1.has_key('B'))
+        self.assert_(resp1.has_key('SA'))
+        # should throw an exception when the key is invalid
+        pubA = EC.pub_key_from_der(resp1['certA'])
+        rA = resp1['rA']
+        self.assert_(type(rA) == StringType)
+        self.assert_(len(rA) == random_size)
+        B = resp1['B']
+        self.assert_(type(B) == StringType)
+        self.assert_(B,myid)
+        SA = resp1['SA']
+        self.assert_(type(SA) == StringType)
+        # verify signature
+        sig_list = [rA,rB,myid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        self.assert_(pubA.verify_dsa_asn1(sig_hash,SA))
+        # Cannot resign the data with his keypair to double check. Signing
+        # appears to yield different, supposedly valid sigs each time.
+        return resp1
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestPermIDs))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_permid_response1.py b/instrumentation/next-share/BaseLib/Test/test_permid_response1.py
new file mode 100644 (file)
index 0000000..ab565eb
--- /dev/null
@@ -0,0 +1,435 @@
+# Written by Arno Bakker, Jie Yang
+# see LICENSE.txt for license information
+
+# This test checks how the Tribler code responds to bad RESPONSE1 messages.
+# To test this we would have to have Tribler connect to us, that is,
+# initiate the challenge response. As it is not trivial to let the client 
+# connect to another (us) we have written a different solution:
+#
+# 1. We create our own server listening to a given TCP port. 
+# 2. We create a bogus Encrypter/Connecter Connection object encapsulating a 
+#    normal TCP connection to our server.
+# 3. We pass the bogus Connection object to the permid.ChallengeResponse class 
+#    and tell it to initiate the C/R protocol.
+# 4. Our server responds with malformed RESPONSE1 messages.
+#
+import unittest
+
+import sys
+import socket
+import time
+from threading import Thread
+from types import DictType, StringType
+from traceback import print_exc
+from M2Crypto import EC
+
+from btconn import BTConnection
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import CHALLENGE,RESPONSE1,RESPONSE2
+import BaseLib.Core.Overlay.permid as permid
+from BaseLib.Core.Utilities.Crypto import sha
+
+DEBUG=False
+
+#
+# CAUTION: when a test is added to MyServer.test_all(), increase test_count and 
+# make sure the should_succeed flag to TestPermIDsResponse1.subtest_connect()
+# is set correctly.
+#
+test_count = 13
+
+random_size = 1024  # the number of random bytes in the C/R protocol
+
+class MyServer(Thread):
+    
+    def __init__(self,port,testcase):
+        Thread.__init__(self)
+        self.setDaemon(True)
+        self.testcase = testcase
+        self.port = port
+        self.my_keypair = EC.gen_params(EC.NID_sect233k1)
+        self.my_keypair.gen_key()
+
+        self.other_keypair = EC.gen_params(EC.NID_sect233k1)
+        self.other_keypair.gen_key()
+
+
+    def run(self):
+        try:
+            self.runReal()
+        except Exception,e:
+            print_exc()
+            self.testcase.assert_(False,str(e))
+
+    def runReal(self):
+        ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        ss.bind(('', self.port))
+        ss.listen(1)
+        self.test_all(ss)
+        ss.close()
+        print "myserver: Server thread ending"
+
+    def test_all(self,ss):
+        """
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        #
+        # CAUTION: when a test is added here, increase test_count and make
+        # sure the should_succeed flag to TestPermIDsResponse1.subtest_connect()
+        # is set correctly.
+        ## Good must be first
+        self.subtest_good_response1(ss)
+        self.subtest_bad_resp1_no_bdecoable(ss)
+        self.subtest_bad_resp1_not_dict1(ss)
+        self.subtest_bad_resp1_not_dict2(ss)
+        self.subtest_bad_resp1_empty_dict(ss)
+        self.subtest_bad_resp1_wrong_dict_keys(ss)
+        self.subtest_bad_resp1_bad_cert(ss)
+        self.subtest_bad_resp1_too_short_randomA(ss)
+        self.subtest_bad_resp1_bad_peerid(ss)
+        self.subtest_bad_resp1_bad_sig_input(ss)
+        self.subtest_bad_resp1_too_short_randomB(ss)
+        self.subtest_bad_resp1_wrong_randomB(ss)
+        self.subtest_bad_resp1_sig_by_other_key(ss)
+
+    def subtest_good_response1(self,ss):
+        self._test_response1(ss, self.create_good_response1,True)
+
+    def subtest_bad_resp1_no_bdecoable(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_no_bdecodable,False)
+    
+    def subtest_bad_resp1_not_dict1(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_not_dict1,False)
+
+    def subtest_bad_resp1_not_dict2(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_not_dict2,False)
+
+    def subtest_bad_resp1_empty_dict(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_empty_dict,False)
+
+    def subtest_bad_resp1_wrong_dict_keys(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_wrong_dict_keys,False)
+
+    def subtest_bad_resp1_bad_cert(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_bad_cert,False)
+
+    def subtest_bad_resp1_too_short_randomA(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_too_short_randomA,False)
+
+    def subtest_bad_resp1_bad_peerid(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_bad_peerid,False)
+
+    def subtest_bad_resp1_bad_sig_input(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_bad_sig_input,False)
+
+    def subtest_bad_resp1_too_short_randomB(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_too_short_randomB,False)
+
+    def subtest_bad_resp1_wrong_randomB(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_bad_randomB,False)
+
+    def subtest_bad_resp1_sig_by_other_key(self,ss):
+        self._test_response1(ss, self.create_bad_resp1_sig_by_other_key,False)
+
+    def _test_response1(self,ss,gen_resp1,good):
+        print >>sys.stderr,"test: myserver running:",gen_resp1
+        conn, addr = ss.accept()
+        s = BTConnection('',0,conn)
+        s.read_handshake_medium_rare()
+        # Read challenge
+        msg = s.recv()
+        self.testcase.assert_(msg[0] == CHALLENGE)
+        randomB = bdecode(msg[1:])
+        self.testcase.assert_(type(randomB) == StringType)
+        self.testcase.assert_(len(randomB) == random_size)
+        [randomA,resp1_data] = gen_resp1(randomB,s.get_his_id())
+        s.send(resp1_data)
+        if good:
+            # Read response2
+            msg = s.recv()
+            self.testcase.assert_(msg[0] == RESPONSE2)
+            self.check_response2(msg[1:],randomA,randomB,s.get_my_id())
+            # the connection should be intact, so this should not throw an
+            # exception:
+            time.sleep(5)
+            s.send('bla')
+            s.close()
+        else:
+            time.sleep(5)
+            # the other side should not like our bad RESPONSE1 and close the
+            # connection
+            msg = s.recv()
+            self.testcase.assert_(len(msg)==0)
+            s.close()
+
+
+    def create_good_response1(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = "".zfill(random_size)
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],rB,hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+    def create_response1_payload(self,dict):
+        return RESPONSE1+bencode(dict)
+
+    def check_response2(self,resp2_data,rA,rB,myid):
+        resp2 = bdecode(resp2_data)
+        self.testcase.assert_(type(resp2) == DictType)
+        self.testcase.assert_(resp2.has_key('certB'))
+        self.testcase.assert_(resp2.has_key('A'))
+        self.testcase.assert_(resp2.has_key('SB'))
+        # should throw an exception when the key is invalid
+        pubB = EC.pub_key_from_der(resp2['certB'])
+        A = resp2['A']
+        self.testcase.assert_(type(A) == StringType)
+        self.testcase.assert_(A,myid)
+        SB = resp2['SB']
+        self.testcase.assert_(type(SB) == StringType)
+        # verify signature
+        sig_list = [rB,rA,myid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        self.testcase.assert_(pubB.verify_dsa_asn1(sig_hash,SB))
+        # Cannot resign the data with his keypair to double check. Signing
+        # appears to yield different, supposedly valid sigs each time.
+
+    def create_bad_resp1_no_bdecodable(self,rB,hisid):
+        r = "".zfill(random_size)
+        return [r,RESPONSE1+'bla']
+
+    def create_bad_resp1_not_dict1(self,rB,hisid):
+        resp1 = 481
+        r = "".zfill(random_size)
+        return [r,self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_not_dict2(self,rB,hisid):
+        resp1 = []
+        r = "".zfill(random_size)
+        return [r,self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_empty_dict(self,rB,hisid):
+        resp1 = {}
+        r = "".zfill(random_size)
+        return [r,self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_wrong_dict_keys(self,rB,hisid):
+        resp1 = {}
+        resp1['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        resp1['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        r = "".zfill(random_size)
+        return [r,self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_bad_cert(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        resp1['rA'] = "".zfill(random_size)
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],rB,hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_too_short_randomA(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],rB,hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_bad_peerid(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = "".zfill(random_size)
+        resp1['B'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        sig_list = [resp1['rA'],rB,hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_bad_sig_input(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = "".zfill(random_size)
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],rB,hisid]
+        sig_data = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_too_short_randomB(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = "".zfill(random_size)
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],'\x00\x00\x00\x00\x00\x30\x00\x00',hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+    def create_bad_resp1_bad_randomB(self,rB,hisid):
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = "".zfill(random_size)
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],"wrong".zfill(random_size),hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.my_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+
+    def create_bad_resp1_sig_by_other_key(self,rB,hisid):
+        # Build a RESPONSE1 with correct content but signed by
+        # self.other_keypair, which does not match the certificate in
+        # 'certA' (self.my_keypair); verification must fail.
+        resp1 = {}
+        resp1['certA'] = str(self.my_keypair.pub().get_der())
+        resp1['rA'] = "".zfill(random_size)
+        resp1['B'] = hisid
+        sig_list = [resp1['rA'],rB,hisid]
+        sig_data = bencode(sig_list)
+        sig_hash = sha(sig_data).digest()
+        sig_asn1 = str(self.other_keypair.sign_dsa_asn1(sig_hash))
+        resp1['SA'] = sig_asn1
+        return [resp1['rA'],self.create_response1_payload(resp1)]
+
+#
+# Proxy class to fool the ChallengeResponse class
+#
+
+class EncrypterConnection:
+    """ Minimal stand-in for Tribler's Encrypter connection; only
+        exposes the .id attribute. """
+    def __init__(self,myid):
+        self.id = myid
+
+class ConnecterConnection:
+    """ Stand-in for Tribler's Connecter connection: wraps a raw
+        BTConnection to the instance under test and exposes the subset of
+        the Connecter interface that ChallengeResponse calls. """
+
+    def __init__(self,port):
+        # Connect to the peer on localhost and complete enough of the BT
+        # handshake to learn its peer id.
+        self.s = BTConnection('localhost',port)
+        self.s.read_handshake_medium_rare()
+        self.connection = EncrypterConnection(self.s.get_his_id())
+
+    def get_my_id(self):
+        return self.s.get_my_id()
+
+    def get_unauth_peer_id(self):
+        return self.s.get_his_id()
+
+    def is_locally_initiated(self):
+        # We always dial out in these tests.
+        return True
+
+    def send_message(self,msg):
+        self.s.send(msg)
+        
+    def get_message(self):
+        return self.s.recv()
+
+    def set_permid(self,x):
+        # No-op: the real Connecter records the authenticated permid here.
+        pass
+
+    def set_auth_peer_id(self,x):
+        # No-op: the real Connecter records the authenticated peer id here.
+        pass
+
+    def close(self):
+        self.s.close()
+
+class SecureOverlay:
+    """ Stand-in for Tribler's SecureOverlay: accepts the
+        authenticated-connection callback and ignores it. """
+    def __init__(self):
+        pass
+
+    def got_auth_connection(self,singsock,permid,peer_id):
+        pass
+
+#
+# The actual TestCase
+#
+class TestPermIDsResponse1(unittest.TestCase):
+    """ 
+    Testing PermID extension version 1, RESPONSE1 message.
+    """
+    
+    def setUp(self):
+        # Fresh EC keypair for each run; NID_sect233k1 is the curve the
+        # PermID scheme uses.
+        self.my_keypair = EC.gen_params(EC.NID_sect233k1)
+        self.my_keypair.gen_key()
+
+        # Fixed port — assumes nothing else on the host is using 4810.
+        self.server_port = 4810
+        self.server = MyServer(self.server_port,self)
+        self.server.start()
+        time.sleep(1) # allow server to start
+
+        self.overlay = SecureOverlay()
+
+    def tearDown(self):
+        # NOTE(review): the MyServer started in setUp is never stopped
+        # here — presumably acceptable because test_all is the only test;
+        # verify if more test methods are ever added.
+        pass
+    
+    def test_all(self):
+        """ 
+            I want to start my test server once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new server every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        for count in range(test_count):
+            if count == 0:
+                self.subtest_connect(True) # first test is good response1
+            else:
+                self.subtest_connect(False) # others are bad
+
+    def subtest_connect(self,should_succeed):
+        # Run one CR handshake against the server and check that
+        # ChallengeResponse accepts/rejects the RESPONSE1 as expected.
+        if DEBUG:
+            print "client: subtest_connect"
+        self.conn = ConnecterConnection(self.server_port)
+        self.myid = self.conn.get_my_id()
+
+        self.cr = permid.ChallengeResponse(self.my_keypair,self.myid,self.overlay)
+        self.cr.start_cr(self.conn)
+        resp1_data = self.conn.get_message()
+        success = self.cr.got_message(self.conn,resp1_data)
+        if success and should_succeed:
+            # Correct behaviour is to keep connection open.
+            # long enough for MyServer to test if the connection still exists
+            time.sleep(10) 
+            self.conn.close()
+        elif not success and not should_succeed:
+            # Correct behaviour is to close conn immediately.
+            self.conn.close()
+        elif success and not should_succeed:
+            # Correct behaviour is to keep connection open.
+            # Should have failed
+            self.assert_(False,"Tribler should not have accepted RESPONSE1")
+            time.sleep(10)  # Emulate we're still running
+            self.conn.close()
+        elif not success and should_succeed:
+            # Correct behaviour is to close conn immediately.
+            # Should have succeeded
+            self.assert_(False,"Tribler should have accepted RESPONSE1")
+            self.conn.close()
+
+def test_suite():
+    # Standard unittest suite hook for this module.
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestPermIDsResponse1))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_proxyservice.bat b/instrumentation/next-share/BaseLib/Test/test_proxyservice.bat
new file mode 100755 (executable)
index 0000000..c0e7bf4
--- /dev/null
@@ -0,0 +1,12 @@
+REM Windows runner for test_proxyservice.py; mirrors test_proxyservice.sh.\r
+REM Each test runs in its own interpreter (see the .py module header);\r
+REM only the "good" case is enabled — uncomment lines to run bad cases.\r
+set PYTHONPATH=..\..\r
+\r
+python test_proxyservice.py singtest_good_2fast\r
+REM #python test_proxyservice.py singtest_bad_2fast_dlhelp\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_not_bdecodable\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_not_dict1\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_not_dict2\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_empty_dict\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_wrong_dict_keys\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent1\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent2\r
+REM #python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent3\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_proxyservice.py b/instrumentation/next-share/BaseLib/Test/test_proxyservice.py
new file mode 100644 (file)
index 0000000..e3d0db0
--- /dev/null
@@ -0,0 +1,446 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# Like test_secure_overlay, we start a new python interpreter for each test. 
+# Although we don't have the singleton problem here, we do need to do this as the
+# HTTPServer that MyTracker uses won't relinquish the listen socket, causing 
+# "address in use" errors in the next test. This is probably due to the fact that
+# MyTracker has a thread mixed in, as a listensocket.close() normally releases it
+# (according to lsof).
+#
+
+import unittest
+import os
+import sys
+import time
+from types import ListType
+import socket
+import hashlib
+
+from BaseLib.Test.test_as_server import TestAsServer
+from btconn import BTConnection
+from olconn import OLConnection
+from BaseLib.Core.RequestPolicy import AllowAllRequestPolicy
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BitTornado.BT1.convert import toint
+from BaseLib.Core.CacheDB.CacheDBHandler import FriendDBHandler
+from BaseLib.Test.test_connect_overlay import MyTracker
+
+DEBUG=True
+
+class TestDownloadHelp(TestAsServer):
+    """ 
+    Testing download helping: drives the ASK_FOR_HELP / METADATA /
+    PIECES_RESERVED / STOP_DOWNLOAD_HELP message sequence against a
+    live Tribler session, playing coordinator, tracker and fake seeder.
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        # Give the session's launch-many core time to come up before we
+        # start connecting to it.
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+
+        self.setUpMyListenSockets()
+        
+        # Must be changed in test/extend_hs_dir/proxyservice.test.torrent as well
+        self.mytrackerport = 4901
+        # Fake BT peer ids (R410 prefix); myid is the fake seeder's id.
+        self.myid = 'R410-----HgUyPu56789'
+        self.mytracker = MyTracker(self.mytrackerport,self.myid,'127.0.0.1',self.mylistenport)
+        self.mytracker.background_serve()
+
+        # Second peer id used for the coordinator-side data connection.
+        self.myid2 = 'R410-----56789HuGyx0'
+        
+        
+    def setUpMyListenSockets(self):
+        # Start our server side, to which Tribler will try to connect
+        # Seeder socket (port must match what MyTracker advertises)
+        self.mylistenport = 4810
+        self.myss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss.bind(('', self.mylistenport))
+        self.myss.listen(1)
+
+        # Leecher socket (coordinator side, passed to OLConnection later)
+        self.mylistenport2 = 3726
+        self.myss2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss2.bind(('', self.mylistenport2))
+        self.myss2.listen(1)
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())  
+        
+        # Calculating the infohash for proxyservice.test.torrent
+        self.torrentfile = os.path.join('extend_hs_dir','proxyservice.test.torrent')
+        
+        # Read torrentfile to calculate the infohash
+        torrentfile_content = open(self.torrentfile, "rb")
+        # Decode all the file
+        metainfo = bdecode(torrentfile_content.read())
+        # Re-encode only the info section; its SHA1 is the infohash.
+        self.infohash = hashlib.sha1(bencode(metainfo['info'])).digest()
+        # Close the torrentfile
+        torrentfile_content.close()
+        
+        # Add us as friend, so he will accept the ASK_FOR_HELP
+        if False:  # TEMP: friend-based policy disabled; allow everything instead
+            friendsdb = FriendDBHandler.getInstance()
+            friendsdb.addFriend(self.mypermid)
+        else:
+            self.session.set_overlay_request_policy(AllowAllRequestPolicy())
+          
+        self.session.set_download_states_callback(self.states_callback)
+
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+        # Shut down our fake tracker and both listen sockets.
+        self.mytracker.shutdown()
+        self.tearDownMyListenSockets()
+
+
+    def tearDownMyListenSockets(self):
+        self.myss.close()
+        self.myss2.close()
+
+
+    def states_callback(self,dslist):
+        # Periodic download-states callback: just log coordinator/helper
+        # info for each DownloadState.
+        print >>sys.stderr,"stats: dslist",len(dslist)
+        for ds in dslist:
+            print >>sys.stderr,"stats: coordinator",`ds.get_coopdl_coordinator()`
+            print >>sys.stderr,"stats: helpers",`ds.get_coopdl_helpers()`
+        print >>sys.stderr, ""
+        # Presumably (poll-interval-seconds, get-peer-list-flag) — see
+        # Session.set_download_states_callback; TODO confirm.
+        return (0.5,False)
+
+    # Creates dictionary with good commands
+    def get_genresdict(self):
+        # Maps message id -> (generator-callable, expected-to-be-accepted).
+        # Bad-case tests overwrite individual entries with bad generators.
+        genresdict = {}
+        genresdict[ASK_FOR_HELP] = (self.create_good_dlhelp,True)
+        genresdict[METADATA] = (self.create_good_metadata,True)
+        genresdict[PIECES_RESERVED] = (self.create_good_pieces_reserved,True)
+        genresdict[STOP_DOWNLOAD_HELP] = (self.create_good_stop_dlhelp,True)
+        return genresdict
+
+    #
+    # Good 2fast
+    #
+    def singtest_good_2fast(self):
+        # All-good message sequence: the full handshake should complete.
+        # DEBUG
+        print "***"
+        print "*** running singtest_good_2fast"
+        print "***"
+        genresdict = self.get_genresdict()
+        print >>sys.stderr,"test: good ASK_FOR_HELP"
+        self._test_2fast(genresdict)
+    
+
+    #
+    # Bad 2fast: each test replaces one generator in genresdict with a
+    # malformed variant and expects Tribler to drop the connection.
+    #
+    def singtest_bad_2fast_dlhelp(self):
+        genresdict = self.get_genresdict()
+        genresdict[ASK_FOR_HELP] = (self.create_bad_dlhelp_not_infohash,False)
+        print >>sys.stderr,"test: bad dlhelp"
+        self._test_2fast(genresdict)
+        
+    def singtest_bad_2fast_metadata_not_bdecodable(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_bdecodable,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_not_dict1(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict1,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_not_dict2(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict2,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    def singtest_bad_2fast_metadata_empty_dict(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_empty_dict,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_wrong_dict_keys(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_wrong_dict_keys,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_bad_torrent1(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent1,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    def singtest_bad_2fast_metadata_bad_torrent2(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent2,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+    def singtest_bad_2fast_metadata_bad_torrent3(self):
+        genresdict = self.get_genresdict()
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent3,False)
+        print >>sys.stderr,"test: bad METADATA",genresdict[METADATA][0]
+        self._test_2fast(genresdict)
+
+
+    
+    def _test_2fast(self,genresdict):
+        """ 
+            test ASK_FOR_HELP, METADATA, PIECES_RESERVED and STOP_DOWNLOAD_HELP sequence
+        """
+        # 1. Establish overlay connection to Tribler
+        ol_connection = OLConnection(self.my_keypair, 'localhost', self.hisport, mylistenport=self.mylistenport2)
+        
+        # Send ASK_FOR_HELP
+        (generate_data, sent_good_values) = genresdict[ASK_FOR_HELP]
+        msg = generate_data()
+        ol_connection.send(msg)
+        if sent_good_values:
+            resp = ol_connection.recv()
+            self.assert_(resp[0] == GET_METADATA)
+            self.check_get_metadata(resp[1:])
+            print >>sys.stderr,"test: Got GET_METADATA for torrent, sent_good_values"
+        else:
+            resp = ol_connection.recv()
+            self.assert_(len(resp)==0)
+            ol_connection.close()
+            return
+        
+        # Send METADATA
+        (generate_data,sent_good_values) = genresdict[METADATA]
+        msg = generate_data()
+        ol_connection.send(msg)
+
+        if sent_good_values:
+            # 2. Accept the data connection Tribler wants to establish with us, the coordinator
+            self.myss2.settimeout(10.0)
+            conn, addr = self.myss2.accept()
+            #(self,hostname,port,opensock=None,user_option_pattern=None,user_infohash=None,myid=None,mylistenport=None,myoversion=None):
+            bt_connection_2 = BTConnection('', 0, conn, user_infohash=self.infohash, myid=self.myid2)
+            bt_connection_2.read_handshake_medium_rare()
+            
+            msg = UNCHOKE
+            bt_connection_2.send(msg)
+            print >>sys.stderr,"test: Got data connection to us, as coordinator, sent_good_values"
+        else:
+            resp = ol_connection.recv()
+            self.assert_(len(resp)==0)
+            ol_connection.close()
+            return
+
+        # 3. Our tracker says there is another peer (also us) on port 4810
+        # Now accept a connection on that port and pretend we're a seeder
+        self.myss.settimeout(10.0)
+        conn, addr = self.myss.accept()
+        options = '\x00\x00\x00\x00\x00\x00\x00\x00'
+        bt_connection = BTConnection('', 0, conn, user_option_pattern=options, user_infohash=self.infohash, myid=self.myid)
+        bt_connection.read_handshake_medium_rare()
+        
+        # Get the number of pieces from the .torrent file
+        torrentfile_content = open(self.torrentfile, "rb")
+        metadata_dict = bdecode(torrentfile_content.read())
+        torrentfile_content.close()
+        if "length" in metadata_dict["info"]:
+            length = metadata_dict["info"]["length"]
+        else:
+            length = 0
+            for file in metadata_dict["info"]["files"]:
+                length += file["length"]
+        numpieces = length / metadata_dict["info"]["piece length"]
+        
+        bitf = Bitfield(numpieces)
+        for i in range(numpieces):
+            bitf[i] = True
+        self.assert_(bitf.complete())
+        msg = BITFIELD+bitf.tostring()
+        bt_connection.send(msg)
+        msg = UNCHOKE
+        bt_connection.send(msg)
+        print >>sys.stderr,"test: Got BT connection to us, as fake seeder, sent_good_values"
+
+        # 4. Await a RESERVE_PIECES message on the overlay connection
+        resp = ol_connection.recv()
+        self.assert_(resp[0] == RESERVE_PIECES)
+        pieces = self.check_reserve_pieces(resp[1:])
+        print >>sys.stderr,"test: Got RESERVE_PIECES, sent_good_values"
+
+        # 5. Reply with PIECES_RESERVED
+        (generate_data, sent_good_values) = genresdict[PIECES_RESERVED]
+        msg = generate_data(pieces)
+        ol_connection.send(msg)
+        
+        if sent_good_values:
+            # 6. Await REQUEST on fake seeder
+            while True:
+                resp = bt_connection.recv()
+                self.assert_(len(resp) > 0)
+                print "test: Fake seeder got message",getMessageName(resp[0])
+                if resp[0] == REQUEST:
+                    self.check_request(resp[1:],pieces)
+                    print >>sys.stderr,"test: Fake seeder got REQUEST for reserved piece, sent_good_values"
+                    break
+        else:
+            resp = ol_connection.recv()
+            self.assert_(len(resp)==0)
+            ol_connection.close()
+            return
+
+        # 7. Reply with STOP_DOWNLOAD_HELP
+        (generate_data, sent_good_values) = genresdict[STOP_DOWNLOAD_HELP]
+        msg = generate_data()
+        ol_connection.send(msg)
+
+        # the other side should close the connection, whether the msg was sent_good_values or bad
+        resp = ol_connection.recv()
+        self.assert_(len(resp)==0)
+        ol_connection.close()
+        
+
+    # Generators/checkers for well-formed protocol messages.
+
+    def create_good_dlhelp(self):
+        # ASK_FOR_HELP payload is just the 20-byte infohash.
+        return ASK_FOR_HELP+self.infohash
+
+    def check_get_metadata(self,data):
+        infohash = bdecode(data) # is bencoded for unknown reason, can't change it =))
+        self.assert_(infohash == self.infohash)
+
+    def create_good_metadata(self):
+        # METADATA carries a bencoded dict containing the raw .torrent.
+        f = open(self.torrentfile,"rb")
+        data = f.read()
+        f.close() 
+        
+        d = self.create_good_metadata_dict(data)
+        bd = bencode(d)
+        return METADATA+bd
+
+    def create_good_metadata_dict(self,data):
+        # Build the dict shape the METADATA handler expects; 'metadata'
+        # holds the raw (bencoded) torrent bytes.
+        d = {}
+        d['torrent_hash'] = self.infohash 
+        d['metadata'] = data
+        d['leecher'] = 1
+        d['seeder'] = 1
+        d['last_check_time'] = int(time.time())
+        d['status'] = 'good'
+        return d
+
+    def check_reserve_pieces(self,data):
+        # torrent_hash + 1-byte all_or_nothing + bencode([piece num,...])
+        self.assert_(len(data) > 21)
+        infohash = data[0:20]
+        allflag = data[20]
+        plist = bdecode(data[21:])
+        
+        self.assert_(infohash == self.infohash)
+        self.assert_(type(plist) == ListType)
+        return plist
+
+    def create_good_pieces_reserved(self,pieces):
+        # Echo the reserved piece list back, prefixed by the infohash.
+        payload = self.infohash + bencode(pieces)
+        return PIECES_RESERVED + payload
+
+    def check_request(self,data,pieces):
+        # First 4 bytes of a REQUEST are the big-endian piece index.
+        piece = toint(data[0:4])
+        self.assert_(piece in pieces)
+
+    def create_good_stop_dlhelp(self):
+        return STOP_DOWNLOAD_HELP+self.infohash
+
+
+    #
+    # Bad ASK_FOR_HELP
+    #    
+
+    def create_bad_dlhelp_not_infohash(self):
+        # Payload shorter than a 20-byte infohash.
+        return ASK_FOR_HELP+"481"
+
+    #
+    # Bad METADATA: each creator violates the expected payload in one way.
+    #
+
+    def create_bad_metadata_not_bdecodable(self):
+        return METADATA+"bla"
+
+    def create_bad_metadata_not_dict1(self):
+        # bdecodable, but an int instead of a dict
+        d  = 481
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_not_dict2(self):
+        # bdecodable, but a list instead of a dict
+        d  = []
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_empty_dict(self):
+        d = {}
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_wrong_dict_keys(self):
+        # Dict with none of the expected keys (torrent_hash, metadata, ...)
+        d = {}
+        d['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        d['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_bad_torrent1(self):
+        d = self.create_good_metadata_dict(None)
+        d['metadata'] = '\x12\x34' * 100 # random data
+        bd = bencode(d)
+        return METADATA+bd
+
+    def create_bad_metadata_bad_torrent2(self):
+        # Valid bencoding, but an empty torrent dict (no 'info' key)
+        torrent = {}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+    def create_bad_metadata_bad_torrent3(self):
+        # 'info' present but not a dict
+        torrent = {'info':481}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    # Hence: exactly one test-method name must be given on the command line.
+    if len(sys.argv) != 2:
+        print "Usage: python test_dl.py <method name>"
+    else:
+        suite.addTest(TestDownloadHelp(sys.argv[1]))
+        # DEBUG
+        print "***"
+        print "*** Calling TestDownloadHelp with argument " + sys.argv[1]
+        print "***"
+
+    return suite
+
+def main():
+    # Strip our own argv so unittest does not try to parse the test name.
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_proxyservice.sh b/instrumentation/next-share/BaseLib/Test/test_proxyservice.sh
new file mode 100755 (executable)
index 0000000..e93054f
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/sh -x
+#
+# Written by George Milescu
+# see LICENSE.txt for license information
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_proxyservice.py singtest_good_2fast
+#python test_proxyservice.py singtest_bad_2fast_dlhelp
+#python test_proxyservice.py singtest_bad_2fast_metadata_not_bdecodable
+#python test_proxyservice.py singtest_bad_2fast_metadata_not_dict1
+#python test_proxyservice.py singtest_bad_2fast_metadata_not_dict2
+#python test_proxyservice.py singtest_bad_2fast_metadata_empty_dict
+#python test_proxyservice.py singtest_bad_2fast_metadata_wrong_dict_keys
+#python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent1
+#python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent2
+#python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent3
diff --git a/instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.bat b/instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.bat
new file mode 100644 (file)
index 0000000..51ae133
--- /dev/null
@@ -0,0 +1,20 @@
+REM\r
+REM Written by George Milescu\r
+REM see LICENSE.txt for license information\r
+REM\r
+REM We should run the tests in a separate Python interpreter to prevent \r
+REM problems with our singleton classes, e.g. SuperPeerDB, etc.\r
+REM\r
+\r
+set PYTHONPATH=..\..\r
+\r
+python test_proxyservice_as_coord.py singtest_good_proxy\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_ask_for_help\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_not_bdecodable\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_not_dict1\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_not_dict2\r
+REM BUGFIX: was singtest_bad_2fast_metadata_empty_dict — the *_2fast_*\r
+REM names belong to test_proxyservice.py; every other line here uses the\r
+REM singtest_bad_proxy_* prefix. Verify name against the module's tests.\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_empty_dict\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_wrong_dict_keys\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_bad_torrent1\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_bad_torrent2\r
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_bad_torrent3\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.py b/instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.py
new file mode 100644 (file)
index 0000000..bbeebc8
--- /dev/null
@@ -0,0 +1,517 @@
+# Written by Arno Bakker, George Milescu
+# see LICENSE.txt for license information
+#
+# Like test_secure_overlay, we start a new python interpreter for each test. 
+# Although we don't have the singleton problem here, we do need to do this as the
+# HTTPServer that MyTracker uses won't relinquish the listen socket, causing 
+# "address in use" errors in the next test. This is probably due to the fact that
+# MyTracker has a thread mixed in, as a listensocket.close() normally releases it
+# (according to lsof).
+#
+
+import unittest
+import os
+import sys
+import time
+import math
+from types import ListType
+import socket
+import hashlib
+import tempfile
+import string
+import random
+
+from BaseLib.Test.test_as_server import TestAsServer
+from btconn import BTConnection
+from olconn import OLConnection
+from BaseLib.Core.RequestPolicy import AllowAllRequestPolicy
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.bitfield import Bitfield
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.BitTornado.BT1.convert import toint
+from BaseLib.Core.CacheDB.CacheDBHandler import FriendDBHandler, TorrentDBHandler
+from BaseLib.Test.test_connect_overlay import MyTracker
+
+DEBUG=False
+
+class TestProxyServiceAsCoordinator(TestAsServer):
+    """ This class tests the ProxyService Helper stack. It simulates a coordinator and connects to the
+    helper instance, sending messages to it and verifying the received responses
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+
+        self.setUpMyListenSockets()
+        
+        # Must be changed in test/extend_hs_dir/proxyservice.test.torrent as well
+        self.mytrackerport = 4901
+        self.myid = 'R410-----HgUyPu56789'
+        self.mytracker = MyTracker(self.mytrackerport,self.myid,'127.0.0.1',self.mylistenport)
+        self.mytracker.background_serve()
+
+        self.myid2 = 'R410-----56789HuGyx0' # used for the coordinator
+        
+        # Arno, 2009-12-15: Make sure coop downloads have their own destdir
+        destdir = tempfile.mkdtemp()
+        self.config.set_download_help_dir(destdir)
+        
+        # Set the proxyservice to full speed
+        self.config.set_proxyservice_status(1) #PROXYSERVICE_ON=1
+    
+    def setUpMyListenSockets(self):
+        # Start our server side, to which Tribler will try to connect
+        # coordinator BitTorrent socket (the helper connects to this socket to send BT messages with pieces requested by the coordinator)
+        self.mylistenport = 4810
+        self.myss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss.bind(('', self.mylistenport))
+        self.myss.listen(1)
+
+        # Leecher socket (the helper connects to this socket to download the pieces requested by the coordinator)
+        self.mylistenport2 = 3726
+        self.myss2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.myss2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.myss2.bind(('', self.mylistenport2))
+        self.myss2.listen(1)
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())
+        
+        # Calculating the infohash for proxyservice.test.torrent
+        self.torrentfile = os.path.join('extend_hs_dir','proxyservice.test.torrent')
+        
+        # Read torrent file to calculate the infohash
+        torrentfile_content = open(self.torrentfile, "rb")
+        # Decode all the file
+        metainfo = bdecode(torrentfile_content.read())
+        # Calculate the torrent length
+        if "length" in metainfo["info"]:
+            self.length = metainfo["info"]["length"]
+        else:
+            self.length = 0
+            for f in metainfo["info"]["files"]:
+                self.length += f["length"]
+        # Re-encode only the info section
+        self.infohash = hashlib.sha1(bencode(metainfo['info'])).digest()
+        # Store the number of pieces
+        self.numpieces = int(math.ceil((self.length / metainfo["info"]["piece length"])))
+        # Close the torrentfile
+        torrentfile_content.close()
+        
+        # Add us as friend, so he will accept the ASK_FOR_HELP message
+        if False:  # TEMP: friendsdb doesn't have an addFriend method
+#            friendsdb = FriendDBHandler.getInstance()
+#            friendsdb.addFriend(self.mypermid)
+            pass
+        else:
+            # Accept overlay requests from anybody
+            self.session.set_overlay_request_policy(AllowAllRequestPolicy())
+            
+        self.session.set_download_states_callback(self.states_callback)
+        """
+        statedir=self.session.get_state_dir()
+        os.system('cp /tmp/Gopher.torrent ' + statedir + '/collected_torrent_files/Gopher.torrent')
+        """
+        
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+        self.mytracker.shutdown()
+        self.tearDownMyListenSockets()
+
+
+    def tearDownMyListenSockets(self):
+        self.myss.close()
+        self.myss2.close()
+
+
+    def states_callback(self,dslist):
+        print >>sys.stderr,"stats: dslist",len(dslist)
+        for ds in dslist:
+            print >>sys.stderr,"stats: coordinator",`ds.get_coopdl_coordinator()`
+            print >>sys.stderr,"stats: helpers",`ds.get_coopdl_helpers()`
+        print >>sys.stderr, ""
+        return (0.5,False)
+
+    # Creates dictionary with the correct (good) commands used by the coordinator to test the helper
+    def get_genresdict(self):
+        genresdict = {}
+        genresdict[ASK_FOR_HELP] = (self.create_good_ask_for_help,True)
+        genresdict[STOP_HELPING] = (self.create_good_stop_helping,True)
+        genresdict[REQUEST_PIECES] = (self.create_good_request_pieces,True)
+        genresdict[CANCEL_PIECE] = (self.create_good_cancel_piece,True)
+        # The helper will request the .torrent metadata
+        genresdict[METADATA] = (self.create_good_metadata,True)
+
+        return genresdict
+
+    #
+    # Good proxy messages
+    #
+    def singtest_good_proxy(self):
+        genresdict = self.get_genresdict()
+        print >> sys.stderr, "test: good ASK_FOR_HELP"
+        self._test_proxy(genresdict)
+    
+
+    #
+    # Bad proxy messages
+    #
+    def singtest_bad_proxy_ask_for_help(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad ASK_FOR_HELP message
+        genresdict[ASK_FOR_HELP] = (self.create_bad_ask_for_help_no_infohash,False)
+        print >> sys.stderr, "test: bad ask_for_help"
+        self._test_proxy(genresdict)
+        
+    def singtest_bad_proxy_metadata_not_bdecodable(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_not_bdecodable,False)
+        print >> sys.stderr, "test: bad METADATA (not bdecodable)", genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+    def singtest_bad_proxy_metadata_not_dict1(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict1,False)
+        print >> sys.stderr, "test: bad METADATA (not a dictionary)", genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+    def singtest_bad_proxy_metadata_not_dict2(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_not_dict2,False)
+        print >>sys.stderr,"test: bad METADATA (not a dictionary)",genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+    def singtest_bad_2fast_metadata_empty_dict(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_empty_dict,False)
+        print >>sys.stderr,"test: bad METADATA (empty dictionary)",genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+    def singtest_bad_proxy_metadata_wrong_dict_keys(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_wrong_dict_keys,False)
+        print >>sys.stderr,"test: bad METADATA (wrong keys in dictionary)",genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+    def singtest_bad_proxy_metadata_bad_torrent1(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent1,False)
+        print >>sys.stderr,"test: bad METADATA (wrong metadata field in dictionary)",genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+
+    def singtest_bad_proxy_metadata_bad_torrent2(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent2,False)
+        print >>sys.stderr,"test: bad METADATA (empty dictionary in metadata filed)",genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+    def singtest_bad_proxy_metadata_bad_torrent3(self):
+        # Get the correct messages used by the coordinator
+        genresdict = self.get_genresdict()
+        # Prepare a bad METADATA message
+        genresdict[METADATA] = (self.create_bad_metadata_bad_torrent3,False)
+        print >>sys.stderr,"test: bad METADATA (bad metadata field in dictionary)",genresdict[METADATA][0]
+        self._test_proxy(genresdict)
+
+
+    
+    def _test_proxy(self,genresdict):
+        """ Send messages to the helper instance and test it.
+            
+            Testing ASK_FOR_HELP, STOP_HELPING, REQUEST_PIECES, CANCEL_PIECE and METADATA
+        """
+        # 1. Establish overlay connection to Tribler
+        ol_connection = OLConnection(self.my_keypair, 'localhost', self.hisport, mylistenport=self.mylistenport2)
+        
+        # 2. Send the ASK_FOR_HELP message
+        (generate_data,sent_good_values) = genresdict[ASK_FOR_HELP]
+        msg = generate_data()
+        ol_connection.send(msg)
+        if sent_good_values:
+            # Read the helper's response
+            resp = ol_connection.recv()
+            # Check the helper's response
+            # 3. At this point, the helper does not have the .torrent file, so it requests it with a METADATA message
+            self.assert_(resp[0] == GET_METADATA)
+            self.check_get_metadata(resp[1:])
+            print >>sys.stderr,"test: Got GET_METADATA for torrent, good"
+        else:
+            # Read the helper's response
+            resp = ol_connection.recv()
+            # Check the helper's response
+            self.assert_(len(resp)==0)
+            ol_connection.close()
+            return
+
+        # 4. Send METADATA
+        (generate_data,sent_good_values) = genresdict[METADATA]
+        msg = generate_data()
+        ol_connection.send(msg)
+        if sent_good_values:
+            # 5. At this point the helper is confirming his availability to help 
+            # Read the helper's response
+            resp = ol_connection.recv()
+            # Check the helper's response
+            self.assert_(resp[0] == JOIN_HELPERS)
+            self.check_ask_for_help(resp)
+            print >>sys.stderr,"test: Got JOIN_HELPERS for torrent, good"
+
+            # 6. At this point, the helper will contact the tracker and then wait for REQUEST_PIECES messages 
+            # So we send a request pieces message
+            (generate_data,sent_good_values) = genresdict[REQUEST_PIECES]
+            msg = generate_data()
+            ol_connection.send(msg)
+
+            # At this point the helper will contact the seeders in the swarm to download the requested piece
+            # There is only one seeder in the swarm, the coordinator's twin
+            # 8. Our tracker says there is another peer (also us) on port 4810
+            # Now accept a connection on that port and pretend we're a seeder
+            self.myss.settimeout(10.0)
+            conn, addr = self.myss.accept()
+            options = '\x00\x00\x00\x00\x00\x00\x00\x00'
+            s2 = BTConnection('',0,conn,user_option_pattern=options,user_infohash=self.infohash,myid=self.myid)
+            s2.read_handshake_medium_rare()
+            
+            # Send a bitfield message to the helper (pretending we are a regular seeder)
+            b = Bitfield(self.numpieces)
+            for i in range(self.numpieces):
+                b[i] = True
+            self.assert_(b.complete())
+            msg = BITFIELD+b.tostring()
+            s2.send(msg)
+            msg = UNCHOKE
+            s2.send(msg)
+            print >>sys.stderr,"test: Got BT connection to us, as fake seeder, good"
+        else:
+            resp = ol_connection.recv()
+            self.assert_(len(resp)==0)
+            ol_connection.close()
+            return
+
+        # 7. Accept the data connection the helper wants to establish with us, the coordinator.
+        # The helper will send via this connection the pieces we request it to download.
+        self.myss2.settimeout(10.0)
+        conn, addr = self.myss2.accept()
+        s3 = BTConnection('',0,conn,user_infohash=self.infohash,myid=self.myid2)
+        s3.read_handshake_medium_rare()
+        
+        msg = UNCHOKE
+        s3.send(msg)
+        print >>sys.stderr,"test: Got data connection to us, as coordinator, good"
+        
+        # 9. At this point the helper should send a PROXY_HAVE message on the overlay connection
+#        resp = ol_connection.recv()
+#        self.assert_(resp[0] == PROXY_HAVE)
+#        print >>sys.stderr,"test: Got PROXY_HAVE, good"
+
+        # 10. Await REQUEST on fake seeder
+        try:
+            while True:
+                s2.s.settimeout(10.0)
+                resp = s2.recv()
+                self.assert_(len(resp) > 0)
+                print "test: Fake seeder got message",getMessageName(resp[0])
+                if resp[0] == REQUEST:
+                    self.check_request(resp[1:])
+                    print >>sys.stderr,"test: Fake seeder got REQUEST for reserved piece, good"
+                    break
+                
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, fake seeder didn't reply with message"
+            self.assert_(False)
+        
+        # 11. Send the helper a STOP_HELPING message
+        (generate_data,sent_good_values) = genresdict[STOP_HELPING]
+        msg = generate_data()
+        ol_connection.send(msg)
+        # The other side should close the connection, whether the msg was good or bad
+        resp = ol_connection.recv()
+        self.assert_(len(resp)==0)
+        ol_connection.close()
+
+    def check_request(self,data):
+        piece = toint(data[0:4])
+        self.assert_(piece == 1)        
+
+    #
+    # Correct (good) messages used by the coordinator to test the helper
+    #
+    def create_good_ask_for_help(self):
+        """ Create a correctly formatted ASK_FOR_HELP message and return it
+        """
+        # Generate a random challenge - random number on 8 bytes (62**8 possible combinations)
+        chars = string.letters + string.digits #len(chars)=62
+        challenge = ''
+        for i in range(8):
+            challenge = challenge + random.choice(chars)
+
+        return ASK_FOR_HELP + self.infohash + bencode(challenge)
+    
+    def check_ask_for_help(self, data):
+        """ Check the answer the coordinator got for an ASK_FOR_HELP message
+        The helper should have sent a JOIN_HELPERS message
+        """
+        infohash = data[1:21]
+        self.assert_(infohash == self.infohash)
+    
+    #----------
+    
+    def create_good_stop_helping(self):
+        return STOP_HELPING + self.infohash
+
+    def check_stop_helping(self, data):
+        """ Check the answer the coordinator got for a STOP_HELPING message
+        The helper should have sent a RESIGN_AS_HELPER message
+        """
+        infohash = data[1:21]
+        self.assert_(infohash == self.infohash)
+    
+    #----------
+
+    def create_good_request_pieces(self):
+        # Request piece number 1
+        piece = 1
+        return REQUEST_PIECES + self.infohash + bencode(piece)
+    # The reply for this message is a BT Have message
+    
+    #----------
+    
+    def create_good_cancel_piece(self):
+        # Cancel piece number 1
+        piece = 1
+        return CANCEL_PIECE + self.infohash + bencode(piece)
+    # This message is not supposed to have any reply
+    # TODO: test the DROPPED_PIECE message, after implementation
+    
+    #----------
+
+    def create_good_metadata(self):
+        f = open(self.torrentfile,"rb")
+        data = f.read()
+        f.close() 
+        
+        d = self.create_good_metadata_dict(data)
+        bd = bencode(d)
+        return METADATA + bd
+
+    def create_good_metadata_dict(self,data):
+        d = {}
+        d['torrent_hash'] = self.infohash 
+        d['metadata'] = data
+        d['leecher'] = 1
+        d['seeder'] = 1
+        d['last_check_time'] = int(time.time())
+        d['status'] = 'good'
+        return d
+
+    def check_get_metadata(self,data):
+        infohash = bdecode(data) # is bencoded for unknown reason, can't change it =))
+        self.assert_(infohash == self.infohash)
+
+    #----------
+
+    #
+    # Incorrect (bad) messages used by the coordinator to test the helper
+    #    
+    def create_bad_ask_for_help_no_infohash(self):
+        return ASK_FOR_HELP+"481"
+
+    def create_bad_metadata_not_bdecodable(self):
+        return METADATA+"bla"
+
+    def create_bad_metadata_not_dict1(self):
+        d  = 481
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_not_dict2(self):
+        d  = []
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_empty_dict(self):
+        d = {}
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_wrong_dict_keys(self):
+        d = {}
+        d['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        d['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return METADATA+bencode(d)
+
+    def create_bad_metadata_bad_torrent1(self):
+        d = self.create_good_metadata_dict(None)
+        d['metadata'] = '\x12\x34' * 100 # random data
+        bd = bencode(d)
+        return METADATA+bd
+
+    def create_bad_metadata_bad_torrent2(self):
+        torrent = {}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+    def create_bad_metadata_bad_torrent3(self):
+        torrent = {'info':481}
+        data = bencode(torrent)
+        
+        d = self.create_good_metadata_dict(data)
+        d['metadata'] = data
+        bd = bencode(d)
+        return METADATA+bd
+
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_proxyservice_as_coord.py <method name>"
+    else:
+        suite.addTest(TestProxyServiceAsCoordinator(sys.argv[1]))
+        # DEBUG
+        print "***"
+        print "*** Calling TestProxyServiceAsCoordinator with argument " + sys.argv[1]
+        print "***"
+
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.sh b/instrumentation/next-share/BaseLib/Test/test_proxyservice_as_coord.sh
new file mode 100755 (executable)
index 0000000..07f56ed
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh -x
+#
+# Written by George Milescu
+# see LICENSE.txt for license information
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+#mkdir /tmp/tmp-test-tribler
+#cp ../../../../TestTorrent/Coord1/Gopher.torrent /tmp/Gopher.torrent
+
+python test_proxyservice_as_coord.py singtest_good_proxy
+python test_proxyservice_as_coord.py singtest_bad_proxy_ask_for_help
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_not_bdecodable
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_not_dict1
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_not_dict2
+python test_proxyservice_as_coord.py singtest_bad_2fast_metadata_empty_dict
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_wrong_dict_keys
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_bad_torrent1
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_bad_torrent2
+python test_proxyservice_as_coord.py singtest_bad_proxy_metadata_bad_torrent3
diff --git a/instrumentation/next-share/BaseLib/Test/test_remote_query.py b/instrumentation/next-share/BaseLib/Test/test_remote_query.py
new file mode 100644 (file)
index 0000000..f3caee3
--- /dev/null
@@ -0,0 +1,380 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+# TODO: let one hit to SIMPLE+METADATA be P2PURL
+import unittest
+import os
+import sys
+import time
+from BaseLib.Core.Utilities.Crypto import sha
+from types import StringType, DictType, IntType
+from M2Crypto import EC
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from BaseLib.Core.API import *
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+
+DEBUG=True
+
+
+class TestRemoteQuery(TestAsServer):
+    """ 
+    Testing QUERY message of Social Network extension V1
+    """
+    
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        # Enable remote query
+        self.config.set_remote_query(True)
+        self.config.set_torrent_collecting_dir(os.path.join(self.config_path, "tmp_torrent_collecting"))
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        #self.mypermid = str(self.my_keypair.pub().get_der())
+        #self.hispermid = str(self.his_keypair.pub().get_der())
+        
+        self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
+        try:
+            # Add two torrents that will match our query and one that shouldn't
+            tdef1, bmetainfo1 = self.get_default_torrent('sumfilename1','Hallo S01E10')
+            dbrec= self.torrent_db.addExternalTorrent(tdef1, extra_info={"filename":"sumfilename1"})
+            
+            tdef2, bmetainfo2 = self.get_default_torrent('sumfilename2','Hallo S02E01')
+            dbrec = self.torrent_db.addExternalTorrent(tdef2, extra_info={"filename":"sumfilename2"})
+    
+            tdef3, bmetainfo3 = self.get_default_torrent('sumfilename3','Halo Demo')
+            self.torrent_db.addExternalTorrent(tdef3, extra_info={"filename":"sumfilename3"})
+            
+            self.goodtorrents_str = {}
+            self.goodtorrents_str[tdef1.get_infohash()] = bmetainfo1
+            self.goodtorrents_str[tdef2.get_infohash()] = bmetainfo2
+
+            # Unicode: Add two torrents that will match our query and one that shouldn't
+            tdef1, bmetainfo1 = self.get_default_torrent('usumfilename1',u'Ch\u00e8rie S01E10')
+            dbrec= self.torrent_db.addExternalTorrent(tdef1, extra_info={"filename":"usumfilename1"})
+            
+            tdef2, bmetainfo2 = self.get_default_torrent('usumfilename2',u'Ch\u00e8rie S02E01')
+            dbrec = self.torrent_db.addExternalTorrent(tdef2, extra_info={"filename":"usumfilename2"})
+    
+            tdef3, bmetainfo3 = self.get_default_torrent('usumfilename3',u'Cherie Demo')
+            self.torrent_db.addExternalTorrent(tdef3, extra_info={"filename":"usumfilename3"})
+
+            self.goodtorrents_unicode = {}
+            self.goodtorrents_unicode[tdef1.get_infohash()] = bmetainfo1
+            self.goodtorrents_unicode[tdef2.get_infohash()] = bmetainfo2
+
+            # Unicode: Add two multi-file torrents that will match our query 
+            # because the keyword occurring in a path and one that shouldn't
+            paths1 = ['SomeFile.mkv',u'Besan\u00e7on.txt']
+            tdef1, bmetainfo1 = self.get_default_torrent('psumfilename1',u'Path S01E10',paths=paths1)
+            dbrec= self.torrent_db.addExternalTorrent(tdef1, extra_info={"filename":"psumfilename1"})
+            
+            paths2 = ['SomeFile.mkv',u'Besan\u00e7on.doc']
+            tdef2, bmetainfo2 = self.get_default_torrent('psumfilename2',u'Path S02E01',paths=paths2)
+            dbrec = self.torrent_db.addExternalTorrent(tdef2, extra_info={"filename":"psumfilename2"})
+    
+            paths3 = ['SomeFile.mkv',u'Besancon']
+            tdef3, bmetainfo3 = self.get_default_torrent('psumfilename3',u'Path Demo',paths=paths3)
+            self.torrent_db.addExternalTorrent(tdef2, extra_info={"filename":"psumfilename3"})
+
+            self.goodtorrents_path = {}
+            self.goodtorrents_path[tdef1.get_infohash()] = bmetainfo1
+            self.goodtorrents_path[tdef2.get_infohash()] = bmetainfo2
+
+
+            # Add two torrents that will match our two-word query and one that shouldn't
+            tdef1, bmetainfo1 = self.get_default_torrent('ssumfilename1','One Two S01E10')
+            dbrec= self.torrent_db.addExternalTorrent(tdef1, extra_info={"filename":"ssumfilename1"})
+            
+            tdef2, bmetainfo2 = self.get_default_torrent('ssumfilename2','Two S02E01 One')
+            dbrec = self.torrent_db.addExternalTorrent(tdef2, extra_info={"filename":"ssumfilename2"})
+    
+            tdef3, bmetainfo3 = self.get_default_torrent('ssumfilename3','Two Demo')
+            self.torrent_db.addExternalTorrent(tdef3, extra_info={"filename":"ssumfilename3"})
+            
+            self.goodtorrents_two = {}
+            self.goodtorrents_two[tdef1.get_infohash()] = bmetainfo1
+            self.goodtorrents_two[tdef2.get_infohash()] = bmetainfo2
+
+        
+        except:
+            print_exc()
+        
+
+    def tearDown(self):
+        TestAsServer.tearDown(self)
+        self.session.close_dbhandler(self.torrent_db)
+      
+
+    def get_default_torrent(self,filename,title,paths=None):
+        metainfo = {}
+        metainfo['announce'] = 'http://localhost:0/announce'
+        metainfo['announce-list'] = []
+        metainfo['creation date'] = int(time.time())
+        metainfo['encoding'] = 'UTF-8'
+        info = {}
+        info['name'] = title.encode("UTF-8")
+        info['piece length'] = 2 ** 16
+        info['pieces'] = '*' * 20
+        if paths is None:
+            info['length'] = 481
+        else:
+            d1 = {}
+            d1['path'] = [paths[0].encode("UTF-8")]
+            d1['length'] = 201
+            d2 = {}
+            d2['path'] = [paths[1].encode("UTF-8")]
+            d2['length'] = 280
+            info['files'] = [d1,d2]
+            
+        metainfo['info'] = info
+        path = os.path.join(self.config.get_torrent_collecting_dir(),filename)
+        tdef = TorrentDef.load_from_dict(metainfo)
+        tdef.save(path)
+        return tdef, bencode(metainfo)
+
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+        
+        # 1. test good QUERY
+        self.subtest_good_simple_query("hallo",self.goodtorrents_str)
+        time.sleep(5) # Concurrency between closing of previous olconn and new one, sleep to avoid
+        self.subtest_good_simpleplustorrents_query("hallo",self.goodtorrents_str)
+        time.sleep(5)
+        self.subtest_good_simple_query(u'ch\u00e8rie',self.goodtorrents_unicode)
+        time.sleep(5)
+        self.subtest_good_simpleplustorrents_query(u'ch\u00e8rie',self.goodtorrents_unicode)
+        time.sleep(5)
+        self.subtest_good_simple_query(u'besan\u00e7on',self.goodtorrents_path)
+        time.sleep(5)
+        self.subtest_good_simple_query('one two',self.goodtorrents_two)
+        time.sleep(5)
+
+        # 2. test various bad QUERY messages
+        self.subtest_bad_not_bdecodable()
+        self.subtest_bad_not_dict1()
+        self.subtest_bad_not_dict2()
+        self.subtest_bad_empty_dict()
+        self.subtest_bad_wrong_dict_keys()
+
+        self.subtest_bad_q_not_list()
+        self.subtest_bad_id_not_str()
+
+    #
+    # Good QUERY
+    #
+    def subtest_good_simple_query(self,keyword,goodtorrents):
+        """ 
+            test good QUERY messages: SIMPLE
+        """
+        print >>sys.stderr,"test: good QUERY SIMPLE",`keyword`
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = self.create_good_simple_query(keyword)
+        s.send(msg)
+        resp = s.recv()
+        if len(resp) > 0:
+            print >>sys.stderr,"test: good QUERY: got",getMessageName(resp[0])
+        self.assert_(resp[0] == QUERY_REPLY)
+        self.check_rquery_reply("SIMPLE",resp[1:],goodtorrents)
+        time.sleep(10)
+        # the other side should not have closed the connection, as
+        # this is all valid, so this should not throw an exception:
+        s.send('bla')
+        s.close()
+
+    def create_good_simple_query(self,keyword):
+        d = {}
+        if isinstance(keyword,unicode):
+            d['q'] = 'SIMPLE '+keyword.encode("UTF-8")
+        else:
+            d['q'] = 'SIMPLE '+keyword
+        d['id'] = 'a' * 20
+        return self.create_payload(d)
+
+
+    def subtest_good_simpleplustorrents_query(self,keyword,goodtorrents):
+        """ 
+            test good QUERY messages: SIMPLE+METADATA
+        """
+        print >>sys.stderr,"test: good QUERY SIMPLE+METADATA",`keyword`
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = self.create_good_simpleplustorrents_query(keyword)
+        s.send(msg)
+        resp = s.recv()
+        if len(resp) > 0:
+            print >>sys.stderr,"test: good QUERY: got",getMessageName(resp[0])
+        self.assert_(resp[0] == QUERY_REPLY)
+        self.check_rquery_reply("SIMPLE+METADATA",resp[1:],goodtorrents)
+        time.sleep(10)
+        # the other side should not have closed the connection, as
+        # this is all valid, so this should not throw an exception:
+        s.send('bla')
+        s.close()
+
+    def create_good_simpleplustorrents_query(self,keyword):
+        d = {}
+        d['q'] = 'SIMPLE+METADATA '+keyword.encode("UTF-8")
+        d['id'] = 'b' * 20
+        return self.create_payload(d)
+
+
+
+    def create_payload(self,r):
+        return QUERY+bencode(r)
+
+    def check_rquery_reply(self,querytype,data,goodtorrents):
+        d = bdecode(data)
+        
+        print >>sys.stderr,"test: Got reply",`d`
+        
+        self.assert_(type(d) == DictType)
+        self.assert_(d.has_key('a'))
+        self.check_adict(d['a'])
+        self.assert_(d.has_key('id'))
+        id = d['id']
+        self.assert_(type(id) == StringType)
+
+        k = d['a'].keys()
+        self.assert_(len(k) == 2)
+        var1 = k[0] == goodtorrents.keys()[0] and k[1] == goodtorrents.keys()[1]
+        var2 = k[0] == goodtorrents.keys()[1] and k[1] == goodtorrents.keys()[0]
+        self.assert_(var1 or var2)
+
+        # OLPROTO_VER_NINETH must contain torrent_size
+        for infohash, torrent in d['a'].iteritems():
+            self.assert_(torrent['torrent_size'], goodtorrents[infohash])
+            
+        if querytype.startswith("SIMPLE+METADATA"):
+            for infohash, torrent in d['a'].iteritems():
+                self.assert_('metadata' in torrent)
+                bmetainfo = torrent['metadata']
+                self.assert_(bmetainfo == goodtorrents[infohash])
+
+
+    def check_adict(self,d):
+        self.assert_(type(d) == DictType)
+        for key,value in d.iteritems():
+            self.assert_(type(key) == StringType)
+            self.assert_(len(key) == 20)
+            self.check_rdict(value)
+    
+    def check_rdict(self,d):
+        self.assert_(type(d) == DictType)
+        self.assert_('content_name' in d)
+        self.assert_(type(d['content_name']) == StringType)
+        self.assert_('length' in d)
+        self.assert_(type(d['length']) == IntType)
+        self.assert_('leecher' in d)
+        self.assert_(type(d['leecher']) == IntType)
+        self.assert_('seeder' in d)
+        self.assert_(type(d['seeder']) == IntType)
+
+
+    # Bad rquery
+    #    
+    def subtest_bad_not_bdecodable(self):
+        self._test_bad(self.create_not_bdecodable)
+
+    def subtest_bad_not_dict1(self):
+        self._test_bad(self.create_not_dict1)
+
+    def subtest_bad_not_dict2(self):
+        self._test_bad(self.create_not_dict2)
+
+    def subtest_bad_empty_dict(self):
+        self._test_bad(self.create_empty_dict)
+
+    def subtest_bad_wrong_dict_keys(self):
+        self._test_bad(self.create_wrong_dict_keys)
+
+    def subtest_bad_q_not_list(self):
+        self._test_bad(self.create_bad_q_not_list)
+
+    def subtest_bad_id_not_str(self):
+        self._test_bad(self.create_bad_id_not_str)
+
+
+    def _test_bad(self,gen_rquery_func):
+        print >>sys.stderr,"test: bad QUERY",gen_rquery_func
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = gen_rquery_func()
+        s.send(msg)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        self.assert_(len(s.recv())==0)
+        s.close()
+
+    def create_not_bdecodable(self):
+        return QUERY+"bla"
+
+    def create_not_dict1(self):
+        rquery = 481
+        return self.create_payload(rquery)
+
+    def create_not_dict2(self):
+        rquery = []
+        return self.create_payload(rquery)
+
+    def create_empty_dict(self):
+        rquery = {}
+        return self.create_payload(rquery)
+
+    def create_wrong_dict_keys(self):
+        rquery = {}
+        rquery['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        rquery['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return self.create_payload(rquery)
+
+
+    #
+    # Bad q
+    #
+    def create_bad_q_not_list(self):
+        rquery = {}
+        rquery['q'] = 481
+        rquery['id'] = 'a' * 20
+        return self.create_payload(rquery)
+
+
+    #
+    # Bad id
+    #
+    def create_bad_id_not_str(self):
+        rquery = {}
+        rquery['q'] = ['hallo']
+        rquery['id'] = 481
+        return self.create_payload(rquery)
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestRemoteQuery))
+    
+    return suite
+
+def sign_data(plaintext,keypair):
+    digest = sha(plaintext).digest()
+    return keypair.sign_dsa_asn1(digest)
+
+def verify_data(plaintext,permid,blob):
+    pubkey = EC.pub_key_from_der(permid)
+    digest = sha(plaintext).digest()
+    return pubkey.verify_dsa_asn1(digest,blob)
+
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.bat b/instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.bat
new file mode 100755 (executable)
index 0000000..04c95e1
--- /dev/null
@@ -0,0 +1,8 @@
+set PYTHONPATH=..\..\r
+\r
+python test_rquery_reply_active.py singtest_good_simple_reply\r
+python test_rquery_reply_active.py singtest_good_simpleplustorrents_reply\r
+python test_rquery_reply_active.py singtest_good_simple_reply_unicode\r
+python test_rquery_reply_active.py singtest_good_simpleplustorrents_reply_unicode\r
+python test_rquery_reply_active.py singtest_bad_not_bdecodable\r
+python test_rquery_reply_active.py singtest_bad_not_bdecodable_torrentfile\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.py b/instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.py
new file mode 100644 (file)
index 0000000..1da0a5b
--- /dev/null
@@ -0,0 +1,259 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import unittest
+import sys
+import time
+from time import sleep
+from types import StringType, DictType
+
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import QUERY, QUERY_REPLY, getMessageName
+
+from olconn import OLConnection
+from BaseLib.Test.test_as_server import TestAsServer
+
+DEBUG=True
+
+LENGTH = 481
+LEECHERS = 22
+SEEDERS = 11
+CATEGORY = ' Video'
+
+class TestQueryReplyActive(TestAsServer):
+
+    """  
+    Testing QUERY_REPLY message of Query extension V1 
+
+    This test checks how the Tribler code responds to good and bad 
+    QUERY_REPLY messages. I.e. the Tribler client initiates
+    the dialback by connecting to us and sending a QUERY and we
+    reply with good and bad messages.
+
+    This test allows authoritative answers from superpeers.
+
+    WARNING: Each of the test_ methods should be tested by running the TestCase 
+    in a separate Python interpreter to prevent problems with our singleton 
+    classes, e.g. SuperPeerDB, etc.
+    """
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: Pre Tribler Init"
+        TestAsServer.setUpPreSession(self)
+        print >> sys.stderr,"test: Pre Tribler Init: config_path",self.config_path
+        # Enable remote querying
+        self.config.set_remote_query(True)
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+        self.hispermid = str(self.his_keypair.pub().get_der())
+        self.my_permid = str(self.my_keypair.pub().get_der())
+
+    def pretest_simple(self,keyword):
+        self.pretest_q('SIMPLE',keyword)
+
+    def pretest_simpleplustorrents(self,keyword):
+        self.pretest_q('SIMPLE+METADATA',keyword)
+
+    def pretest_q(self,queryprefix,keyword):
+        
+        query = queryprefix+' '+keyword
+        
+        self.content_name = keyword.upper()+' S22E44'
+        self.tdef = TorrentDef()
+        self.tdef.set_tracker('http://localhost:0/announce')
+        self.tdef.set_piece_length(2 ** 15)
+        self.tdef.create_live(self.content_name,2 ** 16)
+        self.tdef.finalize()
+        
+        # 1. First connect to Tribler
+        self.openconn = OLConnection(self.my_keypair,'localhost',self.hisport)
+        sleep(3)
+        
+        # 2. Make Tribler send query
+        self.query = query
+        self.session.query_connected_peers(query,self.query_usercallback,max_peers_to_query=10)
+
+    def query_usercallback(self,permid,query,hits):
+        
+        print >>sys.stderr,"test: query_usercallback:",`permid`,`query`,`hits`
+        
+        self.assert_(query == self.query)
+        self.assert_(permid == self.my_permid)
+        self.check_good_qreply(hits)
+        
+        # TODO: if SIMPLE+METADATA: check torrent now in db.
+        
+
+    #
+    # Good SIMPLE QUERY, builds on TestQueryReply code
+    #    
+    def singtest_good_simple_reply(self):
+        self.pretest_simple('hallo')
+        self._test_qreply(self.create_good_simple_reply,True)
+
+    #
+    # Good SIMPLE+METADATA QUERY, builds on TestQueryReply code
+    #    
+    def singtest_good_simpleplustorrents_reply(self):
+        self.pretest_simpleplustorrents('hallo')
+        self._test_qreply(self.create_good_simpleplustorrents_reply,True)
+
+
+    #
+    # Good SIMPLE QUERY Unicode, builds on TestQueryReply code
+    #    
+    def singtest_good_simple_reply_unicode(self):
+        self.pretest_simple(u'Ch\u00e8rie')
+        self._test_qreply(self.create_good_simple_reply,True)
+
+    #
+    # Good SIMPLE+METADATA QUERY Unicode, builds on TestQueryReply code
+    #    
+    def singtest_good_simpleplustorrents_reply_unicode(self):
+        self.pretest_simpleplustorrents(u'Ch\u00e8rie')
+        self._test_qreply(self.create_good_simpleplustorrents_reply,True)
+
+
+    #
+    # Bad QUERY, builds on TestQueryReply code
+    #    
+    def singtest_bad_not_bdecodable(self):
+        self.pretest_simple('hallo')
+        self._test_qreply(self.create_not_bdecodable,False)
+
+    #
+    # Bad SIMPLE+METADATA QUERY, builds on TestQueryReply code
+    #    
+    def singtest_bad_not_bdecodable_torrentfile(self):
+        self.pretest_simpleplustorrents('hallo')
+        self._test_qreply(self.create_not_bdecodable_torrentfile,False)
+
+
+    ### TODO: send different valid answers so consensus not reached
+
+    #
+    # Main test code
+    #
+    def _test_qreply(self,gen_qreply,good):
+        print >> sys.stderr,"test: waiting for reply"
+        s = self.openconn
+
+        msg = s.recv()
+        self.assert_(len(msg) > 0)
+        print >> sys.stderr,"test: Received overlay message",getMessageName(msg[0])
+        self.assert_(msg[0] == QUERY)
+        id = self.check_rquery(msg[1:])
+        
+        resp = gen_qreply(id)
+        print >> sys.stderr,"test: sending QUERY_REPLY"
+        s.send(resp)
+        if good:
+            time.sleep(10)
+            # the other side should not have closed the connection, as
+            # this is all valid, so this should not throw an exception:
+            s.send('bla')
+            s.close()
+        else:
+            # the other side should not like this and close the connection
+            self.assert_(len(s.recv())==0)
+            s.close()
+
+
+    def create_good_simple_reply_dict(self,id):
+        r = {}
+        r['content_name'] = self.content_name.encode("UTF-8")
+        r['length'] = LENGTH
+        r['leecher'] = LEECHERS
+        r['seeder'] = SEEDERS
+        r['category'] = CATEGORY
+        # OLPROTO_PROTO_ELEVENTH
+        # set later r['torrent_size'] = 42
+        r['channel_permid'] = '$' * 83
+        r['channel_name'] = 'Nitin Channel' 
+        
+        d2 = {}
+        d2[self.tdef.get_infohash()] = r
+        
+        d = {}
+        d['id'] = id
+        d['a'] = d2
+        return d
+        
+    def create_good_simple_reply(self,id):
+        d = self.create_good_simple_reply_dict(id)
+        bmetainfo = bencode(self.tdef.get_metainfo())
+        d['a'][self.tdef.get_infohash()]['torrent_size'] = len(bmetainfo) 
+        b = bencode(d)
+        return QUERY_REPLY+b
+
+    def create_good_simpleplustorrents_reply(self,id):
+        d = self.create_good_simple_reply_dict(id)
+        bmetainfo = bencode(self.tdef.get_metainfo())
+        d['a'][self.tdef.get_infohash()]['torrent_size'] = len(bmetainfo)
+        d['a'][self.tdef.get_infohash()]['metatype'] = 'application/x-tribler-stream' 
+        d['a'][self.tdef.get_infohash()]['metadata'] = bmetainfo 
+        b = bencode(d)
+        return QUERY_REPLY+b
+
+    
+
+    def check_good_qreply(self,hits):
+        self.assert_(len(hits) == 1)
+        self.assert_(hits.keys()[0] == self.tdef.get_infohash())
+        hit = hits[self.tdef.get_infohash()]
+        self.assert_(hit['content_name'] == self.content_name)
+        self.assert_(hit['length'] == LENGTH)
+        self.assert_(hit['leecher'] == LEECHERS)
+        self.assert_(hit['seeder'] == SEEDERS)
+        self.assert_(hit['category'] ==  CATEGORY)
+    
+        # OLPROTO_VERSION_ELEVENTH
+        bmetainfo = bencode(self.tdef.get_metainfo())
+        self.assert_(hit['torrent_size'] == len(bmetainfo))
+        if self.query.startswith('SIMPLE+METADATA'):
+            self.assert_(hit['metadata'] == bmetainfo)
+
+    def create_not_bdecodable(self,id):
+        return QUERY_REPLY+"bla"
+
+    def create_not_bdecodable_torrentfile(self,id):
+        d = self.create_good_simple_reply_dict(id)
+        d['a'][self.tdef.get_infohash()]['torrent_size'] = 3 # consistent with metadata. Should be named "metasize"
+        d['a'][self.tdef.get_infohash()]['metadata'] = 'bla'
+        b = bencode(d)
+        return QUERY_REPLY+b
+
+    def check_rquery(self,data):
+        d = bdecode(data)
+        self.assert_(type(d) == DictType)
+        self.assert_(d.has_key('q'))
+        q = d['q']
+        self.assert_(type(q) == StringType)
+        self.assert_(d.has_key('id'))
+        id = d['id']
+        self.assert_(type(id) == StringType)
+
+        self.assert_(q == self.query.encode("UTF-8"))
+        return d['id']
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. SuperPeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_rquery_reply_active.py <method name>"
+    else:
+        suite.addTest(TestQueryReplyActive(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+    
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.sh b/instrumentation/next-share/BaseLib/Test/test_rquery_reply_active.sh
new file mode 100755 (executable)
index 0000000..6a62324
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_rquery_reply_active.py singtest_good_simple_reply
+python test_rquery_reply_active.py singtest_good_simpleplustorrents_reply
+python test_rquery_reply_active.py singtest_good_simple_reply_unicode
+python test_rquery_reply_active.py singtest_good_simpleplustorrents_reply_unicode
+python test_rquery_reply_active.py singtest_bad_not_bdecodable
+python test_rquery_reply_active.py singtest_bad_not_bdecodable_torrentfile
diff --git a/instrumentation/next-share/BaseLib/Test/test_searchgridmanager.py b/instrumentation/next-share/BaseLib/Test/test_searchgridmanager.py
new file mode 100644 (file)
index 0000000..7e437d6
--- /dev/null
@@ -0,0 +1,83 @@
+import os\r
+import sys\r
+import unittest\r
+from BaseLib.Main.vwxGUI.SearchGridManager import TorrentSearchGridManager\r
+\r
+manager = TorrentSearchGridManager(None)\r
+\r
+class Test_SearchGridManager(unittest.TestCase):\r
+\r
+    \r
+    def test_sort(self):\r
+        manager.hits = []\r
+        hit = {'infohash':'1', 'num_seeders':23, 'votes':12, 'subscriptions':34}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'2', 'num_seeders':3, 'votes':2, 'subscriptions':4}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'3', 'num_seeders':256, 'votes':1, 'subscriptions':3}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'4', 'num_seeders':9656, 'votes':12, 'subscriptions':33}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'5', 'num_seeders':28, 'votes':1, 'subscriptions':2}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'6', 'num_seeders':2367, 'votes':-1, 'subscriptions':3}\r
+        manager.hits.append(hit)\r
+        manager.sort()\r
+        \r
+        print >> sys.stderr, repr(manager.hits)\r
+        \r
+        #\r
+        # Process: for each factor, scale it (f - mean(f)) / sd(f). Then sum all factors and order by this sum. \r
+        # This results in :\r
+        self.assertEquals('4', manager.hits[0]['infohash'])\r
+        self.assertEquals('1', manager.hits[1]['infohash'])\r
+        self.assertEquals('6', manager.hits[2]['infohash'])\r
+        self.assertEquals('2', manager.hits[3]['infohash'])\r
+        self.assertEquals('3', manager.hits[4]['infohash'])\r
+        self.assertEquals('5', manager.hits[5]['infohash'])\r
+        \r
+        \r
+    def test_sort_empty(self):\r
+        manager.hits = []\r
+        manager.sort()\r
+        \r
+        self.assertEquals(0, len(manager.hits))\r
+\r
+    def test_sort_equal_torrents(self):\r
+        manager.hits = []\r
+        hit = {'infohash':'1', 'num_seeders':3, 'votes':2, 'subscriptions':4}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'2', 'num_seeders':3, 'votes':2, 'subscriptions':4}\r
+        manager.hits.append(hit)\r
+        manager.sort()\r
+        \r
+        self.assertEquals('1', manager.hits[0]['infohash'])\r
+        self.assertEquals('2', manager.hits[1]['infohash'])\r
+\r
+    def test_sort_some_zeros(self):\r
+        manager.hits = []\r
+        hit = {'infohash':'1', 'num_seeders':1, 'votes':10, 'subscriptions':3}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'2', 'num_seeders':3, 'votes':0, 'subscriptions':4}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'3', 'num_seeders':25, 'votes':1, 'subscriptions':3}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'4', 'num_seeders':96, 'votes':0, 'subscriptions':0}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'5', 'num_seeders':28, 'votes':0, 'subscriptions':0}\r
+        manager.hits.append(hit)\r
+        hit = {'infohash':'6', 'num_seeders':23, 'votes':-3, 'subscriptions':3}\r
+        manager.hits.append(hit)\r
+        manager.sort()\r
+        \r
+        print >> sys.stderr, repr(manager.hits)\r
+        \r
+        #\r
+        # Process: for each factor, scale it (f - mean(f)) / sd(f). Then sum all factors and order by this sum. \r
+        # This results in :\r
+        self.assertEquals('1', manager.hits[0]['infohash'])\r
+        self.assertEquals('4', manager.hits[1]['infohash'])\r
+        self.assertEquals('3', manager.hits[2]['infohash'])\r
+        self.assertEquals('2', manager.hits[3]['infohash'])\r
+        self.assertEquals('6', manager.hits[4]['infohash'])\r
+        self.assertEquals('5', manager.hits[5]['infohash'])\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_secure_overlay.bat b/instrumentation/next-share/BaseLib/Test/test_secure_overlay.bat
new file mode 100644 (file)
index 0000000..44a046b
--- /dev/null
@@ -0,0 +1,23 @@
+set PYTHONPATH=..\..\r
+\r
+python test_secure_overlay.py singtest_connect_dns_to_dead_peer\r
+python test_secure_overlay.py singtest_connect_dns_to_live_peer\r
+python test_secure_overlay.py singtest_connect_to_dead_peerA\r
+python test_secure_overlay.py singtest_connect_to_dead_peerB\r
+python test_secure_overlay.py singtest_connect_to_live_peer\r
+python test_secure_overlay.py singtest_connect_twice_to_live_peer\r
+python test_secure_overlay.py singtest_send_unopenedA\r
+python test_secure_overlay.py singtest_send_unopenedB\r
+python test_secure_overlay.py singtest_send_local_close\r
+python test_secure_overlay.py singtest_send_remote_close\r
+python test_secure_overlay.py singtest_send_opened\r
+python test_secure_overlay.py singtest_close_unopened\r
+python test_secure_overlay.py singtest_close_opened\r
+python test_secure_overlay.py singtest_receive\r
+python test_secure_overlay.py singtest_got_conn_incoming\r
+python test_secure_overlay.py singtest_got_conn_outgoing\r
+python test_secure_overlay.py singtest_got_conn_local_close\r
+python test_secure_overlay.py singtest_got_conn_remote_close\r
+\r
+python test_connect_overlay.py singtest_connect_overlay\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_secure_overlay.py b/instrumentation/next-share/BaseLib/Test/test_secure_overlay.py
new file mode 100644 (file)
index 0000000..1a2dc21
--- /dev/null
@@ -0,0 +1,699 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# This test checks the new SecureOverlay class created in Fall 2006
+#
+# Note that we start a new Python interpreter for each test case.
+# Also note we create 2 peers and thus two networking stacks. In principle, 
+# they should use two different SecureOverlay instances (not a singleton), but 
+# there may be some interference.
+# 
+# To properly follow the test, enable debugging on BitTornado/SocketHandler,
+# BitTornado/ServerPortHandler and BitTornado/Rawserver in addition to
+# Tribler/Overlay/SecureOverlay
+#
+#
+
+import sys
+import os
+import unittest
+from threading import Event, Thread, currentThread
+from socket import error as socketerror
+from time import sleep
+import tempfile
+from traceback import print_exc
+import shutil
+
+from BaseLib.Core.BitTornado.RawServer import RawServer
+from BaseLib.Core.BitTornado.ServerPortHandler import MultiHandler
+from BaseLib.Core.BitTornado.BT1.MessageID import GET_METADATA
+
+from M2Crypto import EC
+from BaseLib.Core.Overlay.SecureOverlay import SecureOverlay, OLPROTO_VER_CURRENT
+import BaseLib.Core.CacheDB.sqlitecachedb as sqlitecachedb  
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import PeerDBHandler
+from BaseLib.Core.Utilities.utilities import show_permid_short
+
+DEBUG = False
+
+class FakeUserCallbackHandler:
+    def notify(self, *args):
+        pass
+
+class FakeSession:
+    
+    def __init__(self,lm,keypair,permid,listen_port):
+        self.lm = lm
+        self.keypair = keypair
+        self.permid = permid
+        self.listen_port = listen_port
+        self.uch = FakeUserCallbackHandler()
+
+    def get_permid(self):
+        return self.permid
+        
+    def get_listen_port(self):
+        return self.listen_port
+
+# Thread must come as first parent class!
+class Peer(Thread):
+    def __init__(self,testcase,port,secover):
+        Thread.__init__(self)
+        self.setDaemon(True)
+
+        self.testcase = testcase
+
+        self.doneflag = Event()
+        config = {}
+        config['timeout_check_interval'] = 100000
+        config['timeout'] = 100000
+        config['ipv6_enabled'] = 0
+        config['minport'] = port
+        config['maxport'] = port+5
+        config['random_port'] = 0
+        config['bind'] = ''
+        config['ipv6_binds_v4'] = 0
+        config['max_message_length'] = 2 ** 23
+        config['torrent_collecting_dir'] = config['state_dir'] = config['install_dir'] = tempfile.mkdtemp()
+        config['peer_icon_path'] = 'icons'
+
+        self.rawserver = RawServer(self.doneflag,
+                                   config['timeout_check_interval'],
+                                   config['timeout'],
+                                   ipv6_enable = config['ipv6_enabled'],
+                                   failfunc = self.report_failure,
+                                   errorfunc = self.report_error)
+        while 1:
+            try:
+                self.listen_port = self.rawserver.find_and_bind(0, 
+                                config['minport'], config['maxport'], config['bind'], 
+                                reuse = True,
+                                ipv6_socket_style = config['ipv6_binds_v4'], 
+                                randomizer = config['random_port'])
+                print >> sys.stderr,"test: Got listen port", self.listen_port
+                break
+            except socketerror, e:
+                self.report_failure(str(e))
+                msg = "Couldn't bind to listen port - " + str(e)
+                self.report_failure(msg)
+                return
+
+        self.multihandler = MultiHandler(self.rawserver, self.doneflag)
+        # Note: We don't want a singleton, we want
+        # two different instances for peer1 and peer2
+        self.secure_overlay = secover
+
+        self.my_keypair = EC.gen_params(EC.NID_sect233k1)
+        self.my_keypair.gen_key()
+        self.my_permid = str(self.my_keypair.pub().get_der())
+
+
+        self.session = FakeSession(self,self.my_keypair,self.my_permid,self.listen_port)
+        self.peer_db = PeerDBHandler.getInstance()
+
+        self.secure_overlay.register(self,config['max_message_length'])
+        print >>sys.stderr,"Peer: Setting",self.secure_overlay.get_handler(),"as handler at SocketHandler"
+        self.rawserver.sockethandler.set_handler(self.secure_overlay.get_handler())
+        self.secure_overlay.start_listening()
+
+        # Stupid rawserver goes into very long wait if there are no short
+        # term tasks. Emulate this
+        self.rawserver.add_task(self.dummy_task,0)
+
+    def run(self):
+        print >> sys.stderr,"test: MyServer: run called by",currentThread().getName()
+        self.multihandler.listen_forever()
+
+    def report_failure(self,msg):
+        self.testcase.assertRaises(Exception, self.report_failure)
+
+    def report_error(self,msg):
+        self.testcase.assertRaises(Exception, self.report_error)
+
+    def dummy_task(self):
+        self.rawserver.add_task(self.dummy_task,1)
+
+    def get_ext_ip(self):
+        return '127.0.0.1'
+
+    def shutdown(self):
+        self.doneflag.set()
+        self.rawserver.shutdown()
+
+
+class TestSecureOverlay(unittest.TestCase):
+    
+    def setUp(self):
+        self.config_path = tempfile.mkdtemp()
+        config = {}
+        config['state_dir'] = self.config_path
+        config['install_dir'] = os.path.join('..','..')
+        config['torrent_collecting_dir'] = self.config_path
+        config['peer_icon_path'] = os.path.join(self.config_path,'peer_icons')
+        config['superpeer'] = False
+        sqlitecachedb.init(config, self.rawserver_fatalerrorfunc)
+        
+        secover1 = SecureOverlay.getInstance()
+        secover1.resetSingleton()
+        secover2 = SecureOverlay.getInstance()
+        secover2.resetSingleton()
+        
+        self.peer1 = Peer(self,1234,secover1)
+        self.peer2 = Peer(self,5678,secover2)
+        self.peer1.start()
+        self.peer2.start()
+        self.wanted = False
+        self.wanted2 = False
+        self.got = False
+        self.got2 = False
+        self.first = True
+
+        print >>sys.stderr,"test: setUp: peer1 permid is",show_permid_short(self.peer1.my_permid)
+        print >>sys.stderr,"test: setUp: peer2 permid is",show_permid_short(self.peer2.my_permid)
+
+        sleep(2) # let server threads start
+
+    def tearDown(self):
+        print >> sys.stderr,"test: tearDown: waiting 10 secs"
+        sleep(10)
+        if self.wanted and not self.got:
+            self.assert_(False,"callback was not called")
+        if self.wanted2 and not self.got2:
+            self.assert_(False,"other callback was not called")
+        self.peer1.shutdown()
+        self.peer2.shutdown()
+        sleep(5)
+        try:
+            shutil.rmtree(self.config_path)
+        except:
+            # Not fatal if something goes wrong here, and Win32 often gives
+            # spurious Permission Denied errors.
+            #print_exc()
+            pass
+
+    #
+    # connect_dns() to an address that no one responds at
+    #
+    def singtest_connect_dns_to_dead_peer(self):
+        print >> sys.stderr,"test: test_connect_dns_to_dead_peer"
+        self.wanted = True
+        self.peer1.secure_overlay.connect_dns(("127.0.0.1", 22220),self.connect_dns_to_dead_peer_callback)
+        # Arno, 2009-04-23: was 2 secs, somehow the failed event comes in real slow now.
+        sleep(4) # let rawserver thread establish connection, which should fail
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def connect_dns_to_dead_peer_callback(self,exc,dns,permid,selver):
+        print >> sys.stderr,"test: connect_dns_to_dead_peer_callback"
+        self.assert_(exc is not None)
+        self.assert_(dns == ("127.0.0.1", 22220))
+        self.assert_(permid is None)
+        self.got = True
+
+
+    #
+    # connect_dns() to an address that peer2 responds at
+    #
+    def singtest_connect_dns_to_live_peer(self):
+        print >> sys.stderr,"test: test_connect_dns_to_live_peer"
+        self.wanted = True
+        self.peer1.secure_overlay.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 1)
+        self.assert_(self.peer1.secure_overlay.iplport2oc.has_key('127.0.0.1:5678'))
+
+    def connect_dns_to_live_peer_callback(self,exc,dns,permid,selver):
+        print >> sys.stderr,"test: connect_dns_to_live_peer_callback"
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(permid == self.peer2.my_permid)
+        self.got = True
+
+
+    #
+    # connect() to a fake permid
+    #
+    def singtest_connect_to_dead_peerA(self):
+        print >> sys.stderr,"test: test_connect_to_dead_peer"
+        self.wanted = True
+        hispermid = 'blabla'
+        self.peer1.secure_overlay.connect(hispermid,self.connect_to_dead_peerA_callback)
+        sleep(2) # let rawserver thread establish connection, which should fail
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def connect_to_dead_peerA_callback(self,exc,dns,permid,selver):
+        print >> sys.stderr,"test: connect_to_dead_peer_callback"
+        self.assert_(exc is not None)
+        self.assert_(permid == 'blabla')
+        self.got = True
+
+    #
+    # connect() to a real permid for which there is an address in the
+    # database that no one responds at
+    #
+    def singtest_connect_to_dead_peerB(self):
+        print >> sys.stderr,"test: test_connect_to_dead_peerB"
+        self.wanted = True
+
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid, {'ip':"127.0.0.1", 'port':22220})
+
+        self.peer1.secure_overlay.connect(hispermid,self.connect_to_dead_peerB_callback)
+        # Arno, 2009-04-23: was 2 secs, somehow the failed event comes in real slow now.
+        sleep(4) # let rawserver thread establish connection, which should fail
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def connect_to_dead_peerB_callback(self,exc,dns,permid,selver):
+        print >> sys.stderr,"test: connect_to_dead_peerB_callback",exc
+        self.assert_(exc is not None)
+        self.assert_(dns == ("127.0.0.1", 22220))
+        self.assert_(permid == self.peer2.my_permid)
+        self.got = True
+
+
+    #
+    # connect() to peer2 which responds
+    #
+    def singtest_connect_to_live_peer(self):
+        """connect() to the live peer2 must succeed and register one overlay connection."""
+        print >> sys.stderr,"test: test_connect_to_live_peer"
+        self.wanted = True
+
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid,{'ip':"127.0.0.1", 'port':5678})
+
+        self.peer1.secure_overlay.connect(hispermid,self.connect_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 1)
+        self.assert_(self.peer1.secure_overlay.iplport2oc.has_key('127.0.0.1:5678'))
+
+    def connect_to_live_peer_callback(self,exc,dns,permid,selver):
+        """Expects success (exc is None) and peer2's address/permid."""
+        print >> sys.stderr,"test: connect_to_live_peer_callback",exc
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(permid == self.peer2.my_permid)
+        self.got = True
+
+
+    #
+    # connect() to peer2 which responds, and then connect again
+    #
+    def singtest_connect_twice_to_live_peer(self):
+        print >> sys.stderr,"test: test_connect_to_live_peer"
+        self.wanted = True
+        self.wanted2 = True
+        
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid,{'ip':"127.0.0.1", 'port':5678})
+
+        self.peer1.secure_overlay.connect(hispermid,self.connect_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 1)
+        self.assert_(self.peer1.secure_overlay.iplport2oc.has_key('127.0.0.1:5678'))
+        self.peer1.secure_overlay.connect(hispermid,self.connect_to_live_peer_again_callback)
+
+    def connect_to_live_peer_again_callback(self,exc,dns,permid,selver):
+        print >> sys.stderr,"test: connect_to_live_peer_callback",exc
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(permid == self.peer2.my_permid)
+        self.got2 = True
+
+
+    #
+    # send() over a non-existing connection to peer2
+    #
+    def singtest_send_unopenedA(self):
+        """send() with no open connection and no DB address must fail cleanly."""
+        print >> sys.stderr,"test: test_send_unopenedA"
+        self.wanted = True
+        hispermid = self.peer2.my_permid
+        self.peer1.secure_overlay.send(hispermid,'msg=bla',self.send_unopenedA_send_callback)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def send_unopenedA_send_callback(self,exc,permid):
+        """send() callback: expects failure (exc set) for the unopened connection."""
+        print >> sys.stderr,"test: send_unopenedA_send_callback",exc
+        self.assert_(exc is not None)
+        self.assert_(permid == self.peer2.my_permid)
+        self.got = True
+
+
+    #
+    # send() over a non-existing connection to peer2 whose address is in database
+    #
+    def singtest_send_unopenedB(self):
+        """send() with no open connection must fail even when an address is in the DB."""
+        print >> sys.stderr,"test: test_send_unopenedB"
+        self.wanted = True
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid,{'ip':"127.0.0.1", 'port':5678})
+        self.peer1.secure_overlay.send(hispermid,'msg=bla',self.send_unopenedB_send_callback)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def send_unopenedB_send_callback(self,exc,permid):
+        """send() callback: expects failure (exc set); send() does not auto-connect."""
+        print >> sys.stderr,"test: send_unopenedB_send_callback",exc
+        self.assert_(exc is not None)
+        self.assert_(permid == self.peer2.my_permid)
+        self.got = True
+
+
+    #
+    # send() over a connection to peer2 that peer1 closed
+    #
+    def singtest_send_local_close(self):
+        """send() over a connection that peer1 itself already closed must fail."""
+        print >> sys.stderr,"test: test_send_local_close"
+        self.wanted = True
+
+        self.peer1.secure_overlay.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 1)
+        self.assert_(self.peer1.secure_overlay.iplport2oc.has_key('127.0.0.1:5678'))
+
+        hispermid = self.peer2.my_permid
+        self.peer1.secure_overlay.close(hispermid)
+        self.peer1.secure_overlay.send(hispermid,'msg=bla',self.send_local_close_send_callback)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def send_local_close_send_callback(self,exc,permid):
+        """send() callback: expects failure (exc set) on the locally-closed connection."""
+        print >> sys.stderr,"test: send_local_close_send_callback",exc
+        self.assert_(exc is not None)
+        self.assert_(permid == self.peer2.my_permid)
+        self.got = True
+
+    #
+    # send() over a connection to peer2 that peer2 closed
+    #
+    def singtest_send_remote_close(self):
+        """send() over a connection that peer2 closes (from its conns callback) must fail."""
+        print >> sys.stderr,"test: test_send_remote_close"
+
+        self.wanted = True
+        self.wanted2 = True
+
+        # register handler for connections; the handler closes from peer2's side
+        self.peer2.secure_overlay.register_conns_callback(self.send_remote_close_conns_callback)
+
+        self.peer1.secure_overlay.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        # let rawserver thread establish connection, which should succeed
+        # then let rawserver thread close connection, which should succeed
+        # net result is no connection to peer2
+        self.peer1.secure_overlay.send(self.peer2.my_permid,'msg=bla',self.send_remote_close_send_callback)
+        sleep(2)
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def send_remote_close_conns_callback(self,exc,permid,selversion,locally_initiated,hisdns=None):
+        """Peer2's conns callback, invoked twice: first on establish (then we close
+        from peer2's side), second with exc set when the close is reported."""
+        print  >> sys.stderr,"test: send_remote_close_conns_callback",exc,show_permid_short(permid)
+        if self.first:
+            # First invocation: incoming connection from peer1 established.
+            self.assert_(exc is None)
+            self.assert_(permid == self.peer1.my_permid)
+            self.assert_(selversion == OLPROTO_VER_CURRENT)
+            self.assert_(not locally_initiated)
+            self.first = False
+            self.got2 = True
+
+            # Now close it remotely, i.e. from peer2's side.
+            hispermid = self.peer1.my_permid
+            self.peer2.secure_overlay.close(hispermid)
+        else:
+            # Second invocation: the close shows up as an error on the same connection.
+            self.assert_(exc is not None)
+            self.assert_(permid == self.peer1.my_permid)
+            self.assert_(selversion == OLPROTO_VER_CURRENT)
+            self.assert_(not locally_initiated)
+
+    def send_remote_close_send_callback(self,exc,permid):
+        """send() callback: expects failure (exc set) because peer2 closed the connection."""
+        print >> sys.stderr,"test: send_remote_close_send_callback",exc
+        self.assert_(exc is not None)
+        self.assert_(permid == self.peer2.my_permid)
+        self.got = True
+
+
+    #
+    # send() over an open connection to peer2
+    #
+    def singtest_send_opened(self):
+        """send() over a freshly opened connection to peer2 must succeed."""
+        print >> sys.stderr,"test: test_send_opened"
+        self.wanted = True
+        self.wanted2 = True
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid,{'ip':"127.0.0.1", 'port':5678})
+        msg = GET_METADATA+'12345678901234567890'  # message ID + 20-byte infohash-like payload
+        # Bind msg into the callback via the lambda's extra argument.
+        self.peer1.secure_overlay.connect(hispermid,lambda e,d,p,s: self.send_opened_connect_callback(e,d,p,s,msg))
+
+    def send_opened_connect_callback(self,exc,dns,permid,selver,msg):
+        """On successful connect, immediately send msg over the new connection."""
+        print >> sys.stderr,"test: send_opened_connect_callback"
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(permid == self.peer2.my_permid)
+        self.peer1.secure_overlay.send(permid,msg,self.send_opened_send_callback)
+        self.got = True
+
+    def send_opened_send_callback(self,exc,permid):
+        """send() callback: expects success (exc is None) on the open connection."""
+        print >> sys.stderr,"test: send_opened_send_callback"
+        self.assert_(exc is None)
+        self.assert_(permid == self.peer2.my_permid)
+        self.got2 = True
+
+
+    #
+    # close() a non-existing to peer2
+    #
+    def singtest_close_unopened(self):
+        """close() on a non-existing connection must be a harmless no-op."""
+        print >> sys.stderr,"test: test_close_unopened"
+        hispermid = self.peer2.my_permid
+        self.peer1.secure_overlay.close(hispermid)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+
+    #
+    # close() an open connection to peer2
+    #
+    def singtest_close_opened(self):
+        """close() on an open connection must remove it from iplport2oc."""
+        print >> sys.stderr,"test: test_close_opened"
+        self.peer1.secure_overlay.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 1)
+        self.assert_(self.peer1.secure_overlay.iplport2oc.has_key('127.0.0.1:5678'))
+
+        hispermid = self.peer2.my_permid
+        self.peer1.secure_overlay.close(hispermid)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+
+    #
+    # Let peer2 register a receive callback and let peer1 send a message
+    #
+    def singtest_receive(self):
+        """Peer2 registers a receive callback; a message sent by peer1 must arrive there."""
+        print >> sys.stderr,"test: test_receive"
+        self.wanted = True
+        self.wanted2 = True
+        # register handler for messages
+        self.peer2.secure_overlay.register_recv_callback(self.receive_msg_callback)
+
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid,{'ip':"127.0.0.1", 'port':5678})
+        msg = GET_METADATA+'12345678901234567890'  # message ID + 20-byte payload
+        self.peer1.secure_overlay.connect(hispermid,lambda e,d,p,s: self.receive_connect_callback(e,d,p,s,msg))
+
+    def receive_connect_callback(self,exc,dns,permid,selver,msg):
+        """On successful connect, send msg so peer2's receive callback can fire."""
+        print >> sys.stderr,"test: receive_connect_callback"
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(permid == self.peer2.my_permid)
+        self.peer1.secure_overlay.send(permid,msg,self.receive_send_callback)
+        print >> sys.stderr,"test: test_receive exiting"
+
+    def receive_send_callback(self,exc,permid):
+        """send() callback: the message must go out without error."""
+        print >> sys.stderr,"test: receive_send_callback"
+        self.assert_(exc is None)
+        self.assert_(permid == self.peer2.my_permid)
+        self.got2 = True
+
+    def receive_msg_callback(self,permid,selversion,message):
+        """Peer2's receive callback: verifies sender, protocol version and message ID.
+        Returns True to keep the overlay connection open (presumably -- confirm)."""
+        print  >> sys.stderr,"test: testcase succesfully received message"
+        self.got = True
+        self.assert_(message[0] == GET_METADATA)
+        self.assert_(permid == self.peer1.my_permid)
+        self.assert_(selversion == OLPROTO_VER_CURRENT)
+        return True
+
+    #
+    # Let peer2 register a connection callback and let peer1 send a message
+    # which implies setting up a connection
+    #
+    def singtest_got_conn_incoming(self):
+        """Peer2's conns callback must fire for the incoming connection set up by send()."""
+        print >> sys.stderr,"test: test_got_conn_incoming"
+        self.wanted = True
+        self.wanted2 = True
+        # register handler for messages
+        self.peer2.secure_overlay.register_recv_callback(self.receive_msg_callback)
+        # register handler for connections
+        self.peer2.secure_overlay.register_conns_callback(self.got_conn_incoming_conns_callback)
+
+
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid,{'ip':"127.0.0.1", 'port':5678})
+        msg = GET_METADATA+'12345678901234567890'
+        self.peer1.secure_overlay.connect(hispermid,lambda e,d,p,s:self.got_conn_incoming_connect_callback(e,d,p,s,msg))
+
+
+    def got_conn_incoming_connect_callback(self,exc,dns,permid,selver,msg):
+        """On successful connect, send msg; receipt is checked by receive_msg_callback."""
+        print >> sys.stderr,"test: got_conn_incoming_connect_callback",exc
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(permid == self.peer2.my_permid)
+        self.peer1.secure_overlay.send(permid,msg,self.receive_send_callback)
+        print >> sys.stderr,"test: test_got_conn_incoming exiting"
+
+    def got_conn_incoming_conns_callback(self,exc,permid,selversion,locally_initiated,hisdns=None):
+        """Peer2 sees the connection as incoming: not locally initiated, from peer1."""
+        print  >> sys.stderr,"test: got_conn_incoming_conns_callback",exc,show_permid_short(permid)
+        self.assert_(exc is None)
+        self.assert_(permid == self.peer1.my_permid)
+        self.assert_(selversion == OLPROTO_VER_CURRENT)
+        self.assert_(not locally_initiated)
+        self.got = True
+
+
+    #
+    # Let peer1 register a connection callback and let peer1 send a message
+    # which implies setting up a connection
+    #
+    def singtest_got_conn_outgoing(self):
+        """Peer1's own conns callback must fire for the connection it initiates."""
+        print >> sys.stderr,"test: test_got_conn_outgoing"
+        self.wanted = True
+        self.wanted2 = True
+        # register handler for connections
+        self.peer1.secure_overlay.register_conns_callback(self.got_conn_outgoing_conns_callback)
+
+        peer_db = PeerDBHandler.getInstance()
+        hispermid = self.peer2.my_permid
+        peer_db.addPeer(hispermid,{'ip':"127.0.0.1", 'port':5678})
+        msg = GET_METADATA+'12345678901234567890'
+        self.peer1.secure_overlay.connect(hispermid,lambda e,d,p,s:self.got_conn_outgoing_connect_callback(e,d,p,s,msg))
+
+
+    def got_conn_outgoing_connect_callback(self,exc,dns,permid,selver,msg):
+        """connect() callback: expects success; msg is unused here, only the connect matters."""
+        print >> sys.stderr,"test: got_conn_outgoing_connect_callback",exc
+        self.assert_(exc is None)
+        self.assert_(dns == ("127.0.0.1", 5678))
+        self.assert_(permid == self.peer2.my_permid)
+        self.got2 = True
+
+    def got_conn_outgoing_conns_callback(self,exc,permid,selversion,locally_initiated,hisdns=None):
+        """Peer1 sees the connection as outgoing: locally initiated, to peer2."""
+        print  >> sys.stderr,"test: got_conn_outgoing_conns_callback",exc,show_permid_short(permid)
+        self.assert_(exc is None)
+        self.assert_(permid == self.peer2.my_permid)
+        self.assert_(selversion == OLPROTO_VER_CURRENT)
+        self.assert_(locally_initiated)
+        self.got = True
+
+
+
+    #
+    # Let peer2 register a connection callback and let peer1 close the connection
+    # after successful setup.
+    #
+    def singtest_got_conn_local_close(self):
+        """Peer2's conns callback must report the close initiated by peer1."""
+        print >> sys.stderr,"test: test_got_conn_local_close"
+
+        self.wanted = True
+        self.wanted2 = True
+
+        # register handler for connections
+        self.peer2.secure_overlay.register_conns_callback(self.got_conn_local_close_conns_callback)
+
+        self.peer1.secure_overlay.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        sleep(2) # let rawserver thread establish connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 1)
+        self.assert_(self.peer1.secure_overlay.iplport2oc.has_key('127.0.0.1:5678'))
+
+        hispermid = self.peer2.my_permid
+        self.peer1.secure_overlay.close(hispermid)
+        sleep(2) # let rawserver thread close connection, which should succeed
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+
+    def got_conn_local_close_conns_callback(self,exc,permid,selversion,locally_initiated,hisdns=None):
+        """Invoked twice on peer2: first on establish (exc None), then on peer1's close (exc set)."""
+        print  >> sys.stderr,"test: got_conn_local_close_conns_callback",exc,show_permid_short(permid)
+        if self.first:
+            self.assert_(exc is None)
+            self.assert_(permid == self.peer1.my_permid)
+            self.assert_(selversion == OLPROTO_VER_CURRENT)
+            self.assert_(not locally_initiated)
+            self.first = False
+            self.got2 = True
+        else:
+            self.assert_(exc is not None)
+            self.assert_(permid == self.peer1.my_permid)
+            self.assert_(selversion == OLPROTO_VER_CURRENT)
+            self.assert_(not locally_initiated)
+            self.got = True
+
+    #
+    # Let peer2 register a connection callback and let peer2 close the connection
+    # after successful setup.
+    #
+    def singtest_got_conn_remote_close(self):
+        """Peer2's conns callback closes the connection itself; peer1 must end with none."""
+        print >> sys.stderr,"test: test_got_conn_remote_close"
+
+        self.wanted = True
+        self.wanted2 = True
+
+        # register handler for connections; the handler performs the remote close
+        self.peer2.secure_overlay.register_conns_callback(self.got_conn_remote_close_conns_callback)
+
+        self.peer1.secure_overlay.connect_dns(("127.0.0.1", 5678),self.connect_dns_to_live_peer_callback)
+        # let rawserver thread establish connection, which should succeed
+        # then let rawserver thread close connection, which should succeed
+        # net result is no connection to peer2
+        sleep(2)
+        self.assert_(len(self.peer1.secure_overlay.iplport2oc) == 0)
+
+    def got_conn_remote_close_conns_callback(self,exc,permid,selversion,locally_initiated,hisdns=None):
+        """Invoked twice on peer2: first on establish (then peer2 closes), then with exc set."""
+        print  >> sys.stderr,"test: got_conn_remote_close_conns_callback",exc,show_permid_short(permid)
+        if self.first:
+            self.assert_(exc is None)
+            self.assert_(permid == self.peer1.my_permid)
+            self.assert_(selversion == OLPROTO_VER_CURRENT)
+            self.assert_(not locally_initiated)
+            self.first = False
+            self.got2 = True
+
+            # Close from peer2's side; the second invocation reports it as an error.
+            hispermid = self.peer1.my_permid
+            self.peer2.secure_overlay.close(hispermid)
+        else:
+            self.assert_(exc is not None)
+            self.assert_(permid == self.peer1.my_permid)
+            self.assert_(selversion == OLPROTO_VER_CURRENT)
+            self.assert_(not locally_initiated)
+            self.got = True
+
+    def rawserver_fatalerrorfunc(self,e):
+        """ Called by network thread """
+        # Any fatal RawServer error fails the test outright.
+        if DEBUG:
+            print >>sys.stderr,"test_secure_overlay: RawServer fatal error func called",e
+        print_exc()
+        self.assert_(False)
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_so.py <method name>"
+    else:
+        suite.addTest(TestSecureOverlay(sys.argv[1]))
+    
+    return suite
+
+def main():
+    """Run the suite built by test_suite(); strip argv so unittest ignores our test name."""
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_secure_overlay.sh b/instrumentation/next-share/BaseLib/Test/test_secure_overlay.sh
new file mode 100755 (executable)
index 0000000..ee23129
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent 
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+# Keep the test list in one place instead of repeating the command line
+# per test; each case still gets its own interpreter.
+for t in \
+    singtest_connect_dns_to_dead_peer \
+    singtest_connect_dns_to_live_peer \
+    singtest_connect_to_dead_peerA \
+    singtest_connect_to_dead_peerB \
+    singtest_connect_to_live_peer \
+    singtest_connect_twice_to_live_peer \
+    singtest_send_unopenedA \
+    singtest_send_unopenedB \
+    singtest_send_local_close \
+    singtest_send_remote_close \
+    singtest_send_opened \
+    singtest_close_unopened \
+    singtest_close_opened \
+    singtest_receive \
+    singtest_got_conn_incoming \
+    singtest_got_conn_outgoing \
+    singtest_got_conn_local_close \
+    singtest_got_conn_remote_close
+do
+    python test_secure_overlay.py "$t"
+done
+
+python test_connect_overlay.py singtest_connect_overlay
diff --git a/instrumentation/next-share/BaseLib/Test/test_seeding_stats.py b/instrumentation/next-share/BaseLib/Test/test_seeding_stats.py
new file mode 100644 (file)
index 0000000..83cf700
--- /dev/null
@@ -0,0 +1,93 @@
+# Written by Boudewijn Schoon
+# see LICENSE.txt for license information
+
+import sys
+import unittest
+import cPickle
+
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import CrawlerDBHandler
+from BaseLib.Core.CacheDB.SqliteSeedingStatsCacheDB import *
+
+from olconn import OLConnection
+from test_crawler import TestCrawler
+
+DEBUG=True
+
+class TestSeedingStats(TestCrawler):
+    """ 
+    Testing Seeding-Stats statistics gathering using the Crawler framework
+    """
+
+    def test_all(self):
+        """
+        I want to start a Tribler client once and then connect to it
+        many times. So there must be only one test method to prevent
+        setUp() from creating a new client every time.
+
+        The code is constructed so unittest will show the name of the
+        (sub)test where the error occured in the traceback it prints.
+        """
+        self.subtest_invalid_query()
+        self.subtest_valid_query()
+
+    def subtest_invalid_query(self):
+        """
+        Send a CRAWLER_SEEDINGSTATS_QUERY message to the Tribler
+        instance. Execute an invalid SQL query.
+        """
+        print >>sys.stderr, "-"*80, "\ntest: subtest_invalid_query"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+
+        s = OLConnection(self.my_keypair, "localhost", self.hisport)
+
+        # One unpicklable payload and one pickled-but-malformed request list.
+        queries = ["FOO BAR", cPickle.dumps(["select * from category", ""])]
+        for query in queries:
+            self.send_crawler_request(s, CRAWLER_SEEDINGSTATS_QUERY, 0, 0, query)
+
+            # Both must come back with a non-zero error code.
+            error, payload = self.receive_crawler_reply(s, CRAWLER_SEEDINGSTATS_QUERY, 0)
+            assert error != 0, error
+            if DEBUG:
+                print >>sys.stderr, "test_seeding_stats:", payload
+
+#        time.sleep(1)
+        
+    def subtest_valid_query(self):
+        """
+        Send a CRAWLER_SEEDINGSTATS_QUERY message to the Tribler
+        instance. Execute a valid SQL query.
+        """
+        print >>sys.stderr, "-"*80, "\ntest: subtest_valid_query"
+
+        # make sure that the OLConnection IS in the crawler_db
+        crawler_db = CrawlerDBHandler.getInstance()
+        crawler_db.temporarilyAddCrawler(self.my_permid)
+        
+        # test with valid data
+        seedingstats_db = SQLiteSeedingStatsCacheDB.getInstance()
+        seedingstats_db.insertMany("SeedingStats", [(50000, 'foobar', 'dummy_seed', 500, 0, 0), (80000, 'bar', 'dummy_seed', 800, 1, 0)])
+        
+        s = OLConnection(self.my_keypair, "localhost", self.hisport, mylistenport=self.listen_port)
+
+        # A well-formed pickled request: list of ("read", <sql>) tuples.
+        queries = [cPickle.dumps([("read", "SELECT * FROM SeedingStats"), ("read", "SELECT * FROM SeedingStats WHERE crawled = 0")])]
+        for query in queries:
+            self.send_crawler_request(s, CRAWLER_SEEDINGSTATS_QUERY, 0, 0, query)
+
+            error, payload = self.receive_crawler_reply(s, CRAWLER_SEEDINGSTATS_QUERY, 0)
+            assert error == 0, (error, payload)
+
+            if DEBUG:
+                print >>sys.stderr, "test_seeding_stats:", cPickle.loads(payload)
+
+#        time.sleep(1)
+
+if __name__ == "__main__":
+    # test_suite is defined inside the guard so it exists in the __main__
+    # module, where unittest.main resolves defaultTest by name.
+    def test_suite():
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(TestSeedingStats))
+        return suite
+    unittest.main(defaultTest="test_suite")
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_social_overlap.py b/instrumentation/next-share/BaseLib/Test/test_social_overlap.py
new file mode 100644 (file)
index 0000000..3170f42
--- /dev/null
@@ -0,0 +1,316 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import unittest
+import os
+import sys
+import wx
+import time
+from BaseLib.Core.Utilities.Crypto import sha
+from types import StringType, DictType
+from threading import Thread
+from M2Crypto import Rand,EC
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+
+from BaseLib.Core.SocialNetwork.OverlapMsgHandler import ICON_MAX_SIZE
+
+DEBUG=True
+
+class wxServer(Thread):
+    def __init__(self):
+        Thread.__init__(self)
+        self.setDaemon(True)
+        
+        app = wx.App(0)
+        app.MainLoop()
+
+
+class TestSocialOverlap(TestAsServer):
+    """ 
+    Testing SOCIAL_OVERLAP message of Social Network extension V1
+    """
+    
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        # Seed M2Crypto's PRNG from a local pool file (removed in tearDown).
+        Rand.load_file('randpool.dat', -1)
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        # Enable social networking
+        self.config.set_social_networking(True)
+
+        # Give him a usericon to send
+        fn = self.make_filename('usericon-ok.jpg')
+        f = open(fn,"rb")
+        data = f.read()
+        f.close()
+        self.config.set_mugshot(data,'image/jpeg')
+
+
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        # DER-encoded public keys serve as permids; myhash is their SHA digest.
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())        
+        self.myhash = sha(self.mypermid).digest()
+
+
+
+    def tearDown(self):
+        """ override TestAsServer """
+        TestAsServer.tearDown(self)
+        try:
+            os.remove('randpool.dat')
+        except:
+            # Best-effort cleanup; the pool file may not exist.
+            pass
+
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occured in the traceback it prints.
+        """
+        # 1. test good SOCIAL_OVERLAP (non-ASCII name exercises UTF-8 handling)
+        #self.subtest_good_soverlap('Beurre Alexander Lucas')
+        self.subtest_good_soverlap(u'esta\xe7\xe3o04')
+
+        # 2. test various bad SOCIAL_OVERLAP messages
+        self.subtest_bad_not_bdecodable()
+        self.subtest_bad_not_dict1()
+        self.subtest_bad_not_dict2()
+        self.subtest_bad_empty_dict()
+        self.subtest_bad_wrong_dict_keys()
+
+        self.subtest_bad_persinfo()
+
+
+    #
+    # Good SOCIAL_OVERLAP
+    #
+    def subtest_good_soverlap(self,name):
+        """ 
+            test good SOCIAL_OVERLAP messages
+        """
+        print >>sys.stderr,"test: good SOCIAL_OVERLAP"
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = self.create_good_soverlap(name)
+        s.send(msg)
+        resp = s.recv()
+        # First byte is the message ID; the rest is the bencoded reply.
+        self.assert_(resp[0] == SOCIAL_OVERLAP)
+        self.check_soverlap(resp[1:])
+        time.sleep(10)
+        # the other side should not have closed the connection, as
+        # this is all valid, so this should not throw an exception:
+        s.send('bla')
+        s.close()
+
+    def create_good_soverlap(self,name=None):
+        """Build a valid SOCIAL_OVERLAP payload containing a 'persinfo' dict."""
+        d = {}
+        [pi_sig,pi] = self.create_good_persinfo(name)
+
+        d['persinfo'] = pi
+        return self.create_payload(d)
+
+    def create_good_persinfo(self,name=None):
+        """Return [sig, persinfo-dict]; sig is None here (unsigned test data)."""
+        pi = {}
+        if name is not None:
+            pi['name'] = name.encode("UTF-8")
+        else:
+            pi['name'] = 'Beurre Alexander Lucas'
+        pi['icontype'] = 'image/jpeg'
+        pi['icondata'] = self.read_usericon_ok()
+        sig = None
+        return [sig,pi]
+
+    def read_usericon_ok(self):
+        """Return the raw bytes of the known-good test icon."""
+        return self.read_file(self.make_filename('usericon-ok.jpg'))
+
+    def make_filename(self,filename):
+        """ Test assume to be run from new Tribler/Test """
+        return filename
+
+    def read_file(self,filename):
+        """Read a whole file in binary mode and return its contents."""
+        f = open( filename, 'rb')
+        data = f.read()
+        f.close()
+        return data
+    
+    def create_payload(self,r):
+        """Prefix the bencoded body with the SOCIAL_OVERLAP message ID byte."""
+        return SOCIAL_OVERLAP+bencode(r)
+
+    def check_soverlap(self,data):
+        """The reply body must bdecode to a dict carrying 'persinfo'."""
+        d = bdecode(data)
+        self.assert_(type(d) == DictType)
+        self.assert_(d.has_key('persinfo'))
+        self.check_persinfo(d['persinfo'])
+
+    def check_persinfo(self,d):
+        """Validate the 'persinfo' dict: a name plus an icontype/icondata pair."""
+        self.assert_(type(d) == DictType)
+        print >>sys.stderr,"test: persinfo: keys is",d.keys()
+
+        self.assert_(d.has_key('name'))
+        self.assert_(isinstance(d['name'],str))
+        self.assert_(d.has_key('icontype'))
+        if d.has_key('icontype'):  # always true after the assert above; kept only for the log line
+            print >>sys.stderr,"test: persinfo: HAS ICON"
+        self.assert_(d.has_key('icondata'))
+        self.check_usericon(d['icontype'],d['icondata'])
+
+    def check_usericon(self,icontype,icondata):
+        """Icon type must be a 'type/subtype' MIME string; data bounded by ICON_MAX_SIZE."""
+        self.assert_(type(icontype) == StringType)
+        self.assert_(type(icondata) == StringType)
+        idx = icontype.find('/')
+        ridx = icontype.rfind('/')
+        self.assert_(idx != -1)
+        self.assert_(idx == ridx)  # exactly one '/'
+        self.assert_(len(icondata) <= ICON_MAX_SIZE)
+        print >>sys.stderr,"test: check_usericon: len icon is",len(icondata)
+
+    # Bad soverlap
+    #    
+    # Each wrapper feeds one malformed-payload generator to _test_bad.
+    def subtest_bad_not_bdecodable(self):
+        self._test_bad(self.create_not_bdecodable)
+
+    def subtest_bad_not_dict1(self):
+        self._test_bad(self.create_not_dict1)
+
+    def subtest_bad_not_dict2(self):
+        self._test_bad(self.create_not_dict2)
+
+    def subtest_bad_empty_dict(self):
+        self._test_bad(self.create_empty_dict)
+
+    def subtest_bad_wrong_dict_keys(self):
+        self._test_bad(self.create_wrong_dict_keys)
+
+    #
+    # Bad 'persinfo' 
+    #
+    def subtest_bad_persinfo(self):
+        """ Cut a corner """
+        methods = [
+            self.make_persinfo_not_dict1,
+            self.make_persinfo_not_dict2,
+            self.make_persinfo_empty_dict,
+            self.make_persinfo_wrong_dict_keys,
+            self.make_persinfo_name_not_str,
+            self.make_persinfo_icontype_not_str,
+            self.make_persinfo_icontype_noslash,
+            self.make_persinfo_icondata_not_str,
+            self.make_persinfo_icondata_too_big ]
+        for method in methods:
+            # Hmmm... let's get dirty
+            print >> sys.stderr,"\ntest: ",method,
+            # The lambda is called inside this same iteration, so the usual
+            # late-binding pitfall does not bite here.
+            func = lambda: self.create_bad_persinfo(method)
+            self._test_bad(func)
+
+    def _test_bad(self,gen_soverlap_func):
+        """Send one malformed SOCIAL_OVERLAP; the server must drop the connection."""
+        print >>sys.stderr,"test: bad SOCIAL_OVERLAP",gen_soverlap_func
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+        msg = gen_soverlap_func()
+        s.send(msg)
+        time.sleep(5)
+        # the other side should not like this and close the connection
+        self.assert_(len(s.recv())==0)
+        s.close()
+
+    # Generators for malformed top-level SOCIAL_OVERLAP payloads.
+    def create_not_bdecodable(self):
+        return SOCIAL_OVERLAP+"bla"
+
+    def create_not_dict1(self):
+        soverlap = 481
+        return self.create_payload(soverlap)
+
+    def create_not_dict2(self):
+        soverlap = []
+        return self.create_payload(soverlap)
+
+    def create_empty_dict(self):
+        soverlap = {}
+        return self.create_payload(soverlap)
+
+    def create_wrong_dict_keys(self):
+        soverlap = {}
+        soverlap['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        soverlap['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return self.create_payload(soverlap)
+
+
+    #
+    # Bad persinfo
+    #
+    def create_bad_persinfo(self,gen_persinfo_func):
+        """Wrap one malformed 'persinfo' value in an otherwise well-formed payload."""
+        soverlap = {}
+        pi = gen_persinfo_func()
+        soverlap['persinfo'] = pi
+        return self.create_payload(soverlap)
+
+    # Generators for malformed 'persinfo' values; each starts from a good
+    # persinfo and corrupts exactly one field.
+    def make_persinfo_not_dict1(self):
+        return 481
+
+    def make_persinfo_not_dict2(self):
+        return []
+
+    def make_persinfo_empty_dict(self):
+        return {}
+
+    def make_persinfo_wrong_dict_keys(self):
+        pi = {}
+        pi['bla1'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        pi['bla2'] = '\x00\x00\x00\x00\x00\x30\x00\x00'
+        return pi
+
+    def make_persinfo_name_not_str(self):
+        [sig,pi] = self.create_good_persinfo()
+        pi['name'] = 481
+        return pi
+
+    def make_persinfo_icontype_not_str(self):
+        [sig,pi] = self.create_good_persinfo()
+        pi['icontype'] = 481
+        return pi
+
+    def make_persinfo_icontype_noslash(self):
+        [sig,pi] = self.create_good_persinfo()
+        pi['icontype'] = 'image#jpeg'
+        return pi
+
+    def make_persinfo_icondata_not_str(self):
+        [sig,pi] = self.create_good_persinfo()
+        pi['icondata'] = 481
+        return pi
+
+    def make_persinfo_icondata_too_big(self):
+        [sig,pi] = self.create_good_persinfo()
+        pi['icondata'] = "".zfill(ICON_MAX_SIZE+100)
+        return pi
+
+def test_suite():
+    """Standard unittest suite factory for this module."""
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestSocialOverlap))
+    
+    return suite
+
+def sign_data(plaintext,keypair):
+    """Return the DSA-ASN.1 signature of sha(plaintext) made with keypair."""
+    digest = sha(plaintext).digest()
+    return keypair.sign_dsa_asn1(digest)
+
+def verify_data(plaintext,permid,blob):
+    """Verify blob against sha(plaintext) using the DER public key in permid."""
+    pubkey = EC.pub_key_from_der(permid)
+    digest = sha(plaintext).digest()
+    return pubkey.verify_dsa_asn1(digest,blob)
+
+
+if __name__ == "__main__":
+    # No defaultTest here: unittest discovers TestSocialOverlap.test_all itself.
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_sqlitecachedb.py b/instrumentation/next-share/BaseLib/Test/test_sqlitecachedb.py
new file mode 100644 (file)
index 0000000..de1707a
--- /dev/null
@@ -0,0 +1,847 @@
+import os
+import sys
+import unittest
+from traceback import print_exc
+import thread
+from threading import Thread
+from time import time,sleep
+import math
+from random import shuffle
+import apsw
+
+
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, DEFAULT_BUSY_TIMEOUT,CURRENT_MAIN_DB_VERSION
+from bak_tribler_sdb import *    
+
+# Path of the SQL schema used to (re)create the cache DB under test.
+CREATE_SQL_FILE = os.path.join('..',"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql")
+
+import BaseLib.Core.CacheDB.sqlitecachedb
+# Enable the module's test-only DB-upgrade code path before any DB is opened.
+print >>sys.stderr,"TEST: ENABLE DBUPGRADE HACK"
+BaseLib.Core.CacheDB.sqlitecachedb.TEST_SQLITECACHEDB_UPGRADE = True
+
+def init():
+    # Restore a pristine copy of the backup Tribler DB and check that the
+    # schema file exists before any test runs.
+    init_bak_tribler_sdb()
+
+    assert os.path.isfile(CREATE_SQL_FILE)
+
+
+# Module-wide verbosity switches (test_concurrency shadows DEBUG/INFO locally).
+SQLiteCacheDB.DEBUG = False
+DEBUG = True
+INFO = True
+
+class SQLitePerformanceTest:
+    """Ad-hoc benchmark of SQLiteCacheDB browse/similarity/history queries.
+
+    Not a unittest.TestCase itself; it is driven by TestSQLitePerformance
+    below.  Results are printed as wiki-style tables to stdout.
+    """
+    def __init__(self):
+        # Operates on the process-wide SQLiteCacheDB singleton.
+        self.db = SQLiteCacheDB.getInstance()
+        
+    def openDB(self, *args, **argv):
+        # Thin pass-through to the singleton DB instance.
+        self.db.openDB(*args, **argv)
+    
+    def initDB(self, *args, **argv):
+        # Thin pass-through; the index-dropping experiments stay disabled.
+        self.db.initDB(*args, **argv)
+        #self.remove_t_index()
+        #self.remove_p_index()
+        
+    def remove_t_index(self):
+        # Drop Torrent-table indices to measure their impact on browse speed.
+        indices = [
+        'Torrent_length_idx',
+        'Torrent_creation_date_idx',
+        'Torrent_relevance_idx',
+        'Torrent_num_seeders_idx',
+        'Torrent_num_leechers_idx',
+        #'Torrent_name_idx',
+        ]
+        for index in indices:
+            sql = 'drop index ' + index
+            self.db.execute_write(sql)
+            
+    def remove_p_index(self):
+        # Drop Peer-table indices to measure their impact on browse speed.
+        indices = [
+        'Peer_name_idx',
+        'Peer_ip_idx',
+        'Peer_similarity_idx',
+        'Peer_last_seen_idx',
+        'Peer_last_connected_idx',
+        'Peer_num_peers_idx',
+        'Peer_num_torrents_idx'
+        ]
+        for index in indices:
+            sql = 'drop index ' + index
+            self.db.execute_write(sql)
+        
+    def close(self, clean=False):
+        self.db.close(clean=clean)
+    
+    def test(self):
+        # Run the whole benchmark suite; always close the DB afterwards.
+        try:
+            self.testBrowse()
+            self.testBrowseCategory()
+            self.testGetSimilarTorrents(200)
+            self.testGetPeerHistory(2000)
+        finally:
+            self.db.close()
+        
+    #######  performance test units #########
+    def testBrowseItems(self, table_name, limit, order=None, where='', num_pages=50, shuffle_page=True):
+        # Page through table_name with LIMIT/OFFSET queries (limit rows per
+        # page, up to num_pages pages, in random page order by default) and
+        # print per-page timing.  Returns average seconds per page, or 1 if
+        # no page was fetched.  NOTE(review): order/where are interpolated
+        # into SQL directly — fine for this benchmark, not injection-safe.
+        start = time()
+        nrec = self.db.size(table_name)
+        pages = int(math.ceil(1.0*nrec/limit))
+        offsets = []
+        for i in range(pages):
+            offset = i*limit
+            offsets.append(offset)
+        if shuffle_page:
+            shuffle(offsets)
+        sql = "SELECT * FROM %s"%table_name
+        if where:
+            sql += " WHERE %s"%where
+        if order:
+            sql += " ORDER BY %s"%order
+        if limit:
+            sql += " LIMIT %s"%limit
+        sql += " OFFSET ?"
+        nrec = 0
+        npage = 0
+        print 'browse %7s by %14s:'%(table_name, order), 
+        if where:
+            print where,
+        sys.stdout.flush()
+        start2 = time()
+        long_time = 0
+        for offset in offsets[-1*num_pages:]:
+            res = self.db.fetchall(sql, (offset,))
+            nrec += len(res)
+            npage += 1
+            now = time()
+            past = now - start2
+            start2 = now
+            if past>1:
+                # A page took longer than a second; log it and bail out
+                # after too many slow pages.
+                print >> sys.stderr, npage, past
+                sys.stderr.flush()
+                long_time += 1
+                if long_time>=10:   # at most 10 times long waiting
+                    break
+
+        if npage == 0:
+            return 1
+        total_time = time()-start
+        page_time = total_time/npage
+        if page_time > 0:
+            pages_sec = 1/page_time
+        else:
+            pages_sec = 0
+        print '%5.4f %6.1f %4d %2d %5.3f'%(page_time, pages_sec, nrec, npage, total_time)
+        sys.stdout.flush()
+        return page_time
+    
+    def banchTestBrowse(self, table_name, nitems, sort_keys):
+        # (Name is a historical misspelling of "benchTestBrowse"; kept for
+        # the in-class callers.)  Benchmark browsing table_name once per
+        # sort key and return a printable table row:
+        # ['test', nrecs, <page times...>, avg sec/page, avg pages/sec].
+        # NOTE(review): assumes len(sort_keys) >= 2, otherwise the average
+        # over page_times[1:] divides by zero.
+        nrecs = self.db.size(table_name)
+        page_times = []
+        for key in sort_keys:
+            page_time=self.testBrowseItems(table_name, nitems, key)
+            page_times.append(page_time)
+        table_row = page_times[:]
+        table_row.insert(0, nrecs)    # insert second
+        table_row.insert(0, 'test')    # insert first
+        avg_sorted_page_time = sum(page_times[1:])/len(page_times[1:])
+        table_row.insert(len(sort_keys)*2, avg_sorted_page_time)    # insert last
+        table_row.insert(len(sort_keys)*2, 1.0/avg_sorted_page_time)    # insert last
+        return table_row
+
+    def printTableRow(self, table_row):
+        # Print one row in wiki-table syntax: label, record count, the
+        # per-key page times, and the trailing pages/sec figure.
+        print '|| %5s'%table_row[0],
+        print '||%6d'%table_row[1],
+        for i in range(len(table_row[2:-1])):
+            print '|| %5.4f'%table_row[i+2],
+        print '|| %5.1f ||'%table_row[-1]
+        
+    def testBrowse(self):
+        # Benchmark browsing CollectedTorrent and Peer under various sort
+        # orders and print the two result tables.
+        #print "page_time, pages_sec, nrec, num_pages, total_time"
+        nitems = 20
+        table_name = 'CollectedTorrent'
+        torrent_sort_keys = [None, 'length','creation_date', 'num_seeders', 'num_leechers', 'relevance', 'source_id', 'name']
+        torrent_table_row = self.banchTestBrowse(table_name, nitems, torrent_sort_keys)
+        print
+        table_name = 'Peer'
+        peer_sort_keys = [None, 'last_connected', 'num_torrents', 'num_peers', 'similarity', 'name']
+        peer_table_row = self.banchTestBrowse(table_name, nitems, peer_sort_keys)
+        print
+        
+        # 'type' shadows the builtin here; it only selects the header style.
+        type = 'test'
+        if type=='test':
+            print '|| DB Type || #Torrents',
+            for key in torrent_sort_keys:
+                print '||', key, 
+            print '|| avg sec/page || avg pages/sec ||'
+        
+        self.printTableRow(torrent_table_row)
+        
+        if type=='test':
+            print '|| DB Type || #Peers',
+            for key in peer_sort_keys:
+                print '||', key, 
+            print '|| avg sec/page || avg pages/sec ||'
+        
+        self.printTableRow(peer_table_row)
+        print
+        
+    def testBrowseCategory(self):
+        # Benchmark browsing CollectedTorrent filtered per category id
+        # (1..8), sorted by num_seeders, and print the result table.
+        nitems = 20
+        table_name = 'CollectedTorrent'
+        key = 'num_seeders'
+        categories = range(1,9)
+        nrecs = self.db.size(table_name)
+        page_times = []
+        for category in categories:
+            where = 'category_id=%d'%category
+            page_time=self.testBrowseItems(table_name, nitems, key, where)
+            page_times.append(page_time)
+        table_row = page_times[:]
+        table_row.insert(0, nrecs)    # insert second
+        table_row.insert(0, 'test')    # insert first
+        avg_sorted_page_time = sum(page_times[1:])/len(page_times[1:])
+        table_row.insert(len(categories)*2, avg_sorted_page_time)    # insert last
+        table_row.insert(len(categories)*2, 1.0/avg_sorted_page_time)    # insert last
+        
+        # Human-readable names for the category ids used in the header.
+        cat_name = {1: 'Video',
+                    2: 'VideoClips',
+                    3: 'Audio',
+                    4: 'Compressed',
+                    5: 'Document',
+                    6: 'Picture',
+                    7: 'xxx',
+                    8: 'other'}
+
+        print '|| DB Type || #Torrents',
+        for key in categories:
+            print '||', cat_name[key], 
+        print '|| avg sec/page || avg pages/sec ||'
+        
+        self.printTableRow(table_row)
+        print
+        
+    def getNumOwners(self, torrent_id):
+        # Popularity of a torrent: number of peers preferring it.
+        sql = "select count(peer_id) from Preference where torrent_id=?"
+        pop_torrent = self.db.fetchone(sql, (torrent_id,))
+        
+        return pop_torrent
+    
+#    def getTorrentName(self, torrent_id):
+#        torrent_name_sql = "select name from CollectedTorrent where torrent_id=?"
+#        self.cur.execute(torrent_name_sql, (torrent_id,))
+#        name = self.cur.fetchone()
+#        if name is not None:
+#            return name[0]
+#        return None
+    
+    def testGetSimilarTorrents(self, num, num_sim=10):
+        # Benchmark computing "similar torrents" (cosine-style similarity
+        # over co-preference counts) for up to num random torrents, keeping
+        # the top num_sim.  Prints timing; returns avg seconds per torrent
+        # (1 if nothing was sampled).
+        sql = 'select torrent_id from CollectedTorrent'
+        res = self.db.fetchall(sql)
+        shuffle(res)
+        start = time()
+        real_num = 0
+        real_num2 = 0
+        skip_time = 0
+        for torrent_id in res[:num]:
+            real_num += 1
+            torrent_id = torrent_id[0]
+            skip_begin = time()
+            pop_torrent = self.getNumOwners(torrent_id)
+            skip_time += time()-skip_begin
+            if pop_torrent < 2:
+                # Too unpopular for a meaningful similarity ranking.
+                continue
+            sql = """
+                select torrent_id,count(torrent_id) as pop from Preference 
+                where peer_id in
+                (select peer_id from Preference where torrent_id=?) and 
+                torrent_id in (select torrent_id from CollectedTorrent)
+                group by torrent_id 
+            """
+            sim_torrents = self.db.fetchall(sql, (torrent_id,))
+            sim_res = []
+            real_num2 += 1
+
+#            
+            #print len(sim_torrents)
+            if len(sim_torrents) > num:
+                # Rank candidates by sim = co-count / sqrt(pop_i * pop_j).
+                for sim_torrent_id, com in sim_torrents:
+                    if com < 1 or sim_torrent_id==torrent_id:
+                        continue
+                    pop_sim_torrent = self.getNumOwners(sim_torrent_id)
+                    sim = com/(pop_sim_torrent*pop_torrent)**0.5
+                    sim_res.append((sim,sim_torrent_id))
+                sim_res.sort()
+                sim_res.reverse()
+                sim_torrents_id = tuple([int(ti) for (sim,ti) in sim_res[:num_sim]])
+            else:
+                sim_torrents_id = tuple([int(ti) for (ti,co) in sim_torrents])
+
+            if len(sim_torrents_id) > 0:
+                # Build an IN (...) clause; a 1-tuple would repr as "(x,)".
+                if len(sim_torrents_id) == 1:
+                    sim_torrents = '(' + str(sim_torrents_id[0]) +')'
+                else:
+                    sim_torrents = repr(sim_torrents_id)
+                sql = "select name,torrent_id from CollectedTorrent where torrent_id in " + \
+                   sim_torrents + " order by name"
+                sim_names = self.db.fetchall(sql)
+                #for name,ti in sim_names:
+                #    print name, ti
+                
+            
+            #print res
+        past = time()-start
+        if real_num>0:
+            if real_num2>0:
+                print "Time for sim torrent %.4f %.4f"%(past/real_num, (past-skip_time)/real_num2), past, real_num, real_num2
+            else:
+                print "Time for sim torrent %.4f"%(past/real_num), '-', past, real_num, real_num2
+            return past/num
+        return 1
+        
+    # TODO: 
+    # suggest: 1. include torrent name in buddycast 
+    #          2. create a table like pocketlens to maintain sim(Ii,Ij)
+    #          3. torrent in CollectedTorrent table may have no owners due to remove peers
+    #          4. In GUI, we may need a async display for sim torrents
+        
+    def testGetPeerHistory(self, num):
+        # Benchmark fetching the preferred-torrent history of up to num
+        # random peers and print average time per peer.
+        sql = 'select peer_id from Peer'
+        res = self.db.fetchall(sql)
+        shuffle(res)
+        start = time()
+        real_num = 0
+        for peer_id in res[:num]:
+            peer_id = peer_id[0]
+            sql = """select name, torrent_id from CollectedTorrent 
+                     where torrent_id in 
+                     (select torrent_id from Preference where peer_id=?)
+                  """
+            res = self.db.fetchall(sql, (peer_id,))
+            real_num += 1
+        past = time()-start
+        if real_num>0:
+            print "Time for peer history %.4f"%(past/real_num), past, real_num
+        
+
+class TestSQLitePerformance(unittest.TestCase):
+    """unittest driver for SQLitePerformanceTest (single- and multi-threaded)."""
+    
+    def tearDown(self):
+        # Ensure the singleton DB is closed and cleaned up after each test.
+        sqlite_test = SQLitePerformanceTest()
+        sqlite_test.close(clean=True)
+        
+    def test_benchmark_db(self):
+        # Run the full benchmark suite against the restored Tribler DB.
+        sqlite_test = SQLitePerformanceTest()
+        sqlite_test.initDB(TRIBLER_DB_PATH, CREATE_SQL_FILE)
+        sqlite_test.test()
+        sqlite_test.close(clean=True)
+    
+    def _test_thread_benchmark_with_db(self):
+        # Disabled (leading underscore): runs the four benchmark units
+        # concurrently from separate threads against the same DB.
+        class Worker1(Thread):
+            def run(self):
+                sqlite_test = SQLitePerformanceTest()
+                sqlite_test.initDB(TRIBLER_DB_PATH, CREATE_SQL_FILE)
+                sqlite_test.testBrowse()
+                sqlite_test.close()
+        
+        class Worker2(Thread):
+            def run(self):
+                sqlite_test = SQLitePerformanceTest()
+                sqlite_test.initDB(TRIBLER_DB_PATH, CREATE_SQL_FILE)
+                sqlite_test.testBrowseCategory()
+                sqlite_test.close()
+        
+        class Worker3(Thread):
+            def run(self):
+                sqlite_test = SQLitePerformanceTest()
+                sqlite_test.initDB(TRIBLER_DB_PATH, CREATE_SQL_FILE)
+                sqlite_test.testGetSimilarTorrents(200)
+                sqlite_test.close()
+        
+        class Worker4(Thread):
+            def run(self):
+                sqlite_test = SQLitePerformanceTest()
+                sqlite_test.initDB(TRIBLER_DB_PATH, CREATE_SQL_FILE)
+                sqlite_test.testGetPeerHistory(2000)
+                sqlite_test.close()
+        
+        w1 = Worker1()
+        w2 = Worker2()
+        w3 = Worker3()
+        w4 = Worker4()
+        
+        w1.start()
+        w2.start()
+        w3.start()
+        w4.start()
+        
+        # Barrier: wait for all workers before the test returns.
+        w1.join()
+        w2.join()
+        w3.join()
+        w4.join()
+        
+
+class TestSqliteCacheDB(unittest.TestCase):
+    """Basic single-thread tests of the SQLiteCacheDB wrapper on a temp DB."""
+    
+    def setUp(self):
+        # Fresh tmp.db per test.
+        self.db_path = 'tmp.db'
+        if os.path.exists(self.db_path):
+            os.remove(self.db_path) 
+        self.db_name = os.path.split(self.db_path)[1]
+        
+    def tearDown(self):
+        # Close the singleton cleanly and remove the temp DB file.
+        db = SQLiteCacheDB.getInstance()
+        db.close(clean=True)
+        if os.path.exists(self.db_path):
+            os.remove(self.db_path)
+            
+    def test_open_close_db(self):
+        # Open with busytimeout 0 and close again; must not raise.
+        sqlite_test = SQLiteCacheDB.getInstance()
+        sqlite_test.openDB(self.db_path, 0)
+        sqlite_test.close()
+
+    def test_thread_open_close_db(self):
+        # Same open/close cycle, but from a different thread (each thread
+        # gets its own connection); sleep lets the thread finish.
+        thread.start_new_thread(self.test_open_close_db, ())
+        sleep(2)
+                
+    def test_create_temp_db(self):
+        # createDBTable should create the file and the table from raw SQL.
+        sqlite_test = SQLiteCacheDB.getInstance()
+        sql = "create table person(lastname, firstname);"
+        sqlite_test.createDBTable(sql, self.db_path)
+        sqlite_test.close()
+        
+    def basic_funcs(self):
+        # Exercises insert/insertMany/fetchone/fetchall/size/update on a
+        # throwaway 'person' table.  Not named test_* so it can be reused
+        # by other test methods.
+        db = SQLiteCacheDB.getInstance()
+        create_sql = "create table person(lastname, firstname);"
+        db.createDBTable(create_sql, self.db_path)
+        db.insert('person', lastname='a', firstname='b')
+        one = db.fetchone('select * from person')
+        assert one == ('a','b')
+        
+        one = db.fetchone("select lastname from person where firstname == 'b'")
+        assert one == 'a'
+
+        one = db.fetchone("select lastname from person where firstname == 'c'")
+        assert one == None
+
+        values = []
+        for i in range(100):
+            value = (str(i), str(i**2))
+            values.append(value)
+        db.insertMany('person', values)
+        all = db.fetchall('select * from person')
+        assert len(all) == 101
+        
+        all = db.fetchall("select * from person where lastname=='101'")
+        assert all == []
+        
+        num = db.size('person')
+        assert num == 101
+        
+        # Duplicate lastname: fetchone may return either matching row.
+        db.insert('person', lastname='1', firstname='abc')
+        one = db.fetchone("select firstname from person where lastname == '1'")
+        assert one == '1' or one == 'abc'
+        all = db.fetchall("select firstname from person where lastname == '1'")
+        assert len(all) == 2
+        
+        db.update('person', "lastname == '2'", firstname='56')
+        one = db.fetchone("select firstname from person where lastname == '2'")
+        assert one == '56', one
+        
+        # SQLite is dynamically typed: an int value round-trips as int.
+        db.update('person', "lastname == '3'", firstname=65)
+        one = db.fetchone("select firstname from person where lastname == '3'")
+        assert one == 65, one
+        
+        db.update('person', "lastname == '4'", firstname=654, lastname=44)
+        one = db.fetchone("select firstname from person where lastname == 44")
+        assert one == 654, one
+        
+        db.close()
+        
+    def test_basic_funcs_lib0(self):
+        self.basic_funcs()
+        
+    def test_insertPeer(self):
+        # insertPeer with update=False must not overwrite an existing peer;
+        # with update=True it must.
+        create_sql = """
+        CREATE TABLE Peer (
+          peer_id              integer PRIMARY KEY AUTOINCREMENT NOT NULL,
+          permid               text NOT NULL,
+          name                 text,
+          ip                   text,
+          port                 integer,
+          thumbnail            text,
+          oversion             integer,
+          similarity           numeric,
+          friend               integer,
+          superpeer            integer,
+          last_seen            numeric,
+          last_connected       numeric,
+          last_buddycast       numeric,
+          connected_times      integer,
+          buddycast_times      integer,
+          num_peers            integer,
+          num_torrents         integer,
+          num_prefs            integer,
+          num_queries          integer
+        );
+        """
+        db = SQLiteCacheDB.getInstance()
+        db.createDBTable(create_sql, self.db_path)
+        assert db.size('Peer') == 0
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x'}
+        permid = peer_x.pop('permid')
+        db.insertPeer(permid, update=False, **peer_x)
+        assert db.size('Peer') == 1
+        assert db.getOne('Peer', 'name', peer_id=1) == peer_x['name']
+        peer_x['port']=456
+        db.insertPeer(permid, update=False, **peer_x)
+        assert db.getOne('Peer', 'port', peer_id=1) == 234
+        db.insertPeer(permid, update=True, **peer_x)
+        assert db.getOne('Peer', 'port', peer_id=1) == 456
+        
+class TestThreadedSqliteCacheDB(unittest.TestCase):
+    """Multi-threaded tests of SQLiteCacheDB: per-thread connections and
+    reader/writer concurrency under a busy timeout."""
+    def setUp(self):
+        # Fresh tmp.db per test; silence the wrapper's own debug output.
+        self.db_path = 'tmp.db'
+        if os.path.exists(self.db_path):
+            os.remove(self.db_path) 
+        self.db_name = os.path.split(self.db_path)[1]
+        SQLiteCacheDB.DEBUG = False
+        
+    def tearDown(self):
+        db = SQLiteCacheDB.getInstance()
+        db.close(clean=True)
+        del db
+        if os.path.exists(self.db_path):
+            os.remove(self.db_path)
+        
+    def create_db(self, db_path, busytimeout=DEFAULT_BUSY_TIMEOUT):
+        # Create a 'person' table via a temporary .sql schema file, since
+        # initDB takes a schema path rather than raw SQL.
+        create_sql = "create table person(lastname, firstname);"
+        db = SQLiteCacheDB.getInstance()
+        tmp_sql_path = 'tmp.sql'
+        f = open(tmp_sql_path, 'w')
+        f.write(create_sql)
+        f.close()
+        #print "initDB", db_path
+        db.initDB(db_path, tmp_sql_path, busytimeout=busytimeout, check_version=False)
+        os.remove(tmp_sql_path)
+                    
+    def write_data(self):
+        # Insert 101 rows; extra commit() calls check that redundant
+        # commits are harmless.
+        db = SQLiteCacheDB.getInstance()
+        #db.begin()
+        db.insert('person', lastname='a', firstname='b')
+        values = []
+        for i in range(100):
+            value = (str(i), str(i**2))
+            values.append(value)
+        db.insertMany('person', values)
+        db.commit()
+        #db.begin()
+        db.commit()
+        db.commit()
+        db.close()
+        
+    def read_data(self):
+        # Counterpart of write_data: verify the 101 rows, then exercise
+        # further insert/update paths (possibly from another thread).
+        db = SQLiteCacheDB.getInstance()
+        one = db.fetchone('select * from person')
+        assert one == ('a','b'), str(one)
+        
+        one = db.fetchone("select lastname from person where firstname == 'b'")
+        assert one == 'a'
+
+        one = db.fetchone("select lastname from person where firstname == 'c'")
+        assert one == None
+        
+        all = db.fetchall('select * from person')
+        assert len(all) == 101, len(all)
+        
+        num = db.size('person')
+        assert num == 101
+        
+        db.insert('person', lastname='1', firstname='abc')
+        one = db.fetchone("select firstname from person where lastname == '1'")
+        assert one == '1' or one == 'abc'
+        all = db.fetchall("select firstname from person where lastname == '1'")
+        assert len(all) == 2
+        
+        db.update('person', "lastname == '2'", firstname='56')
+        one = db.fetchone("select firstname from person where lastname == '2'")
+        assert one == '56', one
+        
+        db.update('person', "lastname == '3'", firstname=65)
+        one = db.fetchone("select firstname from person where lastname == '3'")
+        assert one == 65, one
+        
+        db.update('person', "lastname == '4'", firstname=654, lastname=44)
+        one = db.fetchone("select firstname from person where lastname == 44")
+        assert one == 654, one
+        db.close()
+
+    def test_open_close_db(self):
+        # Double close and reopen must be harmless.
+        sqlite_test = SQLiteCacheDB.getInstance()
+        sqlite_test.openDB(self.db_path, 1250)
+        sqlite_test.close()
+        sqlite_test.close()
+        sqlite_test.openDB(self.db_path, 0)
+        sqlite_test.close()
+
+    def test_create_temp_db(self):
+        sqlite_test = SQLiteCacheDB.getInstance()
+        sql = "create table person(lastname, firstname);"
+        sqlite_test.createDBTable(sql, self.db_path)
+        sqlite_test.close(clean=True)
+        
+    def basic_funcs(self):
+        # create/write/read in this thread; the sleep gives the commit a
+        # moment to land before reading.
+        self.create_db(self.db_path)
+        self.write_data()
+        sleep(1)
+        self.read_data()
+        
+    def test_basic_funcs_lib0(self):
+        self.basic_funcs()
+
+    def test_new_thread_basic_funcs(self):
+        # test create/write/read db by 3 different threads
+        # 3 seperate connections should be created, one per thread
+        #print >> sys.stderr, '------>>>>> test_new_thread_basic_funcs', threading.currentThread().getName()
+        self.create_db(self.db_path)
+        thread.start_new_thread(self.write_data, ())
+        sleep(2)
+        thread.start_new_thread(self.read_data, ())
+        sleep(2)
+        
+    def test_concurrency(self):
+        # Stress readers and writers against one DB and assert that with a
+        # large busy timeout no BusyError (lock) ever surfaces.  Reader and
+        # Writer run for a fixed period and count the locks they hit.
+        class Reader(Thread):
+            def __init__(self, period):
+                self.period = period
+                Thread.__init__(self)
+                self.setName('Reader.'+self.getName())
+                self.read_locks = 0
+                # Short tag (thread number) used in log lines.
+                self.num = ' R%3s '%self.getName().split('-')[-1]
+                
+            def keep_reading_data(self, period):
+                # Repeatedly SELECT everything for `period` seconds,
+                # counting lock errors in self.read_locks.
+                db = SQLiteCacheDB.getInstance()
+                st = time()
+                oldnum = 0
+                self.all = []
+                self.read_times = 0
+                if DEBUG_R:
+                    print "begin read", self.getName(), period, time()
+                while True:
+                    et = time()
+                    if et-st > period:
+                        break
+                    if DEBUG_R:
+                        print "...start read", self.getName(), time()
+                        sys.stdout.flush()
+                    
+                    try:
+                        self.all = db.fetchall("select * from person")
+                        self.last_read = time()-st
+                        self.read_times += 1
+                    except Exception, msg:
+                        print_exc()
+                        print "*-*", Exception, msg
+                        self.read_locks += 1
+                        if DEBUG:
+                            print >> sys.stdout, "Locked while reading!", self.read_locks
+                            sys.stdout.flush()
+                    else:
+                        if DEBUG_R:
+                            print "...end read", self.getName(), time(), len(self.all)
+                            sys.stdout.flush()
+                    
+#                    num = len(all)
+                    #print "----------- read", self.getName(), num
+#                    if DEBUG_R:
+#                        if num>oldnum:
+#                            print self.getName(), "readed", num-oldnum
+#                            sys.stdout.flush()
+                db.close()
+                if DEBUG_R:
+                    print "done read", self.getName(), len(self.all), time()-st
+                    sys.stdout.flush()
+                    
+                    
+                #assert self.read_locks == 0, self.read_locks
+                
+            def run(self):
+                self.keep_reading_data(self.period)
+        
+        class Writer(Thread):
+            def __init__(self, period, num_write, commit):
+                self.period = period
+                Thread.__init__(self)
+                self.setName('Writer.'+self.getName())
+                self.write_locks = 0
+                self.writes = 0
+                self.commit = commit
+                self.num_write = num_write
+                self.num = ' W%3s '%self.getName().split('-')[-1]
+                
+            def keep_writing_data(self, period, num_write, commit=False):
+                # Repeatedly executemany num_write rows (values contain
+                # quote characters on purpose) for `period` seconds,
+                # counting apsw.BusyError hits in self.write_locks and
+                # recording per-write timings for the summary line.
+                db = SQLiteCacheDB.getInstance()
+                st = time()
+                if DEBUG:
+                    print "begin write", self.getName(), period, time()
+                    sys.stdout.flush()
+                begin_time = time()
+                w_times = []
+                c_times = []
+                self.last_write = 0
+                try:
+                    while True:
+                        st = time()
+                        if st-begin_time > period:
+                            break
+                        #db.begin()
+                        values = []
+                        
+                        for i in range(num_write):
+                            value = (str(i)+'"'+"'", str(i**2)+'"'+"'")
+                            values.append(value)
+                        
+                        try:
+                            st = time()
+                            if DEBUG:
+                                print '-'+self.num + "start write", self.getName(), self.writes, time()-begin_time
+                                sys.stdout.flush()
+                                
+                            sql = 'INSERT INTO person VALUES (?, ?)'
+                            db.executemany(sql, values, commit=commit)
+                            self.last_write = time()-begin_time
+
+                            write_time = time()-st
+                            w_times.append(write_time)
+                            if DEBUG:
+                                print '-'+self.num + "end write", self.getName(), '+', write_time 
+                                sys.stdout.flush()
+                            self.writes += 1
+                        except apsw.BusyError:
+                            # DB was locked longer than the busy timeout.
+                            self.write_locks += 1
+                            if DEBUG:
+                                if commit:
+                                    s = "Writing/Commiting"
+                                else:
+                                    s = "Writing"
+                                print >> sys.stdout, '>'+self.num + "Locked while ", s, self.getName(), self.write_locks, time()-st
+                                sys.stdout.flush()
+                            continue
+                                                
+                        if SLEEP_W >= 0:
+                            sleep(SLEEP_W/1000.0)
+                            
+                        if DO_STH > 0:
+                            do_sth(DO_STH)
+                            
+                except Exception, msg:
+                    print_exc()
+                    print >> sys.stderr, "On Error", time(), begin_time, time()-begin_time, Exception, msg, self.getName()
+                if INFO:
+                    avg_w = avg_c = max_w = max_c = min_w = min_c = -1
+                    if len(w_times) > 0:
+                        avg_w = sum(w_times)/len(w_times)
+                        max_w = max(w_times)  
+                        min_w = min(w_times) 
+                        
+                    output = self.num + " # W Locks: %d;"%self.write_locks + " # W: %d;"%self.writes
+                    output += " Time: %.1f;"%self.last_write + ' Min Avg Max W: %.2f %.2f %.2f '%(min_w, avg_w, max_w)
+                    self.result = output
+                
+                db.commit()
+                db.commit()
+                db.commit() # test if it got problem if it is called more than once
+                db.close()
+                
+            def run(self):
+                self.keep_writing_data(self.period, self.num_write, commit=self.commit)
+                
+        def do_sth(n=300):
+            # Busy-work between writes to vary scheduling.
+            # 1000: 1.4 second
+            # 500: 0.34
+            # 300: 0.125
+            for i in xrange(n):
+                l = range(n)
+                shuffle(l)
+                l.sort()
+            
+
+        def start_testing(nwriters,nreaders,write_period,num_write,read_period, 
+                          db_path, busytimeout, commit):
+            # Spawn the requested readers/writers, join them all, and return
+            # (total read locks, total write locks).
+            self.create_db(db_path, busytimeout)
+            if INFO:
+                print "Busy Timeout:", busytimeout, "milliseconds" 
+                library = 'APSW'
+                print 'Library:', library, 'Writers:', nwriters, 'Readers:', nreaders, \
+                    "Num Writes:", num_write, "Write Period:", write_period, "Read Period:", read_period, "Commit:", commit, "Busytimeout:", busytimeout
+                sys.stdout.flush()
+            writers = []
+            for i in range(nwriters):
+                w = Writer(write_period, num_write, commit)
+                w.start()
+                writers.append(w)
+            
+            readers = []
+            for i in range(nreaders):
+                r = Reader(read_period)
+                r.start()
+                readers.append(r)
+                
+            total_rlock = 0
+            for r in readers:
+                r.join()
+                total_rlock += r.read_locks
+                if INFO:
+                    print >> sys.stdout, r.num, "# R Locks: %d;"%r.read_locks, "# R: %d;"%len(r.all), "Last read: %.3f;"%r.last_read, "Read Times:", r.read_times
+                    sys.stdout.flush()
+                del r
+                
+            total_wlock = 0
+            for w in writers:
+                w.join()
+                total_wlock += w.write_locks
+                if INFO:
+                    print w.result
+                    sys.stdout.flush()
+                del w
+                
+            return total_rlock, total_wlock
+        
+        # Local knobs (shadow the module-level DEBUG/INFO inside the
+        # closures above).  SLEEP_W < 0 disables the inter-write sleep;
+        # DO_STH == 0 disables the busy-work.
+        #sys.setcheckinterval(1)
+        DEBUG_R = False
+        DEBUG = False
+        INFO = False
+        SLEEP_W = -10 # millisecond. -1 to disable, otherwise indicate how long to sleep
+        DO_STH = 0                
+        NLOOPS = 1
+        total_rlock = total_wlock = 0
+        
+        # Current configuration: 1 writer, 0 readers, 5 s, 5000 ms timeout.
+        for i in range(NLOOPS):
+            rlock, wlock = start_testing(nwriters=1, nreaders=0, num_write=100, write_period=5, read_period=5, 
+                          db_path=self.db_path, busytimeout=5000, commit=True)
+            total_rlock += rlock
+            total_wlock += wlock
+
+        db = SQLiteCacheDB.getInstance()
+        all = db.fetchall("select * from person")
+        if INFO:
+            print "Finally inserted", len(all)
+            
+        # With a generous busy timeout there must be no lock errors, and
+        # the writer must have landed at least one row.
+        assert total_rlock == 0 and total_wlock == 0, (total_rlock, total_wlock)
+        assert len(all) > 0, len(all)
+        
+def test_suite():
+    # Collect all three test-case classes into a single unittest suite.
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestSqliteCacheDB))
+    suite.addTest(unittest.makeSuite(TestThreadedSqliteCacheDB))
+    suite.addTest(unittest.makeSuite(TestSQLitePerformance))
+    
+    return suite
+        
+def main():
+    # Restore the backup DB fixture, then run the full suite.
+    init()
+    unittest.main(defaultTest='test_suite')
+
+    
+if __name__ == '__main__':
+    # Entry point when the module is executed directly.
+    main()    
+            
+            
diff --git a/instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.bat b/instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.bat
new file mode 100644 (file)
index 0000000..1ae064c
--- /dev/null
@@ -0,0 +1,49 @@
+set PYTHONPATH=..\..\r
+\r
+python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_size\r
+python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getOne\r
+python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getAll\r
+\r
+python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_get\r
+python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_put\r
+\r
+python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_setSuperPeer\r
+python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_addExternalSuperPeer\r
+\r
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_size\r
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_getFriends\r
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_setFriendState\r
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_addExternalFriend\r
+\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getList\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerSim\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerList\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeers\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_addPeer\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_aa_hasPeer\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_findPeers\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_deletePeer\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPermIDByIP\r
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_loadPeers\r
+\r
+python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_getPrefList\r
+python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPreference\r
+python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPeerPreferences\r
+\r
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtested_functions\r
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_count\r
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_loadTorrents\r
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_add_update_delete_Torrent\r
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_getCollectedTorrentHashes\r
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_freeSpace\r
+\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getPrefList\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getCreationTime\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getRecentLivePrefList\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_hasMyPreference\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_addMyPreference_deletePreference\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_updateProgress\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefListInfohash\r
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefStats\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.py b/instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.py
new file mode 100644 (file)
index 0000000..9223685
--- /dev/null
@@ -0,0 +1,1289 @@
+import os
+import sys
+import unittest
+from traceback import print_exc
+from time import time
+from binascii import unhexlify
+from shutil import copy as copyFile, move
+
+
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, DEFAULT_BUSY_TIMEOUT,CURRENT_MAIN_DB_VERSION
+from bak_tribler_sdb import *    
+
+CREATE_SQL_FILE = os.path.join('..',"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql")
+
+import BaseLib.Core.CacheDB.sqlitecachedb
+print >>sys.stderr,"TEST: ENABLE DBUPGRADE HACK"
+BaseLib.Core.CacheDB.sqlitecachedb.TEST_SQLITECACHEDB_UPGRADE = True
+
+
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import TorrentDBHandler, MyPreferenceDBHandler, MyDBHandler, BasicDBHandler, PeerDBHandler, PreferenceDBHandler, SuperPeerDBHandler, FriendDBHandler, PopularityDBHandler
+from BaseLib.Category.Category import Category
+from bak_tribler_sdb import *
+
+S_TORRENT_PATH_BACKUP = os.path.join(FILES_DIR, 'bak_single.torrent')
+S_TORRENT_PATH = os.path.join(FILES_DIR, 'single.torrent')
+
+M_TORRENT_PATH_BACKUP = os.path.join(FILES_DIR, 'bak_multiple.torrent')    
+M_TORRENT_PATH = os.path.join(FILES_DIR, 'multiple.torrent')    
+
+BUSYTIMEOUT = 5000
+SHOW_NOT_TESTED_FUNCTIONS = False    # Enable this to show the functions not tested yet
+
+def init():
+    init_bak_tribler_sdb()
+    
+    SQLiteCacheDB.getInstance().initDB(TRIBLER_DB_PATH, busytimeout=BUSYTIMEOUT)
+    TorrentDBHandler.getInstance().register(Category.getInstance('..'),'.')
+
+
+def getFuncs2Test(calss_name):
+    return filter(lambda s:s != 'lock' and not s.startswith('__') and s not in dir(BasicDBHandler), dir(calss_name))
+            
+SQLiteCacheDB.DEBUG = False
+
+
+class TestSqliteBasicDBHandler(unittest.TestCase):
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        self.sqlitedb = SQLiteCacheDB.getInstance()
+        self.sqlitedb.initDB(db_path, busytimeout=BUSYTIMEOUT)
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+            
+    def singtest_size(self):
+        table_name = 'Peer'
+        db = BasicDBHandler(self.sqlitedb,table_name)
+        size = db.size()
+        assert size == 3995,size
+
+    def singtest_getOne(self):
+        table_name = 'Peer'
+        db = BasicDBHandler(self.sqlitedb,table_name)
+        
+        ip = db.getOne('ip', peer_id=1)
+        assert ip == '1.1.1.1', ip
+        
+        pid = db.getOne('peer_id', ip='1.1.1.1')
+        assert pid == 1, pid
+        
+        name = db.getOne('name', ip='1.1.1.1', port=1)
+        assert name == 'Peer 1', name
+        
+        name = db.getOne('name', ip='68.108.115.221', port=6882)
+        assert name == None, name
+        
+        tid = db.getOne('peer_id', conj='OR', ip='1.1.1.1', name='Peer 1')
+        assert tid == 1, tid
+        
+        tid = db.getOne('peer_id', conj='OR', ip='1.1.1.1', name='asdfasfasfXXXXXXxx...')
+        assert tid == 1, tid
+
+        tid = db.getOne('peer_id', conj='OR', ip='1.1.1.123', name='Peer 1')
+        assert tid == 1, tid
+
+        lbt = db.getOne('last_buddycast', peer_id=1)
+        assert lbt == 1193379432, lbt
+        
+        name, ip, lbt = db.getOne(('name','ip','last_buddycast'), peer_id=1)
+        assert name == 'Peer 1' and ip == '1.1.1.1' and lbt == 1193379432, (name, ip, lbt)
+        
+        values = db.getOne('*', peer_id=1)
+        # 03/02/10 Boudewijn: In contrast to the content of the
+        # database, the similarity value is not 12.537961593122299 but
+        # 0 because it is reset as the database is upgraded.
+        results = (1, u'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAA6SYI4NHxwQ8P7P8QXgWAP+v8SaMVzF5+fSUHdAMrs6NvL5Epe1nCNSdlBHIjNjEiC5iiwSFZhRLsr', u'Peer 1', u'1.1.1.1', 1, None, 2, 0, 0, 0, 1194966306, 1193379769, 1193379432, 1, 1, 0, 0, 0, 0, 0)
+        
+        for i in range(len(values)):
+            assert values[i] == results[i], (i, values[i], results[i])
+        
+    def singtest_getAll(self):
+        table_name = 'Peer'
+        db = BasicDBHandler(self.sqlitedb,table_name)
+        
+        ips = db.getAll('ip')
+        assert len(ips) == 3995, len(ips)
+        
+        ips = db.getAll('distinct ip')
+        assert len(ips) == 256, len(ips)
+        
+        ips = db.getAll('ip', "ip like '130.%'")
+        assert len(ips) == 16, len(ips)
+        
+        ids = db.getAll('peer_id', 'thumbnail is NULL')
+        assert len(ids) == 3995, len(ids)
+        
+        ips = db.getAll('ip', "ip like '88.%'", port=88, conj='or')
+        assert len(ips) == 16, len(ips)
+        
+        ips = db.getAll('ip', "ip like '88.%'", port=88, order_by='ip')
+        assert len(ips) == 1, len(ips)
+        assert ips[0][0] == '88.88.88.88', ips[0]
+        
+        names = db.getAll('name', "ip like '88.%'", order_by='ip', limit=4, offset=1)
+        assert len(names) == 4
+        assert names[2][0] == 'Peer 856', names
+        # select name from Peer where ip like '88.%' and port==7762 order by ip limit 4 offset 3
+        
+        ips = db.getAll('count(distinct ip), port', group_by='port')
+        # select count(distinct ip), port from Peer group by port 
+        for nip, port in ips:
+            if port == 6881:
+                assert nip == 2842, nip
+                break
+
+
+class TestSqliteMyDBHandler(unittest.TestCase):
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+            
+    def singtest_get(self):
+        db = MyDBHandler.getInstance()
+        value = db.get('version')
+        assert value == str(CURRENT_MAIN_DB_VERSION), value
+        
+    def singtest_put(self):
+        db = MyDBHandler.getInstance()
+        new_ip = '127.0.0.1'
+        db.put('ip', new_ip)
+        value = db.get('ip')
+        assert value == new_ip, value
+
+        new_ip = ''
+        db.put('ip', new_ip)
+        value = db.get('ip')
+        assert value == new_ip, (value, new_ip)
+        
+class TestSuperPeerDBHandler(unittest.TestCase):
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        self.sp1 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x00\\\xdfXv\xffX\xf2\xfe\x96\xe1_]\xf5\x1b\xb4\x91\x91\xa5I\xf0nl\x81\xd2A\xfb\xb7u)\x01T\xa9*)r\x9b\x81s\xb7j\xd2\xecrSg$;\xc8"7s\xecSF\xd3\x0bgK\x1c'
+        self.sp2 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x01\xdb\x80+O\xd9N7`\xfc\xd3\xdd\xf0 \xfdC^\xc9\xd7@\x97\xaa\x91r\x1c\xdeL\xf2n\x9f\x00U\xc1A\xf9Ae?\xd8t}_c\x08\xb3G\xf8g@N! \xa0\x90M\xfb\xca\xcfZ@'
+        
+    def tearDown(self):
+        s = SQLiteCacheDB.getInstance()
+        s.close()
+            
+    def _test_size(self):
+        db = SuperPeerDBHandler.getInstance()
+        size = db.size()
+        assert size == 8, size
+        
+    def _test_getSuperPeerList(self):
+        db = SuperPeerDBHandler.getInstance()
+        sps = db.getSuperPeers()
+        assert self.sp1 in sps
+        assert self.sp2 in sps
+        
+    def singtest_setSuperPeer(self):
+        db = SuperPeerDBHandler.getInstance()
+        
+        sps = db.getSuperPeers()
+        assert len(sps) == 8, len(sps)
+        
+        db.peer_db_handler.addPeer(self.sp1, {'superpeer':0})
+        sps = db.getSuperPeers()
+        assert self.sp1 not in sps
+        assert len(sps) == 7, len(sps)
+        
+        db.peer_db_handler.addPeer(self.sp1, {'superpeer':0})
+        sps = db.getSuperPeers()
+        assert self.sp1 not in sps
+        assert len(sps) == 7
+
+        db.peer_db_handler.addPeer(self.sp1, {'superpeer':1})
+        sps = db.getSuperPeers()
+        assert self.sp1 in sps
+        assert len(sps) == 8
+        
+        db.peer_db_handler.addPeer(self.sp1, {'superpeer':1})
+        sps = db.getSuperPeers()
+        assert self.sp1 in sps
+        assert len(sps) == 8
+        
+    def singtest_addExternalSuperPeer(self):
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x'}
+        db = SuperPeerDBHandler.getInstance()
+        db.addExternalSuperPeer(peer_x)
+        sps = db.getSuperPeers()
+        assert fake_permid_x in sps
+        assert len(sps) == 9, len(sps)
+        
+        db.addExternalSuperPeer(peer_x)
+        sps = db.getSuperPeers()
+        assert fake_permid_x in sps
+        assert len(sps) == 9, len(sps)
+
+        db._db.deletePeer(fake_permid_x, force=True)
+        sps = db.getSuperPeers()
+        assert fake_permid_x not in sps
+        assert len(sps) == 8, len(sps)
+
+        db._db.deletePeer(fake_permid_x, force=True)
+        sps = db.getSuperPeers()
+        assert fake_permid_x not in sps
+        assert len(sps) == 8, len(sps)
+        
+class TestFriendDBHandler(unittest.TestCase):
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        self.sp1 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x00\\\xdfXv\xffX\xf2\xfe\x96\xe1_]\xf5\x1b\xb4\x91\x91\xa5I\xf0nl\x81\xd2A\xfb\xb7u)\x01T\xa9*)r\x9b\x81s\xb7j\xd2\xecrSg$;\xc8"7s\xecSF\xd3\x0bgK\x1c'
+        self.sp2 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x01\xdb\x80+O\xd9N7`\xfc\xd3\xdd\xf0 \xfdC^\xc9\xd7@\x97\xaa\x91r\x1c\xdeL\xf2n\x9f\x00U\xc1A\xf9Ae?\xd8t}_c\x08\xb3G\xf8g@N! \xa0\x90M\xfb\xca\xcfZ@'
+        self.fr1 = str2bin('MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAL/l2IyVa6lc3KAqQyEnR++rIzi+AamnbzXHCxOFAFy67COiBhrC79PLzzUiURbHDx21QA4p8w3UDHLA')
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+            
+    def singtest_size(self):
+        db = FriendDBHandler.getInstance()
+        size = db.size()
+        assert size == 2, size
+        
+    def singtest_getFriends(self):
+        db = FriendDBHandler.getInstance()
+        friends = db.getFriends()
+        assert self.sp1 not in friends
+        assert self.fr1 in friends
+        
+    def singtest_setFriendState(self):
+        db = FriendDBHandler.getInstance()
+        db.setFriendState(self.sp1)
+        assert db.getFriendState(self.sp1)
+        sps = db.getFriends()
+        assert self.sp1 in sps
+        assert len(sps) == 3
+        
+        db.setFriendState(self.sp1)
+        assert db.getFriendState(self.sp1)
+        sps = db.getFriends()
+        assert self.sp1 in sps
+        assert len(sps) == 3
+        
+        db.deleteFriend(self.sp1)
+        assert not db.getFriendState(self.sp1)
+        sps = db.getFriends()
+        assert self.sp1 not in sps
+        assert len(sps) == 2
+        
+        db.deleteFriend(self.sp1)
+        assert not db.getFriendState(self.sp1)
+        sps = db.getFriends()
+        assert self.sp1 not in sps
+        assert len(sps) == 2
+        
+    def singtest_addExternalFriend(self):
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x'}
+        db = FriendDBHandler.getInstance()
+        db.addExternalFriend(peer_x)
+        sps = db.getFriends()
+        assert fake_permid_x in sps
+        assert len(sps) == 3, len(sps)
+        
+        db.addExternalFriend(peer_x)
+        sps = db.getFriends()
+        assert fake_permid_x in sps
+        assert len(sps) == 3, len(sps)
+
+        db._db.deletePeer(fake_permid_x, force=True)
+        sps = db.getFriends()
+        assert fake_permid_x not in sps
+        assert len(sps) == 2, len(sps)
+        
+        db._db.deletePeer(fake_permid_x, force=True)
+        sps = db.getFriends()
+        assert fake_permid_x not in sps
+        assert len(sps) == 2, len(sps)
+        
+        
+class TestSqlitePeerDBHandler(unittest.TestCase):
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        self.sp1 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x00\\\xdfXv\xffX\xf2\xfe\x96\xe1_]\xf5\x1b\xb4\x91\x91\xa5I\xf0nl\x81\xd2A\xfb\xb7u)\x01T\xa9*)r\x9b\x81s\xb7j\xd2\xecrSg$;\xc8"7s\xecSF\xd3\x0bgK\x1c'
+        self.sp2 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x01\xdb\x80+O\xd9N7`\xfc\xd3\xdd\xf0 \xfdC^\xc9\xd7@\x97\xaa\x91r\x1c\xdeL\xf2n\x9f\x00U\xc1A\xf9Ae?\xd8t}_c\x08\xb3G\xf8g@N! \xa0\x90M\xfb\xca\xcfZ@'
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        hp = db.hasPeer(fake_permid_x)
+        assert not hp
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+        
+    def singtest_getList(self):
+        db = PeerDBHandler.getInstance()
+        sp1 = db.getPeer(self.sp1)
+        sp2 = db.getPeer(self.sp2)
+        assert isinstance(sp1, dict)
+        assert isinstance(sp2, dict)
+        print >>sys.stderr,"singtest_GETLIST SP1",`sp1`
+        print >>sys.stderr,"singtest_GETLIST SP1",`sp2`
+        assert sp1['port'] == 628
+        assert sp2['port'] == 3287
+
+    def singtest_getPeerSim(self):
+        db = PeerDBHandler.getInstance()
+        permid_str = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACPJqLjmKeMNRwkCNKkPH51gjQ5e7u4s2vWv9I/AALXtpf+bFPtY8cyFv6OCzisYDo+brgqOxAtuNZwP'
+        permid = str2bin(permid_str)
+        sim = db.getPeerSim(permid)
+        # 03/02/10 Boudewijn: In contrast to the content of the
+        # database, the similarity value is not 5.82119645394964 but 0
+        # because it is reset as the database is upgraded.
+        assert sim == 0
+        
+        permid_str = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAB0XbUrw5b8CrTrMZST1SPyrzjgSzIE6ynALtlZASGAb+figVXRRGpKW6MSal3KnEm1/q0P3JPWrhCE'
+        permid = str2bin(permid_str)
+        sim = db.getPeerSim(permid)
+        assert sim == 0
+        
+    def singtest_getPeerList(self):
+        db = PeerDBHandler.getInstance()
+        peerlist = db.getPeerList()
+        assert len(peerlist) == 3995
+        peerlist.sort()
+        assert bin2str(peerlist[345]) == 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACxVRvG/Gr19EAPJru2Z5gjctEzv973/PJCQIua2ATMP6euq+Kf4gYpdKbsB/PWqJnfY/wSKPHHfIByV'
+
+    def singtest_getPeers(self):
+        db = PeerDBHandler.getInstance()
+        peerlist = db.getPeerList()
+        peerlist.sort()
+        pl = peerlist[:10]
+        peers = db.getPeers(pl, ['permid', 'peer_id', 'ip', 'port', 'name'])
+        #for p in peers: print p
+        assert peers[7]['name'] == 'Peer 7'
+        assert peers[8]['name'] == 'Peer 8'
+        assert peers[1]['ip'] == '1.1.1.1'
+        assert peers[3]['peer_id'] == 3
+        
+    def singtest_addPeer(self):
+        db = PeerDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 
+                  'name':'fake peer x', 'last_seen':12345}
+        oldsize = db.size()
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.size() == oldsize+1, (db.size(), oldsize+1)
+        #db.addPeer(fake_permid_x, peer_x)
+        #assert db.size() == oldsize+1
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '1.2.3.4'
+        assert p['port'] == 234
+        assert p['name'] == 'fake peer x'
+#        dns = db.getPeer(fake_permid_x, ('ip','port'))
+#        assert dns[0] == '1.2.3.4'
+#        assert dns[1] == 234
+#        dns = db.getPeer(fake_permid_x+'abcd', ('ip','port'))
+#        assert dns == None
+        
+        peer_x['ip'] = '4.3.2.1'
+        peer_x['port'] = 432
+        peer_x['last_seen'] = 1234567
+        db.addPeer(fake_permid_x, peer_x, update_dns=False)
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '1.2.3.4'
+        assert p['port'] == 234
+        assert p['last_seen'] == 1234567, p['last_seen']
+
+        peer_x['ip'] = '4.3.2.1'
+        peer_x['port'] = 432
+        peer_x['last_seen'] = 12345
+        db.addPeer(fake_permid_x, peer_x, update_dns=True)
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '4.3.2.1'
+        assert p['port'] == 432
+        assert p['last_seen'] == 12345
+
+        peer_x['ip'] = '1.2.3.1'
+        peer_x['port'] = 234
+        db.addPeer(fake_permid_x, peer_x, update_dns=False)
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '4.3.2.1'
+        assert p['port'] == 432
+        assert p['last_seen'] == 12345
+
+        peer_x['ip'] = '1.2.3.4'
+        peer_x['port'] = 234
+        peer_x['last_seen'] = 1234569
+        db.addPeer(fake_permid_x, peer_x, update_dns=True)
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '1.2.3.4'
+        assert p['port'] == 234
+        assert p['last_seen'] == 1234569
+
+        peer_x['ip'] = '1.2.3.5'
+        peer_x['port'] = 236
+        db.addPeer(fake_permid_x, peer_x, update_dns=True)
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '1.2.3.5'
+        assert p['port'] == 236
+
+        db._db.deletePeer(fake_permid_x, force=True)
+        p = db.getPeer(fake_permid_x)
+        assert p == None
+        assert db.size() == oldsize
+
+    def singtest_aa_hasPeer(self):
+        db = PeerDBHandler.getInstance()
+        assert db.hasPeer(self.sp1)
+        assert db.hasPeer(self.sp2)
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        assert not db.hasPeer(fake_permid_x)
+        
+    def singtest_findPeers(self):
+        db = PeerDBHandler.getInstance()
+        find_list = db.findPeers('ip', '88.88.88.88')
+        assert len(find_list) == 16
+        
+        find_list = db.findPeers('ip', '1.2.3.4')
+        assert len(find_list) == 0
+        
+        db = PeerDBHandler.getInstance()
+        find_list = db.findPeers('permid', self.sp1)
+        assert len(find_list) == 1 and find_list[0]['permid'] == self.sp1
+        #assert len(find_list) == 3 and 901 in find_list
+    
+    def singtest_updatePeer(self):
+        db = PeerDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 
+                  'name':'fake peer x', 'last_seen':12345}
+        oldsize = db.size()
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.size() == oldsize+1, (db.size(), oldsize+1)
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '1.2.3.4'
+        assert p['port'] == 234
+        assert p['name'] == 'fake peer x'
+        
+        db.updatePeer(fake_permid_x, ip='4.3.2.1')
+        db.updatePeer(fake_permid_x, port=432)
+        db.updatePeer(fake_permid_x, last_seen=1234567)
+        p = db.getPeer(fake_permid_x)
+        assert p['ip'] == '4.3.2.1'
+        assert p['port'] == 432
+        assert p['last_seen'] == 1234567
+
+        db._db.deletePeer(fake_permid_x, force=True)
+        p = db.getPeer(fake_permid_x)
+        assert p == None
+        assert db.size() == oldsize
+
+    def singtest_deletePeer(self):
+        db = PeerDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 
+                  'name':'fake peer x', 'last_seen':12345, 'friend':1, 'superpeer':0}
+        oldsize = db.size()
+        p = db.getPeer(fake_permid_x)
+        assert p == None, p
+        
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.size() == oldsize+1, (db.size(), oldsize+1)
+        assert db.hasPeer(fake_permid_x)
+        p = db.getPeer(fake_permid_x)
+        assert p != None
+        
+        db.deletePeer(fake_permid_x, force=False)
+        assert db.hasPeer(fake_permid_x)
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert db.size() == oldsize
+        assert not db.hasPeer(fake_permid_x)
+        
+        p = db.getPeer(fake_permid_x)
+        assert p == None
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert db.size() == oldsize
+        
+        p = db.getPeer(fake_permid_x)
+        assert p == None, p
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert db.size() == oldsize
+        
+    def singtest_updateTimes(self):
+        db = PeerDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 
+                  'name':'fake peer x', 'last_seen':12345, 'connected_times':3}
+        oldsize = db.size()
+        p = db.getPeer(fake_permid_x)
+        assert p == None, p
+        
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize+1, (db.size(), oldsize+1)
+        
+        db.updateTimes(fake_permid_x, 'connected_times')
+        sql = 'select connected_times from Peer where permid='+repr(bin2str(fake_permid_x))
+        ct = db._db.fetchone(sql)
+        assert ct == 4, ct
+        
+        db.updateTimes(fake_permid_x, 'buddycast_times')
+        sql = 'select buddycast_times from Peer where permid='+repr(bin2str(fake_permid_x))
+        ct = db._db.fetchone(sql)
+        assert ct == 1, ct
+        
+        db.updateTimes(fake_permid_x, 'buddycast_times', 3)
+        sql = 'select buddycast_times from Peer where permid='+repr(bin2str(fake_permid_x))
+        ct = db._db.fetchone(sql)
+        assert ct == 4, ct
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert not db.hasPeer(fake_permid_x)
+        
+    def singtest_getPermIDByIP(self):
+        db = PeerDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 
+                  'name':'fake peer x', 'last_seen':12345, 'connected_times':3}
+        oldsize = db.size()
+        p = db.getPeer(fake_permid_x)
+        assert p == None, p
+        
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize+1, (db.size(), oldsize+1)
+        
+        permid = db.getPermIDByIP('1.2.3.4')
+        assert bin2str(permid) == bin2str(fake_permid_x)
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert not db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize
+        
+    def singtest_loadPeers(self):
+        db = PeerDBHandler.getInstance()
+        peer_size = db.size()
+        res = db.getGUIPeers()
+        assert len(res) == 1477, len(res)
+        data = res[0]
+        p = db.getPeer(data['permid'])
+        assert p['name'] == data['name']
+        assert 70 < len(data['permid']) < 90    # must be binary
+        
+class TestPreferenceDBHandler(unittest.TestCase):
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+    
+    def singtest_getPrefList(self):
+        db = PreferenceDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        pl = db.getPrefList(fake_permid_x)
+        assert pl == [], pl
+        
+        truth = {3127:235, 994:20, 19:1, 5:0}
+        permid = {}
+        permid[3127] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcPezgQ13k1MSOaUrCPisWRhYuNT7Tm+q5rUgHFvAWd9b+BcSut6TCniEgHYHDnQ6TH/vxQBqtY8Loag'
+        permid[994] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJUNmwvDaigRaM4cj7cE2O7lessqnnFEQsan7df9AZS8xeNmVsP/XXVrEt4t7e2TNicYmjn34st/sx2P'
+        permid[19] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAJv2YLuIWa4QEdOEs4CPRxQZDwZphKd/xK/tgbcALG198nNdT10znJ2sZYl+OJIvj7YfYp75PrrnWNX'
+        permid[5] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAB0XbUrw5b8CrTrMZST1SPyrzjgSzIE6ynALtlZASGAb+figVXRRGpKW6MSal3KnEm1/q0P3JPWrhCE'
+        
+        for pid in truth:
+            pl = db.getPrefList(str2bin(permid[pid]))
+            assert len(pl) == truth[pid], [pid, len(pl)]
+        
+    def singtest_addPreference(self):
+        db = PeerDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 
+                  'name':'fake peer x', 'last_seen':12345, 'connected_times':3}
+        oldsize = db.size()
+        oldinfohash_size = db._db.size('Torrent')
+        p = db.getPeer(fake_permid_x)
+        assert p == None, p
+        
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize+1, (db.size(), oldsize+1)
+        
+        # fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00\x07*\x86H\xce=\x02'
+        # fake_infoahsh2 = 'fake_infohash_2'+'0R0\x10\x00\x07*\x86H\xce=\x02'
+        # 02/02/10 Boudewijn: infohashes must be 20 bytes long
+        fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00'
+        fake_infoahsh2 = 'fake_infohash_2'+'0R0\x10\x00'
+        prefdb = PreferenceDBHandler.getInstance()
+        oldpref_size = prefdb.size()
+        prefdb.addPreference(fake_permid_x, fake_infoahsh)
+        prefdb.addPreference(fake_permid_x, fake_infoahsh2)
+        assert prefdb.size() == oldpref_size + 2
+        assert oldinfohash_size + 2 == db._db.size('Torrent'), (oldinfohash_size + 2, db._db.size('Torrent'))
+        
+        pl = prefdb.getPrefList(fake_permid_x, return_infohash=True)
+        assert len(pl) == 2
+        assert fake_infoahsh in pl
+        assert fake_infoahsh2 in pl
+
+        pl = prefdb.getPrefList(fake_permid_x, return_infohash=True)
+        assert len(pl) == 2, pl
+        assert fake_infoahsh in pl
+        assert fake_infoahsh2 in pl
+        prefdb._deletePeer(fake_permid_x)
+        pl = prefdb.getPrefList(fake_permid_x)
+        assert pl == []
+        assert prefdb.size() == oldpref_size, (prefdb.size(), oldpref_size)
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert not db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize        
+        
+        # add again
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize+1
+        
+        prefdb.addPreference(fake_permid_x, fake_infoahsh)
+        prefdb.addPreference(fake_permid_x, fake_infoahsh2)
+        assert prefdb.size() == oldpref_size + 2
+        assert oldinfohash_size + 2 == db._db.size('Torrent')
+        
+        pl = prefdb.getPrefList(fake_permid_x, return_infohash=True)
+        assert len(pl) == 2
+        assert fake_infoahsh in pl
+        assert fake_infoahsh2 in pl
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert not db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize      
+        pl = prefdb.getPrefList(fake_permid_x)
+        assert pl == []
+        assert prefdb.size() == oldpref_size, (prefdb.size(), oldpref_size)
+                
+        db._db.deleteInfohash(fake_infoahsh)
+        db._db.deleteInfohash(fake_infoahsh2)
+        tid = db._db.getTorrentID(fake_infoahsh)
+        assert tid is None
+        tid = db._db.getTorrentID(fake_infoahsh2)
+        assert tid is None
+        assert oldinfohash_size == db._db.size('Torrent'), [oldinfohash_size, db._db.size('Torrent')]
+
+    def singtest_addPeerPreferences(self):
+        db = PeerDBHandler.getInstance()
+        fake_permid_x = 'fake_permid_x'+'0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04'
+        peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 
+                  'name':'fake peer x', 'last_seen':12345, 'connected_times':3}
+        oldsize = db.size()
+        oldinfohash_size = db._db.size('Torrent')
+        p = db.getPeer(fake_permid_x)
+        assert p == None, p
+        
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize+1, (db.size(), oldsize+1)
+        
+        # fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00\x07*\x86H\xce=\x02'
+        # fake_infoahsh2 = 'fake_infohash_2'+'0R0\x10\x00\x07*\x86H\xce=\x02'
+        # 02/02/10 Boudewijn: infohashes must be 20 bytes long
+        fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00'
+        fake_infoahsh2 = 'fake_infohash_2'+'0R0\x10\x00'
+        fi = [fake_infoahsh,fake_infoahsh2]
+        prefdb = PreferenceDBHandler.getInstance()
+        oldpref_size = prefdb.size()
+        prefdb.addPreferences(fake_permid_x, fi)
+        assert prefdb.size() == oldpref_size + 2, [prefdb.size(), oldpref_size]
+        assert oldinfohash_size + 2 == db._db.size('Torrent')
+        
+        pl = prefdb.getPrefList(fake_permid_x, return_infohash=True)
+        assert len(pl) == 2
+        assert fake_infoahsh in pl, (fake_infoahsh, pl)
+        assert fake_infoahsh2 in pl, (fake_infoahsh2, pl)
+
+        pl = prefdb.getPrefList(fake_permid_x, return_infohash=True)
+        assert len(pl) == 2, pl
+        assert fake_infoahsh in pl
+        assert fake_infoahsh2 in pl
+        prefdb._deletePeer(fake_permid_x)
+        pl = prefdb.getPrefList(fake_permid_x, return_infohash=True)
+        assert pl == []
+        assert prefdb.size() == oldpref_size, (prefdb.size(), oldpref_size)
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert not db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize        
+        
+        # add again
+        db.addPeer(fake_permid_x, peer_x)
+        assert db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize+1
+        
+        prefdb.addPreferences(fake_permid_x, fi)
+        assert prefdb.size() == oldpref_size + 2
+        assert oldinfohash_size + 2 == db._db.size('Torrent')
+        
+        pl = prefdb.getPrefList(fake_permid_x, return_infohash=True)
+        assert len(pl) == 2
+        assert fake_infoahsh in pl
+        assert fake_infoahsh2 in pl
+        
+        db.deletePeer(fake_permid_x, force=True)
+        assert not db.hasPeer(fake_permid_x)
+        assert db.size() == oldsize      
+        pl = prefdb.getPrefList(fake_permid_x)
+        assert pl == []
+        assert prefdb.size() == oldpref_size, (prefdb.size(), oldpref_size)
+                
+        db._db.deleteInfohash(fake_infoahsh)
+        db._db.deleteInfohash(fake_infoahsh2)
+        tid = db._db.getTorrentID(fake_infoahsh)
+        assert tid is None
+        tid = db._db.getTorrentID(fake_infoahsh2)
+        assert tid is None
+        assert oldinfohash_size == db._db.size('Torrent'), [oldinfohash_size, db._db.size('Torrent')]
+
+###--------------------------------------------------------------------------------------------------------------------------    
+class TestPopularityDBHandler(unittest.TestCase):
+    """Manual tests for PopularityDBHandler.
+
+    Most of these methods print their results for visual inspection
+    instead of asserting; run them one per interpreter via the companion
+    shell script (singleton handlers leak state between tests).
+    """
+    
+    def setUp(self):
+        # Open the shared on-disk test database used by every handler.
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+        
+    def singtest_addPopularity(self):
+        # Insert three popularity records; the row count must grow by 3.
+        print "Testing addPopularity module...\n"
+        pop_db = PopularityDBHandler.getInstance()
+
+        # Positional arguments below follow this handler signature:
+#torrent_id, peer_id, recv_time, calc_age=sys.maxint, num_seeders=-1, num_leechers=-1, num_sources=-1
+        oldpop_size = pop_db.size()
+        pop_db.addPopularity(2,1,1241787491,78,25,456,5)        
+        pop_db.addPopularity(2,1,1241787490,7028,2,45,15)
+        pop_db.addPopularity(50000,1,1241787495,78,25,4546456,5)
+        assert pop_db.size() == oldpop_size + 3
+###--------------------------------------------------------------------------------------------------------------------------    
+
+    def singtest_storePeerPopularity(self):
+        print "Testing storePeerPopularity module...\n"
+        
+        pop_db = PopularityDBHandler.getInstance()
+        oldpop_size = pop_db.size()
+        #(torrent_id, recv_time, calc_age, num_seeders, num_leechers, num_sources)
+        tList=[(10,123456,1000,5,18,8),
+               (11,123457,1001,52,28,5),
+               (12,123458,1002,53,38,8),
+               (12,123459,1003,55,58,9),
+               (12,123451,1004,57,68,12),
+               (12,1234,100,57,68,12)]
+        # NOTE(review): tList is immediately overwritten with an empty list,
+        # so storePeerPopularity receives no records -- confirm this is
+        # intentional and not a debugging leftover.
+        tList=[]
+        pop_db.storePeerPopularity(100, tList)
+        
+        pop_db.addPopularity(12,1,895,78,25,4546456,5)        
+        pop_db.addPopularity(12,18,300000,78,25,4546456,5)
+        pop_db.addPopularity(12,1,1895,8,25,4546456,5)        
+        pop_db.addPopularity(13,18,200,8978,25,4546456,5)
+        pop_db.addPopularity(50000,1,85554544,78,25,4546456,5)
+        
+        #assert pop_db.size() == oldpop_size + 5
+
+###--------------------------------------------------------------------------------------------------------------------------        
+    def singtest_countTorrentPopularityRec(self):
+        # Store records for several torrents, then count those of torrent 10.
+        print "Testing countTorrentPopularity module...\n"
+        
+        pop_db = PopularityDBHandler.getInstance()
+        
+        tList=[(10,123456,1000,5,18,8),
+               (10,123457,1001,52,28,5),
+               (12,123458,2002,53,38,8),
+               (12,123459,1003,55,58,9),
+               (12,123451,4004,57,68,12),
+               (17,123451,1004,57,68,12),
+               (19,123451,1004,57,68,12)]
+        
+        pop_db.storePeerPopularity(100, tList)
+        
+        # Result is inspected manually: (number of records, oldest record).
+        result = pop_db.countTorrentPopularityRec(10, int(time()))
+        print "(num_records, oldest_record) = ( %d, %d)" % (result[0], result[1])
+
+###--------------------------------------------------------------------------------------------------------------------------        
+    def singtest_countTorrentPeerPopularityRec(self):
+        # Count popularity records per (torrent, peer) pair; inspected manually.
+        print "Testing countTorrentPeerPopularity module...\n"
+        
+        pop_db = PopularityDBHandler.getInstance()
+        
+        tList=[(10,123456,1000,5,18,8),
+               (10,123457,1001,52,28,5),
+               (12,123458,1002,53,38,8),
+               (12,123459,6003,55,58,9),
+               (12,123451,3004,57,68,12),
+               (17,123451,1004,57,68,12),
+               (19,123451,1004,57,68,12)]
+        
+        pop_db.storePeerPopularity(100, tList)
+        result = pop_db.countTorrentPeerPopularityRec(12, 100, int(time()))
+        print "(num_records, oldest_record) = ( %d, %d)" % (result[0], result[1])    
+        
+        pop_db.addPopularity(2,1,85554544,78,25,4546456,5)        
+        pop_db.addPopularity(1,1,855545442,78,25,4546456,5)
+        pop_db.addPopularity(1,1,85554523,78,25,4546456,5)
+        pop_db.addPopularity(50000,1,85554544,78,25,4546456,5)
+        result = pop_db.countTorrentPeerPopularityRec(1, 1, int(time()))
+        print "(num_records, oldest_record) = ( %d, %d)" % (result[0], result[1])    
+        
+###--------------------------------------------------------------------------------------------------------------------------    
+    def singtest_deleteOldTorrentRecords(self):
+        # Delete old records of torrent 1 and compare the counts printed
+        # before and after the deletion (manual check).
+        print "Testing deleteOldTorrentRecords module...\n"
+        
+        pop_db = PopularityDBHandler.getInstance()
+        
+        tList=[(10,123456,1000,5,18,8),
+               (10,123457,1001,52,28,5),
+               (12,123458,1002,53,38,8),
+               (12,123459,1003,55,58,9),
+               (12,123451,1004,57,68,12),
+               (17,123451,1004,57,68,12),
+               (19,123451,1004,57,68,12)]
+        pop_db.storePeerPopularity(100, tList)
+
+        pop_db.addPopularity(2,1,85554544,78,25,4546456,5)        
+        pop_db.addPopularity(1,1,855545442,788,25,4546456,5)
+        pop_db.addPopularity(1,1,85554523,78,25,4546456,5)
+        pop_db.addPopularity(50000,1,85554544,78,25,4546456,5)
+        
+        result = pop_db.countTorrentPopularityRec(1, int(time()))
+        print "Before delete: (num_records, oldest_record) = ( %d, %d)" % (result[0], result[1])
+        
+        pop_db.deleteOldTorrentRecords(1,1,int(time()))
+        
+        result = pop_db.countTorrentPopularityRec(1, int(time()))
+        print "After delete: (num_records, oldest_record) = ( %d, %d)" % (result[0], result[1])
+        
+###--------------------------------------------------------------------------------------------------------------------------    
+    def singtest_deleteOldTorrentPeerRecords(self):
+        # Same as above but scoped to a (torrent, peer) pair (manual check).
+        print "Testing deleteOldTorrentRecords module...\n"
+        
+        pop_db = PopularityDBHandler.getInstance()
+        
+        tList=[(10,123456,1000,5,18,8),
+               (10,123457,1001,52,28,5),
+               (12,123458,1002,53,38,8),
+               (12,123459,1003,55,58,9),
+               (12,123451,1004,57,68,12),
+               (17,123451,1004,57,68,12),
+               (19,123451,1004,57,68,12)]
+        pop_db.storePeerPopularity(100, tList)
+
+        pop_db.addPopularity(12,1,85554544,78,25,4546456,5)        
+        pop_db.addPopularity(12,13,855545442,78,25,4546456,5)
+        pop_db.addPopularity(1,1,85554523,78,25,4546456,5)
+        pop_db.addPopularity(50000,1,85554544,78,25,4546456,5)
+        
+        result = pop_db.countTorrentPeerPopularityRec(12,100, int(time()))
+        print "Before delete: (num_records, oldest_record) = ( %d, %d)" % (result[0], result[1])
+        
+        pop_db.deleteOldTorrentPeerRecords(12,100,4, int(time()))
+        
+        result = pop_db.countTorrentPeerPopularityRec(12,100, int(time()))
+        print "After delete: (num_records, oldest_record) = ( %d, %d)" % (result[0], result[1])        
+
+##--------------------------------------------------------------------------------------------------------------------------    
+    def singtest_getPopularityList(self):
+        # Exercise every filter combination of getPopularityList (manual check).
+        print "Testing getPopularityList module...\n"
+         
+        pop_db = PopularityDBHandler.getInstance()
+        
+        tList=[(10,123456,1000,5,18,8),
+               (10,123457,1001,52,28,5),
+               (12,123458,1002,53,38,8),
+               (12,123459,1003,55,58,9),
+               (12,1619999,1004,57,68,12),
+               (17,123460,1004,57,68,12),
+               (19,123451,1004,57,68,12)]
+        
+        pop_db.storePeerPopularity(100, tList)
+        pop_db.addPopularity(12,1,1194958935,78,25,2194958935,5)        
+        pop_db.addPopularity(12,13,855545442,78,25,4546456,5)
+        pop_db.addPopularity(1,1,85554523,78,25,4546456,5)
+        pop_db.addPopularity(50000,1,85554544,78,25,4546456,5)  
+              
+        print pop_db.getPopularityList(peer_id=100)
+        print pop_db.getPopularityList(peer_id=100, torrent_id=12)  
+        print pop_db.getPopularityList(torrent_id=12)  
+        print pop_db.getPopularityList(recv_time_lbound=123457)  
+        print pop_db.getPopularityList(peer_id=100, torrent_id=12, recv_time_ubound=123458,recv_time_lbound=123457)
+        print pop_db.getPopularityList()
+     
+    def singtest_calculateSwarmSize(self):
+        # Populate via singtest_getPopularityList, then print swarm sizes
+        # for a fixed list of torrent ids (manual check).
+        print "Testing calculate swarm size module...\n"
+        pop_db = PopularityDBHandler.getInstance()
+        self.singtest_getPopularityList() 
+        #tempList =[2726, 2059, 899999, 42918, 4269, 4065]
+        tempList =[12,17, 4847, 4816, 4783, 2627]
+        print "swarm size:\n"
+        print pop_db.calculateSwarmSize(tempList, "TorrentIds")
+         
+##--------------------------------------------------------------------------------------------------------------------------    
+class TestTorrentDBHandler(unittest.TestCase):
+
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+
+    def singtested_functions(self):
+        if SHOW_NOT_TESTED_FUNCTIONS:
+            all_funcs = getFuncs2Test(TorrentDBHandler) 
+            tested_funcs = [
+                "register",
+                "getInstance",
+                "hasTorrent",
+                "hasMetaData",
+                "getNumberTorrents", "_getCategoryID",
+                "getTorrents",
+                "size",
+                "getTorrentID",
+                "_addTorrentToDB", "_addTorrentTracker",
+                "getOne",
+                "getTracker",
+                "updateTorrent",
+                "updateTorrentRelevance",
+                "deleteTorrent", "_deleteTorrent", "eraseTorrentFile",
+                "getNumberCollectedTorrents",
+                "getTorrent",
+                "freeSpace",
+                "getInfohash",
+            ]
+            for func in all_funcs:
+                if func not in tested_funcs:
+                    print "TestTorrentDBHandler: not test", func
+                
+#    def singtest_misc(self):
+#        db = TorrentDBHandler.getInstance()
+        
+    def _test_hasTorrent(self):
+        infohash_str = 'AA8cTG7ZuPsyblbRE7CyxsrKUCg='
+        infohash = str2bin(infohash_str)
+        db = TorrentDBHandler.getInstance()
+        assert db.hasTorrent(infohash) == True
+        assert db.hasMetaData(infohash) == True
+        fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00\x07*\x86H\xce=\x02'
+        assert db.hasTorrent(fake_infoahsh) == False
+        assert db.hasMetaData(fake_infoahsh) == False
+        
+    def singtest_count(self):
+        db = TorrentDBHandler.getInstance()
+        start = time()
+        num = db.getNumberTorrents()
+        assert num == 4483
+        
+    def singtest_loadTorrents(self):
+        db = TorrentDBHandler.getInstance()
+        torrent_size = db._db.size('CollectedTorrent')
+        db2 = MyPreferenceDBHandler.getInstance()
+        mypref_size = db2.size()
+        res = db.getTorrents()
+        ### assert len(res) == torrent_size - mypref_size, (len(res), torrent_size - mypref_size)
+        res = db.getTorrents()
+        len(res) == torrent_size
+        data = res[0]
+        #print data
+        assert data['category'][0] in db.category_table.keys(), data['category']
+        assert data['status'] in db.status_table.keys(), data['status']
+        assert data['source'] in db.src_table.keys(), data['source']
+        assert len(data['infohash']) == 20
+                
+    def singtest_add_update_delete_Torrent(self):
+        self.addTorrent()
+        self.updateTorrent()
+        self.deleteTorrent()
+                
+    def addTorrent(self):
+        copyFile(S_TORRENT_PATH_BACKUP, S_TORRENT_PATH)
+        copyFile(M_TORRENT_PATH_BACKUP, M_TORRENT_PATH)
+        
+        db = TorrentDBHandler.getInstance()
+        
+        old_size = db.size()
+        old_src_size = db._db.size('TorrentSource')
+        old_tracker_size = db._db.size('TorrentTracker')
+        
+        s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09')
+        m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
+        
+        sid = db._db.getTorrentID(s_infohash)
+        mid = db._db.getTorrentID(m_infohash)
+                
+        single_torrent_file_path = os.path.join(FILES_DIR, 'single.torrent')
+        multiple_torrent_file_path = os.path.join(FILES_DIR, 'multiple.torrent')
+        
+        single_tdef = TorrentDef.load(single_torrent_file_path)
+        assert s_infohash == single_tdef.get_infohash()
+        src = 'http://www.rss.com/torrent.xml'
+        multiple_tdef = TorrentDef.load(multiple_torrent_file_path)
+        assert m_infohash == multiple_tdef.get_infohash()
+        
+        db.addExternalTorrent(single_tdef, extra_info={'filename':single_torrent_file_path})
+        db.addExternalTorrent(multiple_tdef, source=src, extra_info={'filename':multiple_torrent_file_path})
+        
+        single_torrent_id = db._db.getTorrentID(s_infohash)
+        multiple_torrent_id = db._db.getTorrentID(m_infohash)
+        
+        assert db.getInfohash(single_torrent_id) == s_infohash
+        
+        single_name = 'Tribler_4.1.7_src.zip'
+        multiple_name = 'Tribler_4.1.7_src'
+        
+        assert db.size() == old_size + 2, old_size - db.size()
+        assert old_src_size + 1 == db._db.size('TorrentSource')
+        assert old_tracker_size + 2 == db._db.size('TorrentTracker'), db._db.size('TorrentTracker')-old_tracker_size
+        
+        sname = db.getOne('name', torrent_id=single_torrent_id)
+        assert sname == single_name, (sname,single_name)
+        mname = db.getOne('name', torrent_id=multiple_torrent_id)
+        assert mname == multiple_name, (mname,multiple_name)
+        
+        s_size = db.getOne('length', torrent_id=single_torrent_id)
+        assert s_size == 1583233, s_size
+        m_size = db.getOne('length', torrent_id=multiple_torrent_id)
+        assert m_size == 5358560, m_size
+        
+        cat = db.getOne('category_id', torrent_id=multiple_torrent_id)
+        assert cat == 8, cat    # other
+        sid = db._db.getOne('TorrentSource', 'source_id', name=src)
+        assert sid > 1
+        m_sid = db.getOne('source_id', torrent_id=multiple_torrent_id)
+        assert sid == m_sid
+        s_sid = db.getOne('source_id', torrent_id=single_torrent_id)
+        assert 1 == s_sid
+        s_status = db.getOne('status_id', torrent_id=single_torrent_id)
+        assert s_status == 0
+        
+        m_comment = db.getOne('comment', torrent_id=multiple_torrent_id)
+        comments = 'www.tribler.org'
+        assert m_comment.find(comments)>-1
+        comments = 'something not inside'
+        assert m_comment.find(comments)==-1
+                
+        m_trackers = db.getTracker(m_infohash, 0)    #db._db.getAll('TorrentTracker', 'tracker', 'torrent_id=%d'%multiple_torrent_id)
+        assert len(m_trackers) == 1
+        assert ('http://tpb.tracker.thepiratebay.org/announce',1) in m_trackers, m_trackers
+        
+        s_torrent = db.getTorrent(s_infohash)
+        m_torrent = db.getTorrent(m_infohash)
+        assert s_torrent['name'] == 'Tribler_4.1.7_src.zip', s_torrent['name']
+        assert m_torrent['name'] == 'Tribler_4.1.7_src', m_torrent['name']
+        assert m_torrent['last_check_time'] == 0
+        assert len(s_torrent) == 16
+        assert len(m_torrent) == 16 
+        
+    def updateTorrent(self):
+        db = TorrentDBHandler.getInstance()
+        
+        s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09')
+        m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
+        kw = {}
+        db.updateTorrent(m_infohash, relevance=3.1415926, category=['Videoclips'], 
+                         status='good', progress=23.5, seeder=123, leecher=321, 
+                         last_check_time=1234567, ignore_number=1, retry_number=2, 
+                         other_key1='abcd', other_key2=123)
+        multiple_torrent_id = db._db.getTorrentID(m_infohash)
+        res_r = db.getOne('relevance', torrent_id=multiple_torrent_id)
+        ### assert 3.1415926 == res_r
+        db.updateTorrentRelevance(m_infohash, 1.41421)
+        res_r = db.getOne('relevance', torrent_id=multiple_torrent_id)
+        ### assert 1.41421 == res_r
+        cid = db.getOne('category_id', torrent_id=multiple_torrent_id)
+        ### assert cid == 2, cid
+        sid = db.getOne('status_id', torrent_id=multiple_torrent_id)
+        assert sid == 1
+        p = db.mypref_db.getOne('progress', torrent_id=multiple_torrent_id)
+        assert p == None, p
+        seeder = db.getOne('num_seeders', torrent_id=multiple_torrent_id)
+        assert seeder == 123
+        leecher = db.getOne('num_leechers', torrent_id=multiple_torrent_id)
+        assert leecher == 321
+        last_check_time = db._db.getOne('TorrentTracker', 'last_check', announce_tier=1, torrent_id=multiple_torrent_id)
+        assert last_check_time == 1234567, last_check_time
+        ignore_number = db._db.getOne('TorrentTracker', 'ignored_times', announce_tier=1, torrent_id=multiple_torrent_id)
+        assert ignore_number == 1
+        retry_number = db._db.getOne('TorrentTracker', 'retried_times', announce_tier=1, torrent_id=multiple_torrent_id)
+        assert retry_number == 2
+                
+    def deleteTorrent(self):
+        db = TorrentDBHandler.getInstance()
+        db.torrent_dir = FILES_DIR
+        s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09')
+        m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
+        
+        assert db.deleteTorrent(s_infohash, delete_file=True)
+        assert db.deleteTorrent(m_infohash)
+
+        assert not db.hasTorrent(s_infohash)
+        assert not db.hasTorrent(m_infohash)
+        assert not os.path.isfile(S_TORRENT_PATH)
+        m_trackers = db.getTracker(m_infohash, 0)
+        assert len(m_trackers) == 0
+        
+        # fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00\x07*\x86H\xce=\x02'
+        # 02/02/10 Boudewijn: infohashes must be 20 bytes long
+        fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00'
+        assert not db.deleteTorrent(fake_infoahsh)
+        
+        my_infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
+        my_infohash = str2bin(my_infohash_str_126)
+        assert not db.deleteTorrent(my_infohash)
+        
+    def singtest_getCollectedTorrentHashes(self):
+        db = TorrentDBHandler.getInstance()
+        res = db.getNumberCollectedTorrents()
+        assert res == 4848, res
+        
+    def singtest_freeSpace(self):
+        db = TorrentDBHandler.getInstance()
+        old_res = db.getNumberCollectedTorrents()
+        db.freeSpace(20)
+        res = db.getNumberCollectedTorrents()
+        assert old_res - res == 20
+        init()
+        
+        
+class TestMyPreferenceDBHandler(unittest.TestCase):
+    """Tests for MyPreferenceDBHandler.
+
+    Expected counts (12 preferences, specific infohashes and creation
+    times) are tied to the fixture database restored by init().
+    """
+    
+    def setUp(self):
+        db_path = TRIBLER_DB_PATH
+        db = SQLiteCacheDB.getInstance()
+        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
+        # Preload the handler's in-memory state from the database.
+        mypref_db = MyPreferenceDBHandler.getInstance()
+        mypref_db.loadData()
+        
+    def tearDown(self):
+        SQLiteCacheDB.getInstance().close()
+    
+    def singtest_getPrefList(self):
+        # Fixture database contains exactly 12 of my preferences.
+        db = MyPreferenceDBHandler.getInstance()
+        pl = db.getMyPrefListInfohash()
+        assert len(pl) == 12
+        
+    def singtest_getCreationTime(self):
+        db = MyPreferenceDBHandler.getInstance()
+        infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
+        infohash = str2bin(infohash_str_126)
+        ct = db.getCreationTime(infohash)
+        # Creation time recorded in the fixture for torrent 126.
+        assert ct == 1194966300, ct
+        
+    def singtest_getRecentLivePrefList(self):
+        db = MyPreferenceDBHandler.getInstance()
+        pl = db.getRecentLivePrefList()
+        # 11 of the 12 fixture preferences are "live"; most recent first.
+        assert len(pl) == 11, (len(pl), pl)
+        infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
+        assert bin2str(pl[0]) == infohash_str_126
+        infohash_str_1279 = 'R+grUhp884MnFkt6NuLnnauZFsc='
+        assert bin2str(pl[1]) == infohash_str_1279
+        
+        # A numeric argument caps the number of returned preferences.
+        pl = db.getRecentLivePrefList(8)
+        assert len(pl) == 8, (len(pl), pl)
+        assert bin2str(pl[0]) == infohash_str_126
+        assert bin2str(pl[1]) == infohash_str_1279
+
+    def singtest_hasMyPreference(self):
+        infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
+        infohash_str_1279 = 'R+grUhp884MnFkt6NuLnnauZFsc='
+        db = MyPreferenceDBHandler.getInstance()
+        assert db.hasMyPreference(str2bin(infohash_str_126))
+        assert db.hasMyPreference(str2bin(infohash_str_1279))
+        # fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00\x07*\x86H\xce=\x02'
+        # 02/02/10 Boudewijn: infohashes must be 20 bytes long
+        fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00'
+        assert not db.hasMyPreference(fake_infoahsh)
+            
+    def singtest_addMyPreference_deletePreference(self):
+        # Round-trip: read preference 126, delete it, re-add it twice (once
+        # with defaults, once with the original progress/creation_time) and
+        # verify the stored row each time.
+        db = MyPreferenceDBHandler.getInstance()
+        p = db.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
+        torrent_id = p[0]
+        infohash = db._db.getInfohash(torrent_id)
+        destpath = p[1]
+        progress = p[2]
+        creation_time = p[3]
+        db.deletePreference(infohash)
+        pl = db.getMyPrefListInfohash()
+        assert len(pl) == 11
+        assert infohash not in pl
+
+        # Re-add with only a destination path: progress resets to 0 and
+        # creation_time defaults to (roughly) "now".
+        data = {'destination_path':destpath}
+        db.addMyPreference(infohash, data)
+        p2 = db.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
+        assert p2[0] == p[0] and p2[1] == p[1] and p2[2] == 0 and time()-p2[3] < 10 , p2
+        
+        db.deletePreference(infohash)
+        pl = db.getMyPrefListInfohash()
+        assert len(pl) == 11
+        assert infohash not in pl
+
+        # Re-add with the full original data: the row must match exactly.
+        data = {'destination_path':destpath, 'progress':progress, 'creation_time':creation_time}
+        db.addMyPreference(infohash, data)
+        p3 = db.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
+        assert p3 == p, p3
+        
+    def singtest_updateProgress(self):
+        infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
+        infohash = str2bin(infohash_str_126)
+        db = MyPreferenceDBHandler.getInstance()
+        assert db.hasMyPreference(infohash)
+        torrent_id = db._db.getTorrentID(infohash)
+        db.updateProgress(infohash, 3.14)
+        p = db.getOne('progress', torrent_id=torrent_id)
+        assert p == 3.14
+
+    def singtest_getMyPrefListInfohash(self):
+        db = MyPreferenceDBHandler.getInstance()
+        preflist = db.getMyPrefListInfohash()
+        # Every infohash is 20 raw bytes (SHA1).
+        for p in preflist:
+            assert len(p) == 20
+        assert len(preflist) == 12
+        
+    def singtest_getMyPrefStats(self):
+        db = MyPreferenceDBHandler.getInstance()
+        res = db.getMyPrefStats()
+        assert len(res)==12
+        # Each entry maps a torrent to a 3-tuple of statistics.
+        for k in res:
+            data = res[k]
+            assert len(data) == 3
+
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 3:
+        print "Usage: python test_so.py <class name> <method name>"
+    else:
+        for class_ in unittest.TestCase.__subclasses__():
+            if class_.__name__ == sys.argv[1]:
+                init()
+                suite.addTest(class_(sys.argv[2]))
+    return suite
+
+def main():
+    # Run only test_suite(); pass argv=[sys.argv[0]] so unittest does not
+    # try to parse the <class name> <method name> arguments itself.
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()
+    
+    
diff --git a/instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.sh b/instrumentation/next-share/BaseLib/Test/test_sqlitecachedbhandler.sh
new file mode 100755 (executable)
index 0000000..78fa433
--- /dev/null
@@ -0,0 +1,58 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_size
+python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getOne
+python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getAll
+
+python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_get
+python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_put
+
+python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_setSuperPeer
+python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_addExternalSuperPeer
+
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_size
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_getFriends
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_setFriendState
+python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_addExternalFriend
+
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getList
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerSim
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerList
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeers
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_addPeer
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_aa_hasPeer
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_findPeers
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_deletePeer
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPermIDByIP
+python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_loadPeers
+
+python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_getPrefList
+python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPreference
+python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPeerPreferences
+
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtested_functions
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_count
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_loadTorrents
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_add_update_delete_Torrent
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_getCollectedTorrentHashes
+python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_freeSpace
+
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getPrefList
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getCreationTime
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getRecentLivePrefList
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_hasMyPreference
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_addMyPreference_deletePreference
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_updateProgress
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefListInfohash
+python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefStats
diff --git a/instrumentation/next-share/BaseLib/Test/test_status.py b/instrumentation/next-share/BaseLib/Test/test_status.py
new file mode 100644 (file)
index 0000000..e3f083d
--- /dev/null
@@ -0,0 +1,239 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+import unittest
+import threading
+
+import time
+
+from BaseLib.Core.Statistics.Status import Status
+from BaseLib.Core.Statistics.Status import LivingLabReporter
+
+class TestOnChangeStatusReporter(Status.OnChangeStatusReporter):
+    
+    name = None
+    value = None
+
+    def report(self, element):
+        self.name = element.name
+        self.value = element.value
+
+class TestPeriodicStatusReporter(Status.PeriodicStatusReporter):
+    last_value = None
+
+    def report(self):
+        elements = self.get_elements()
+        # Actually report
+        assert len(elements) == 1
+        self.last_value = elements[0].get_value()
+
+class StatusTest(unittest.TestCase):
+    """
+    Unit tests for the Status class
+
+    
+    """
+    
+    def setUp(self):
+        pass
+    def tearDown(self):
+        pass
+    
+    def testBasic(self):
+
+        status = Status.get_status_holder("UnitTest")
+        status.reset()
+        
+        self.assertNotEqual(status, None)
+
+        self.assertEquals(status.get_name(), "UnitTest")
+        
+    def testInt(self):
+        
+        status = Status.get_status_holder("UnitTest")
+        status.reset()
+        self.assertNotEqual(status, None)
+
+        i = status.create_status_element("TestInteger")
+        self.assertEquals(i.get_name(), "TestInteger")
+
+        x = status.get_status_element("TestInteger")
+        self.assertEquals(x, i)
+
+        # Test set and get values
+        for j in range(0,10):
+            i.set_value(j)
+            self.assertEquals(i.get_value(), j)
+
+        # Clean up
+        status.remove_status_element(i)
+        try:
+            status.get_status_element("TestInteger")
+            self.fail("Remove does not remove status element 'TestInteger'")
+        except Status.NoSuchElementException, e:
+            # Expected
+            pass
+
+    def testInvalid(self):
+        status = Status.get_status_holder("UnitTest")
+        status.reset()
+
+        try:
+            i = status.create_status_element(None)
+            self.fail("Does not throw exception with no name")
+        except AssertionError, e:
+            pass
+
+        try:
+            status.get_status_element(None)
+            self.fail("Invalid get_status_element does not throw exception")
+        except AssertionError,e:
+            pass
+
+        try:
+            status.remove_status_element(None)
+            self.fail("Invalid remove_status_element does not throw exception")
+        except AssertionError,e:
+            pass
+
+        elem = Status.StatusElement("name", "description")
+        try:
+            status.remove_status_element(elem)
+            self.fail("Invalid remove_status_element does not throw exception")
+        except Status.NoSuchElementException,e:
+            pass
+            
+        
+    def testPolicy_ON_CHANGE(self):
+
+        status = Status.get_status_holder("UnitTest")
+        status.reset()
+        reporter = TestOnChangeStatusReporter("On change")
+        status.add_reporter(reporter)
+        i = status.create_status_element("TestInteger")
+
+        for x in range(0, 10):
+            i.set_value(x)
+            if x != reporter.value:
+                self.fail("Callback does not work for ON_CHANGE policy")
+            if reporter.name != "TestInteger":
+                self.fail("On_Change callback get's the wrong parameter, got '%s', expected 'TestInteger'"%reporter.name)
+
+        # Clean up
+        status.remove_status_element(i)
+        
+
+    def testPolicy_PERIODIC(self):
+
+        status = Status.get_status_holder("UnitTest")
+        status.reset()
+
+        reporter = TestPeriodicStatusReporter("Periodic, 0.4sec", 0.4)
+        status.add_reporter(reporter)
+        i = status.create_status_element("TestInteger")
+
+        for x in range(0, 5):
+            i.set_value(x)
+            self.assertEquals(reporter.last_value, None) # Not updated yet
+            
+        time.sleep(1)
+        
+        assert reporter.last_value == 4
+
+        for x in range(5, 9):
+            self.assertEquals(reporter.last_value, 4) # Not updated yet
+            i.set_value(x)
+        time.sleep(1)
+
+        self.assertEquals(reporter.last_value, 8)
+
+        # Clean up
+        status.remove_status_element(i)
+
+        reporter.stop()
+
+    def test_LLReporter_element(self):
+
+        status = Status.get_status_holder("UnitTest")
+        status.reset()
+        reporter = TestLivingLabPeriodicReporter("Living lab test reporter", 1.0)
+        status.add_reporter(reporter)
+        i = status.create_status_element("TestInteger")
+        i.set_value(1233)
+
+        b = status.create_status_element("Binary")
+        b.set_value("".join([chr(n) for n in range(0, 255)]))
+        
+        reporter.wait_for_post(5.0)
+
+        reporter.stop()
+        time.sleep(1)
+        
+        self.assertEquals(len(reporter.get_errors()), 0)
+
+        status.remove_status_element(i)
+        status.remove_status_element(b)
+
+    def test_LLReporter_event(self):
+
+        status = Status.get_status_holder("UnitTest")
+        status.reset()
+        reporter = TestLivingLabPeriodicReporter("Living lab test reporter", 1.0)
+        status.add_reporter(reporter)
+        event = status.create_event("SomeEvent")
+        event.add_value("123")
+        event.add_value("456")
+        status.add_event(event)
+        
+        reporter.wait_for_post(5.0)
+
+        reporter.stop()
+        time.sleep(1)
+        
+        self.assertEquals(len(reporter.get_errors()), 0)
+
+        status.remove_event(event)
+
+class TestLivingLabPeriodicReporter(LivingLabReporter.LivingLabPeriodicReporter):
+
+    def __init__(self, name, report_time):
+        self.errors = []
+        LivingLabReporter.LivingLabPeriodicReporter.__init__(self, name, report_time, "myid", self.count_errors)
+        self.xml = None
+        self.cond = threading.Condition()
+
+    def wait_for_post(self, timeout):
+        self.cond.acquire()
+        try:
+            if self.xml:
+                return True
+        
+            self.cond.wait(timeout)
+            if self.xml:
+                return True
+            raise Exception("Timeout")
+        finally:
+            self.cond.release()
+            
+
+    def post(self, xml):
+        # TODO: Check the XML?
+        print xml
+        self.xml = xml
+        self.cond.acquire()
+        self.cond.notifyAll()
+        self.cond.release()
+
+    def count_errors(self, zero, error):
+        print "ERROR",error
+        self.errors.append(error)
+
+    def get_errors(self):
+        return self.errors
+
+if __name__ == "__main__":
+
+    print "Testing Status module"
+    
+    unittest.main()
+    
+    print "All done"
diff --git a/instrumentation/next-share/BaseLib/Test/test_subtitles.bat b/instrumentation/next-share/BaseLib/Test/test_subtitles.bat
new file mode 100644 (file)
index 0000000..b8e0907
--- /dev/null
@@ -0,0 +1,8 @@
+\r
+set PYTHONPATH=..\..\r
+\r
+python test_subtitles_isolation.py\r
+python test_channelcast_plus_subtitles.py singtest_plain_nickname\r
+python test_channelcast_plus_subtitles.py singtest_unicode_nickname\r
+python test_subtitles_msgs.py singtest_subs_messages\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_subtitles.sh b/instrumentation/next-share/BaseLib/Test/test_subtitles.sh
new file mode 100755 (executable)
index 0000000..1d45092
--- /dev/null
@@ -0,0 +1,8 @@
+
+export PYTHONPATH=../..:"$PYTHONPATH"
+
+python test_subtitles_isolation.py
+python test_channelcast_plus_subtitles.py singtest_plain_nickname
+python test_channelcast_plus_subtitles.py singtest_unicode_nickname
+python test_subtitles_msgs.py singtest_subs_messages
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_subtitles_isolation.py b/instrumentation/next-share/BaseLib/Test/test_subtitles_isolation.py
new file mode 100644 (file)
index 0000000..439c52a
--- /dev/null
@@ -0,0 +1,46 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from BaseLib.Test.Core.CacheDB import test_MetadataDBHandler
+from BaseLib.Test.Core.Subtitles.MetadataDomainObjects import test_Langugages,test_Subtitle, test_MetadataDTO
+from BaseLib.Test.Core.Subtitles import test_SubtitlesHandler
+from BaseLib.Test.Core.Subtitles.SubtitleHandler import test_DiskManager,test_SubtitleMsgHandlerIsolation
+import unittest
+from BaseLib.Test.Core.Subtitles import test_RichMetadataInterceptor
+from BaseLib.Test.Core.CacheDB import SimpleMetadataDB
+from BaseLib.Test.Core.Subtitles import simple_mocks
+import os.path
+import sys
+
+testModules = (
+                test_Langugages,
+                test_MetadataDTO,
+                test_MetadataDBHandler,
+                test_Subtitle,
+                test_SubtitlesHandler,
+                test_DiskManager,
+                test_SubtitleMsgHandlerIsolation,
+                test_RichMetadataInterceptor
+                )
+testSuites = list()
+
+RES_DIR_NAME = 'subtitles_test_res'
+RES_DIR = os.path.join('.',RES_DIR_NAME)
+
+def _initSuite():
+    simple_mocks.RES_DIR = RES_DIR
+    SimpleMetadataDB.RES_DIR = RES_DIR
+    for module in testModules:
+        module.RES_DIR = RES_DIR
+        testSuites.append(module.suite())
+        
+def suite():
+    _initSuite()
+    return unittest.TestSuite(testSuites)
+
+if __name__ == '__main__':
+    #set the resources dir relative to the position from where the script is launched
+    pathRelativeToScript = os.path.dirname(sys.argv[0])
+    RES_DIR = os.path.join(pathRelativeToScript,RES_DIR_NAME)
+    unittest.TextTestRunner().run(suite())
+    
diff --git a/instrumentation/next-share/BaseLib/Test/test_subtitles_msgs.py b/instrumentation/next-share/BaseLib/Test/test_subtitles_msgs.py
new file mode 100644 (file)
index 0000000..7443ad8
--- /dev/null
@@ -0,0 +1,343 @@
+# Written by Andrea Reale
+# see LICENSE.txt for license information
+
+from __future__ import with_statement
+from BaseLib.Test.test_as_server import TestAsServer
+import tempfile
+from Tribler import LIBRARYNAME
+import os
+import sys
+import shutil
+import hashlib
+from BaseLib.Core.BitTornado.BT1.MessageID import GET_SUBS, SUBS
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.Overlay.permid import generate_keypair
+from BaseLib.Core.simpledefs import NTFY_RICH_METADATA
+from BaseLib.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
+from BaseLib.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
+from traceback import print_exc
+from BaseLib.Core.Subtitles.MetadataDomainObjects.Languages import LanguagesProvider
+from BaseLib.Core.Utilities import utilities
+from BaseLib.Core.Subtitles.SubtitlesHandler import SubtitlesHandler
+import BaseLib.Core.Subtitles.SubtitlesHandler as SubUtils
+from BaseLib.Test.olconn import OLConnection
+from BaseLib.Core.BuddyCast.buddycast import BuddyCastCore
+from BaseLib.Core.BuddyCast.channelcast import ChannelCastCore
+from BaseLib.Core.BuddyCast.votecast import VoteCastCore
+import codecs
+import unittest
+
+DEBUG = False
+RES_DIR = os.path.join('.', 'subtitles_test_res')
+
+class TestSubtitleMessages(TestAsServer):
+    
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        self.config.set_buddycast(True)
+        BuddyCastCore.TESTASSERVER = True
+        ChannelCastCore.TESTASSERVER = True
+        VoteCastCore.TESTASSERVER = True
+        self.config.set_start_recommender(True)
+        self.config.set_bartercast(True) 
+        self.config.set_remote_query(True)
+        self.config.set_crawler(False)       
+        self.config.set_torrent_collecting_dir(os.path.join(self.config_path, "tmp_torrent_collecting"))
+        
+        self.collecting_dir = os.path.join(self.config_path, "temp_subtitles_collecting")
+        os.makedirs(self.collecting_dir)
+        self.config.set_subtitles_collecting(True)
+        self.config.set_subtitles_collecting_dir(self.collecting_dir)
+        
+        
+
+#        # Write superpeers.txt and DB schema
+        self.install_path = tempfile.mkdtemp()
+        spdir = os.path.join(self.install_path, LIBRARYNAME, 'Core')
+        os.makedirs(spdir)
+
+        statsdir = os.path.join(self.install_path, LIBRARYNAME, 'Core', 'Statistics')
+        os.makedirs(statsdir)
+        
+        superpeerfilename = os.path.join(spdir, 'superpeer.txt')
+        print >> sys.stderr,"test: writing empty superpeers to",superpeerfilename
+        f = open(superpeerfilename, "w")
+        f.write('# Leeg')
+        f.close()
+
+        self.config.set_install_dir(self.install_path)
+        
+        srcfiles = []
+        srcfiles.append(os.path.join(LIBRARYNAME,"schema_sdb_v5.sql"))
+        for srcfile in srcfiles:
+            sfn = os.path.join('..','..',srcfile)
+            dfn = os.path.join(self.install_path,srcfile)
+            print >>sys.stderr,"test: copying",sfn,dfn
+            shutil.copyfile(sfn,dfn)
+            
+        #copy subtitles files in the appropriate subtitles folder
+        self.src1 = os.path.join(RES_DIR,'fake.srt')
+        self.src2 = os.path.join(RES_DIR,'fake0.srt')
+        
+
+            
+            
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der())
+        
+        self.another_keypair = generate_keypair()
+        self.anotherpermid = str(self.another_keypair.pub().get_der())
+        
+        self.testInfohash = hashlib.sha1("yoman!").digest()
+        
+        #copy subtitles in the collecting dir
+        nldName = SubUtils.getSubtitleFileRelativeName(self.anotherpermid, self.testInfohash, "nld")
+        engName = SubUtils.getSubtitleFileRelativeName(self.anotherpermid, self.testInfohash, "eng")
+        
+        self.sub1 = os.path.join(self.collecting_dir, nldName)
+        self.sub2 = os.path.join(self.collecting_dir, engName)
+        
+        shutil.copyfile(self.src1, self.sub1)
+        # Let's say that the receiving peer has only the nld subtitle
+        # available locally 
+        # shutil.copyfile(self.src2, self.sub2)
+        
+    
+    def setUpDB(self):
+        try:
+            self.richMetadata_db = self.session.open_dbhandler(NTFY_RICH_METADATA)
+
+            #add some metadata
+            self.mdto = MetadataDTO(self.anotherpermid, self.testInfohash)
+            subtitle1 = SubtitleInfo("nld", self.sub1)
+            subtitle1.computeChecksum()
+            subtitle2 = SubtitleInfo("eng", os.path.join(RES_DIR, "fake0.srt"))
+            subtitle2.computeChecksum()
+            self.mdto.addSubtitle(subtitle1)
+            self.mdto.addSubtitle(subtitle2)
+            
+            self.mdto.sign(self.another_keypair)
+            
+            self.richMetadata_db.insertMetadata(self.mdto)
+            
+            
+            
+            #hispermid has the nld subtitle but doesn't have the english one
+            self.richMetadata_db.updateSubtitlePath(self.mdto.channel,self.mdto.infohash,"eng",None)
+            
+        except:
+            print_exc()
+        
+        
+    def tearDown(self):
+        TestAsServer.tearDown(self)
+        self.session.close_dbhandler(self.richMetadata_db)
+        
+    
+        
+    def subtest_receptionOfSUBS(self):
+        '''
+        Asking for the single available subtitle. The response should be 
+        a valid SUBS message containing its contents
+        '''
+        
+        print >> sys.stderr, "test: test_subtitles_msgs_1_1 -----------------------"
+        ol_conn = OLConnection(self.my_keypair,'localhost',self.hisport)
+        
+        bitmask = LanguagesProvider.getLanguagesInstance().langCodesToMask(['nld'])
+        binmask = utilities.uintToBinaryString(bitmask, length=4)
+        
+        request = GET_SUBS + \
+                      bencode((
+                              self.anotherpermid,
+                              self.testInfohash,
+                              binmask
+                              ))
+                      
+        subshandler = SubtitlesHandler()
+        subshandler.register(ol_conn, self.richMetadata_db, self.session)
+        
+        ol_conn.send(request)
+        subs_data = ol_conn.recv()
+        print >> sys.stderr, "test: subtitles_messages : received SUBS response: len",len(subs_data)
+        self.assertEquals(SUBS, subs_data[0])
+        data = bdecode(subs_data[1:])
+        print >> sys.stderr, "test: subtitles_messages : received SUBS response: ", data
+        
+        #check on the format of the response
+        self.assertTrue(isinstance(data,list))
+        self.assertEquals(4, len(data)) # for fields
+        self.assertEquals(self.mdto.channel,data[0])
+        self.assertEquals(self.mdto.infohash, data[1])
+        self.assertEquals(binmask, data[2])
+        self.assertTrue(isinstance(data[3],list))
+        self.assertEquals(1, len(data[3]))
+        with codecs.open(self.sub1, "rb", "utf-8") as sub:
+            expectedContents = sub.read()
+        self.assertEquals(expectedContents, data[3][0])
+        
+        ol_conn.close()
+        
+        print >> sys.stderr, "test: subtitles_messages: received content is valid."
+        print >> sys.stderr, "End of test_subtitles_msgs_1_1 test --------------------"
+        
+        
+    def subtest_receptionOfSUBSTwoRequestsOneAvailable(self):
+        """
+        Asking for two subtitles while the recipent of the request has only one.
+        The response should contain only the one available subtitle content,
+        plus a bitmask that reflects the contents of the response.
+        """
+        
+        print >> sys.stderr, "test: test_subtitles_msgs_2_1 -----------------------"
+        ol_conn = OLConnection(self.my_keypair,'localhost',self.hisport)
+        
+        bitmask = LanguagesProvider.getLanguagesInstance().langCodesToMask(['nld','eng'])
+        binmask = utilities.uintToBinaryString(bitmask, length=4)
+        
+        request = GET_SUBS + \
+                      bencode((
+                              self.anotherpermid,
+                              self.testInfohash,
+                              binmask
+                              ))
+                      
+        subshandler = SubtitlesHandler()
+        subshandler.register(ol_conn, self.richMetadata_db, self.session)
+        
+        ol_conn.send(request)
+        subs_data = ol_conn.recv()
+        self.assertEquals(SUBS, subs_data[0])
+        data = bdecode(subs_data[1:])
+        print >> sys.stderr, "test: subtitles_messages : received SUBS repsonse: ", data
+        
+        #check on the format of the response
+        self.assertTrue(isinstance(data,list))
+        self.assertEquals(4, len(data)) # for fields
+        self.assertEquals(self.mdto.channel,data[0])
+        self.assertEquals(self.mdto.infohash, data[1])
+        
+        #the receiver had only one of the two requested subtitles
+        # so I expect a different bitmask
+        bitmask = LanguagesProvider.getLanguagesInstance().langCodesToMask(['nld'])
+        expectedBinarymask = utilities.uintToBinaryString(bitmask, length=4)
+        
+        self.assertEquals(expectedBinarymask, data[2])
+        self.assertTrue(isinstance(data[3],list))
+        self.assertEquals(1, len(data[3]))
+        with codecs.open(self.sub1, "rb", "utf-8") as sub:
+            expectedContents = sub.read()
+        self.assertEquals(expectedContents, data[3][0])
+        
+        ol_conn.close()
+        print >> sys.stderr, "test: subtitles_messages: received content is valid."
+        print >> sys.stderr, "End of test_subtitles_msgs_2_1 test --------------------"
+        
+    def subtest_invalidRequest1(self):
+        """
+        Trying to send an empty message.
+        The connection should be closed by the receiver
+        """
+        print >> sys.stderr, "test: test_subtitles_msgs_invalid_request_1 ------------------"
+        ol_conn = OLConnection(self.my_keypair,'localhost',self.hisport)
+
+        
+        request = GET_SUBS + \
+                    bencode({})
+        
+        ol_conn.send(request)
+        self.assertEquals(0, len(ol_conn.recv()))
+        print >> sys.stderr, "test: test_subtitles_msgs_invalid_request_1: connection closed as expected"
+        
+        ol_conn.close()
+        print >> sys.stderr, "End of test_subtitles_msgs_invalid_request_1 ------------------"
+        
+    def subtest_invalidRequest2(self):
+        """
+        Trying to send an invalid message (an integer instead of a 4 bytes binary string)
+        The connection should be closed by the receiver
+        """
+        print >> sys.stderr, "test: test_subtitles_msgs_invalid_request_2 ------------------"
+        ol_conn = OLConnection(self.my_keypair,'localhost',self.hisport)
+
+        
+        request = GET_SUBS + \
+                      bencode((
+                              self.anotherpermid,
+                              self.testInfohash,
+                              42
+                              ))
+        
+        ol_conn.send(request)
+        self.assertEquals(0, len(ol_conn.recv()))
+        print >> sys.stderr, "test: test_subtitles_msgs_invalid_request_2: connection closed as expected"
+        
+        ol_conn.close()
+        print >> sys.stderr, "End of test_subtitles_msgs_invalid_request_2 ------------------"
+        
+    def subtest_invalidRequest3(self):
+        """
+        Trying to send an invalid message (valid for everything except that there is one field more)
+        The connection should be closed by the receiver
+        """
+        print >> sys.stderr, "test: test_subtitles_msgs_invalid_request_3 ------------------"
+        ol_conn = OLConnection(self.my_keypair,'localhost',self.hisport)
+
+        bitmask = LanguagesProvider.getLanguagesInstance().langCodesToMask(['nld','eng'])
+        binmask = utilities.uintToBinaryString(bitmask, length=4)
+        
+        request = GET_SUBS + \
+                      bencode((
+                              self.anotherpermid,
+                              self.testInfohash,
+                              binmask,
+                              42
+                              ))
+        
+        ol_conn.send(request)
+        self.assertEquals(0, len(ol_conn.recv()))
+        print >> sys.stderr, "test: test_subtitles_msgs_invalid_request_3: connection closed as expected"
+        
+        ol_conn.close()
+        print >> sys.stderr, "End of test_subtitles_msgs_invalid_request_3 ------------------"
+        
+    def singtest_subs_messages(self):
+        self.setUpDB()
+        
+        self.subtest_receptionOfSUBS()
+        self.subtest_receptionOfSUBSTwoRequestsOneAvailable()
+        self.subtest_invalidRequest1()
+        self.subtest_invalidRequest2()
+        self.subtest_invalidRequest3()
+        
+        #testMethods = [getattr(self, method) for method in dir(self) if method.startswith('subtest')]
+        
+        #for m in testMethods:
+         #   m()
+        
+        
+def test_suite():
+    suite = unittest.TestSuite()
+    # We should run the tests in a separate Python interpreter to prevent 
+    # problems with our singleton classes, e.g. PeerDB, etc.
+    if len(sys.argv) != 2:
+        print "Usage: python test_channelcast_plus_subtitles.py <method name>"
+    else:
+        suite.addTest(TestSubtitleMessages(sys.argv[1]))
+    
+    return suite
+
+def main():
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])
+
+if __name__ == "__main__":
+    main()       
+
+        
+        
+        
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/test_superpeers.py b/instrumentation/next-share/BaseLib/Test/test_superpeers.py
new file mode 100644 (file)
index 0000000..a56466c
--- /dev/null
@@ -0,0 +1,87 @@
+import os, sys
+import tempfile
+import unittest
+
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB,CURRENT_MAIN_DB_VERSION
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import SuperPeerDBHandler, PeerDBHandler
+
+CREATE_SQL_FILE = os.path.join('..',"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql")
+assert os.path.isfile(CREATE_SQL_FILE)
+    
+import BaseLib.Core.CacheDB.sqlitecachedb
+print >>sys.stderr,"TEST: ENABLE DBUPGRADE HACK"
+BaseLib.Core.CacheDB.sqlitecachedb.TEST_SQLITECACHEDB_UPGRADE = True
+
+
+lines = [
+'superpeer1.das2.ewi.tudelft.nl, 7001, MG0CAQEEHR/bQNvwga7Ury5+8vg/DTGgmMpGCz35Zs/2iz7coAcGBSuBBAAaoUADPgAEAL2I5yVc1+dWVEx3nbriRKJmOSlQePZ9LU7yYQoGABMvU1uGHvqnT9t+53eaCGziV12MZ1g2p0GLmZP9, superpeer1@TUD\n',
+'superpeer1.das2.ewi.tudelft.nl, 7004, MG0CAQEEHVPNzNfHzGgIIrpUyNC1NYQpaoeNov0jovmEuwtCoAcGBSuBBAAaoUADPgIEAZNX5NBOuGH4j2kumv/9WkPLrJPVkOr5oVImhcp8AC7w7ww9eZwUF7S/Q96If4UmVX+L6HMKSOTLPoPk, superpeer2@TUD\n',
+'superpeer1.das2.ewi.tudelft.nl, 7003, MG0CAQEEHWDBJrkzilKmoOBWZHu19gaabapqJIAeSLhffluLoAcGBSuBBAAaoUADPgAEAQaLGR940aKktbAJNm6vYOTSN2P8z1P9EiQ48kJNAdrDl7oBkyrERZOq+IMMKIpu4ocsz5hxZHMTy2Fh, superpeer3@TUD\n',
+]
+
+class TestSuperPeerList(unittest.TestCase):
+    
+    def setUp(self):
+        self.file_path = tempfile.mktemp()
+        self.db_path = tempfile.mktemp()
+        
+        #print >>sys.stderr,"test: file_path",self.file_path
+        #print >>sys.stderr,"test: db_path",self.db_path
+        
+        self.writeSuperPeers()
+        head,tail = os.path.split(self.file_path)
+        self.config = {'install_dir':head, 'superpeer_file':tail}
+
+        self.db = SQLiteCacheDB.getInstance()
+        self.db.initDB(self.db_path, CREATE_SQL_FILE, check_version=False)
+        self.splist = SuperPeerDBHandler.getInstance()
+        
+#        cur = SQLiteCacheDB.getCursor()
+#        print cur, cur.connection
+        
+    def tearDown(self):
+        self.db.close(clean=True)
+        for path in [self.file_path, self.db_path]:
+            try:
+                os.remove(path)
+            except Exception, msg:
+                pass
+
+    def writeSuperPeers(self):
+        tf = open(self.file_path, "w")
+        tf.writelines(lines)
+        tf.close()
+            
+    def test_readSuperPeerList(self):
+        res = self.splist.readSuperPeerList(self.file_path)
+        assert len(res) == 3, len(res)
+
+    def test_loadSuperPeer(self):
+        """ The SuperPeerDBHandler constructor writes the superpeers to the PeerDB """
+        
+        self.splist.loadSuperPeers(self.config, True)
+        assert self.splist.size() == 3, self.splist.size()
+        
+        self.peer_db = PeerDBHandler.getInstance()
+        # Arno: must be 3, as there is a duplicate PermID in the lines list
+        assert self.peer_db.size() == 3, self.peer_db.size()
+        
+    def test_getSuperPeers(self):
+        self.splist.loadSuperPeers(self.config, True)
+        superpeers = self.splist.getSuperPeers()
+        assert len(superpeers) == 3, superpeers
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestSuperPeerList))
+    
+    return suite
+
+        
+def main():
+    unittest.main(defaultTest='test_suite')
+
+    
+if __name__ == '__main__':
+    main()        
diff --git a/instrumentation/next-share/BaseLib/Test/test_threadpool.py b/instrumentation/next-share/BaseLib/Test/test_threadpool.py
new file mode 100644 (file)
index 0000000..89542a7
--- /dev/null
@@ -0,0 +1,173 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+\r
+import unittest\r
+\r
+import sys\r
+import time\r
+from traceback import print_exc\r
+from threading import RLock\r
+\r
+from BaseLib.Core.APIImplementation.ThreadPool import ThreadPool\r
+\r
+\r
+DEBUG=False\r
+\r
+class TestThreadPool(unittest.TestCase):\r
+    """ \r
+    Parent class for testing internal thread pool of Tribler\r
+    """\r
+    \r
+    def setUp(self):\r
+        """ unittest test setup code """\r
+        self.tp = ThreadPool(10)\r
+        self.exp = []\r
+        self.gotlock = RLock()\r
+        self.got = []\r
+\r
+    def tearDown(self):\r
+        """ unittest test tear down code """\r
+        time.sleep(2)\r
+        self.got.sort()\r
+        self.assertEquals(self.exp,self.got)\r
+    \r
+    def test_queueTask1(self):\r
+        print >>sys.stderr,"test_queueTask1:"\r
+        self.exp = [1]\r
+        self.tp.queueTask(lambda:self.do_task(1))\r
+        \r
+    def do_task(self,val):\r
+        self.gotlock.acquire()\r
+        print >>sys.stderr,"test: got task",val\r
+        self.got.append(val)\r
+        self.gotlock.release()\r
+        \r
+    def test_queueTask10lambda(self):\r
+        print >>sys.stderr,"test_queueTask10lambda:"\r
+        self.exp = range(1,11)\r
+        def wrapper(x):\r
+            self.tp.queueTask(lambda:self.do_task(x))\r
+                          \r
+        for i in range(1,11):\r
+            print >>sys.stderr,"test: exp task",i\r
+            wrapper(i)\r
+\r
+    #\r
+    # Confusing lambda crap, do explicit:\r
+    #\r
+    def test_queueTask10explicit(self):\r
+        print >>sys.stderr,"test_queueTask10explicit:"\r
+        self.exp = range(1,11)\r
+        self.tp.queueTask(self.do_task1)\r
+        self.tp.queueTask(self.do_task2)\r
+        self.tp.queueTask(self.do_task3)\r
+        self.tp.queueTask(self.do_task4)\r
+        self.tp.queueTask(self.do_task5)\r
+        self.tp.queueTask(self.do_task6)\r
+        self.tp.queueTask(self.do_task7)\r
+        self.tp.queueTask(self.do_task8)\r
+        self.tp.queueTask(self.do_task9)\r
+        self.tp.queueTask(self.do_task10)\r
+\r
+\r
+    def test_joinAll(self):\r
+        print >>sys.stderr,"test_joinall:"\r
+        self.exp = range(1,6)\r
+        print >>sys.stderr,"test: adding tasks"\r
+        self.tp.queueTask(self.do_task1)\r
+        self.tp.queueTask(self.do_task2)\r
+        self.tp.queueTask(self.do_task3)\r
+        self.tp.queueTask(self.do_task4)\r
+        self.tp.queueTask(self.do_task5)\r
+        print >>sys.stderr,"test: join all"\r
+        self.tp.joinAll()\r
+        print >>sys.stderr,"test: adding post tasks, shouldn't get run"\r
+        self.tp.queueTask(self.do_task6)\r
+        self.tp.queueTask(self.do_task7)\r
+        self.tp.queueTask(self.do_task8)\r
+        self.tp.queueTask(self.do_task9)\r
+        self.tp.queueTask(self.do_task10)\r
+\r
+    def test_setThreadCountPlus10(self):\r
+        print >>sys.stderr,"test_setThreadCountPlus10:"\r
+        print >>sys.stderr,"test: pre threads",self.tp.getThreadCount()\r
+        self.tp.setThreadCount(20)\r
+        print >>sys.stderr,"test: post threads",self.tp.getThreadCount()\r
+        time.sleep(1)\r
+        self.test_joinAll()\r
+\r
+    def test_setThreadCountMinus8(self):\r
+        print >>sys.stderr,"test_setThreadCountMinus8:"\r
+        print >>sys.stderr,"test: pre threads",self.tp.getThreadCount()\r
+        self.tp.setThreadCount(2)\r
+        print >>sys.stderr,"test: post threads",self.tp.getThreadCount()\r
+        time.sleep(1)\r
+        self.test_joinAll()\r
+\r
+\r
+    def do_task1(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(1)\r
+        self.gotlock.release()\r
+\r
+    def do_task2(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(2)\r
+        self.gotlock.release()\r
+\r
+    def do_task3(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(3)\r
+        self.gotlock.release()\r
+\r
+    def do_task4(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(4)\r
+        self.gotlock.release()\r
+\r
+    def do_task5(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(5)\r
+        self.gotlock.release()\r
+\r
+    def do_task6(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(6)\r
+        self.gotlock.release()\r
+\r
+    def do_task7(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(7)\r
+        self.gotlock.release()\r
+\r
+    def do_task8(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(8)\r
+        self.gotlock.release()\r
+\r
+    def do_task9(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(9)\r
+        self.gotlock.release()\r
+\r
+    def do_task10(self):\r
+        self.gotlock.acquire()\r
+        self.got.append(10)\r
+        self.gotlock.release()\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+def test_suite():\r
+    suite = unittest.TestSuite()\r
+    suite.addTest(unittest.makeSuite(TestThreadPool))\r
+    \r
+    return suite\r
+\r
+if __name__ == "__main__":\r
+    unittest.main()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_torrentcollecting.py b/instrumentation/next-share/BaseLib/Test/test_torrentcollecting.py
new file mode 100644 (file)
index 0000000..36328bc
--- /dev/null
@@ -0,0 +1,93 @@
+import os
+import sys
+import unittest
+
+from BaseLib.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, str2bin, CURRENT_MAIN_DB_VERSION
+from BaseLib.Core.CacheDB.SqliteCacheDBHandler import PreferenceDBHandler, MyPreferenceDBHandler
+from BaseLib.Core.BuddyCast.TorrentCollecting import SimpleTorrentCollecting
+from bak_tribler_sdb import *
+    
+CREATE_SQL_FILE = os.path.join('..',"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql")
+assert os.path.isfile(CREATE_SQL_FILE)
+
+def init():
+    init_bak_tribler_sdb()
+
+
+SQLiteCacheDB.DEBUG = False
+
+class TestTorrentCollecting(unittest.TestCase):
+        
+    def setUp(self):
+        self.db = SQLiteCacheDB.getInstance()
+        self.db.initDB(TRIBLER_DB_PATH_BACKUP)
+        
+        permid = {}
+        permid[3127] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAcPezgQ13k1MSOaUrCPisWRhYuNT7Tm+q5rUgHFvAWd9b+BcSut6TCniEgHYHDnQ6TH/vxQBqtY8Loag'
+        permid[994] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAJUNmwvDaigRaM4cj7cE2O7lessqnnFEQsan7df9AZS8xeNmVsP/XXVrEt4t7e2TNicYmjn34st/sx2P'
+        permid[19] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAJv2YLuIWa4QEdOEs4CPRxQZDwZphKd/xK/tgbcALG198nNdT10znJ2sZYl+OJIvj7YfYp75PrrnWNX'
+        permid[5] = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAB0XbUrw5b8CrTrMZST1SPyrzjgSzIE6ynALtlZASGAb+figVXRRGpKW6MSal3KnEm1/q0P3JPWrhCE'
+        self.permid = permid
+        
+        db = MyPreferenceDBHandler.getInstance()
+        db.loadData()
+        
+    def tearDown(self):
+        self.db.close()
+    
+    def test_selecteTorrentToCollect(self):
+        db = PreferenceDBHandler.getInstance()
+        tc = SimpleTorrentCollecting(None,None)
+        truth = {3127:235, 994:20, 19:1, 5:0}
+        
+        for pid in truth:
+            pl = db.getPrefList(str2bin(self.permid[pid]))
+            assert len(pl) == truth[pid], [pid, len(pl)]
+            # test random selection
+            infohash = tc.selecteTorrentToCollect(pl, True)    
+            if pid == 994 or pid == 3127:
+                assert len(infohash) == 20, infohash
+            else:
+                assert infohash is None, infohash
+        
+        #tc.updateAllCooccurrence()
+        for pid in truth:
+            pl = db.getPrefList(str2bin(self.permid[pid]))
+            assert len(pl) == truth[pid], [pid, len(pl)]
+            # test selecting most relevant torrent
+            infohash = tc.selecteTorrentToCollect(pl, False)    
+            if pid == 994:
+                tid = tc.torrent_db.getTorrentID(infohash)
+                assert tid == 8979
+                
+                permid = self.permid[pid]
+                infohash = tc.updatePreferences(permid, pl)
+                tid = tc.torrent_db.getTorrentID(infohash)
+                assert tid == 8979
+            elif pid == 3127:
+                tid = tc.torrent_db.getTorrentID(infohash)
+                assert tid == 9170
+                
+                permid = self.permid[pid]
+                infohash = tc.updatePreferences(permid, pl)
+                tid = tc.torrent_db.getTorrentID(infohash)
+                assert tid == 9170
+            else:
+                assert infohash is None, infohash
+                
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestTorrentCollecting))
+    
+    return suite
+        
+def main():
+    init()
+    unittest.main(defaultTest='test_suite')
+
+    
+if __name__ == '__main__':
+    main()    
+            
+                    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/test_tracker_checking.py b/instrumentation/next-share/BaseLib/Test/test_tracker_checking.py
new file mode 100644 (file)
index 0000000..bbcbae7
--- /dev/null
@@ -0,0 +1,24 @@
+# Written by Yuan Yuan, Jie Yang
+# see LICENSE.txt for license information
+
+from BaseLib.TrackerChecking.TorrentChecking import TorrentChecking
+from time import sleep
+
+
+
+
+def run():
+    print "start run"
+#    torrent_db = TorrentDBHandler.getInstance()
+#    key = ['infohash', 'torrent_name', 'torrent_dir', 'relevance', 'info', 
+#                'num_owners', 'leecher', 'seeder', 'category']
+#    data = torrent_db.getRecommendedTorrents(key)
+#
+#    for idata in data[54:100]:
+#        trackerChecking(idata)
+    for x in range(1000):        
+        t = TorrentChecking()
+        t.start()
+        sleep(2)
+
+run()
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Test/test_upnp.py b/instrumentation/next-share/BaseLib/Test/test_upnp.py
new file mode 100644 (file)
index 0000000..e2e71d4
--- /dev/null
@@ -0,0 +1,160 @@
+# Written by Ingar Arntzen, Norut
+# see LICENSE.txt for license information
+
+"""
+This unittest runs the UPnPServer and the UPnPClient in a single thread.
+The UPnPServer is extended with a couple of simple services.
+The tests then revolve around using the UPnPClient to discover and
+interact with the services defined by the UPnPServer.
+"""
+import time
+import unittest
+import threading
+
+from BaseLib.UPnP.common.taskrunner import TaskRunner
+from BaseLib.UPnP.upnpserver.upnpserver import UPnPServer
+from BaseLib.UPnP.upnpclient.upnpclient import UPnPClient
+from BaseLib.UPnP.services import SwitchPower, URLService
+from BaseLib.UPnP import SERVER_PRODUCT_NAME
+from BaseLib.UPnP import SERVER_ROOT_DEVICE_CONFIG
+
+##############################################
+# EVENT RECEIVER
+##############################################
+
+class _EventReceiver:
+    """
+    Dummy Event receiver that always holds the last event
+    it received.
+    """
+    def __init__(self):
+        self._args = (None, None, None)
+    def handler(self, *args):
+        """Invoked when event is delivered."""
+        self._args = args
+    def get_last_event(self):
+        """Get the state of the last event."""
+        return self._args
+
+
+##############################################
+# UPNP TEST CASE
+##############################################
+class UPnPTestCase(unittest.TestCase):
+
+    """TestCase for UPnPServer and UPnPClient."""
+
+    def setUp(self):
+        """Set up test environment."""
+        self._tr = TaskRunner()
+        self.server = UPnPServer(self._tr, 
+                            SERVER_PRODUCT_NAME,
+                            SERVER_ROOT_DEVICE_CONFIG)
+
+        self.service1 = SwitchPower("SwitchPower")
+        self.service2 = URLService("URLService")
+        self.server.add_service(self.service1)
+        self.server.add_service(self.service2)
+
+        self.client = UPnPClient(self._tr)
+        self.thread = threading.Thread(target=self._tr.run_forever)
+        self.thread.start()
+        time.sleep(0.1) # wait for client to discover server
+
+    def tearDown(self):
+        """Clean up test environment."""
+        self.client.close()
+        self.server.close()
+        self._tr.stop()
+        self.thread.join()
+
+    def test_device_discovery(self):
+        """
+        Test if UPnPClient is able to discover the UPnPDevice
+        hosted by UPnPServer.
+        """
+        root = self.server.get_root_device()
+        self.assertTrue( root.uuid in self.client.get_device_uuids())
+
+    def test_switchpower(self):
+        """Test discovery and use of SwitchPowerService."""
+
+        stub = self.client.get_services_by_short_id("SwitchPower")[0]
+
+        # Test service_id
+        self.assertEqual(self.service1.get_service_id(), 
+                         stub.get_service_id())
+        # Test service_type
+        self.assertEqual(self.service1.get_service_type(), 
+                         stub.get_service_type())
+
+        # Subscribe
+        evr = _EventReceiver()
+        stub.subscribe(evr.handler)
+
+        # Update SwitchPowerService by using Stub.
+        value = stub.GetStatus()
+        stub.SetTarget(not value)
+        new_value = stub.GetStatus()
+        self.assertEqual(not value, new_value, 
+                         evr.get_last_event()[2])
+
+        # Update SwitchPowerService directly.
+        self.service1.set_target(not new_value)
+        time.sleep(0.1) # wait for notification
+        self.assertEqual(not new_value, evr.get_last_event()[2])
+
+        # Unsubscribe
+        stub.unsubscribe(evr.handler)
+
+    def test_urlservice(self):
+        """Test discovery and use of URLService."""
+        stub = self.client.get_services_by_short_id("URLService")[0]
+        
+        # Subscribe
+        evr = _EventReceiver()
+        stub.subscribe(evr.handler)
+   
+        # Update URLService by using Stub.
+        stub.GetURL()
+        new_url = "http://p2p-next.org"
+        stub.SetURL(new_url)
+        service_url = self.service2.get_url()
+        url = stub.GetURL()
+        self.assertEqual(new_url, service_url, url)
+        self.assertEqual(url, evr.get_last_event()[2])
+
+        # Update URLService directly
+        url2 = "http://itek.norut.no"
+        self.service2.set_url(url2)
+        time.sleep(0.1) # wait for notification
+        self.assertEqual(url2, evr.get_last_event()[2])
+
+        # Unsubscribe
+        stub.unsubscribe(evr.handler)
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == "__main__":
+    unittest.main()
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    
diff --git a/instrumentation/next-share/BaseLib/Test/test_url.py b/instrumentation/next-share/BaseLib/Test/test_url.py
new file mode 100644 (file)
index 0000000..02cf2ed
--- /dev/null
@@ -0,0 +1,263 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# * URL 2 TorrentDef
+# 
+#  - missing fields
+#  - malformed fields
+#    - bad syntax
+#    - bad length
+#
+# * TorrentDef 2 URL
+#  - Creates right URL from params
+#
+# Move to API dir?
+#
+
+import unittest
+import sys
+import os
+import tempfile
+import shutil
+from traceback import print_exc
+
+from BaseLib.Core.API import *
+
+
+DEBUG=False
+
+class TestP2PURLs(unittest.TestCase):
+    """ 
+    Testing P2P URLs version 0    
+    """
+    
+    def setUp(self):
+        pass
+        
+    def tearDown(self):
+        pass
+
+    def test_url_syntax(self):
+        """
+        tribe://127.2.3.42:7764/announce?SjaakCam.mpegts&k=MHowDQYJKoZIhvcNAQEBBQADaQAwZgJhAN0Khlp5ZhWC7VfLynCkKts71b8h8tZXH87PkDtJUTJaX_SS1Cddxkv63PRmKOvtAHhkTLSsWOZbSeHkOlPIq_FGg2aDLDJ05g3lQ-8mSmo05ff4SLqNUTShWO2CR2TPhQIBAw&l=HCAAAA&s=15&a=RSA&b=AAIAAA
+        tribe://127.1.0.10:6969/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg
+        """
+
+        badurllist = []
+        
+        badurllist += [("ribe://127.1.0.10:6969/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "wrong scheme")]
+        badurllist += [("tribe//127.1.0.10:6969/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "no colon after scheme")] 
+        #badurllist += [("tribe://127.1.0.10:6969/announce?trai ler.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "space not escaped")] # too strict
+        #badurllist += [("tribe://localhost;10/?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "bad port spec")] # too strict
+        badurllist += [("tribe://localhost:https/?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "port not int")]
+        badurllist += [("tribe://localhost/trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "not query")]
+        if sys.platform != "win32":
+            badurllist += [("tribe://localhost?tr\xfeiler.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "char in name not URL escaped")]
+        badurllist += [("tribe://localhost?Sjaak&", "query with empty key=value")]
+        badurllist += [("tribe://localhost?trailer.mkv&r:TTgcifG0Ot7STCY2JL8SUOxROFo&l:AKK35A&s=15&b:AAFnGg", "key value not separated by =")]
+        badurllist += [("tribe://localhost?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b:AAFnGg", "some key value not separated by =")]
+        badurllist += [("tribe://localhost?Sjaak&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=", "query with malformed key value")]
+
+
+        # IPv6 addresses
+        badurllist += [("tribe://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210:6969/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg","unclosed IPv6 literal address")]
+        badurllist += [("tribe://FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:6969/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg","unopened IPv6 literal address")]        
+        badurllist += [("tribe://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg","unclosed IPv6 literal address, no port")]
+        badurllist += [("tribe://FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg","unopened IPv6 literal address, no port")]        
+
+        self.run_badurllist(badurllist)
+
+
+    def test_missing(self):
+        badurllist = []
+        badurllist += [("tribe:/", "missing all fields")]
+        badurllist += [("tribe://", "missing authority")]
+        badurllist += [("tribe://localhost", "missing query fields")]
+        badurllist += [("tribe://localhost?", "empty query")]
+        badurllist += [("tribe://localhost?Sjaak", "query just name")]
+        badurllist += [("tribe://localhost?n=Sjaak", "query just name")]
+        badurllist += [("tribe://localhost?Sjaak&r=TTgcifG0Ot7STCY2JL8SUOxROFo", "query with just valid root hash")] 
+        badurllist += [("tribe://localhost?Sjaak&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A", "query with missing piece size+bitrate")]
+        badurllist += [("tribe://localhost?Sjaak&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15", "query with missing bitrate")]
+
+        # live
+        badurllist += [("tribe://127.2.3.42:7764/announce?SjaakCam.mpegts&k=MHowDQYJKoZIhvcNAQEBBQADaQAwZgJhAN0Khlp5ZhWC7VfLynCkKts71b8h8tZXH87PkDtJUTJaX_SS1Cddxkv63PRmKOvtAHhkTLSsWOZbSeHkOlPIq_FGg2aDLDJ05g3lQ-8mSmo05ff4SLqNUTShWO2CR2TPhQIBAw&l=HCAAAA&s=15&b=AAIAAA", "query with missing live auth method")]
+
+        
+        self.run_badurllist(badurllist)
+
+    def test_encoding(self):
+        badurllist = []
+        badurllist += [("tribe://localhost?Sjaak&r=\xd3]\xb7\xe3\x9e\xbb\xf3\xdd5\xdb~9\xeb\xbf=\xd3]\xb7\xe3\x9e&l=AKK35A&s=15&b=AAFnGg", "query with non-BASE64URL encoded root hash")]
+        badurllist += [("tribe://127.1.0.10:6969/announce?trailer.mkv&r=TTgcifG0Ot7ST!Y2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg", "query with invalid BASE64URL encoded root hash, contains !")]
+        badurllist += [("tribe://127.1.0.10:6969/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo=&l=AKK35A&s=15&b=AAFnGg", "query with invalid BASE64URL encoded root hash, contains = padding")]
+        badurllist += [("tribe://localhost?Sjaak&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=1234&s=15&b=AAFnGg", "query with non-encoded length")]
+        badurllist += [("tribe://localhost?Sjaak&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=1234&b=AAFnGg", "query with non-encoded piece size")]
+        badurllist += [("tribe://localhost?Sjaak&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=1234", "query with non-encoded bitrate")]
+
+        # live
+        badurllist += [("tribe://127.2.3.42:7764/announce?SjaakCam.mpegts&k=MHowDQYJKoZIhvc!AQEBBQADaQAwZgJhAN0Khlp5ZhWC7VfLynCkKts71b8h8tZXH87PkDtJUTJaX_SS1Cddxkv63PRmKOvtAHhkTLSsWOZbSeHkOlPIq_FGg2aDLDJ05g3lQ-8mSmo05ff4SLqNUTShWO2CR2TPhQIBAw&l=HCAAAA&s=15&a=RSA&b=AAIAAA", "query with invalid BASE64URL encoded live public key, contains !")]
+        
+        self.run_badurllist(badurllist)
+
+    def run_badurllist(self,badurllist):
+        
+        #print >>sys.stderr,badurllist
+        
+        for url,problem in badurllist:
+            try:
+                print >>sys.stderr,"\n\nTest",problem
+                tdef = TorrentDef.load_from_url(url)
+                self.assert_(False,"Should not have accepted URL: "+problem)
+            except AssertionError,e:
+                raise e
+            except:
+                print_exc()
+                self.assert_(True)
+        
+        
+    def test_create_vod(self):
+        
+        paramlist = []
+        paramlist += [('Sjaak',134349,2 ** 15, "4:01")]
+        paramlist += [('Sjaak',1343490,2 ** 15, "1:04:01")] # long duration
+        paramlist += [('Sjaak Harry',134349,2 ** 15, "4:01")] # space in name
+        paramlist += [(u'Serg\u00e9Harr\u014c',134349,2 ** 15, "4:01")] # Unicode name
+        paramlist += [(u'\u4f60\u597d',134349,2 ** 15, "4:01")] # Unicode name, Ni Hao ;o)
+        
+        self.run_paramlist_vod(paramlist,"http://127.0.0.1/announce")
+        #self.run_paramlist_vod(paramlist,"http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]/announce")
+        
+    def run_paramlist_vod(self,paramlist,tracker):
+        tmpdirname = tempfile.mkdtemp()
+        
+        for name,leng,piecesize,duration in paramlist:
+            
+            
+            print >>sys.stderr,"\n\nTest",`name`
+            tmpfilename = os.path.join(tmpdirname,name)
+        
+            content = '*' * leng
+            f = open(tmpfilename,"wb")
+            f.write(content)
+            f.close()
+            
+            tdef = TorrentDef()
+            tdef.add_content(tmpfilename,playtime=duration)
+            tdef.set_tracker(tracker)
+            tdef.set_piece_length(piecesize)
+            tdef.set_create_merkle_torrent(True)
+            # Arno, 2009-10-02: Explicitly set encoding to UTF-8. Default on
+            # Win32 is 'mbcs'. Python cannot properly encode this, 
+            # u'\u4f60\u597d.ts' becomes '??.ts' (literally, ? = char(63))
+            #
+            tdef.set_encoding('UTF-8')
+            tdef.set_url_compat(True)
+            tdef.finalize()
+            print >>sys.stderr,"URL",tdef.get_url()
+            
+            tdef2 = TorrentDef.load_from_url(tdef.get_url())
+            
+            if isinstance(name,unicode):
+                utf8name = name.encode("UTF-8")
+            else:
+                utf8name = name
+                
+            #print >>sys.stderr,"ORIG NAME",`utf8name`
+            #print >>sys.stderr,"TDEF NAME",`tdef2.get_name()`
+                
+            self.assertEqual(tdef2.get_name(),utf8name)
+            self.assertEqual(tdef2.get_length(),leng)
+            self.assertEqual(tdef2.get_piece_length(),piecesize)
+            tbitrate = tdef2.get_bitrate()
+            s = dur2s(duration)
+            ebitrate = leng/s
+            self.assertEqual(tbitrate,ebitrate)
+             
+        try:           
+            shutil.rmtree(tmpdirname)
+        except:
+            print_exc()
+        
+
+    def test_create_live(self):
+        
+        paramlist = []
+        #paramlist += [('Sjaak.ts',2 ** 15, 2 ** 16, "1:00:00", None)]
+        paramlist += [('Sjaak.ts',2 ** 15, 2 ** 16, "1:00:00", RSALiveSourceAuthConfig())]
+        paramlist += [('Sjaak.ts',2 ** 16, 2 ** 20, "1:00:00", RSALiveSourceAuthConfig())] # high bitrate
+        paramlist += [('Sjaak.ts',2 ** 15, 2 ** 16, "0:15", RSALiveSourceAuthConfig())] # small duration = window
+        paramlist += [('Sjaak.ts',2 ** 15, 2 ** 16, "1:00:00", ECDSALiveSourceAuthConfig())] # ECDSA auth
+        paramlist += [('Sjaak Harry.ts',2 ** 15, 2 ** 16, "1:00:00", RSALiveSourceAuthConfig())] # space in name
+        paramlist += [(u'Serg\u00e9Harr\u014c.ts',2 ** 15, 2 ** 16, "1:00:00", RSALiveSourceAuthConfig())] # Unicode name
+        paramlist += [(u'\u4f60\u597d.ts',2 ** 15, 2 ** 16, "1:00:00", RSALiveSourceAuthConfig())] # Unicode name, Ni Hao ;o)
+        
+        self.run_paramlist_live(paramlist,"http://127.0.0.1/announce")
+        self.run_paramlist_live(paramlist,"http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]/announce")
+
+    def run_paramlist_live(self,paramlist,tracker):
+        tmpdirname = tempfile.mkdtemp()
+        
+        for name,piecesize,bitrate,duration,authcfg in paramlist:
+            
+            print >>sys.stderr,"\n\nTest",`name`
+            
+            tdef = TorrentDef()
+            tdef.create_live(name,bitrate,playtime=duration,authconfig=authcfg)
+            tdef.set_tracker(tracker)
+            # Arno, 2009-10-02: Explicitly set encoding to UTF-8. Default on
+            # Win32 is 'mbcs'. Python cannot properly encode this, 
+            # u'\u4f60\u597d.ts' becomes '??.ts' (literally, ? = char(63))
+            #
+            tdef.set_encoding('UTF-8')
+            tdef.set_piece_length(piecesize)
+            tdef.set_url_compat(True)
+            
+            print >>sys.stderr,"Test: BEFORE FINALIZE"
+            tdef.finalize()
+            print >>sys.stderr,"Test: AFTER FINALIZE"
+            url = tdef.get_url()
+            print >>sys.stderr,"URL",url
+            print >>sys.stderr,"Test: AFTER GET URL"
+            
+            tdef2 = TorrentDef.load_from_url(tdef.get_url())
+            
+            if isinstance(name,unicode):
+                utf8name = name.encode("UTF-8")
+            else:
+                utf8name = name
+            self.assertEqual(tdef2.get_name(),utf8name)
+            
+            leng = dur2s(duration) * bitrate
+            self.assertEqual(tdef2.get_length(),leng)
+            self.assertEqual(tdef2.get_piece_length(),piecesize)
+            self.assertEqual(tdef2.get_bitrate(),bitrate)
+            
+            self.assertEquals(tdef2.get_live_pubkey(),authcfg.get_pubkey())
+                        
+        try:           
+            shutil.rmtree(tmpdirname)
+        except:
+            print_exc()
+
+
+
+def dur2s(dur):
+    """ [hh:]mm:ss duration string -> total seconds """
+    elems = dur.split(":")
+    s = 0
+    for i in range(0,len(elems)):
+        num = int(elems[i])
+        t = num * int(pow(60.0,len(elems)-i-1))
+        s += t
+    return s
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestP2PURLs))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_url_metadata.py b/instrumentation/next-share/BaseLib/Test/test_url_metadata.py
new file mode 100644 (file)
index 0000000..2e766d7
--- /dev/null
@@ -0,0 +1,167 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import unittest
+import os
+import sys
+import time
+import socket
+from traceback import print_exc
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.simpledefs import P2PURL_SCHEME,NTFY_TORRENTS,URL_MIME_TYPE
+from BaseLib.Core.BitTornado.BT1.MessageID import getMessageName,GET_METADATA,METADATA
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.CacheDB.CacheDBHandler import TorrentDBHandler
+
+# TODO: use reimplementations
+from BaseLib.Core.APIImplementation.makeurl import p2purl_decode_base64url,p2purl_decode_nnumber,p2purl_decode_piecelength 
+
+
+DEBUG=True
+
+class TestURLMetadata(TestAsServer):
+    """ 
+    Testing download helping
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(5)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+
+    def setUpPreSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPreSession(self)
+        
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        self.mypermid = str(self.my_keypair.pub().get_der())
+        self.hispermid = str(self.his_keypair.pub().get_der()) 
+
+        # Create URL compat torrents and save in Torrent database.
+        self.tdef1 = TorrentDef.load_from_url(P2PURL_SCHEME+'://127.2.3.42:7764/announce?SjaakCam.mpegts&k=MHowDQYJKoZIhvcNAQEBBQADaQAwZgJhAN0Khlp5ZhWC7VfLynCkKts71b8h8tZXH87PkDtJUTJaX_SS1Cddxkv63PRmKOvtAHhkTLSsWOZbSeHkOlPIq_FGg2aDLDJ05g3lQ-8mSmo05ff4SLqNUTShWO2CR2TPhQIBAw&l=HCAAAA&s=15&a=RSA&b=AAIAAA')
+        self.torrentfn1 = os.path.join(self.session.get_torrent_collecting_dir(),"live.torrent")
+        self.tdef1.save(self.torrentfn1)
+
+        self.tdef2 = TorrentDef.load_from_url(P2PURL_SCHEME+'://127.1.0.10:6969/announce?trailer.mkv&r=TTgcifG0Ot7STCY2JL8SUOxROFo&l=AKK35A&s=15&b=AAFnGg')
+        self.torrentfn2 = os.path.join(self.session.get_torrent_collecting_dir(),"vod.torrent")
+        self.tdef2.save(self.torrentfn2)
+
+        self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
+        extra_info = {'status':'good', 'filename':self.torrentfn1}
+        self.torrent_db.addExternalTorrent(self.tdef1, source='',extra_info=extra_info)
+        extra_info = {'status':'good', 'filename':self.torrentfn2}
+        self.torrent_db.addExternalTorrent(self.tdef2, source='',extra_info=extra_info)
+         
+        
+    def tearDown(self):
+        """ override TestAsServer """
+        print >> sys.stderr,"test: *** TEARDOWN"
+        TestAsServer.tearDown(self)
+
+    #
+    # Good GET_METADATA for url-compat torrent
+    #
+    def test_good_get_metadata_url(self):
+
+        # 1. Establish overlay connection to Tribler
+        s = OLConnection(self.my_keypair,'localhost',self.hisport)
+
+        for tdef in [self.tdef1,self.tdef2]:
+            msg = self.create_good_get_metadata(tdef.get_infohash())
+            s.send(msg)
+        
+            try:
+                s.b.s.settimeout(10.0)
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print >>sys.stderr,"test: Got reply",getMessageName(resp[0])
+                self.assert_(resp[0] == METADATA)
+                self.check_metadata(resp[1:],tdef)
+
+            except socket.timeout:
+                print >> sys.stderr,"test: Timeout, bad, peer didn't reply with METADATA message"
+                self.assert_(False)
+
+        s.close()
+
+    def create_good_get_metadata(self,infohash):
+        bd = bencode(infohash)
+        return GET_METADATA+bd
+
+    def check_metadata(self,bdata,tdef):
+        data = bdecode(bdata)
+        # selversion >= OLPROTO_VER_ELEVENTH:
+        for key in ['torrent_hash','metatype','metadata','last_check_time','status','leecher','seeder']:
+            self.assert_(key in data)
+            
+        self.assertEqual(data['metatype'],URL_MIME_TYPE)
+        self.assertEqual(data['torrent_hash'],tdef.get_infohash())
+            
+        url = data['metadata']
+        cidx = url.find(':')
+        self.assert_(cidx != -1)
+        scheme = url[0:cidx]
+        if url[cidx+1] == '/':
+            # hierarchical URL
+            qidx = url.find('?')
+            self.assert_(qidx != -1)
+            tracker = "http"+url[cidx:qidx]
+        else:
+            # Not yet supported by TorrentDef
+            tracker = None 
+            qidx = cidx+1
+            
+        query = url[qidx+1:]
+        kvs = query.split('&')
+        pt = {}
+        for kv in kvs:
+            if not '=' in kv:
+                k = 'n'
+                v = kv
+            else:
+                (k,v) = kv.split('=')
+                if k == 'l': #length
+                    v = p2purl_decode_nnumber(v)
+                elif k == 's': # piece size
+                    v = p2purl_decode_piecelength(v)
+                elif k == 'r': # root hash
+                    v = p2purl_decode_base64url(v)
+                elif k == 'k': # live key
+                    v = p2purl_decode_base64url(v)
+                elif k == 'a': # live auth method
+                    pass
+                elif k == 'b': # bitrate
+                    v = p2purl_decode_nnumber(v)
+            pt[k] = v
+            
+        # Compare:
+        self.assertEqual(P2PURL_SCHEME,scheme)
+        self.assertEqual(tdef.get_tracker(),tracker)
+        self.assertEqual(tdef.get_name(),pt['n'])
+        self.assertEqual(tdef.get_length(),pt['l'])
+        self.assertEqual(tdef.get_piece_length(),pt['s'])
+        if 'r' in pt:
+            self.assertEqual(tdef.get_infohash(),pt['r'])
+        else:
+            self.assertEqual(tdef.get_live_pubkey(),pt['k'])
+            self.assertEqual(tdef.get_live_authmethod(),pt['a'])
+        self.assertEqual(tdef.get_bitrate(),pt['b'])
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestURLMetadata))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/instrumentation/next-share/BaseLib/Test/test_ut_pex.py b/instrumentation/next-share/BaseLib/Test/test_ut_pex.py
new file mode 100644 (file)
index 0000000..c76293b
--- /dev/null
@@ -0,0 +1,433 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import unittest
+import os
+import sys
+import time
+import socket
+from traceback import print_exc
+from types import DictType,StringType,IntType
+
+from BaseLib.Test.test_as_server import TestAsServer
+from olconn import OLConnection
+from btconn import BTConnection
+from BaseLib.Core.TorrentDef import TorrentDef
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig
+from BaseLib.Core.BitTornado.bencode import bencode,bdecode
+from BaseLib.Core.BitTornado.__init__ import TRIBLER_PEERID_LETTER
+from BaseLib.Core.BitTornado.BT1.MessageID import *
+from BaseLib.Core.DownloadConfig import *
+
+DEBUG=True
+
+class TestUTorrentPeerExchange(TestAsServer):
+    """ 
+    Testing EXTEND uTorrent Peer Exchange message:
+
+    See BitTornado/BT1/Connecter.py and Tribler/DecentralizedTracking/ut_pex.py
+    """
+
+    def setUp(self):
+        """ override TestAsServer """
+        TestAsServer.setUp(self)
+        print >>sys.stderr,"test: Giving MyLaunchMany time to startup"
+        time.sleep(3)
+        print >>sys.stderr,"test: MyLaunchMany should have started up"
+    
+    def setUpPostSession(self):
+        """ override TestAsServer """
+        TestAsServer.setUpPostSession(self)
+
+        # Let Tribler start downloading a non-functioning torrent, so
+        # we can talk to a normal download engine.
+        
+        self.torrentfn = os.path.join('extend_hs_dir','dummydata.merkle.torrent')
+        tdef = TorrentDef.load(self.torrentfn)
+
+        dscfg = DownloadStartupConfig()
+        dscfg.set_dest_dir(self.config_path)
+        
+        self.session.start_download(tdef,dscfg)
+        
+        # This is the infohash of the torrent in test/extend_hs_dir
+        self.infohash = '\xccg\x07\xe2\x9e!]\x16\xae{\xb8\x10?\xf9\xa5\xf9\x07\xfdBk'
+        self.mylistenport = 4810
+
+    def test_all(self):
+        """ 
+            I want to start a Tribler client once and then connect to
+            it many times. So there must be only one test method
+            to prevent setUp() from creating a new client every time.
+
+            The code is constructed so unittest will show the name of the
+            (sub)test where the error occurred in the traceback it prints.
+        """
+
+        # myid needs to identify the connection as Tribler in order to
+        # get the 'same' bit added.
+        myid = TRIBLER_PEERID_LETTER + "".join(map(chr, range(19)))
+
+        # Create a fake other client, so the EXTEND ut_pex won't be empty
+        msg2 = self.create_good_nontribler_extend_hs(listenport=4321)
+        s2 = BTConnection('localhost',self.hisport,mylistenport=4321,user_option_pattern='\x00\x00\x00\x00\x00\x10\x00\x00',user_infohash=self.infohash,myid=myid)
+        s2.read_handshake_medium_rare()
+        s2.send(msg2)
+        self.subtest_good_nontribler_ut_pex()
+        self.subtest_good_nontribler_ut_pex_diff_id()
+        self.subtest_good_tribler_ut_pex()
+        self.subtest_bad_ut_pex()
+
+        # now we add a second non-tribler peer. this peer should also
+        # be in the pex message but should not have the 'same' bit set
+        myid = "X" + "".join(map(chr, range(19)))
+        msg3 = self.create_good_nontribler_extend_hs(listenport=4322)
+        s3 = BTConnection('localhost',self.hisport,mylistenport=4322,user_option_pattern='\x00\x00\x00\x00\x00\x10\x00\x00',user_infohash=self.infohash,myid=myid)
+        s3.read_handshake_medium_rare()
+        s3.send(msg3)
+        self.subtest_good_nontribler_ut_pex_same_and_nonsame()
+
+    #
+    # Good ut_pex message
+    #
+    def subtest_good_nontribler_ut_pex(self):
+        options = '\x00\x00\x00\x00\x00\x10\x00\x00'
+        self._test_good(self.create_good_nontribler_extend_hs,options=options,infohash=self.infohash,pex_id=1)
+
+    def subtest_good_nontribler_ut_pex_diff_id(self):
+        options = '\x00\x00\x00\x00\x00\x10\x00\x00'
+        self._test_good(self.create_good_nontribler_extend_hs,options=options,infohash=self.infohash,pex_id=134)
+
+    def subtest_good_tribler_ut_pex(self):
+        self._test_good(self.create_good_tribler_extend_hs,infohash=self.infohash)
+        
+        # We've said we're a Tribler peer, and we initiated the connection, so 
+        # now *we* should try to establish an overlay-swarm connection.
+        s = OLConnection(self.my_keypair,'localhost',self.hisport,mylistenport=self.mylistenport)
+        # the connection should be intact, so this should not throw an
+        # exception:
+        time.sleep(5)
+        s.send('bla')
+        s.close()
+
+    def subtest_good_nontribler_ut_pex_same_and_nonsame(self):
+        self._test_good(self.create_good_tribler_extend_hs,infohash=self.infohash,connections={"tribler":1,"non-tribler":1})
+
+    def _test_good(self,msg_gen_func,options=None,infohash=None,pex_id=1,connections={"tribler":1,"non-tribler":0}):
+        if options is None and infohash is None:
+            s = BTConnection('localhost',self.hisport)
+        elif options is None:
+            s = BTConnection('localhost',self.hisport,user_infohash=infohash)
+        elif infohash is None:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options)
+        else:
+            s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=infohash)
+            
+        if DEBUG:
+            print "test: Creating test HS message",msg_gen_func,"pex_id",pex_id
+        msg = msg_gen_func(pex_id=pex_id)
+        s.send(msg)
+        s.read_handshake_medium_rare()
+
+        # Send our ut_pex message to Tribler
+        msg = self.create_good_ut_pex()
+        s.send(msg)
+        
+        time.sleep(5)
+
+        # Tribler should send an EXTEND HS message back
+        try:
+            s.s.settimeout(10.0)
+            resp = s.recv()
+            self.assert_(len(resp) > 0)
+            self.assert_(resp[0] == EXTEND)
+            self.check_tribler_extend_hs(resp[1:])
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply with EXTEND HS message"
+            self.assert_(False)
+
+        # Tribler should send an ut_pex message after a while
+        print "test: Setting 60 second timeout to see if Tribler sends periodic ut_pex"
+        try:
+            s.s.settimeout(70.0)
+            while True:
+                resp = s.recv()
+                self.assert_(len(resp) > 0)
+                print "test: Tribler returns",getMessageName(resp[0])
+                if resp[0] == EXTEND:
+                    self.check_ut_pex(resp[1:],pex_id,connections)
+                    s.close()
+                    break
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't reply with EXTEND ut_pex message"
+            self.assert_(False)
+
+        
+
+    def create_good_nontribler_extend_hs(self,listenport=None,pex_id=1):
+        d = {}
+        d['m'] = {'ut_pex':pex_id, 'dag':255 }
+        if listenport is None:
+            d['p'] = self.mylistenport
+        else:
+            d['p'] = listenport
+        d['v'] = 'TestSweet 1.2.3.4'
+        d['e'] = 0
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def create_good_tribler_extend_hs(self,pex_id=1):
+        d = {}
+        d['m'] = {'Tr_OVERLAYSWARM':253,'ut_pex':pex_id}
+        d['p'] = self.mylistenport
+        d['v'] = 'Tribler 3.5.1'
+        d['e'] = 0
+        bd = bencode(d)
+        return EXTEND+chr(0)+bd
+
+    def check_tribler_extend_hs(self,data):
+        self.assert_(data[0] == chr(0))
+        d = bdecode(data[1:])
+        self.assert_(type(d) == DictType)
+        self.assert_('m' in d.keys())
+        m = d['m']
+        self.assert_(type(m) == DictType)
+        self.assert_('Tr_OVERLAYSWARM' in m.keys())
+        val = m['Tr_OVERLAYSWARM']
+        self.assert_(type(val) == IntType)
+        self.assert_(val == 253)
+        self.assert_('ut_pex' in m.keys())
+        val = m['ut_pex']
+        self.assert_(type(val) == IntType)
+        self.assert_(val == 1)
+
+    def create_good_ut_pex(self,pex_id=1):
+        d = {}
+        d['added'] = ''
+        d['added.f'] = ''
+        d['dropped'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(pex_id)+bd
+
+    def check_ut_pex(self,data,pex_id,connections):
+        self.assert_(data[0] == chr(pex_id))
+        d = bdecode(data[1:])
+        
+        print >>sys.stderr,"test: d is",`d`,"pex_id",pex_id
+        
+        self.assert_(type(d) == DictType)
+        self.assert_('added' in d.keys())
+        cp = d['added']
+        apeers = self.check_compact_peers(cp)
+        
+        print >>sys.stderr,"test: apeers is",apeers
+
+        self.assert_('added.f' in d.keys())
+        addedf = d['added.f']
+        print "test: Length of added.f",len(addedf)
+        self.assert_(type(addedf) == StringType)
+        self.assert_(len(apeers) == len(addedf))
+        self.assert_('dropped' in d.keys())
+        cp = d['dropped']
+        self.check_compact_peers(cp)
+
+        
+        # Check that the fake client(s) we created are included
+        connections_in_pex = sum(connections.values())
+        self.assert_(len(apeers) == connections_in_pex)
+        self.assert_(apeers[0][1] in (4321, 4322))
+
+        # 03/09/09 Boudewijn: the bit 0x04 is reserved to indicate
+        # that the peer is of the 'same' client type as sender. both
+        # are Tribler peers so the 'same' bit must be set
+        tribler_peers = 0
+        non_tribler_peers = 0
+        for b in addedf:
+            if ord(b) & 4:
+                tribler_peers += 1
+            else:
+                non_tribler_peers += 1
+        self.assert_(tribler_peers == connections["tribler"])
+        self.assert_(non_tribler_peers == connections["non-tribler"])
+
+    def check_compact_peers(self,cp):
+        self.assert_(type(cp) == StringType)
+        self.assert_(len(cp) % 6 == 0)
+        peers = []
+        for x in xrange(0, len(cp), 6):
+            ip = '.'.join([str(ord(i)) for i in cp[x:x+4]])
+            port = (ord(cp[x+4]) << 8) | ord(cp[x+5])
+            peers.append((ip, port))
+        #print "test: Got compact peers",peers
+        return peers
+
+    #
+    # Bad EXTEND handshake message
+    #    
+    def subtest_bad_ut_pex(self):
+        methods = [self.create_empty,
+            self.create_ext_id_not_byte,
+            self.create_not_bdecodable,
+            self.create_not_dict1,
+            self.create_not_dict2,
+
+            # not testing for bad and missing keys because some known
+            # clients act this way. Therefore our code is not as
+            # strict anymore.
+            # self.create_bad_keys,
+            # self.create_added_missing,
+            # self.create_added_f_missing,
+            # self.create_dropped_missing,
+                   
+            self.create_added_not_str,
+            self.create_added_f_not_str,
+            self.create_dropped_not_str,
+            self.create_added_too_small,
+            self.create_added_f_too_big,
+            self.create_dropped_too_small]
+
+        for m in methods:
+            self._test_bad(m)
+
+    #
+    # Main test code for bad EXTEND ut_pex messages
+    #
+    def _test_bad(self,gen_drequest_func):
+        options = '\x00\x00\x00\x00\x00\x10\x00\x00'
+        s = BTConnection('localhost',self.hisport,user_option_pattern=options,user_infohash=self.infohash)
+        print >> sys.stderr,"\ntest: ",gen_drequest_func
+        
+        hsmsg = self.create_good_nontribler_extend_hs()
+        s.send(hsmsg)
+        
+        msg = gen_drequest_func()
+        s.send(msg)
+        time.sleep(5)
+        
+        # the other side should not like this and close the connection
+        try:
+            s.s.settimeout(10.0)
+            s.read_handshake_medium_rare(close_ok = True)
+            while True:
+                resp = s.recv()
+                if len(resp) > 0:
+                    print >>sys.stderr,"test: Got",getMessageName(resp[0]),"from peer"
+                    self.assert_(resp[0] == EXTEND or resp[0]==UNCHOKE)
+                else:
+                    self.assert_(len(resp)==0)
+                    s.close()
+                    break
+        except socket.timeout:
+            print >> sys.stderr,"test: Timeout, bad, peer didn't close connection"
+            self.assert_(False)
+
+    #
+    # Bad message creators
+    # 
+    def create_empty(self):
+        return EXTEND+chr(1)
+
+    def create_ext_id_not_byte(self):
+        return EXTEND+'Hallo kijkbuiskinderen'
+    
+    def create_not_bdecodable(self):
+        return EXTEND+chr(1)+"bla"
+
+    def create_not_dict1(self):
+        d = 481
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_not_dict2(self):
+        d = []
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_bad_keys(self):
+        d = {}
+        d['bla1'] = ''
+        d['bla2'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+        
+    def create_added_missing(self):
+        d = {}
+        d['added.f'] = ''
+        d['dropped'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+        
+    def create_added_f_missing(self):
+        d = {}
+        d['added'] = ''
+        d['dropped'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_dropped_missing(self):
+        d = {}
+        d['added'] = ''
+        d['added.f'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_added_not_str(self):
+        d = {}
+        d['added'] = 481
+        d['added.f'] = ''
+        d['dropped'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_added_f_not_str(self):
+        d = {}
+        d['added'] = ''
+        d['added.f'] = 481
+        d['dropped'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_dropped_not_str(self):
+        d = {}
+        d['added'] = ''
+        d['added.f'] = ''
+        d['dropped'] = 481
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_added_too_small(self):
+        d = {}
+        d['added'] = '\x82\x25\xc1\x40\x00' # should be 6 bytes
+        d['added.f'] = ''
+        d['dropped'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_added_f_too_big(self):
+        d = {}
+        d['added'] = ''
+        d['added.f'] = '\x00'
+        d['dropped'] = ''
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+    def create_dropped_too_small(self):
+        d = {}        
+        d['added'] = ''
+        d['added.f'] = ''
+        d['dropped'] = '\x82\x25\xc1\x40\x00' # should be 6 bytes
+        bd = bencode(d)
+        return EXTEND+chr(1)+bd
+
+
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(TestUTorrentPeerExchange))
+    
+    return suite
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/instrumentation/next-share/BaseLib/Test/test_video_server.py b/instrumentation/next-share/BaseLib/Test/test_video_server.py
new file mode 100644 (file)
index 0000000..b0a29d7
--- /dev/null
@@ -0,0 +1,204 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+\r
+import unittest\r
+\r
+import os\r
+import sys\r
+import time\r
+import socket\r
+from traceback import print_exc,print_stack\r
+\r
+from BaseLib.Video.VideoServer import VideoHTTPServer\r
+\r
+\r
+DEBUG=False\r
+\r
+class TestVideoHTTPServer(unittest.TestCase):\r
+    """ \r
+    Class for testing HTTP-based video server.\r
+    \r
+    Mainly HTTP range queries.\r
+    """\r
+    \r
+    def setUp(self):\r
+        """ unittest test setup code """\r
+        self.port = 6789\r
+        self.serv = VideoHTTPServer(self.port)\r
+        self.serv.background_serve()\r
+        self.serv.register(self.videoservthread_error_callback,self.videoservthread_set_status_callback)\r
+        \r
+        self.sourcefn = os.path.join("API","file.wmv") # 82KB or 82948 bytes\r
+        self.sourcesize = os.path.getsize(self.sourcefn)\r
+         \r
+    def tearDown(self):\r
+        """ unittest test tear down code """\r
+        print >>sys.stderr,"test: Tear down, sleeping 10 s"\r
+        time.sleep(10)\r
+    \r
+    def videoservthread_error_callback(self,e,url):\r
+        """ Called by HTTP serving thread """\r
+        print >>sys.stderr,"test: ERROR",e,url\r
+        self.assert_(False)\r
+        \r
+    def videoservthread_set_status_callback(self,status):\r
+        """ Called by HTTP serving thread """\r
+        print >>sys.stderr,"test: STATUS",status\r
+    \r
+\r
+    #\r
+    # Tests\r
+    #\r
+    def test_ranges(self):\r
+        # Run single test, VideoHTTPServer is singleton at the moment and\r
+        # doesn't like recreate.\r
+        self.range_test(115,214,self.sourcesize)\r
+        self.range_test(self.sourcesize-100,None,self.sourcesize)\r
+        self.range_test(None,100,self.sourcesize)\r
+        self.range_test(115,214,self.sourcesize,setset=True)\r
+\r
+    #\r
+    # Internal\r
+    #\r
+    def register_file_stream(self):\r
+        stream = open(self.sourcefn,"rb")\r
+\r
+        streaminfo = { 'mimetype': 'video/x-ms-wmv', 'stream': stream, 'length': self.sourcesize }\r
+        \r
+        self.serv.set_inputstream(streaminfo,"/stream")\r
+\r
+    def get_std_header(self):\r
+        msg =  "GET /stream HTTP/1.1\r\n"\r
+        msg += "Host: 127.0.0.1:"+str(self.port)+"\r\n"\r
+        return msg\r
+\r
+    def create_range_str(self,firstbyte,lastbyte):\r
+        head = "" \r
+        if firstbyte is not None:\r
+            head += str(firstbyte)\r
+        head += "-"\r
+        if lastbyte is not None:\r
+            head += str(lastbyte)\r
+            \r
+        return head\r
+\r
+    def range_test(self,firstbyte,lastbyte,sourcesize,setset=False):\r
+        print >>sys.stderr,"test: range_test:",firstbyte,lastbyte,sourcesize,"setset",setset\r
+        self.register_file_stream()\r
+        \r
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r
+        s.connect(('127.0.0.1', self.port))\r
+\r
+        head = self.get_std_header()\r
+        \r
+        head += "Range: bytes="\r
+        head += self.create_range_str(firstbyte,lastbyte)\r
+        if setset:\r
+            # Make into set of byte ranges, VideoHTTPServer should refuse.\r
+            head += ",0-99"\r
+        head += "\r\n"\r
+        \r
+        head += "Connection: close\r\n"\r
+        \r
+        head += "\r\n"\r
+        \r
+        if firstbyte is not None and lastbyte is None:\r
+            # 100-\r
+            expfirstbyte = firstbyte \r
+            explastbyte = self.sourcesize-1\r
+        elif firstbyte is None and lastbyte is not None:\r
+            # -100\r
+            expfirstbyte = self.sourcesize-lastbyte \r
+            explastbyte = self.sourcesize-1\r
+        else:\r
+            expfirstbyte = firstbyte\r
+            explastbyte = lastbyte\r
+\r
+        # the amount of bytes actually requested. (Content-length)\r
+        expsize = explastbyte - expfirstbyte + 1\r
+\r
+        print >>sys.stderr,"test: Expecting first",expfirstbyte,"last",explastbyte,"size",sourcesize\r
+        s.send(head)\r
+        \r
+        # Parse header\r
+        s.settimeout(10.0)\r
+        while True:\r
+            line = self.readline(s)\r
+            \r
+            print >>sys.stderr,"test: Got line",`line`\r
+            \r
+            if len(line)==0:\r
+                print >>sys.stderr,"test: server closed conn"\r
+                self.assert_(False)\r
+                return\r
+            \r
+            if line.startswith("HTTP"):\r
+                if not setset:\r
+                    # Python returns "HTTP/1.0 206 Partial Content\r\n" HTTP 1.0???\r
+                    self.assert_(line.startswith("HTTP/1."))\r
+                    self.assert_(line.find("206") != -1) # Partial content\r
+                else:\r
+                    self.assert_(line.startswith("HTTP/1."))\r
+                    self.assert_(line.find("416") != -1) # Requested Range Not Satisfiable\r
+                    return\r
+\r
+            elif line.startswith("Content-Range:"):\r
+                expline = "Content-Range: bytes "+self.create_range_str(expfirstbyte,explastbyte)+"/"+str(sourcesize)+"\r\n"\r
+                self.assertEqual(expline,line)\r
+                 \r
+            elif line.startswith("Content-Type:"):\r
+                self.assertEqual(line,"Content-Type: video/x-ms-wmv\r\n")\r
+                \r
+            elif line.startswith("Content-Length:"):\r
+                self.assertEqual(line,"Content-Length: "+str(expsize)+"\r\n")\r
+\r
+            elif line.endswith("\r\n") and len(line) == 2:\r
+                # End of header\r
+                break\r
+        \r
+        data = s.recv(expsize)\r
+        if len(data) == 0:\r
+            print >>sys.stderr,"test: server closed conn2"\r
+            self.assert_(False)\r
+            return\r
+        else:\r
+            f = open(self.sourcefn,"rb")\r
+            if firstbyte is not None:\r
+                f.seek(firstbyte)\r
+            else:\r
+                f.seek(lastbyte,os.SEEK_END)\r
+\r
+            expdata = f.read(expsize)\r
+            f.close()\r
+            self.assert_(data,expdata)\r
+\r
+            try:\r
+                # Read body, reading more should EOF (we disabled persist conn)\r
+                data = s.recv(10240)\r
+                self.assert_(len(data) == 0)\r
+        \r
+            except socket.timeout:\r
+                print >> sys.stderr,"test: Timeout, video server didn't respond with requested bytes, possibly bug in Python impl of HTTP"\r
+                print_exc()\r
+\r
+    def readline(self,s):\r
+        line = ''\r
+        while True:\r
+            data = s.recv(1)\r
+            if len(data) == 0:\r
+                return line\r
+            else:\r
+                line = line+data\r
+            if data == '\n' and len(line) >= 2 and line[-2:] == '\r\n':\r
+                return line        \r
+        \r
+\r
+def test_suite():\r
+    suite = unittest.TestSuite()\r
+    suite.addTest(unittest.makeSuite(TestVideoHTTPServer))\r
+    \r
+    return suite\r
+\r
+if __name__ == "__main__":\r
+    unittest.main()\r
+        \r
diff --git a/instrumentation/next-share/BaseLib/Test/test_vod.bat b/instrumentation/next-share/BaseLib/Test/test_vod.bat
new file mode 100644 (file)
index 0000000..ffd3862
--- /dev/null
@@ -0,0 +1,5 @@
+set PYTHONPATH=..\..\r
+\r
+python test_vod.py singtest_99\r
+python test_vod.py singtest_100\r
+python test_vod.py singtest_101\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_vod.py b/instrumentation/next-share/BaseLib/Test/test_vod.py
new file mode 100644 (file)
index 0000000..729de02
--- /dev/null
@@ -0,0 +1,227 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# TODO: we download from Tribler \r
+#\r
+\r
+import unittest\r
+import os\r
+import sys\r
+import time\r
+from sha import sha\r
+from traceback import print_exc\r
+from tempfile import mkstemp,mkdtemp\r
+from M2Crypto import Rand\r
+\r
+from BaseLib.Test.test_as_server import TestAsServer\r
+from BaseLib.Core.simpledefs import *\r
+from BaseLib.Core.TorrentDef import TorrentDef\r
+from BaseLib.Core.DownloadConfig import DownloadStartupConfig\r
+\r
+import Tribler\r
+BaseLib.Core.Video.PiecePickerStreaming.TEST_VOD_OVERRIDE = True\r
+\r
+from BaseLib.Core.Utilities.utilities import isValidIP\r
+\r
+\r
+DEBUG=True\r
+\r
+class TestVideoOnDemand(TestAsServer):\r
+    """ \r
+    Testing Merkle hashpiece messages for both:\r
+    * Merkle BEP style\r
+    * old Tribler <= 4.5.2 that did not use the Extension protocol (BEP 10).\r
+     \r
+    See BitTornado/BT1/Connecter.py\r
+    """\r
+\r
+    def setUp(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUp(self)\r
+        self.vodstarted = False\r
+\r
+    def setUpPreSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPreSession(self)\r
+        self.config.set_overlay(False)\r
+        self.config.set_megacache(False)\r
+\r
+    \r
+    def setUpPostSession(self):\r
+        """ override TestAsServer """\r
+        TestAsServer.setUpPostSession(self)\r
+        \r
+        \r
+    def tearDown(self):\r
+        print >>sys.stderr,"Test: Sleep before tear down"\r
+        time.sleep(10)\r
+\r
+        TestAsServer.tearDown(self)\r
+\r
+\r
+    def create_torrent(self):\r
+\r
+        [srchandle,self.sourcefn] = mkstemp()\r
+        self.content = Rand.rand_bytes(self.contentlen)\r
+        os.write(srchandle,self.content)\r
+        os.close(srchandle)\r
+        \r
+        self.tdef = TorrentDef()\r
+        self.tdef.add_content(self.sourcefn)\r
+        self.tdef.set_piece_length(self.piecelen)\r
+        self.tdef.set_tracker("http://127.0.0.1:12/announce")\r
+        self.tdef.finalize()\r
+\r
+        self.torrentfn = os.path.join(self.session.get_state_dir(),"gen.torrent")\r
+        self.tdef.save(self.torrentfn)\r
+        \r
+        dscfg = DownloadStartupConfig()\r
+        destdir = os.path.dirname(self.sourcefn)\r
+        dscfg.set_dest_dir(destdir)\r
+        dscfg.set_video_event_callback(self.sesscb_vod_event_callback)\r
+        \r
+        self.session.set_download_states_callback(self.states_callback)\r
+        self.session.start_download(self.tdef,dscfg)\r
+\r
+    def states_callback(self,dslist):\r
+        ds = dslist[0]\r
+        d = ds.get_download()\r
+    #    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)\r
+        print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \\r
+                (d.get_def().get_name(), \\r
+                dlstatus_strings[ds.get_status()], \\r
+                ds.get_progress() * 100, \\r
+                ds.get_error(), \\r
+                ds.get_current_speed(UPLOAD), \\r
+                ds.get_current_speed(DOWNLOAD))\r
+    \r
+        return (1.0, False)\r
+\r
+\r
+    def sesscb_vod_event_callback(self,d,event,params):\r
+        \r
+        if self.vodstarted:\r
+            return\r
+        self.vodstarted = True\r
+        \r
+        print >>sys.stderr,"Test: vod_event_callback",event,params\r
+        if event == VODEVENT_START:\r
+            stream = params['stream']\r
+\r
+            # Read last piece\r
+            lastpieceoff = ((self.contentlen-1) / self.piecelen) * self.piecelen\r
+            lastpiecesize = self.contentlen - lastpieceoff\r
+            print >>sys.stderr,"Test: stream: lastpieceoff",lastpieceoff,lastpiecesize\r
+            self.stream_read(stream,lastpieceoff,lastpiecesize,self.piecelen)\r
+\r
+            # Read second,3rd,4th byte, only\r
+            secoff = 1\r
+            secsize = 3\r
+            blocksize = 3\r
+            self.stream_read(stream,secoff,secsize,blocksize)\r
+            \r
+            # Read last byte\r
+            lastoff = self.contentlen-1\r
+            lastsize = 1\r
+            self.stream_read(stream,lastoff,lastsize,self.piecelen)\r
+\r
+            print >>sys.stderr,"Test: stream: Passed?"\r
+\r
+    def stream_read(self,stream,off,size,blocksize):\r
+            stream.seek(off)\r
+            data = stream.read(blocksize)\r
+            print >>sys.stderr,"Test: stream: Got data",len(data)\r
+            self.assertEquals(len(data),size)\r
+            self.assertEquals(data,self.content[off:off+size])\r
+            \r
+\r
+    def singtest_99(self):\r
+        self.contentlen = 99\r
+        self.piecelen = 10\r
+        self.create_torrent()\r
+        \r
+        print >>sys.stderr,"Test: Letting network thread create Download, sleeping"\r
+        time.sleep(5)\r
+        \r
+        dlist = self.session.get_downloads()\r
+        d = dlist[0]\r
+        vs = d.sd.videostatus\r
+        \r
+        \r
+        goodrange = ((0,0),(9,8))\r
+        self.assertEqual(vs.movie_range,goodrange)\r
+        self.assertEqual(vs.first_piecelen,10)\r
+        self.assertEqual(vs.last_piecelen,9)\r
+        self.assertEqual(vs.first_piece,0)\r
+        self.assertEqual(vs.last_piece,9)\r
+        self.assertEqual(vs.movie_numpieces,10)\r
+\r
+        print >>sys.stderr,"Test: status: Passed? ****************************************************************"\r
+\r
+            \r
+    def singtest_100(self):\r
+        self.contentlen = 100\r
+        self.piecelen = 10\r
+        self.create_torrent()\r
+        \r
+        print >>sys.stderr,"Test: Letting network thread create Download, sleeping"\r
+        time.sleep(5)\r
+        \r
+        dlist = self.session.get_downloads()\r
+        d = dlist[0]\r
+        vs = d.sd.videostatus\r
+        \r
+        \r
+        goodrange = ((0,0),(9,9))\r
+        self.assertEqual(vs.movie_range,goodrange)\r
+        self.assertEqual(vs.first_piecelen,10)\r
+        self.assertEqual(vs.last_piecelen,10)\r
+        self.assertEqual(vs.first_piece,0)\r
+        self.assertEqual(vs.last_piece,9)\r
+        self.assertEqual(vs.movie_numpieces,10)\r
+\r
+        print >>sys.stderr,"Test: status: Passed? ****************************************************************"\r
+\r
+\r
+    def singtest_101(self):\r
+        self.contentlen = 101\r
+        self.piecelen = 10\r
+        self.create_torrent()\r
+        \r
+        print >>sys.stderr,"Test: Letting network thread create Download, sleeping"\r
+        time.sleep(5)\r
+        \r
+        dlist = self.session.get_downloads()\r
+        d = dlist[0]\r
+        vs = d.sd.videostatus\r
+        \r
+        \r
+        goodrange = ((0,0),(10,0))\r
+        self.assertEqual(vs.movie_range,goodrange)\r
+        self.assertEqual(vs.first_piecelen,10)\r
+        self.assertEqual(vs.last_piecelen,1)\r
+        self.assertEqual(vs.first_piece,0)\r
+        self.assertEqual(vs.last_piece,10)\r
+        self.assertEqual(vs.movie_numpieces,11)\r
+        \r
+        print >>sys.stderr,"Test: status: Passed? ****************************************************************"\r
+\r
+\r
+\r
+def test_suite():\r
+    suite = unittest.TestSuite()\r
+    # We should run the tests in a separate Python interpreter to prevent \r
+    # problems with our singleton classes, e.g. PeerDB, etc.\r
+    if len(sys.argv) != 2:\r
+        print "Usage: python test_vod.py <method name>"\r
+    else:\r
+        suite.addTest(TestVideoOnDemand(sys.argv[1]))\r
+    \r
+    return suite\r
+\r
+def main():\r
+    unittest.main(defaultTest='test_suite',argv=[sys.argv[0]])\r
+\r
+if __name__ == "__main__":\r
+    main()\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Test/test_vod.sh b/instrumentation/next-share/BaseLib/Test/test_vod.sh
new file mode 100644 (file)
index 0000000..b736683
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/sh -x
+#
+# We should run the tests in a separate Python interpreter to prevent
+# problems with our singleton classes, e.g. SuperPeerDB, etc.
+#
+# WARNING: this shell script must use \n as end-of-line, Windows
+# \r\n gives problems running this on Linux
+
+PYTHONPATH=../..:"$PYTHONPATH"
+export PYTHONPATH
+
+python test_vod.py singtest_99
+python test_vod.py singtest_100
+python test_vod.py singtest_101
diff --git a/instrumentation/next-share/BaseLib/Test/usericon-ok.jpg b/instrumentation/next-share/BaseLib/Test/usericon-ok.jpg
new file mode 100644 (file)
index 0000000..1b9fc6d
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Test/usericon-ok.jpg differ
diff --git a/instrumentation/next-share/BaseLib/Tools/__init__.py b/instrumentation/next-share/BaseLib/Tools/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Tools/bitbucket-live-noauth.py b/instrumentation/next-share/BaseLib/Tools/bitbucket-live-noauth.py
new file mode 100644 (file)
index 0000000..32af918
--- /dev/null
@@ -0,0 +1,83 @@
+import sys
+import time
+import random
+import tempfile
+from traceback import print_exc
+from base64 import encodestring
+
+from BaseLib.Core.API import *
+
+DEBUG = True
+
+# Default drain rate in bytes/s; overwritten below with the torrent's bitrate.
+RATE=32768
+
+def vod_event_callback(d,event,params):
+    # VOD event handler: once playback starts, read the stream forever at
+    # roughly RATE bytes per second and log the achieved average rate
+    # (this tool is a data sink, hence "bit bucket").
+    if event == VODEVENT_START:
+        stream = params["stream"]
+
+        grandtotal = 0L
+        st = time.time()
+        while True:
+            global RATE
+            total = 0
+            # Read about one second's worth of data (RATE bytes).
+            while total < int(RATE):
+                data = stream.read(int(RATE))
+                total += len(data)
+                
+            grandtotal += total
+            et = time.time()
+            diff = max(et - st,0.00001)
+            grandrate = float(grandtotal) / diff
+            print >>sys.stderr,"bitbucket: grandrate",grandrate,"~",RATE,"avail",stream.available()
+            time.sleep(1.0)
+
+def state_callback(ds):
+    # Periodic download-state callback: print status, progress and
+    # current up/down speeds to stderr.
+    try:
+        d = ds.get_download()
+        p = "%.0f %%" % (100.0*ds.get_progress())
+        dl = "dl %.0f" % (ds.get_current_speed(DOWNLOAD))
+        ul = "ul %.0f" % (ds.get_current_speed(UPLOAD))
+        print >>sys.stderr,dlstatus_strings[ds.get_status() ],p,dl,ul,"====="
+    except:
+        print_exc()
+
+    return (1.0,False)  # call again in 1s; no peer list needed
+
+
+# Script body: create a throwaway session, load the torrent given on the
+# command line and stream it into the bit bucket above.
+print "Loading",sys.argv
+statedir = tempfile.mkdtemp()
+port = random.randint(10000,20000)
+
+scfg = SessionStartupConfig()
+scfg.set_state_dir(statedir) 
+scfg.set_listen_port(port)
+scfg.set_megacache( False )
+scfg.set_overlay( False )
+
+
+
+s = Session( scfg )
+tdef = TorrentDef.load(sys.argv[1])
+RATE = tdef.get_bitrate()  # drain at the stream's own nominal bitrate
+
+dscfg = DownloadStartupConfig()
+dscfg.set_video_event_callback( vod_event_callback )
+
+# A Closed swarm - load the POA. Will throw an exception if no POA is available
+if tdef.get_cs_keys():
+    print >>sys.stderr, "Is a closed swarm, reading POA"
+    try:
+        poa = ClosedSwarm.trivial_get_poa(s.get_default_state_dir(),
+                                          s.get_permid(),
+                                          tdef.infohash)
+    except Exception,e:
+        print >>sys.stderr, "Failed to load POA for swarm",encodestring(tdef.infohash).replace("\n",""),"from",s.get_default_state_dir(),"(my permid is %s)"%encodestring(s.get_permid()).replace("\n",""),"Error was:",e
+        raise SystemExit("Failed to load POA, aborting")
+
+d = s.start_download( tdef, dscfg )
+
+d.set_state_callback(state_callback,getpeerlist=False)
+
+# Keep the process alive; downloading happens on Session threads.
+while True:
+  time.sleep(60)
+
diff --git a/instrumentation/next-share/BaseLib/Tools/bitbucket-live.py b/instrumentation/next-share/BaseLib/Tools/bitbucket-live.py
new file mode 100644 (file)
index 0000000..5bec62a
--- /dev/null
@@ -0,0 +1,66 @@
+import sys
+import time
+from traceback import print_exc
+
+from BaseLib.Core.API import *
+
+DEBUG = False
+
+
+def vod_event_callback(d,event,params):
+    # VOD event handler: read one piece at a time and sleep so the local
+    # consumption age tracks the server-side generation timestamps, i.e.
+    # consume the live stream in real time rather than as fast as possible.
+    if event == VODEVENT_START:
+        stream = params["stream"]
+
+        epoch_server = None
+        epoch_local = time.time()
+        blocksize = d.get_def().get_piece_length()
+        while True:
+            stream.read(blocksize)
+            last_ts = stream.get_generation_time()
+
+            if epoch_server is None:
+                if DEBUG:
+                    print >>sys.stderr, "bitbucket: received first data."
+                epoch_server = last_ts
+
+            age_server = last_ts - epoch_server
+            age_local  = time.time() - epoch_local
+
+            # if server is younger, wait up to sync
+            waittime = max( 0, age_server - age_local )
+            if DEBUG:
+                print >>sys.stderr, "bitbucket: sleeping %.2f seconds. we're at time %.2f, piece has age %.2f" % (waittime,age_local,age_server)
+            time.sleep( waittime )
+
+
+def state_callback(ds):
+    # Periodic download-state callback: print status, progress and
+    # current up/down speeds to stderr.
+    try:
+        d = ds.get_download()
+        p = "%.0f %%" % (100.0*ds.get_progress())
+        dl = "dl %.0f" % (ds.get_current_speed(DOWNLOAD))
+        ul = "ul %.0f" % (ds.get_current_speed(UPLOAD))
+        print >>sys.stderr,dlstatus_strings[ds.get_status() ],p,dl,ul,"====="
+    except:
+        print_exc()
+
+    return (1.0,False)  # call again in 1s; no peer list needed
+
+
+
+# Script body: minimal session (no megacache/overlay), stream the torrent
+# given on the command line into the bit bucket above.
+scfg = SessionStartupConfig()
+scfg.set_megacache( False )
+scfg.set_overlay( False )
+
+s = Session( scfg )
+tdef = TorrentDef.load(sys.argv[1])
+dscfg = DownloadStartupConfig()
+dscfg.set_video_event_callback( vod_event_callback )
+dscfg.set_max_uploads(16)
+
+d = s.start_download( tdef, dscfg )
+
+d.set_state_callback(state_callback,getpeerlist=False)
+
+# Keep the process alive; downloading happens on Session threads.
+while True:
+  time.sleep(60)
+
diff --git a/instrumentation/next-share/BaseLib/Tools/btshowmetainfo.py b/instrumentation/next-share/BaseLib/Tools/btshowmetainfo.py
new file mode 100644 (file)
index 0000000..bce2a17
--- /dev/null
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+# Written by Henry 'Pi' James and Loring Holden
+# modified for multitracker display by John Hoffman
+# see LICENSE.txt for license information
+
+from sys import *
+from os.path import *
+import binascii
+
+from BaseLib.Core.API import TorrentDef
+from BaseLib.Core.Overlay.permid import verify_torrent_signature
+
+# Require at least one torrent file / URL argument.
+if len(argv) == 1:
+    print '%s file1.torrent file2.torrent file3.torrent ...' % argv[0]
+    print
+    exit(2) # common exit code for syntax error
+
+# Pretty-print the metainfo of each torrent given on the command line.
+for metainfo_name in argv[1:]:
+    # .url files contain a P2P URL; anything else is a .torrent file.
+    if metainfo_name.endswith(".url"):
+        f = open(metainfo_name,"rb")
+        url = f.read()
+        f.close()
+        tdef = TorrentDef.load_from_url(url)
+    else:
+        tdef = TorrentDef.load(metainfo_name)
+    metainfo = tdef.get_metainfo()
+    infohash = tdef.get_infohash()
+    
+    print "metainfo:",metainfo.keys()
+    #print "metainfo creation date",metainfo['creation date']
+    if 'azureus_properties' in metainfo:
+        azprop = metainfo['azureus_properties']
+        print "azprop:",azprop.keys()
+        if 'Content' in azprop:
+                content = azprop['Content']
+                print "content:",content.keys()
+                for key in content.keys():
+                        if key.lower() != 'thumbnail':
+                                print key,"=",content[key]
+        if 'cdn_properties' in azprop:
+            cdnprops = azprop['cdn_properties']
+            print "cdn_properties:",cdnprops.keys()
+            for key in cdnprops:
+                print "cdn_properties:",key,"=",cdnprops[key]
+    #print `metainfo`
+    info = metainfo['info']
+
+    print 'metainfo file.: %s' % basename(metainfo_name)
+    print 'info hash.....: %s' % binascii.hexlify(infohash)
+    print 'info hash.....: %s' % `infohash`
+    piece_length = info['piece length']
+    if info.has_key('length'):
+        # let's assume we just have a file
+        print 'file name.....: %s' % info['name']
+        file_length = info['length']
+        name ='file size.....:'
+    else:
+        # let's assume we have a directory structure
+        print 'directory name: %s' % info['name']
+        print 'files.........: '
+        file_length = 0;
+        for file in info['files']:
+            path = ''
+            for item in file['path']:
+                if (path != ''):
+                   path = path + "/"
+                path = path + item
+            print '   %s (%d)' % (path, file['length'])
+            file_length += file['length']
+            name ='archive size..:'
+    piece_number, last_piece_length = divmod(file_length, piece_length)
+    print '%s %i (%i * %i + %i)' \
+          % (name,file_length, piece_number, piece_length, last_piece_length)
+    if info.has_key('root hash'):
+        print 'root hash.....: %s' % `info['root hash']`
+    if info.has_key('live'):
+        print 'torrent type..: live',`info['live']`
+    
+    print 'announce url..: %s' % metainfo['announce']
+    if metainfo.has_key('announce-list'):
+        # Tiers are separated by '|', trackers within a tier by ','.
+        list = []
+        for tier in metainfo['announce-list']:
+            for tracker in tier:
+                list+=[tracker,',']
+            del list[-1]
+            list+=['|']
+        del list[-1]
+        liststring = ''
+        for i in list:
+            liststring+=i
+        print 'announce-list.: %s' % liststring
+    if metainfo.has_key('httpseeds'):
+        list = []
+        for seed in metainfo['httpseeds']:
+            list += [seed,'|']
+        del list[-1]
+        liststring = ''
+        for i in list:
+            liststring+=i
+        print 'http seeds....: %s' % liststring
+    if metainfo.has_key('url-list'):
+        list = []
+        for seed in metainfo['url-list']:
+            list += [seed,'|']
+        del list[-1]
+        liststring = ''
+        for i in list:
+            liststring+=i
+        print 'url-list......: %s' % liststring
+
+    # Torrent signature
+    if metainfo.has_key('signature'):
+        print 'signature.....: %s' % `metainfo['signature']`
+    if metainfo.has_key('signer'):
+        print 'signer........: %s' % `metainfo['signer']`
+    if metainfo.has_key('signature') and metainfo.has_key('signer'):
+        if verify_torrent_signature(metainfo):
+            res = 'OK'
+        else:
+            res = 'Failed'
+        print 'signaturecheck: %s' % res
+    if metainfo.has_key('comment'):
+        print 'comment.......: %s' % metainfo['comment']
+
+
diff --git a/instrumentation/next-share/BaseLib/Tools/cmdlinedl.py b/instrumentation/next-share/BaseLib/Tools/cmdlinedl.py
new file mode 100644 (file)
index 0000000..f78eb27
--- /dev/null
@@ -0,0 +1,145 @@
+# Written by Arno Bakker, George Milescu 
+# see LICENSE.txt for license information
+#
+# Razvan Deaconescu, 2008:
+#       * corrected problem when running in background
+#       * added usage and print_version functions
+#       * uses getopt for command line argument parsing
+
+import sys
+import shutil
+import time
+import tempfile
+import random
+import os
+import getopt
+from traceback import print_exc
+
+from BaseLib.Core.API import *
+from BaseLib.Core.BitTornado.__init__ import version, report_email
+
+# Print usage message
+def usage():
+    """Print command-line usage information to stdout."""
+    print "Usage: python cmdlinedl.py [options] torrentfile_or_url"
+    print "Options:"
+    print "\t--port <port>"
+    print "\t-p <port>\t\tuse <port> to listen for connections"
+    print "\t\t\t\t(default is random value)"
+    print "\t--output <output-dir>"
+    print "\t-o <output-dir>\t\tuse <output-dir> for storing downloaded data"
+    print "\t\t\t\t(default is current directory)"
+    print "\t--version"
+    print "\t-v\t\t\tprint version and exit"
+    print "\t--help"
+    print "\t-h\t\t\tprint this help screen"
+    print
+    print "Report bugs to <" + report_email + ">"
+
+# Print version information
+def print_version():
+    """Print the BitTornado version string and contact e-mail."""
+    print version, "<" + report_email + ">"
+
+# Print torrent statistics
+def state_callback(ds):
+    # Periodic download-state callback: one status line with name, state,
+    # progress, error (if any) and current up/down speeds.
+    d = ds.get_download()
+#    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
+    print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \
+            (d.get_def().get_name(), \
+            dlstatus_strings[ds.get_status()], \
+            ds.get_progress() * 100, \
+            ds.get_error(), \
+            ds.get_current_speed(UPLOAD), \
+            ds.get_current_speed(DOWNLOAD))
+
+    return (1.0, False)  # call again in 1s; no peer list needed
+
+def main():
+    """Parse options, then download the given torrent/URL until Ctrl-C."""
+    try:
+        # opts = a list of (option, value) pairs
+        # args = the list of program arguments left after the option list was stripped
+        opts, args = getopt.getopt(sys.argv[1:], "hvo:p:", ["help", "version", "output-dir", "port"])
+    except getopt.GetoptError, err:
+        print str(err)
+        usage()
+        sys.exit(2)
+
+    # init to default values
+    output_dir = os.getcwd()
+    port = random.randint(10000, 65535)
+
+    for o, a in opts:
+        if o in ("-h", "--help"):
+            usage()
+            sys.exit(0)
+        elif o in ("-o", "--output-dir"):
+            output_dir = a
+        elif o in ("-p", "--port"):
+            port = int(a)
+        elif o in ("-v", "--version"):
+            print_version()
+            sys.exit(0)
+        else:
+            assert False, "unhandled option"
+
+    if len(args) == 0:
+        usage()
+        sys.exit(2)
+
+    if len(args) > 1:
+        print "Too many arguments"
+        usage()
+        sys.exit(2)
+    torrentfile_or_url = args[0]
+
+    print "Press Ctrl-C to stop the download"
+
+    # setup session: throwaway state dir, minimal features enabled
+    sscfg = SessionStartupConfig()
+    statedir = tempfile.mkdtemp()
+    sscfg.set_state_dir(statedir)
+    sscfg.set_listen_port(port)
+    sscfg.set_megacache(False)
+    sscfg.set_overlay(False)
+    sscfg.set_dialback(True)
+    sscfg.set_internal_tracker(False)
+    
+    s = Session(sscfg)
+
+    # setup and start download
+    dscfg = DownloadStartupConfig()
+    dscfg.set_dest_dir(output_dir);
+    #dscfg.set_max_speed( UPLOAD, 10 )
+
+    if torrentfile_or_url.startswith("http") or torrentfile_or_url.startswith(P2PURL_SCHEME):
+        tdef = TorrentDef.load_from_url(torrentfile_or_url)
+    else: 
+        tdef = TorrentDef.load(torrentfile_or_url)
+    if tdef.get_live():
+        raise ValueError("cmdlinedl does not support live torrents")
+        
+    d = s.start_download(tdef, dscfg)
+    d.set_state_callback(state_callback, getpeerlist=False)
+   
+    #
+    # loop while waiting for CTRL-C (or any other signal/interrupt)
+    #
+    # - cannot use sys.stdin.read() - it means busy waiting when running
+    #   the process in background
+    # - cannot use condition variable - that don't listen to KeyboardInterrupt
+    #
+    # time.sleep(sys.maxint) has "issues" on 64bit architectures; divide it
+    # by some value (2048) to solve problem
+    #
+    try:
+        while True:
+            time.sleep(sys.maxint/2048)
+    except:
+        print_exc()
+
+    # clean shutdown: stop session, give it a moment, remove temp state
+    s.shutdown()
+    time.sleep(3)
+    shutil.rmtree(statedir)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Tools/createlivestream-njaal.py b/instrumentation/next-share/BaseLib/Tools/createlivestream-njaal.py
new file mode 100644 (file)
index 0000000..bc0e3a6
--- /dev/null
@@ -0,0 +1,248 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+#
+
+import sys
+import os
+import shutil
+import time
+import tempfile
+import random
+import urllib2
+from traceback import print_exc
+from threading import Condition
+
+from BaseLib.Core.API import *
+import BaseLib.Core.BitTornado.parseargs as parseargs
+
+# Command-line option definitions: (name, default, help) triples for parseargs.
+# NOTE(review): "Live-souce" in the 'auth' help text is a typo ("Live-source");
+# left unchanged here since it is runtime output recorded by this patch.
+argsdef = [('name', '', 'name of the stream'),
+           ('source', '-', 'source to stream (url, file or "-" to indicate stdin)'),
+           ('fileloop', False, 'if source is file, loop over it endlessly'),
+           ('destdir', '.','dir to save torrent (and stream)'),
+           ('bitrate', (512*1024)/8, 'bitrate of the streams in bytes'),
+           ('piecesize', 32768, 'transport piece size'),
+           ('duration', '1:00:00', 'duration of the stream in hh:mm:ss format'),
+           ('nuploads', 7, 'the max number of peers to serve directly'),
+           ('port', 7764, 'the TCP+UDP listen port'),
+           ('thumb', '', 'filename of image in JPEG format, preferably 171x96'),
+           ('cs_keys', '', 'semi-colon separated list of Closed Swarm keys for this torrent'),
+           ('generate_cs', 'no', "Create a closed swarm, generating the keys ('yes' to generate)"),
+
+           ('auth', 'RSA', 'Live-souce authentication method to use (ECDSA or RSA)')
+           ]
+
+
+def state_callback(ds):
+    # Periodic download-state callback: one status line per second.
+    d = ds.get_download()
+    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
+
+    return (1.0,False)
+
+def vod_ready_callback(d,mimetype,stream,filename):
+    """ Called by the Session when the content of the Download is ready.
+
+    Runs on a Session thread; only logs the event. """
+    print >>sys.stderr,"main: VOD ready callback called ###########################################################",mimetype
+
+def generate_key(source, config):
+    """
+    Generate and save a closed-swarm keypair for the torrent derived from
+    'source'; the key files are written next to the torrent as
+    <source>.torrent.tkey / <source>.torrent.pub.
+    Returns (keypair, pubkey).
+    """
+    
+    
+    a, b = os.path.split(source)
+    if b == '':
+        target = a
+    else:
+        target = os.path.join(a, b)
+    target += ".torrent"
+    print "Generating key to '%s.tkey' and '%s.pub'"%(target, target)
+    
+    keypair, pubkey = ClosedSwarm.generate_cs_keypair(target + ".tkey",
+                                                      target + ".pub")
+    
+    return keypair,pubkey
+
+
+def get_usage(defs):
+    """Return a usage string formatted from the given argsdef table."""
+    return parseargs.formatDefinitions(defs,80)
+    
+    
+class FileLoopStream:
+    """File-like wrapper that rewinds to the start on EOF, so a finite
+    file can be streamed as an endless live source."""
+    
+    def __init__(self,stream):
+        self.stream = stream
+        
+    def read(self,nbytes=None):
+        # On EOF, seek back to the beginning and read again.
+        data = self.stream.read(nbytes)
+        if len(data) == 0: # EOF
+            self.stream.seek(0)
+            data = self.stream.read(nbytes)
+        return data
+    
+    def close(self):
+        self.stream.close()
+
+
+if __name__ == "__main__":
+
+    # Parse command line; 'name' is mandatory.
+    config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})
+    print >>sys.stderr,"config is",config
+    print "fileargs is",fileargs
+    
+    if config['name'] == '':
+        print "Usage:  ",get_usage(argsdef)
+        sys.exit(0)
+        
+    
+    print "Press Ctrl-C to stop the download"
+
+    # Remove any stale output file left by a previous run.
+    try:
+        os.remove(os.path.join(config['destdir'],config['name']))
+    except:
+        print_exc()
+    
+    sscfg = SessionStartupConfig()
+    statedir = tempfile.mkdtemp()
+    sscfg.set_state_dir(statedir)
+    sscfg.set_listen_port(config['port'])
+    sscfg.set_megacache(False)
+    sscfg.set_overlay(False)
+    sscfg.set_dialback(True)
+    
+    s = Session(sscfg)
+
+
+    # LIVESOURCEAUTH: load the live-source authentication config for the
+    # chosen method, or create and persist a fresh one on first run.
+    authfilename = os.path.join(config['destdir'],config['name']+'.sauth')
+    if config['auth'] == 'RSA':
+        try:
+            authcfg = RSALiveSourceAuthConfig.load(authfilename)
+        except:
+            print_exc()
+            authcfg = RSALiveSourceAuthConfig()
+            authcfg.save(authfilename)
+    else:
+        try:
+            authcfg = ECDSALiveSourceAuthConfig.load(authfilename)
+        except:
+            print_exc()
+            authcfg = ECDSALiveSourceAuthConfig()
+            authcfg.save(authfilename)
+
+    print >>sys.stderr,"main: Source auth pubkey",`str(authcfg.get_pubkey())`
+
+    tdef = TorrentDef()
+    # hint: to derive bitrate and duration from a file, use
+    #    ffmpeg -i file.mpeg /dev/null
+    tdef.create_live(config['name'],config['bitrate'],config['duration'],authcfg)
+    tdef.set_tracker(s.get_internal_tracker_url())
+    tdef.set_piece_length(config['piecesize']) #TODO: auto based on bitrate?
+    if len(config['thumb']) > 0:
+        tdef.set_thumbnail(config['thumb'])
+
+    if config['generate_cs'].lower() == "yes":
+        if config['cs_keys']:
+            print "Refusing to generate keys when key is given"
+            raise SystemExit(1)
+
+        cs_keypair, config['cs_keys'] = generate_key(config['name'], config)
+
+        # TODO: Read POA if keys are already given (but generate_cs is "no")
+        # Will also create POA for this node - which will seed it!
+    if len(config['cs_keys']) > 0:
+        print >>sys.stderr,"Setting torrent keys to:",config['cs_keys'].split(";")
+        tdef.set_cs_keys(config['cs_keys'].split(";"))
+    else:
+        print >>sys.stderr,"No keys"
+    #tdef2 = TorrentDef.load(torrentfilename)
+    #print >>sys.stderr,"main: Source auth pubkey2",`tdef2.metainfo['info']['live']`
+
+    tdef.finalize()
+    
+    torrentbasename = config['name']+'.tstream'
+    torrentfilename = os.path.join(config['destdir'],torrentbasename)
+    tdef.save(torrentfilename)
+
+    # Closed swarm: load an existing POA, or create and persist a new one.
+    poa = None
+    if tdef.get_cs_keys():
+        # Try to read POA, or if none was found, generate it
+        try:
+            poa = ClosedSwarm.trivial_get_poa(Session.get_default_state_dir(),
+                                              authcfg.get_pubkey(),
+                                              tdef.infohash)
+        except:
+            # Generate and save
+            poa = ClosedSwarm.create_poa(tdef.infohash,
+                                         cs_keypair,
+                                         authcfg.get_pubkey())
+            
+            try:
+                ClosedSwarm.trivial_save_poa(Session.get_default_state_dir(),
+                                             authcfg.get_pubkey(),
+                                             tdef.infohash,
+                                             poa)
+                print >>sys.stderr,"POA saved"
+            except Exception,e:
+                print >>sys.stderr,"Could not save POA"
+
+
+
+    dscfg = DownloadStartupConfig()
+    dscfg.set_dest_dir(config['destdir'])
+
+    if poa:
+        dscfg.set_poa(poa)
+        
+    # Select the video source: stdin, HTTP URL, piped program or local file.
+    if config['source'] == '-':
+        # Arno: doesn't appear to work on Linux
+        source = sys.stdin
+    elif config['source'].startswith('http:'):
+        # HTTP source
+        source = urllib2.urlopen(config['source'])
+        """
+        # Windows Media Encoder gives Bad Request if we don't include User-Agent
+        url = config['source']
+        user_agent = 'NSPlayer/4.1.0.3856'
+        headers = { 'User-Agent' : user_agent }
+
+        req = urllib2.Request(url, None, headers)
+        source = urllib2.urlopen(req)
+        """
+    elif config['source'].startswith('pipe:'):
+        # Program as source via pipe
+        cmd = config['source'][len('pipe:'):]
+        (child_out,source) = os.popen2( cmd, 'b' )
+    else:
+        # File source; rate-limit to the nominal bitrate so we feed the
+        # swarm in real time rather than as fast as the disk allows.
+        stream = open(config['source'],"rb")
+        if config['fileloop']:
+            source = FileLoopStream(stream)
+        else:
+            source = stream
+        dscfg.set_video_ratelimit(tdef.get_bitrate())
+        
+    dscfg.set_video_source(source,authcfg)
+
+    dscfg.set_max_uploads(config['nuploads'])
+
+    d = s.start_download(tdef,dscfg)
+    d.set_state_callback(state_callback,getpeerlist=False)
+   
+    # condition variable would be prettier, but that don't listen to 
+    # KeyboardInterrupt
+    #time.sleep(sys.maxint/2048)
+    #try:
+    #    while True:
+    #        x = sys.stdin.read()
+    #except:
+    #    print_exc()
+    # Block forever until interrupted, then clean up session state.
+    cond = Condition()
+    cond.acquire()
+    cond.wait()
+    
+    s.shutdown()
+    time.sleep(3)    
+    shutil.rmtree(statedir)
+    
diff --git a/instrumentation/next-share/BaseLib/Tools/createlivestream-noauth.py b/instrumentation/next-share/BaseLib/Tools/createlivestream-noauth.py
new file mode 100644 (file)
index 0000000..e843bc0
--- /dev/null
@@ -0,0 +1,155 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+#
+
+import sys
+import os
+import shutil
+import time
+import tempfile
+import random
+import urllib2
+from traceback import print_exc
+from threading import Condition
+
+from BaseLib.Core.API import *
+import BaseLib.Core.BitTornado.parseargs as parseargs
+
+# Command-line option definitions: (name, default, help) triples for parseargs.
+argsdef = [('name', '', 'name of the stream'),
+           ('source', '-', 'source to stream (url, file or "-" to indicate stdin)'),
+           ('destdir', '.','dir to save torrent (and stream)'),
+           ('bitrate', (512*1024)/8, 'bitrate of the streams in bytes'),
+           ('piecesize', 32768, 'transport piece size'),
+           ('duration', '1:00:00', 'duration of the stream in hh:mm:ss format'),
+           ('nuploads', 7, 'the max number of peers to serve directly'),
+           ('port', 7764, 'the TCP+UDP listen port'),
+           ('thumb', '', 'filename of image in JPEG format, preferably 171x96')]
+
+
+def state_callback(ds):
+    # Periodic download-state callback: one status line per second.
+    d = ds.get_download()
+    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
+
+    return (1.0,False)
+
+def vod_ready_callback(d,mimetype,stream,filename):
+    """ Called by the Session when the content of the Download is ready.
+
+    Runs on a Session thread; only logs the event. """
+    print >>sys.stderr,"main: VOD ready callback called ###########################################################",mimetype
+
+def get_usage(defs):
+    """Return a usage string formatted from the given argsdef table."""
+    return parseargs.formatDefinitions(defs,80)
+    
+
+if __name__ == "__main__":
+
+    # Parse command line; 'name' is mandatory.
+    config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})
+    print >>sys.stderr,"config is",config
+    print "fileargs is",fileargs
+    
+    if config['name'] == '':
+        print "Usage:  ",get_usage(argsdef)
+        sys.exit(0)
+        
+    
+    print "Press Ctrl-C to stop the download"
+
+    # Remove any stale output file left by a previous run.
+    try:
+        os.remove(os.path.join(config['destdir'],config['name']))
+    except:
+        print_exc()
+    
+    sscfg = SessionStartupConfig()
+    statedir = tempfile.mkdtemp()
+    sscfg.set_state_dir(statedir)
+    sscfg.set_listen_port(config['port'])
+    sscfg.set_megacache(False)
+    sscfg.set_overlay(False)
+    sscfg.set_dialback(True)
+    
+    s = Session(sscfg)
+
+
+    # no LIVESOURCEAUTH for Pioneer, saves CPU
+
+
+    tdef = TorrentDef()
+    # hint: to derive bitrate and duration from a file, use
+    #    ffmpeg -i file.mpeg /dev/null
+    tdef.create_live(config['name'],config['bitrate'],config['duration'])
+    tdef.set_tracker(s.get_internal_tracker_url())
+    tdef.set_piece_length(config['piecesize']) #TODO: auto based on bitrate?
+    if len(config['thumb']) > 0:
+        tdef.set_thumbnail(config['thumb'])
+    tdef.finalize()
+    
+    # Save the .tstream next to the stream data so clients can fetch it.
+    torrentbasename = config['name']+'.tstream'
+    torrentfilename = os.path.join(config['destdir'],torrentbasename)
+    tdef.save(torrentfilename)
+
+    #tdef2 = TorrentDef.load(torrentfilename)
+    #print >>sys.stderr,"main: Source auth pubkey2",`tdef2.metainfo['info']['live']`
+
+    dscfg = DownloadStartupConfig()
+    dscfg.set_dest_dir(config['destdir'])
+    # Hard-coded auxiliary seeders: infrastructure hosts plus two
+    # stream-verification machines.
+    dscfg.set_live_aux_seeders( [
+            # servers: 
+            ("130.161.211.232",8764), # ss1
+            ("130.161.211.233",8764), # ss2
+            ("130.161.211.234",8764), # ss3
+            ("193.29.139.222",8764), # peer2peer.colo.mediadesign.nl, old ip
+            ("83.96.143.114",8764), # peer2peer.colo.mediadesign.nl, new ip
+            ("130.37.198.236",8764), # jip
+
+            # machines to verify the video stream:
+            ("130.161.159.89",8620), # jd's laptop 
+            ("130.161.159.210",8620), # arno's st interface
+            ] )
+    # Select the video source: stdin, HTTP URL, piped program or local file.
+    if config['source'] == '-':
+        # Arno: doesn't appear to work on Linux
+        source = sys.stdin
+    elif config['source'].startswith('http:'):
+        # HTTP source
+        source = urllib2.urlopen(config['source'])
+        """
+        # Windows Media Encoder gives Bad Request if we don't include User-Agent
+        url = config['source']
+        user_agent = 'NSPlayer/4.1.0.3856'
+        headers = { 'User-Agent' : user_agent }
+
+        req = urllib2.Request(url, None, headers)
+        source = urllib2.urlopen(req)
+        """
+    elif config['source'].startswith('pipe:'):
+        # Program as source via pipe
+        cmd = config['source'][len('pipe:'):]
+        (child_out,source) = os.popen2( cmd, 'b' )
+    else:
+        # File source; rate-limit to the nominal bitrate so we feed the
+        # swarm in real time rather than as fast as the disk allows.
+        source = open(config['source'],"rb")
+        dscfg.set_video_ratelimit(tdef.get_bitrate())
+        
+    dscfg.set_video_source(source)
+
+    dscfg.set_max_uploads(config['nuploads'])
+
+    d = s.start_download(tdef,dscfg)
+    d.set_state_callback(state_callback,getpeerlist=False)
+   
+    # condition variable would be prettier, but that don't listen to 
+    # KeyboardInterrupt
+    #time.sleep(sys.maxint/2048)
+    #try:
+    #    while True:
+    #        x = sys.stdin.read()
+    #except:
+    #    print_exc()
+    # Block forever until interrupted, then clean up session state.
+    cond = Condition()
+    cond.acquire()
+    cond.wait()
+    
+    s.shutdown()
+    time.sleep(3)    
+    shutil.rmtree(statedir)
+    
diff --git a/instrumentation/next-share/BaseLib/Tools/createlivestream.py b/instrumentation/next-share/BaseLib/Tools/createlivestream.py
new file mode 100644 (file)
index 0000000..f017eda
--- /dev/null
@@ -0,0 +1,351 @@
+# Written by Arno Bakker 
+# see LICENSE.txt for license information
+#
+
+import sys
+import os
+import shutil
+import time
+import tempfile
+import urllib2
+from traceback import print_exc
+from threading import Condition
+from base64 import encodestring
+
+from BaseLib.Core.API import *
+from BaseLib.Video.Ogg import ogg_grab_page,is_ogg
+import BaseLib.Core.BitTornado.parseargs as parseargs
+from BaseLib.Core.Utilities.timeouturlopen import urlOpenTimeout
+
+
+argsdef = [('name', '', 'name of the stream'),
+           ('source', '-', 'source to stream (url, file or "-" to indicate stdin)'),
+           ('fileloop', False, 'if source is file, loop over it endlessly'),
+           ('destdir', '.','dir to save torrent (and stream)'),
+           ('bitrate', (512*1024)/8, 'bitrate of the streams in bytes'),
+           ('piecesize', 32768, 'transport piece size'),
+           ('duration', '1:00:00', 'duration of the stream in hh:mm:ss format'),
+           ('nuploads', 7, 'the max number of peers to serve directly'),
+           ('port', 7764, 'the TCP+UDP listen port'),
+           ('thumb', '', 'filename of image in JPEG format, preferably 171x96'),
+           ('auth', 'RSA', 'Live-souce authentication method to use (ECDSA or RSA)'),
+           ('url', False, 'Create URL instead of torrent (cannot be used with thumb)'),
+           ('cs_keys', '', 
+            "Closed swarm torrent keys (semicolon separated if more than one)"),
+           ('generate_cs', 'no',
+            "Create a closed swarm, generating the keys ('yes' to generate)"),
+           ('cs_publish_dir', './', "Publish public CS key in what dir?")]
+
+
+def state_callback(ds):
+    d = ds.get_download()
+    #print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
+    # Arno, 2010-03-04: more compact
+    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],"%3.1f %%" % (ds.get_progress()),ds.get_error(),"up %.1f down %.1f" % (ds.get_current_speed(UPLOAD),ds.get_current_speed(DOWNLOAD))
+
+    return (1.0,False)
+
+def vod_ready_callback(d,mimetype,stream,filename):
+    """ Called by the Session when the content of the Download is ready
+     
+    Called by Session thread """
+    print >>sys.stderr,"main: VOD ready callback called ###########################################################",mimetype
+
+def get_usage(defs):
+    return parseargs.formatDefinitions(defs,80)
+    
+    
+    
+class InfiniteHTTPStream:
+    def __init__(self,url):
+        self.url = url
+        self.reopen()
+        
+    def read(self,nbytes=None):
+        ret = self.stream.read(nbytes)
+        if len(ret) == 0:
+            # EOF
+            print >>sys.stderr,"createlivestream: Reconnecting on EOF input stream"
+            self.reopen()
+            return self.read(nbytes=nbytes)
+        return ret
+        
+    def close(self):
+        self.stream.close()
+                
+    def reopen(self):
+        while True:
+            try:
+                self.stream = urlOpenTimeout(self.url) # 30 sec timeout
+                break
+            except:
+                print_exc()
+                time.sleep(5.0) # No exp. backoff, get back ASAP
+        
+    
+class HaltOnEOFStream:
+    def __init__(self,stream):
+        self.stream = stream
+    
+    def read(self,nbytes=None):
+        ret = self.stream.read(nbytes)
+        if len(ret) == 0:
+            # EOF
+            print >>sys.stderr,"createlivestream: Exiting on EOF input stream"
+            os._exit(1)
+        return ret
+        
+    def close(self):
+        self.stream.close()
+    
+    
+class FileLoopStream:
+    
+    def __init__(self,stream):
+        self.stream = stream
+        
+    def read(self,nbytes=None):
+        data = self.stream.read(nbytes)
+        if len(data) == 0: # EOF
+            self.stream.seek(0)
+            data = self.stream.read(nbytes)
+        return data
+    
+    def close(self):
+        self.stream.close()
+
+
+def generate_key(source, config):
+    """
+    Generate a closed swarm key matching the config.  Source is the 
+    source of the torrent
+    """
+    a, b = os.path.split(source)
+    if b == '':
+        target = a
+    else:
+        target = os.path.join(a, b)
+    target += ".torrent"
+    print "Generating key to '%s.tkey' and '%s.pub'"%(target, target)
+    
+    keypair, pubkey = ClosedSwarm.generate_cs_keypair(target + ".tkey",
+                                                      target + ".pub")
+    
+    return keypair,pubkey
+
+def publish_key(torrent, keypair, target_directory = "./"):
+
+    t = TorrentDef.load(torrent)
+    
+    filename = encodestring(t.infohash).replace("\n","")
+    filename = filename.replace("/","")
+    filename = filename.replace("\\","")
+    key_file = os.path.join(target_directory, filename + ".tkey")
+    ClosedSwarm.save_cs_keypair(keypair, key_file)
+    print "Key saved to:", key_file
+
+if __name__ == "__main__":
+
+    config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})
+    
+    print >>sys.stderr,"config is",config
+    print >>sys.stderr,"fileargs is",fileargs
+    
+    if config['name'] == '':
+        print "Usage:  ",get_usage(argsdef)
+        sys.exit(0)
+        
+    
+    print "Press Ctrl-C to stop the download"
+
+    try:
+        os.remove(os.path.join(config['destdir'],config['name']))
+    except:
+        print_exc()
+    
+    sscfg = SessionStartupConfig()
+    statedir = tempfile.mkdtemp()
+    sscfg.set_state_dir(statedir)
+    sscfg.set_listen_port(config['port'])
+    sscfg.set_megacache(False)
+    sscfg.set_overlay(False)
+    sscfg.set_dialback(True)
+    
+    s = Session(sscfg)
+
+
+    # LIVESOURCEAUTH
+    authfilename = os.path.join(config['destdir'],config['name']+'.sauth')
+    if config['auth'] == 'RSA':
+        try:
+            authcfg = RSALiveSourceAuthConfig.load(authfilename)
+        except:
+            print_exc()
+            authcfg = RSALiveSourceAuthConfig()
+            authcfg.save(authfilename)
+    else:
+        try:
+            authcfg = ECDSALiveSourceAuthConfig.load(authfilename)
+        except:
+            print_exc()
+            authcfg = ECDSALiveSourceAuthConfig()
+            authcfg.save(authfilename)
+
+    print >>sys.stderr,"main: Source auth pubkey",`authcfg.get_pubkey()`
+
+
+    # Support for Ogg as transport stream
+    ogg_header_pages = []
+    if not config['url'] and is_ogg(config['source']):
+        if config['source'].startswith('http:'): 
+            # HTTP source
+            source = urllib2.urlopen(config['source'])
+        else:
+            # File source
+            source = open(config['source'],"rb")
+        while True:
+            (isheader,header,body) = ogg_grab_page(source)
+            if not isheader:
+                break
+            else:
+                ogg_header_pages.append((header,body))
+        source.close()
+
+
+
+    tdef = TorrentDef()
+    # hint: to derive bitrate and duration from a file, use
+    #    ffmpeg -i file.mpeg /dev/null
+    tdef.create_live(config['name'],config['bitrate'],config['duration'],authcfg)
+    tdef.set_tracker(s.get_internal_tracker_url())
+    tdef.set_piece_length(config['piecesize']) #TODO: auto based on bitrate?
+
+    # CLOSEDSWARM
+    cs_keypair = None
+    if config['generate_cs'].lower() == "yes":
+        if config['cs_keys']:
+            print "Refusing to generate keys when key is given"
+            raise SystemExit(1)
+
+        cs_keypair, config['cs_keys'] = generate_key(config['name'], config)
+    if len(config['cs_keys']) > 0:
+        print >>sys.stderr,"Setting torrent keys to:",config['cs_keys'].split(";")
+        tdef.set_cs_keys(config['cs_keys'].split(";"))
+    else:
+        print >>sys.stderr,"No keys"
+
+    if config['url']:
+        tdef.set_url_compat(1)
+    else:
+        if len(config['thumb']) > 0:
+            tdef.set_thumbnail(config['thumb'])
+    # Ogg hack
+    if len(ogg_header_pages) > 0:
+        headers = ''
+        for header,body in ogg_header_pages:
+            headers += header+body
+        tdef.set_live_ogg_headers(headers)
+    
+    tdef.finalize()
+    
+    
+    if config['url']:
+        urlbasename = config['name']+'.url'
+        urlfilename = os.path.join(config['destdir'],urlbasename)
+        f = open(urlfilename,"wb")
+        f.write(tdef.get_url())
+        f.close()
+    else:
+        torrentbasename = config['name']+'.tstream'
+        torrentfilename = os.path.join(config['destdir'],torrentbasename)
+        tdef.save(torrentfilename)
+
+    #tdef2 = TorrentDef.load(torrentfilename)
+    #print >>sys.stderr,"main: Source auth pubkey2",`tdef2.metainfo['info']['live']`
+    poa = None
+    if tdef.get_cs_keys() and authcfg:
+        # POA saving/loading needs the public key of the swarm
+        # Try to read POA, or if none was found, generate it
+        try:
+            poa = ClosedSwarm.trivial_get_poa(Session.get_default_state_dir(),
+                                              authcfg.get_pubkey(),
+                                              tdef.infohash)
+        except:
+            # Generate and save
+            poa = ClosedSwarm.create_poa(tdef.infohash,
+                                         cs_keypair,
+                                         authcfg.get_pubkey())
+            
+            try:
+                ClosedSwarm.trivial_save_poa(Session.get_default_state_dir(),
+                                             authcfg.get_pubkey(),
+                                             tdef.infohash,
+                                             poa)
+                print >>sys.stderr,"POA saved"
+            except Exception,e:
+                print >>sys.stderr,"Could not save POA"
+
+    # Save torrent public key to encoded permid.tkey for easy publishing
+    if cs_keypair:
+        publish_key(torrentfilename, cs_keypair, config['cs_publish_dir'])
+
+    dscfg = DownloadStartupConfig()
+    dscfg.set_dest_dir(config['destdir'])
+
+    if poa:
+        dscfg.set_poa(poa)
+
+    if config['source'] == '-':
+        # Arno: doesn't appear to work on Linux
+        source = sys.stdin
+    elif config['source'].startswith('http:'):
+        # HTTP source
+        source = InfiniteHTTPStream(config['source'])
+        """
+        # Windows Media Encoder gives Bad Request if we don't include User-Agent
+        url = config['source']
+        user_agent = 'NSPlayer/4.1.0.3856'
+        headers = { 'User-Agent' : user_agent }
+
+        req = urllib2.Request(url, None, headers)
+        source = urllib2.urlopen(req)
+        """
+    elif config['source'].startswith('pipe:'):
+        # Program as source via pipe
+        cmd = config['source'][len('pipe:'):]
+        (child_out,source) = os.popen2( cmd, 'b' )
+    else:
+        # File source
+        stream = open(config['source'],"rb")
+        if config['fileloop']:
+            source = FileLoopStream(stream)
+        else:
+            source = stream
+        dscfg.set_video_ratelimit(tdef.get_bitrate())
+
+    haltsource = HaltOnEOFStream(source)
+        
+    restartstatefilename = config['name']+'.restart' 
+    dscfg.set_video_source(haltsource,authcfg,restartstatefilename=restartstatefilename)
+
+    dscfg.set_max_uploads(config['nuploads'])
+
+    d = s.start_download(tdef,dscfg)
+    d.set_state_callback(state_callback,getpeerlist=False)
+   
+    # condition variable would be prettier, but that doesn't listen to 
+    # KeyboardInterrupt
+    time.sleep(sys.maxint/2048)
+    #try:
+    #    while True:
+    #        x = sys.stdin.read()
+    #except:
+    #    print_exc()
+    #cond = Condition()
+    #cond.acquire()
+    #cond.wait()
+    
+    s.shutdown()
+    time.sleep(3)    
+    shutil.rmtree(statedir)
+    
diff --git a/instrumentation/next-share/BaseLib/Tools/createpoa.py b/instrumentation/next-share/BaseLib/Tools/createpoa.py
new file mode 100644 (file)
index 0000000..00dd684
--- /dev/null
@@ -0,0 +1,104 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+import sys
+import os.path
+from base64 import decodestring
+
+import BaseLib.Core.BitTornado.parseargs as parseargs
+
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.ClosedSwarm import ClosedSwarm
+from BaseLib.Core.TorrentDef import TorrentDef
+
+
+
+defaults = [
+    ('output_file', '', 
+        'Where to write the PoA (default nodeid.poa)'),
+    ('node_id', '', 'Node ID receiving the PoA'),
+    ('key_file', '', 'Private key file, default torrentfile.tkey')
+]
+
+
+def create_poa(torrent, torrent_keypair, node_id, target_file):
+
+    # Sanity - check that this key matches the torrent
+    pubkey = torrent_keypair.pub()
+    good_key = False
+    for key in torrent.get_cs_keys():
+        if pubkey.get_der() == key.get_der():
+            good_key = True
+            break
+
+    if not good_key:
+        raise Exception("Bad key given for this torrent")
+
+    # if the node_id is base64 encoded, decode it
+    try:
+        actual_node_id = decodestring(node_id)
+        print "Node ID was base64 encoded"
+    except:
+        actual_node_id = node_id
+
+    # Got the right key, now create POA
+    poa = ClosedSwarm.create_poa(t.infohash, torrent_keypair, actual_node_id)
+
+    # Now we save it
+    if target_file:
+        ClosedSwarm.write_poa_to_file(target_file)
+        tf = target_file
+    else:
+        tf = ClosedSwarm.trivial_save_poa("./", decodestring(node_id), t.infohash, poa)
+    
+    print "Proof of access written to file '%s'"%tf
+
+def get_usage(defs):
+    print "Usage: ",sys.argv[0],"<torrentfile> [options]\n"
+    print parseargs.formatDefinitions(defs,80)
+
+
+if __name__ == "__main__":
+    
+
+    config, fileargs = parseargs.parseargs(sys.argv, defaults, presets = {})
+
+    if len(fileargs) < 2:
+        get_usage(defaults)
+        raise SystemExit(1)
+        
+    torrent = fileargs[1]
+    if not os.path.exists(torrent):
+        print "Error: Could not find torrent file '%s'"%torrent
+        raise SystemExit(1)
+    
+    if not config['key_file']:
+        config['key_file'] = torrent + ".tkey"
+        
+    if not os.path.exists(config['key_file']):
+        print "Error: Could not find key file '%s'"%config['key_file']
+        raise SystemExit(1)
+
+    # Load the torrent file
+    try:
+        t = TorrentDef.load(torrent)
+    except Exception,e:
+        print "Bad torrent file:",e
+        raise SystemExit(1)
+    if not t.get_cs_keys():
+        print "Not a closed swarm torrent"
+        raise SystemExit(1)
+    try:
+        torrent_keypair = ClosedSwarm.read_cs_keypair(config['key_file'])
+    except Exception,e:
+        print "Bad torrent key file",e
+        raise SystemExit(1)
+    
+    # Need permid of the receiving node
+    if not config['node_id']:
+        print "Missing nodeid"
+        raise SystemExit(1)
+
+    create_poa(t, torrent_keypair, 
+               config['node_id'], config['output_file'])
+    
diff --git a/instrumentation/next-share/BaseLib/Tools/createtorrent.py b/instrumentation/next-share/BaseLib/Tools/createtorrent.py
new file mode 100644 (file)
index 0000000..f053323
--- /dev/null
@@ -0,0 +1,134 @@
+# Written by Arno Bakker \r
+# see LICENSE.txt for license information\r
+#\r
+\r
+import sys\r
+import os\r
+import shutil\r
+import time\r
+import tempfile\r
+import random\r
+import urllib2\r
+from traceback import print_exc\r
+from threading import Condition\r
+from base64 import encodestring\r
+\r
+from BaseLib.Core.API import *\r
+import BaseLib.Core.BitTornado.parseargs as parseargs\r
+\r
+argsdef = [('source', '', 'source file or directory'),\r
+           ('tracker', 'http://127.0.0.1:6969/announce', 'tracker URL'),\r
+           ('destdir', '.','dir to save torrent'),\r
+           ('duration', '1:00:00', 'duration of the stream in hh:mm:ss format'),           \r
+           ('piecesize', 32768, 'transport piece size'),\r
+           ('thumb', '', 'filename of image in JPEG format, preferably 171x96'),\r
+           ('url-list', [], 'a URL following BEP19 HTTP Seeding (TODO: support list)'),\r
+            ('url', False, 'Create URL instead of torrent (cannot be used with thumb)'),\r
+            ('cs_keys', '', \r
+            "Closed swarm torrent keys (semicolon separated if more than one)"),\r
+            ('generate_cs', 'no',\r
+             "Create a closed swarm, generating the keys ('yes' to generate)"),\r
+           ('cs_publish_dir', '.', "Publish public CS key in what dir?")\r
+            ]\r
+\r
+\r
+def get_usage(defs):\r
+    return parseargs.formatDefinitions(defs,80)\r
+\r
+def generate_key(config):\r
+    """\r
+    Generate and a closed swarm key matching the config.  Source is the \r
+    source of the torrent\r
+    """\r
+    if 'target' in config and config['target']:\r
+        target = os.path.join(params['target'], split(normpath(file))[1])\r
+    else:\r
+        a, b = os.path.split(config['source'])\r
+        if b == '':\r
+            target = a\r
+        else:\r
+            target = os.path.join(a, b)\r
+    target += ".torrent"\r
+    print "Generating key to '%s.tkey' and '%s.pub'"%(target, target)\r
+    \r
+    keypair, pubkey = ClosedSwarm.generate_cs_keypair(target + ".tkey",\r
+                                                      target + ".pub")\r
+    \r
+    return keypair,pubkey\r
+\r
+def publish_key(torrent, keypair, target_directory = "."):\r
\r
+    t = TorrentDef.load(torrent)\r
+    \r
+    filename = encodestring(t.infohash).replace("\n","")\r
+    filename = filename.replace("/","")\r
+    filename = filename.replace("\\","")\r
+    key_file = os.path.join(target_directory, filename + ".tkey")\r
+    ClosedSwarm.save_cs_keypair(keypair, key_file)\r
+    print "Key saved to:", key_file\r
+\r
+def progress(perc):\r
+    print int(100.0*perc),"%",\r
+        \r
+if __name__ == "__main__":\r
+\r
+    config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})\r
+    print >>sys.stderr,"config is",config\r
+    \r
+    if config['source'] == '':\r
+        print "Usage:  ",get_usage(argsdef)\r
+        sys.exit(0)\r
+        \r
+    if isinstance(config['source'],unicode):\r
+        usource = config['source']\r
+    else:\r
+        usource = config['source'].decode(sys.getfilesystemencoding())\r
+        \r
+    tdef = TorrentDef()\r
+    if os.path.isdir(usource):\r
+        for filename in os.listdir(usource):\r
+            path = os.path.join(usource,filename)\r
+            tdef.add_content(path,path,playtime=config['duration']) #TODO: only set duration on video file\r
+    else:\r
+        tdef.add_content(usource,playtime=config['duration'])\r
+        \r
+    tdef.set_tracker(config['tracker'])\r
+    tdef.set_piece_length(config['piecesize']) #TODO: auto based on bitrate?\r
+\r
+    # CLOSEDSWARM\r
+    cs_keypair = None # Save for publishing later\r
+    if config['generate_cs'].lower() == "yes":\r
+        if config['cs_keys']:\r
+            print "Refusing to generate keys when key is given"\r
+            raise SystemExit(1)\r
+        cs_keypair, cs_pubkey = generate_key(config)\r
+        tdef.set_cs_keys([cs_pubkey])\r
+    elif config['cs_keys']:\r
+        config['cs_keys'] = config['cs_keys'].split(";")\r
+    \r
+    # TODO4DIEGO: DO BE CHANGED TO set_url_list() and support lists of URLs\r
+    if len(config['url-list']) > 0:\r
+        urllist = [config['url-list']]\r
+        tdef.set_urllist(urllist)\r
+    \r
+    if config['url']:\r
+        tdef.set_create_merkle_torrent(1)\r
+        tdef.set_url_compat(1)\r
+    else:\r
+        if len(config['thumb']) > 0:\r
+            tdef.set_thumbnail(config['thumb'])\r
+    tdef.finalize(userprogresscallback=progress)\r
+    \r
+    if config['url']:\r
+        urlbasename = config['source']+'.url'\r
+        urlfilename = os.path.join(config['destdir'],urlbasename)\r
+        f = open(urlfilename,"wb")\r
+        f.write(tdef.get_url())\r
+        f.close()\r
+    else:\r
+        torrentbasename = config['source']+'.tstream'\r
+        torrentfilename = os.path.join(config['destdir'],torrentbasename)\r
+        tdef.save(torrentfilename)\r
+        \r
+    if cs_keypair:\r
+        publish_key(torrentfilename, cs_keypair, config['cs_publish_dir'])\r
diff --git a/instrumentation/next-share/BaseLib/Tools/dirtracker.py b/instrumentation/next-share/BaseLib/Tools/dirtracker.py
new file mode 100644 (file)
index 0000000..91d5d86
--- /dev/null
@@ -0,0 +1,191 @@
+# Written by Arno Bakker \r
+# see LICENSE.txt for license information\r
+#\r
+# Razvan Deaconescu, 2008:\r
+#       * corrected problem when running in background\r
+#       * added usage and print_version functions\r
+#       * uses getopt for command line argument parsing\r
+\r
+import sys\r
+import shutil\r
+import time\r
+import tempfile\r
+import random\r
+import os\r
+import getopt\r
+from traceback import print_exc\r
+\r
+from BaseLib.__init__ import LIBRARYNAME\r
+from BaseLib.Core.API import *\r
+from BaseLib.Core.BitTornado.__init__ import version, report_email\r
+\r
+\r
+checkpointedwhenseeding = False\r
+sesjun = None\r
+\r
+def usage():\r
+    print "Usage: python dirseeder.py [options] directory"\r
+    print "Options:"\r
+    print "\t--port <port>"\r
+    print "\t-p <port>\t\tuse <port> to listen for connections"\r
+    print "\t\t\t\t(default is random value)"\r
+    print "\tdirectory (default is current)"\r
+    print "\t--seeder\t\t\tseeder only"\r
+    print "\t--version"\r
+    print "\t-v\t\t\tprint version and exit"\r
+    print "\t--help"\r
+    print "\t-h\t\t\tprint this help screen"\r
+    print\r
+    print "Report bugs to <" + report_email + ">"\r
+\r
+def print_version():\r
+    print version, "<" + report_email + ">"\r
+\r
+def states_callback(dslist):\r
+    allseeding = True\r
+    for ds in dslist:\r
+        state_callback(ds)\r
+        if ds.get_status() != DLSTATUS_SEEDING:\r
+            allseeding = False\r
+        \r
+    global checkpointedwhenseeding\r
+    global sesjun\r
+    if len(dslist) > 0 and allseeding and not checkpointedwhenseeding:\r
+        checkpointedwhenseeding = True\r
+        print >>sys.stderr,"All seeding, checkpointing Session to enable quick restart"\r
+        sesjun.checkpoint()\r
+        \r
+    return (1.0, False)\r
+\r
+def state_callback(ds):\r
+    d = ds.get_download()\r
+#    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)\r
+    print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \\r
+            (`d.get_def().get_name()`, \\r
+            dlstatus_strings[ds.get_status()], \\r
+            ds.get_progress() * 100, \\r
+            ds.get_error(), \\r
+            ds.get_current_speed(UPLOAD), \\r
+            ds.get_current_speed(DOWNLOAD))\r
+\r
+    return (1.0, False)\r
+\r
+def main():\r
+    try:\r
+        opts, args = getopt.getopt(sys.argv[1:], "hvp:", ["help", "version", "port", "seeder"])\r
+    except getopt.GetoptError, err:\r
+        print str(err)\r
+        usage()\r
+        sys.exit(2)\r
+\r
+    # init to default values\r
+    port = 6969\r
+    tracking  = True\r
+    for o, a in opts:\r
+        if o in ("-h", "--help"):\r
+            usage()\r
+            sys.exit(0)\r
+        elif o in ("-p", "--port"):\r
+            port = int(a)\r
+        elif o in ("-p", "--port"):\r
+            port = int(a)\r
+        elif o in ("--seeder"):\r
+            tracking = False\r
+        elif o in ("-v", "--version"):\r
+            print_version()\r
+            sys.exit(0)\r
+        else:\r
+            assert False, "unhandled option"\r
+\r
+\r
+    if len(args) > 1:\r
+        print "Too many arguments"\r
+        usage()\r
+        sys.exit(2)\r
+    elif len(args) == 0:\r
+        torrentsdir = os.getcwd()\r
+    else:\r
+        torrentsdir = os.path.abspath(args[0])\r
+\r
+    print "Press Ctrl-C or send SIGKILL or WM_DESTROY to stop seeding"\r
+\r
+    # setup session\r
+    sscfg = SessionStartupConfig()\r
+    statedir = os.path.join(torrentsdir,"."+LIBRARYNAME)\r
+    sscfg.set_state_dir(statedir)\r
+    sscfg.set_listen_port(port)\r
+    sscfg.set_megacache(False)\r
+    sscfg.set_overlay(False)\r
+    sscfg.set_dialback(False)\r
+    if tracking:\r
+        sscfg.set_internal_tracker(True)\r
+        # M23TRIAL, log full\r
+        logfilename = "tracker-"+str(int(time.time()))+".log"\r
+        sscfg.set_tracker_logfile(logfilename)\r
+        sscfg.set_tracker_log_nat_checks(True)\r
+    \r
+    s = Session(sscfg)\r
+    global sesjun\r
+    sesjun = s\r
+    s.set_download_states_callback(states_callback, getpeerlist=False)\r
+    \r
+    # Restore previous Session\r
+    s.load_checkpoint()\r
+\r
+    # setup and start downloads\r
+    dscfg = DownloadStartupConfig()\r
+    dscfg.set_dest_dir(torrentsdir)\r
+    #dscfg.set_max_speed(UPLOAD,256) # FOR DEMO\r
+    \r
+    ##dscfg.set_max_uploads(32)\r
+    \r
+    #\r
+    # Scan dir, until exit by CTRL-C (or any other signal/interrupt)\r
+    #\r
+    try:\r
+        while True:\r
+            try:\r
+                print >>sys.stderr,"Rescanning",`torrentsdir`\r
+                for torrent_file in os.listdir(torrentsdir):\r
+                    if torrent_file.endswith(".torrent") or torrent_file.endswith(".tstream") or torrent_file.endswith(".url"): \r
+                        print >>sys.stderr,"Found file",`torrent_file`\r
+                        tfullfilename = os.path.join(torrentsdir,torrent_file)\r
+                        if torrent_file.endswith(".url"):\r
+                            f = open(tfullfilename,"rb")\r
+                            url = f.read()\r
+                            f.close()\r
+                            tdef = TorrentDef.load_from_url(url)\r
+                        else:\r
+                            tdef = TorrentDef.load(tfullfilename)\r
+                        \r
+                        # See if already running:\r
+                        dlist = s.get_downloads()\r
+                        existing = False\r
+                        for d in dlist:\r
+                            existinfohash = d.get_def().get_infohash()\r
+                            if existinfohash == tdef.get_infohash():\r
+                                existing = True\r
+                                break\r
+                        if existing:\r
+                            print >>sys.stderr,"Ignoring existing Download",`tdef.get_name()`\r
+                        else:\r
+                            if tracking:\r
+                                s.add_to_internal_tracker(tdef)\r
+#                            d = s.start_download(tdef, dscfg)\r
+                            \r
+                            # Checkpoint again when new are seeding\r
+                            global checkpointedwhenseeding\r
+                            checkpointedwhenseeding = False\r
+                            \r
+            except KeyboardInterrupt,e:\r
+                raise e\r
+            except Exception, e:\r
+                print_exc()\r
+            \r
+            time.sleep(30.0)\r
+\r
+    except Exception, e:\r
+        print_exc()\r
+\r
+if __name__ == "__main__":\r
+    main()\r
diff --git a/instrumentation/next-share/BaseLib/Tools/dirtrackerseeder.py b/instrumentation/next-share/BaseLib/Tools/dirtrackerseeder.py
new file mode 100644 (file)
index 0000000..30e49af
--- /dev/null
@@ -0,0 +1,198 @@
+# Written by Arno Bakker \r
+# see LICENSE.txt for license information\r
+#\r
+# Razvan Deaconescu, 2008:\r
+#       * corrected problem when running in background\r
+#       * added usage and print_version functions\r
+#       * uses getopt for command line argument parsing\r
+\r
+import sys\r
+import shutil\r
+import time\r
+import tempfile\r
+import random\r
+import os\r
+import getopt\r
+from traceback import print_exc\r
+\r
+from BaseLib.__init__ import LIBRARYNAME\r
+from BaseLib.Core.API import *\r
+from BaseLib.Core.BitTornado.__init__ import version, report_email\r
+\r
+MAXUPLOAD = 1000 # KB/s or None\r
+\r
+checkpointedwhenseeding = False\r
+sesjun = None\r
+\r
+def usage():\r
+    print "Usage: python dirseeder.py [options] directory"\r
+    print "Options:"\r
+    print "\t--port <port>"\r
+    print "\t-p <port>\t\tuse <port> to listen for connections"\r
+    print "\t\t\t\t(default is random value)"\r
+    print "\tdirectory (default is current)"\r
+    print "\t--seeder\t\t\tseeder only"\r
+    print "\t--version"\r
+    print "\t-v\t\t\tprint version and exit"\r
+    print "\t--help"\r
+    print "\t-h\t\t\tprint this help screen"\r
+    print\r
+    print "Report bugs to <" + report_email + ">"\r
+\r
+def print_version():\r
+    print version, "<" + report_email + ">"\r
+\r
+def states_callback(dslist):\r
+    allseeding = True\r
+    for ds in dslist:\r
+        state_callback(ds)\r
+        if ds.get_status() != DLSTATUS_SEEDING:\r
+            allseeding = False\r
+        \r
+    global checkpointedwhenseeding\r
+    global sesjun\r
+    if len(dslist) > 0 and allseeding and not checkpointedwhenseeding:\r
+        checkpointedwhenseeding = True\r
+        print >>sys.stderr,"All seeding, checkpointing Session to enable quick restart"\r
+        sesjun.checkpoint()\r
+        \r
+    return (1.0, False)\r
+\r
+def state_callback(ds):\r
+    d = ds.get_download()\r
+#    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)\r
+    print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \\r
+            (`d.get_def().get_name()`, \\r
+            dlstatus_strings[ds.get_status()], \\r
+            ds.get_progress() * 100, \\r
+            ds.get_error(), \\r
+            ds.get_current_speed(UPLOAD), \\r
+            ds.get_current_speed(DOWNLOAD))\r
+\r
+    return (1.0, False)\r
+\r
+def main():\r
+    try:\r
+        opts, args = getopt.getopt(sys.argv[1:], "hvp:", ["help", "version", "port", "seeder"])\r
+    except getopt.GetoptError, err:\r
+        print str(err)\r
+        usage()\r
+        sys.exit(2)\r
+\r
+    # init to default values\r
+    port = 6969\r
+    tracking  = True\r
+    for o, a in opts:\r
+        if o in ("-h", "--help"):\r
+            usage()\r
+            sys.exit(0)\r
+        elif o in ("-p", "--port"):\r
+            port = int(a)\r
+        elif o in ("-p", "--port"):\r
+            port = int(a)\r
+        elif o in ("--seeder"):\r
+            tracking = False\r
+        elif o in ("-v", "--version"):\r
+            print_version()\r
+            sys.exit(0)\r
+        else:\r
+            assert False, "unhandled option"\r
+\r
+\r
+    if len(args) > 1:\r
+        print "Too many arguments"\r
+        usage()\r
+        sys.exit(2)\r
+    elif len(args) == 0:\r
+        torrentsdir = os.getcwd()\r
+    else:\r
+        torrentsdir = os.path.abspath(args[0])\r
+\r
+    print "Press Ctrl-C or send SIGKILL or WM_DESTROY to stop seeding"\r
+\r
+    # setup session\r
+    sscfg = SessionStartupConfig()\r
+    statedir = os.path.join(torrentsdir,"."+LIBRARYNAME)\r
+    sscfg.set_state_dir(statedir)\r
+    sscfg.set_listen_port(port)\r
+    sscfg.set_megacache(False)\r
+    sscfg.set_overlay(False)\r
+    sscfg.set_dialback(False)\r
+    if tracking:\r
+        sscfg.set_internal_tracker(True)\r
+        # log full\r
+        logfilename = "tracker-"+str(int(time.time()))+".log"\r
+        sscfg.set_tracker_logfile(logfilename)\r
+        sscfg.set_tracker_log_nat_checks(True)\r
+    \r
+    s = Session(sscfg)\r
+    global sesjun\r
+    sesjun = s\r
+    s.set_download_states_callback(states_callback, getpeerlist=False)\r
+    \r
+    # Restore previous Session\r
+    s.load_checkpoint()\r
+\r
+    # setup and start downloads\r
+    dscfg = DownloadStartupConfig()\r
+    dscfg.set_dest_dir(torrentsdir)\r
+    # Arno, 2010-04-16: STBSPEED: complete BITFIELDS are processed much faster\r
+    dscfg.set_breakup_seed_bitfield(False)\r
+    if MAXUPLOAD is not None:\r
+        dscfg.set_max_speed(UPLOAD,MAXUPLOAD)\r
+    \r
+    ##dscfg.set_max_uploads(32)\r
+    \r
+    #\r
+    # Scan dir, until exit by CTRL-C (or any other signal/interrupt)\r
+    #\r
+    try:\r
+        while True:\r
+            try:\r
+                print >>sys.stderr,"Rescanning",`torrentsdir`\r
+                for torrent_file in os.listdir(torrentsdir):\r
+                    if torrent_file.endswith(".torrent") or torrent_file.endswith(".tstream") or torrent_file.endswith(".url"): \r
+                        print >>sys.stderr,"Found file",`torrent_file`\r
+                        tfullfilename = os.path.join(torrentsdir,torrent_file)\r
+                        if torrent_file.endswith(".url"):\r
+                            f = open(tfullfilename,"rb")\r
+                            url = f.read()\r
+                            f.close()\r
+                            tdef = TorrentDef.load_from_url(url)\r
+                        else:\r
+                            tdef = TorrentDef.load(tfullfilename)\r
+                        \r
+                        # See if already running:\r
+                        dlist = s.get_downloads()\r
+                        existing = False\r
+                        for d in dlist:\r
+                            existinfohash = d.get_def().get_infohash()\r
+                            if existinfohash == tdef.get_infohash():\r
+                                existing = True\r
+                                break\r
+                        if existing:\r
+                            print >>sys.stderr,"Ignoring existing Download",`tdef.get_name()`\r
+                            \r
+                            if MAXUPLOAD is not None:\r
+                                d.set_max_speed(UPLOAD,MAXUPLOAD)\r
+                        else:\r
+                            if tracking:\r
+                                s.add_to_internal_tracker(tdef)\r
+                            d = s.start_download(tdef, dscfg)\r
+                            \r
+                            # Checkpoint again when new are seeding\r
+                            global checkpointedwhenseeding\r
+                            checkpointedwhenseeding = False\r
+                            \r
+            except KeyboardInterrupt,e:\r
+                raise e\r
+            except Exception, e:\r
+                print_exc()\r
+            \r
+            time.sleep(30.0)\r
+\r
+    except Exception, e:\r
+        print_exc()\r
+\r
+if __name__ == "__main__":\r
+    main()\r
diff --git a/instrumentation/next-share/BaseLib/Tools/duration2torrent.py b/instrumentation/next-share/BaseLib/Tools/duration2torrent.py
new file mode 100644 (file)
index 0000000..c9d8be6
--- /dev/null
@@ -0,0 +1,51 @@
+# Written by Riccardo Petrocco\r
+# see LICENSE.txt for license information\r
+#\r
+\r
+import sys\r
+import os\r
+import shutil\r
+import time\r
+import tempfile\r
+import random\r
+import urllib2\r
+from traceback import print_exc\r
+from threading import Condition\r
+\r
+from BaseLib.Core.API import *\r
+import BaseLib.Core.BitTornado.parseargs as parseargs\r
+\r
+argsdef = [('torrent', '', 'source torrent to be modified'),\r
+           ('destdir', '.','dir to save torrent'),
+           ('newtorrent', '', 'name of the new torrent, if not specified the original torrent will be replaced'),\r
+           ('duration', '', 'duration of the stream in hh:mm:ss format')]\r
+\r
+\r
+def get_usage(defs):\r
+    return parseargs.formatDefinitions(defs,80)\r
+    \r
+    \r
+if __name__ == "__main__":\r
+\r
+    config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})\r
+    print >>sys.stderr,"config is",config\r
+    \r
+    if config['torrent'] == '':\r
+        print "Usage:  ",get_usage(argsdef)\r
+        sys.exit(0)\r
+                \r
+    tdef = TorrentDef.load(config['torrent'])
+    metainfo = tdef.get_metainfo()
+    
+    if config['duration'] != '':
+        metainfo['playtime'] = config['duration']
+        \r
+    tdef.finalize()\r
+    \r
+    if config['newtorrent'] == '':
+        torrentbasename = config['torrent']
+    else:
+        torrentbasename = config['newtorrent'] + '.torrent'\r
+
+    torrentfilename = os.path.join(config['destdir'],torrentbasename)\r
+    tdef.save(torrentfilename)\r
diff --git a/instrumentation/next-share/BaseLib/Tools/httpseeder.py b/instrumentation/next-share/BaseLib/Tools/httpseeder.py
new file mode 100644 (file)
index 0000000..6809948
--- /dev/null
@@ -0,0 +1,60 @@
+\r
+import sys\r
+import os\r
+import time\r
+from traceback import print_exc\r
+\r
+from BaseLib.Video.VideoServer import VideoHTTPServer\r
+\r
+\r
+VIDEOHTTP_LISTENPORT = 8080\r
+\r
+class HTTPSeeder:\r
+    \r
+    def __init__(self):\r
+        self.videoHTTPServer = VideoHTTPServer(VIDEOHTTP_LISTENPORT)\r
+        self.videoHTTPServer.register(self.videoservthread_error_callback,self.videoservthread_set_status_callback)\r
+        self.videoHTTPServer.background_serve()\r
+\r
+    #\r
+    # VideoServer status/error reporting\r
+    #\r
+    def videoservthread_error_callback(self,e,url):\r
+        print >>sys.stderr,"httpseed: Video server reported error",url,str(e)\r
+\r
+    def videoservthread_set_status_callback(self,status):\r
+        print >>sys.stderr,"httpseed: Video server sets status callback",status\r
+\r
+\r
+\r
+\r
+if __name__ == '__main__':\r
+    \r
+    print >>sys.stderr,"httpseed: Starting"\r
+    \r
+    httpseed = HTTPSeeder()\r
+    \r
+    paths = []\r
+    paths.append("treeOfLife.ogv")\r
+    paths.append("RDTV_ep2_5min.ogv")\r
+    \r
+    for path in paths:\r
+        filename = os.path.basename(path) \r
+    \r
+        f = open(path,"rb")\r
+        s = os.stat(path)\r
+        fsize = s.st_size\r
+    \r
+        #streaminfo = { 'mimetype': 'application/ogg', 'stream': f, 'length': fsize, 'blocksize':2 ** 16, 'bitrate':69976.4 }\r
+        streaminfo = { 'mimetype': 'application/ogg', 'stream': f, 'length': fsize, 'blocksize':2 ** 16 }\r
+\r
+        urlpath = "/"+filename\r
+        print >>sys.stderr,"httpseed: Hosting",urlpath\r
+        httpseed.videoHTTPServer.set_inputstream(streaminfo,urlpath)\r
+\r
+    print >>sys.stderr,"httpseed: Waiting"\r
+    try:\r
+        while True:\r
+            time.sleep(sys.maxint/2048)\r
+    except:\r
+        print_exc()\r
diff --git a/instrumentation/next-share/BaseLib/Tools/pingbackserver.py b/instrumentation/next-share/BaseLib/Tools/pingbackserver.py
new file mode 100644 (file)
index 0000000..0cf1b6d
--- /dev/null
@@ -0,0 +1,102 @@
+# Written by Lucia D'Acunto
+# see LICENSE.txt for license information
+
+# Ping back server
+
+from socket import *
+import sys
+import time
+import thread
+import select
+
+
+DEBUG = True
+
+
+# Print usage information
+def usage():
+
+        print "Usage:"
+        print " python pingback.py <serverport>"
+
+
+def serveclient(message, udpsock, clientaddr):
+
+    # Loop forever receiving pings and sending pongs
+    data = message.split(':')
+                    
+    if data[0] == "ping":
+
+        if DEBUG:
+            print >> sys.stderr, "received ping with delay", data[1], "from", clientaddr
+
+        time.sleep(int(data[1]))
+
+        if DEBUG:
+            print >> sys.stderr, "sending pong back after", data[1], "seconds", "to", clientaddr
+
+        pongMsg = (str("pong:"+data[1]))
+        udpsock.sendto(pongMsg, clientaddr)
+
+
+
+if __name__=="__main__" :
+
+    if len(sys.argv) != 2 :
+                usage()
+                sys.exit(1)
+
+    serveraddr = None
+    log = open("log.txt", "a") # logfile
+
+
+    try :
+        serveraddr = (gethostbyname(gethostname()), int(sys.argv[1]))
+
+    except ValueError, strerror :
+        if DEBUG:
+            print >> sys.stderr, "ValueError: ", strerror
+        usage()
+        sys.exit(1)
+
+
+    # Set up the sockets
+    try :
+        udpsock = socket(AF_INET, SOCK_DGRAM)
+        udpsock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
+        udpsock.bind(serveraddr)
+
+    except error, (errno, strerror) :
+
+        if udpsock :
+            udpsock.close()
+
+        if DEBUG:
+            print >> sys.stderr, "Could not open socket: %s" % (strerror)
+        sys.stdout.flush()
+
+        sys.exit(1)
+
+    
+    if DEBUG:
+        print >> sys.stderr, "waiting for connection..."
+
+    # Loop forever receiving pings and sending pongs
+    while True: 
+
+        BUFSIZ = 1024
+        message = None
+        clientaddr = None
+
+        try:
+            message, clientaddr = udpsock.recvfrom(BUFSIZ)
+        except error:
+            continue
+
+        print >> sys.stderr, time.strftime("%Y/%m/%d %H:%M:%S"), "...connected from:", clientaddr
+        log.write("%i %s %i\n" % (time.time(), str(clientaddr[0]), clientaddr[1]))
+        log.flush()
+
+        thread.start_new_thread(serveclient, (message, udpsock, clientaddr))
+
+            
diff --git a/instrumentation/next-share/BaseLib/Tools/pipe-arnocam-home.sh b/instrumentation/next-share/BaseLib/Tools/pipe-arnocam-home.sh
new file mode 100755 (executable)
index 0000000..f48d7a8
--- /dev/null
@@ -0,0 +1,2 @@
+
+ffmpeg -i /dev/video -vcodec mpeg4 -vb 428288 -s 320x240 -acodec mp3 -ab 96000 -ac 1 -f mpegts -
diff --git a/instrumentation/next-share/BaseLib/Tools/pipe-arnocam-jip.sh b/instrumentation/next-share/BaseLib/Tools/pipe-arnocam-jip.sh
new file mode 100755 (executable)
index 0000000..b9b95e4
--- /dev/null
@@ -0,0 +1,2 @@
+
+ffmpeg -s 320x240 -r 15 -f video4linux -i /dev/video -vcodec mpeg4 -vb 428288 -s 320x240 -an -f mpegts -
diff --git a/instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-aac-gop-sync.sh b/instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-aac-gop-sync.sh
new file mode 100755 (executable)
index 0000000..e9d010f
--- /dev/null
@@ -0,0 +1,7 @@
+
+#FFMPEG=$HOME/pkgs/ffmpeg-r13556/bin/ffmpeg
+FFMPEG=$HOME/pkgs/ffmpeg-r14154-x264-r745/bin/ffmpeg
+#FFMPEG=$HOME/pkgs/ffmpeg-r14260-x264-snapshot-20080716-2245/bin/ffmpeg
+
+$FFMPEG -f mpegts -vsync 1 -map 0.0:0.1 -map 0.1 -i /dev/dvb/adapter0/dvr0 -vcodec libx264 -vb 428288 -g 16 -s 320x240 -acodec libfaac -ab 96000 -ac 1 -deinterlace -f mpegts -
+
diff --git a/instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-mp3.sh b/instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-mp3.sh
new file mode 100755 (executable)
index 0000000..6ee0c9c
--- /dev/null
@@ -0,0 +1,3 @@
+
+$HOME/pkgs/ffmpeg-r13556/bin/ffmpeg -f mpegts -i /dev/dvb/adapter0/dvr0 -vcodec libx264 -vb 428288 -s 320x240 -acodec libmp3lame -ab 96000 -ac 1 -deinterlace -f mpegts -
+
diff --git a/instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-nosound-mencoder.sh b/instrumentation/next-share/BaseLib/Tools/pipe-babscam-h264-nosound-mencoder.sh
new file mode 100755 (executable)
index 0000000..0c7075b
--- /dev/null
@@ -0,0 +1,7 @@
+
+# X264ENCOPTS='bitrate=1024'
+# X264ENCOPTS='vbv-maxrate=1024:vbv-minrate=1024:qcomp=0:ratetol=0:keyint=30:frameref=1'
+X264ENCOPTS='bitrate=1024:qcomp=0:ratetol=0:keyint=30:frameref=1:level=4.1'
+
+mencoder -cache 8192 -ovc x264 -x264encopts "$X264ENCOPTS" -nosound  -of lavf -lavfopts i_certify_that_my_video_stream_does_not_use_b_frames:format=mpegts -vf scale=640:360 -quiet -o /dev/stdout /dev/dvb/adapter0/dvr0 
+
diff --git a/instrumentation/next-share/BaseLib/Tools/pipe-babscam-mpeg4-mp3-sync.sh b/instrumentation/next-share/BaseLib/Tools/pipe-babscam-mpeg4-mp3-sync.sh
new file mode 100755 (executable)
index 0000000..90242c5
--- /dev/null
@@ -0,0 +1,3 @@
+
+$HOME/pkgs/ffmpeg/bin/ffmpeg -f mpegts -vsync 1 -map 0.0:0.1 -map 0.1 -i /dev/dvb/adapter0/dvr0 -vcodec mpeg4 -vb 428288 -s 320x240 -acodec libmp3lame -ab 96000 -ac 1 -f mpegts -
+
diff --git a/instrumentation/next-share/BaseLib/Tools/proxy-cmdline.py b/instrumentation/next-share/BaseLib/Tools/proxy-cmdline.py
new file mode 100644 (file)
index 0000000..8f67301
--- /dev/null
@@ -0,0 +1,279 @@
+# Written by Arno Bakker, George Milescu 
+# see LICENSE.txt for license information
+#
+# Razvan Deaconescu, 2008:
+#       * corrected problem when running in background
+#       * added usage and print_version functions
+#       * uses getopt for command line argument parsing
+# George Milescu, 2009
+#       * Added arguments for proxymode
+
+import sys
+import shutil
+import time
+import tempfile
+import random
+import os
+import getopt
+from traceback import print_exc
+
+from BaseLib.Core.API import *
+from BaseLib.Core.BitTornado.__init__ import version, report_email
+from BaseLib.Core.Utilities.utilities import show_permid_short
+from M2Crypto import EC
+
+# Print usage message
+def usage():
+    print "Usage: python proxy-cmdlinedl.py [options] torrent_file"
+    print "Options:"
+    print "\t--port <port>"
+    print "\t-p <port>\t\tuse <port> to listen for connections"
+    print "\t\t\t\t(default is random value)"
+    print "\t--output <output-dir>"
+    print "\t-o <output-dir>\t\tuse <output-dir> for storing downloaded data"
+    print "\t\t\t\t(default is current directory)"
+    print "\t--state-dir <state-dir>"
+    print "\t\t\t\tuse <state-dir> for storing session data"
+    print "\t\t\t\t(default is /tmp/tmp-tribler)"
+    print "\t--proxymode <proxy-mode>"
+    print "\t\t\t\t[DEVEL] use <proxy-mode> to specify how the client behaves"
+    print "\t\t\t\t * proxy-mode = off: no proxy is being used (the client is either an helper, or it does not start use proxy connections)"
+    print "\t\t\t\t * proxy-mode = private: only proxy connections are being used"
+    print "\t\t\t\t * proxy-mode = speed: both proxy and direct connections are being used"
+    print "\t\t\t\t(default is off)"
+    print "\t--proxyservice <proxy-service>"
+    print "\t\t\t\t[DEVEL] use <proxy-mode> to specify how the client behaves"
+    print "\t\t\t\t * proxy-service = off: the current node can not be used as a proxy by other nodes"
+    print "\t\t\t\t * proxy-service = on: the current node can be used as a proxy by other nodes"
+    print "\t\t\t\t(default is off)"
+    print "\t--helpers <helpers>"
+    print "\t\t\t\t[DEVEL] use <helpers> to specify maximum number of helpers used or a torrent"
+    print "\t\t\t\t(default is 5)"
+    print "\t--test-mode <test-mode>"
+    print "\t\t\t\t[DEVEL] use <test-mode> to specify if the client runs as part of a test"
+    print "\t\t\t\t * test-mode = off: the client is not run as part of a test"
+    print "\t\t\t\t * test-mode = coord: the client is part of a test, as a coordinator"
+    print "\t\t\t\t * test-mode = helper: the client is part of a test, as a helper"
+    print "\t\t\t\t(default is off)"
+    print "\t--no-download"
+    print "\t\t\t\t[DEVEL] Don't download anything, just stay and wait"
+    print "\t\t\t\t(if not present the default to download the torrent data)"
+    print "\t--version"
+    print "\t-v\t\t\tprint version and exit"
+    print "\t--help"
+    print "\t-h\t\t\tprint this help screen"
+    print
+    print "Report bugs to <" + report_email + ">"
+
+# Print version information
+def print_version():
+    print version, "<" + report_email + ">"
+
+# Print torrent statistics
+def state_callback(ds):
+    d = ds.get_download()
+#    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
+    print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \
+            (d.get_def().get_name(), \
+            dlstatus_strings[ds.get_status()], \
+            ds.get_progress() * 100, \
+            ds.get_error(), \
+            ds.get_current_speed(UPLOAD), \
+            ds.get_current_speed(DOWNLOAD))
+
+    return (1.0, False)
+
+def main():
+    try:
+        # opts = a list of (option, value) pairs
+        # args = the list of program arguments left after the option list was stripped
+        opts, args = getopt.getopt(sys.argv[1:], "hvo:p:", ["help", "version", "output-dir=", "port=", "proxymode=", "proxyservice=", "helpers=", "test-mode=", "state-dir=", "no-download"])
+    except getopt.GetoptError, err:
+        print str(err)
+        usage()
+        sys.exit(2)
+
+    # init the default values
+    output_dir = os.getcwd()
+    port = random.randint(10000, 65535)
+    id = None
+    proxy_mode = PROXY_MODE_OFF
+    proxy_service = PROXYSERVICE_OFF
+    helpers = 5
+    test_mode="off" # off, coord, helper
+    no_download = False
+    statedir = "/tmp/tmp-tribler"
+
+    # get values from arguments
+    for option, value in opts:
+        if option in ("-h", "--help"):
+            usage()
+            sys.exit(0)
+        elif option in ("-o", "--output-dir"):
+            output_dir = value
+        elif option in ("--state-dir"):
+            statedir = value
+        elif option in ("-p", "--port"):
+            port = int(value)
+        elif option in ("--proxymode"):
+            if value == "off":
+                proxy_mode = PROXY_MODE_OFF
+            elif value == "private":
+                proxy_mode = PROXY_MODE_PRIVATE
+            elif value == "speed":
+                proxy_mode = PROXY_MODE_SPEED
+            else:
+                proxy_mode = PROXY_MODE_OFF
+        elif option in ("--proxyservice"):
+            if value == "off":
+                proxy_service = PROXYSERVICE_OFF
+            elif value == "on":
+                proxy_service = PROXYSERVICE_ON
+            else:
+                proxy_service = PROXYSERVICE_OFF
+        elif option in ("--helpers"):
+            helpers = int(value)
+        elif option in ("--test-mode"):
+            test_mode = value
+        elif option in ("-v", "--version"):
+            print_version()
+            sys.exit(0)
+        elif option in ("--no-download"):
+            no_download = True
+        else:
+            assert False, "unhandled option"
+
+    # arg should have only one element left: the torrent file name
+    # ProxyDevel
+    # if no_download is false (the client has to download torrent data), check number of arguments
+    if (no_download == False) and len(args) == 0:
+        usage()
+        sys.exit(2)
+    if len(args) > 1:
+        print "Too many arguments"
+        usage()
+        sys.exit(2)
+
+    # ProxyDevel
+    # if no_download is false (the client has to download torrent data), get torrent file name
+    if (no_download == False):
+        torrent_file = args[0]
+
+    print "Press Ctrl-C to stop the download"
+
+    # session setup
+    session_startup_config = SessionStartupConfig()
+    #statedir = tempfile.mkdtemp()
+    # ProxyDevel - set custom state dir
+    session_startup_config.set_state_dir(statedir)
+    session_startup_config.set_download_help_dir(os.path.join(statedir,"help_dir"))
+    session_startup_config.set_listen_port(port)
+    session_startup_config.set_megacache(True)
+    session_startup_config.set_overlay(True)
+    session_startup_config.set_dialback(True)
+    session_startup_config.set_internal_tracker(False)
+    # ProxyDevel - turn DHT off
+    session_startup_config.set_mainline_dht(False)
+    # ProxyDevel - turn buddycast off
+    session_startup_config.set_buddycast(False)
+    # ProxyDevel - set new core API values
+    session_startup_config.set_proxyservice_status(proxy_service)
+    
+    s = Session(session_startup_config)
+    
+    # DEBUG
+    print "*** My Permid = ", show_permid_short(s.get_permid())
+
+    # ProxyDevel - Receive overlay messages from anyone
+    s.set_overlay_request_policy(AllowAllRequestPolicy())
+
+    if test_mode == "coord":
+        # add the helper 1 as a friend
+        # get helper1 permid
+        helper1_keypair_filename = os.path.join("../../P2P-Testing-Infrastructure/ClientWorkingFolders/Proxy01/statedir","ec.pem")
+        helper1_keypair = EC.load_key(helper1_keypair_filename)
+        helper1_permid = str(helper1_keypair.pub().get_der())
+        # set helper1 ip address
+        helper1_ip="10.10.3.1"
+#        helper1_ip="141.85.224.203"
+        # set helper1 port
+        helper1_port = 25123
+        # add helper1 as a peer
+        peerdb = s.open_dbhandler(NTFY_PEERS)
+        peer = {}
+        peer['permid'] = helper1_permid
+        peer['ip'] = helper1_ip
+        peer['port'] = helper1_port
+        peer['last_seen'] = 0
+        peerdb.addPeer(peer['permid'], peer, update_dns=True, commit=True)
+
+        # add the helper 2 as a friend
+        # get helper2 permid
+        helper2_keypair_filename = os.path.join("../../P2P-Testing-Infrastructure/ClientWorkingFolders/Proxy02/statedir","ec.pem")
+        helper2_keypair = EC.load_key(helper2_keypair_filename)
+        helper2_permid = str(helper2_keypair.pub().get_der())
+        # set helper2 ip address
+        helper2_ip="10.10.4.1"
+#        helper2_ip="141.85.224.204"
+        # set helper2 port
+        helper2_port = 25123
+        # add helper2 as a peer
+        peerdb = s.open_dbhandler(NTFY_PEERS)
+        peer = {}
+        peer['permid'] = helper2_permid
+        peer['ip'] = helper2_ip
+        peer['port'] = helper2_port
+        peer['last_seen'] = 0
+        peerdb.addPeer(peer['permid'], peer, update_dns=True, commit=True)
+        
+    # ProxyDevel - if no_download is false (the client has to download torrent data), then start downloading 
+    if (no_download == False):
+        # setup and start download
+        download_startup_config = DownloadStartupConfig()
+        download_startup_config.set_dest_dir(output_dir);
+        # ProxyDevel - turn PEX off
+        download_startup_config.set_ut_pex_max_addrs_from_peer(0)
+        download_startup_config.set_proxy_mode(proxy_mode)
+        download_startup_config.set_no_helpers(helpers)
+
+        torrent_def = TorrentDef.load(torrent_file)
+    
+        d = s.start_download(torrent_def, download_startup_config)
+        d.set_state_callback(state_callback, getpeerlist=False)
+    
+        # if the client is a coordinator
+        if test_mode == "coord":
+            # allow time for the download to start, before starting the help request
+            time.sleep(3)
+            # ask peer for help
+            for download in s.get_downloads():
+                # DEBUG
+                print "*** COORDINATOR Sending help request"
+                peerlist = []
+                peerlist.append(helper1_permid)
+                peerlist.append(helper2_permid)
+                download.ask_coopdl_helpers(peerlist)
+
+    #
+    # loop while waiting for CTRL-C (or any other signal/interrupt)
+    #
+    # - cannot use sys.stdin.read() - it means busy waiting when running
+    #   the process in background
+    # - cannot use condition variable - they don't listen to KeyboardInterrupt
+    #
+    # time.sleep(sys.maxint) has "issues" on 64bit architectures; divide it
+    # by some value (2048) to solve problem
+    #
+    try:
+        while True:
+            time.sleep(sys.maxint/2048)
+    except:
+        print_exc()
+
+    s.shutdown()
+    time.sleep(3)
+    # ProxyDevel
+    #shutil.rmtree(statedir)
+    
+if __name__ == "__main__":
+    main()
diff --git a/instrumentation/next-share/BaseLib/Tools/seed-njaal.py b/instrumentation/next-share/BaseLib/Tools/seed-njaal.py
new file mode 100644 (file)
index 0000000..e0c4eb4
--- /dev/null
@@ -0,0 +1,120 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+#
+
+import sys
+import os
+import time
+import tempfile
+import random
+import urllib2
+import socket # To get fq host name
+from base64 import encodestring
+from traceback import print_exc
+from threading import Condition
+
+from BaseLib.Core.API import *
+from BaseLib.Core.Statistics.Status import *
+
+import BaseLib.Core.BitTornado.parseargs as parseargs
+
+
+argsdef = [('nuploads', 200, 'the max number of peers to serve directly'),
+           ('destdir', '/tmp/', 'Where to save the downloaded/seeding files')
+           ]
+
+
+def state_callback(ds):
+    d = ds.get_download()
+    print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
+
+    return (1.0,False)
+
+def get_usage(defs):
+    return parseargs.formatDefinitions(defs,80)
+    
+
+class PrintStatusReporter(Status.OnChangeStatusReporter):
+    """
+    Print all changes to the screen
+    """
+    def report(self, event):
+        """
+        print to screen
+        """
+        print >> sys.stderr, "STATUS: %s=%s"%(event.get_name(),
+                                              event.get_value())
+
+
+if __name__ == "__main__":
+
+    config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})
+
+    if len(sys.argv) < 2:
+        raise SystemExit("Missing .torrent or .tstream to seed")
+    
+    sscfg = SessionStartupConfig()
+    state_dir = Session.get_default_state_dir('.seeder')
+    sscfg.set_state_dir(state_dir)
+    port = random.randint(10000,20000)
+    sscfg.set_listen_port(port)
+    sscfg.set_megacache(False)
+    sscfg.set_overlay(False)
+    sscfg.set_dialback(True)
+    
+    s = Session(sscfg)
+
+    print >>sys.stderr,"My permid:",encodestring(s.get_permid()).replace("\n","")
+    
+    source = sys.argv[1]
+    if source.startswith("http://"):
+        tdef = TorrentDef.load_from_url(source)
+    else:
+        tdef = TorrentDef.load(source)
+
+    poa = None
+    if tdef.get_cs_keys():
+        try:
+            poa = ClosedSwarm.trivial_get_poa(s.get_default_state_dir(),
+                                              s.get_permid(),
+                                              tdef.infohash)
+        except:
+            pass # DEBUG ONLY
+
+    dscfg = DownloadStartupConfig()
+    dscfg.set_dest_dir(config['destdir'])
+
+    if poa:
+        dscfg.set_poa(poa)
+
+    dscfg.set_max_uploads(config['nuploads'])
+
+    print "Press Ctrl-C to stop seeding"
+
+    status = Status.get_status_holder("LivingLab")
+    id = "seed_" + socket.getfqdn()
+
+    # Print status updates to the screen
+    #status.add_reporter(PrintStatusReporter("Screen"))
+
+    # Report status to the Living lab every 30 minutes
+    reporter = LivingLabReporter.LivingLabPeriodicReporter("Living lab CS reporter", 60*30, id, print_post=True)
+    status.add_reporter(reporter)
+    
+
+    d = s.start_download(tdef,dscfg)
+    d.set_state_callback(state_callback,getpeerlist=False)
+
+    while True:
+        try:
+            time.sleep(60)
+        except:
+            break
+            
+    #cond = Condition()
+    #cond.acquire()
+    #cond.wait()
+    reporter.stop()
+    
+    s.shutdown()
+    
diff --git a/instrumentation/next-share/BaseLib/Tools/seeking.py b/instrumentation/next-share/BaseLib/Tools/seeking.py
new file mode 100644 (file)
index 0000000..bd3f659
--- /dev/null
@@ -0,0 +1,217 @@
+# Written by Riccardo Petrocco
+# see LICENSE.txt for license information
+
+import sys
+import time
+from traceback import print_exc
+
+from BaseLib.Core.API import *
+from BaseLib.Core.TorrentDef import *
+from BaseLib.Core.DownloadConfig import get_default_dest_dir
+
+import BaseLib.Core.BitTornado.parseargs as parseargs
+
+FIRST_ITERATION = True
+QUIT_NOW = False
+SESSION = None
+
+argsdef = [('torr', '', 'original torrent file, mandatory argument!'), 
+           ('start', '0', 'start time in seconds'), 
+           ('end', '', 'end time in seconds, if not specified the program will download the original video file until the end'),
+           ('videoOut', 'video_part.mpeg', 'name for the segment of downloaded video'),
+           ('torrName', 'videoOut', 'name for the torrent created from the downloaded segment of video'),
+           ('destdir', 'default download dir','dir to save torrent (and stream)'),
+           ('continueDownload', 'False', 'set to true to continue downloading and seeding the original torrent'),
+           ('createNewTorr', 'True', 'create a torrent with the newly downloaded section of video'),
+           ('quitAfter', 'Flase', 'quit the program after the segmented video has been downloaded'),
+           ('seedAfter', 'True', 'share the newly created torrent'),
+           ('debug', 'False', 'set to true for additional information about the process')]
+
+def get_usage(defs):
+    return parseargs.formatDefinitions(defs,80)
+
+
+def vod_event_callback(d,event,params):
+  if event == VODEVENT_START:
+    
+    stream = params["stream"]
+    length   = params["length"]
+    mimetype = params["mimetype"]
+
+    global FIRST_ITERATION, QUIT_NOW, SESSION
+    epoch_server = None
+    epoch_local = time.time()
+    bitrate = None
+    estduration = None
+    currentSize = 0
+    partialSize = length
+    start = int(config['start'])
+    end = int(config['end'])
+
+    if FIRST_ITERATION:
+            
+      if config['debug']:
+        print >>sys.stderr, "main: Seeking to second: ", config['start'], "estimated duration: ", estduration
+            
+      file = None
+      blocksize = d.get_def().get_piece_length()
+    
+      if d.get_def().is_multifile_torrent():
+        file = d.get_selected_files()[0]
+      bitrate = d.get_def().get_bitrate(file)
+      if bitrate is not None:
+        estduration = float(length) / float(bitrate)
+
+      if config['debug']:
+        print >> sys.stderr, "main: Seeking: bitrate: ", bitrate, "duration: ", estduration
+    
+      if start < int(estduration):
+        seekbyte = float(bitrate * start)
+
+        # Works only with TS container
+        if mimetype == 'video/mp2t':
+          # Ric if it is a ts stream we can round the start
+          # byte to the beginning of a ts packet (ts pkt = 188 bytes)
+          seekbyte = seekbyte - seekbyte%188
+                    
+          stream.seek(int(seekbyte))      
+          
+          if config['debug']:  
+            print >>sys.stderr, "main: Seeking: seekbyte: ", seekbyte, "start time: ", config['start']
+
+        FIRST_ITERATION = False
+
+      else:
+        print >>sys.stderr, "main: Starting time exceeds video duration!!"
+
+    if end != '':
+      # Determine the final size of the stream depending on the end Time
+      endbyte = float( bitrate * int(config['end']) )
+      partialSize = endbyte - seekbyte
+
+              
+    else:
+      print >>sys.stderr, "Seeking to the the beginning" 
+      stream.seek(0)               
+        
+    basename = config['videoOut'] + '.mpeg'
+
+    if config['destdir'] == 'default download dir':
+        config['destdir'] = get_default_dest_dir()
+        
+    filename = os.path.join(config['destdir'], basename)
+
+    if config['debug']:
+      print >>sys.stderr, "main: Saving the file in the following location: ", filename
+
+    f = open(filename,"wb")
+    prev_data = None    
+
+    while not FIRST_ITERATION and (currentSize < partialSize):
+      data = stream.read()
+      if config['debug']:
+        print >>sys.stderr,"main: VOD ready callback: reading",type(data)
+        print >>sys.stderr,"main: VOD ready callback: reading",len(data)
+      if len(data) == 0 or data == prev_data:
+        if config['debug']:
+          print >>sys.stderr, "main: Same data replicated: we reached the end of the stream"
+        break
+      f.write(data)
+      currentSize += len(data)
+      prev_data = data
+        
+        
+    # Stop the download
+    if not config['continueDownload']:
+      #SESSION.remove_
+      d.stop()
+    
+    #seek(0)
+            
+    if config['quitAfter']:
+      QUIT_NOW = True
+
+    f.close()    
+    stream.close()
+       
+    print >> sys.stderr, "main: Seeking: END!!"
+
+    if config['createNewTorr']:
+        createTorr(filename)      
+
+def createTorr(filename):
+
+  # get the time in a convenient format
+  seconds = int(config['end']) - int(config['start'])
+  m, s = divmod(seconds, 60)
+  h, m = divmod(m, 60)
+
+  humantime = "%02d:%02d:%02d" % (h, m, s)
+
+  if config['debug']:
+    print >>sys.stderr, "duration for the newly created torrent: ", humantime
+
+  dcfg = DownloadStartupConfig()
+#  dcfg.set_dest_dir(basename)
+  tdef = TorrentDef()
+  tdef.add_content( filename, playtime=humantime)
+  tdef.set_tracker(SESSION.get_internal_tracker_url())
+  print >>sys.stderr, tdef.get_tracker()
+  tdef.finalize()
+  
+  if config['torrName'] == '':
+    torrentbasename = config['videoOut']+'.torrent'
+  else:
+    torrentbasename = config['torrName']+'.torrent'
+    
+  torrentfilename = os.path.join(config['destdir'],torrentbasename)
+  tdef.save(torrentfilename)
+    
+  if config['seedAfter']:
+    if config['debug']:
+      print >>sys.stderr, "Seeding the newly created torrent"
+    d = SESSION.start_download(tdef,dcfg)
+    d.set_state_callback(state_callback,getpeerlist=False)
+        
+
+def state_callback(ds):
+  try:
+    d = ds.get_download()
+    p = "%.0f %%" % (100.0*ds.get_progress())
+    dl = "dl %.0f" % (ds.get_current_speed(DOWNLOAD))
+    ul = "ul %.0f" % (ds.get_current_speed(UPLOAD))
+    print >>sys.stderr,dlstatus_strings[ds.get_status() ],p,dl,ul,"=====", d.get_def().get_name()
+  except:
+    print_exc()
+
+  return (1.0,False)
+
+if __name__ == "__main__":
+
+  config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})
+  print >>sys.stderr,"config is",config
+  print "fileargs is",fileargs
+    
+  if config['torr'] == '' or config['start'] == '':
+    print "Usage:  ",get_usage(argsdef)
+    sys.exit(0)
+
+
+  scfg = SessionStartupConfig()
+  scfg.set_megacache( False )
+  scfg.set_overlay( False )
+  s = Session( scfg )
+  
+  SESSION = s
+
+  tdef = TorrentDef.load( config['torr'] )
+  dscfg = DownloadStartupConfig()
+  dscfg.set_video_event_callback( vod_event_callback )
+
+  d = s.start_download( tdef, dscfg )
+
+  d.set_state_callback(state_callback,getpeerlist=False)
+
+  while not QUIT_NOW:
+    time.sleep(10)
+
diff --git a/instrumentation/next-share/BaseLib/Tools/stunserver.py b/instrumentation/next-share/BaseLib/Tools/stunserver.py
new file mode 100644 (file)
index 0000000..cbb5cfb
--- /dev/null
@@ -0,0 +1,323 @@
+# Written by Lucia D'Acunto
+# see LICENSE.txt for license information
+
+# natserver.py
+
+import os
+from socket import *
+from time import strftime
+import select
+import sys
+import thread
+
+
+DEBUG = True
+
+
+# Print usage information
+def usage() :
+
+    # NOTE(review): the message still says "natserver.py" although the
+    # file is installed as stunserver.py -- confirm the intended name.
+    print "Usage:"
+    print "    python natserver.py <serverport> <bounceip> <bounceport>"
+
+
+# Serve client connections (if server 1 or 2)
+# Runs forever: binds a UDP socket on serveraddr and answers the STUN-style
+# test probes "ping1"/"ping2"/"ping3"; any other datagram of the form
+# "host:port" is bounced to that address.
+def servemain(bounceaddr, serveraddr) :
+
+    # Set up the sockets
+    try :
+        udpsock = socket(AF_INET, SOCK_DGRAM)
+        udpsock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
+        udpsock.bind(serveraddr)
+
+    except error, (errno, strerror) :
+
+        # NOTE(review): if socket() itself raised, 'udpsock' is unbound
+        # here and this raises NameError -- verify.
+        if udpsock :
+            udpsock.close()
+
+        if DEBUG:
+            print >> sys.stderr, "Could not open socket: %s" % (strerror)
+        sys.stdout.flush()
+
+        sys.exit(1)
+
+    # Non-blocking mode; select() below does the waiting.
+    try :
+        udpsock.setblocking(0)
+    except error :
+        pass
+
+
+    # Loop forever receiving messages and sending pings
+    while 1 :
+
+        if DEBUG:
+            print >> sys.stderr, serveraddr, "Waiting for connection..."
+
+        try :
+            # Block until the socket is readable (no timeout).
+            ready_to_read, ready_to_write, errors = select.select([udpsock],[],[])
+
+        except (KeyboardInterrupt, SystemExit):
+
+            if udpsock :
+                udpsock.close()
+
+            if DEBUG:
+                print >> sys.stderr, "Exiting ..."
+
+            sys.exit(1)
+
+        except select.error, (errno, strerror) :
+
+            if udpsock :
+                udpsock.close()
+
+            if DEBUG:
+                print >> sys.stderr, "I/O error: %s" % (strerror)
+
+            sys.exit(1)
+
+        for i in ready_to_read :
+
+            if DEBUG:
+                print >> sys.stderr, "Incoming connection..."
+
+            # Serve udp connections
+            if i == udpsock :
+
+                BUFSIZ = 1024
+                try:
+                    data, clientaddr = udpsock.recvfrom(BUFSIZ)
+                    print >> sys.stderr, strftime("%Y/%m/%d %H:%M:%S"), "...connected from:", clientaddr
+
+                except error, (errno, strerr) :
+                    if DEBUG:
+                        print >> sys.stderr, strerr
+                    # Exits the for-loop only; the while-loop re-selects.
+                    break
+
+                # NOTE(review): the ping tests below are independent 'if's,
+                # and the final 'else' is bound only to the ping3 test.
+                # Therefore "ping1"/"ping2" datagrams ALSO fall into the
+                # bounce branch, fail the split(":"), and 'break' out of
+                # the for-loop. Probably meant to be an if/elif chain --
+                # confirm before relying on this behaviour.
+                if data == "ping1" : # The client is running Test I
+
+                    if DEBUG:
+                        print >> sys.stderr, "received ping1"
+
+                    # Echo the client's public (mapped) address back to it.
+                    reply = "%s:%s" % (clientaddr[0], clientaddr[1])
+                    try:
+                        udpsock.sendto(reply, clientaddr)
+                    except:
+                        break
+
+                if data == "ping2": # The client is running Test II
+
+                    if DEBUG:
+                        print >> sys.stderr, "received ping2"
+
+                    # Ask the bounce server to reply to the client from a
+                    # different IP (detects full-cone vs restricted NAT).
+                    reply = "%s:%s" % (clientaddr[0], clientaddr[1])
+                    try:
+                        udpsock.sendto(reply, bounceaddr)
+                    except:
+                        break
+                    if DEBUG:
+                        print >> sys.stderr, "bounce request is", reply
+                        print >> sys.stderr, "bounce request sent to ", (bounceaddr)
+
+                if data == "ping3" :  # The client is running Test III
+
+                    if DEBUG:
+                        print >> sys.stderr, "received ping3"
+
+                    # Create a new socket and bind it to a different port
+                    try :
+
+                        #serveraddr2 = (gethostbyname(gethostname()), int(sys.argv[1]) + 5)
+                        serveraddr2 = (serveraddr[0], serveraddr[1] + 5)
+                        udpsock2 = socket(AF_INET, SOCK_DGRAM)
+                        udpsock2.bind(serveraddr2)
+                        if DEBUG:
+                            print >> sys.stderr, "new socket bind at ", serveraddr2
+
+                    except error, (errno, strerror) :
+
+                        # NOTE(review): 'udpsock2' may be unbound here if
+                        # socket() raised -- would be a NameError.
+                        if udpsock2 :
+                            udpsock2.close()
+
+                        if DEBUG:
+                            print >> sys.stderr, "Could not open socket: %s" % (strerror)
+
+                        break
+
+                    # Send an echo back to the client using the new socket
+                    reply =  "%s:%s " % (clientaddr[0], clientaddr[1])
+                    print >> sys.stderr, "send an echo back to the client using the new socket... reply=", reply, "clientaddr=", clientaddr
+                    udpsock2.sendto(reply, clientaddr)
+
+                    udpsock2.close()
+
+                else:
+                    # Bounce request: payload is "host:port" of the final
+                    # destination the datagram should be forwarded to.
+                    if DEBUG:
+                        print >> sys.stderr, "data is: ", data
+
+                    try :
+                        host, port = data.split(":")
+
+                    except (ValueError) :
+                        break
+
+                    try :
+                        bouncedest = (host, int(port))
+
+                    except ValueError :
+                        break
+
+                    try:
+                        udpsock.sendto(data, bouncedest)
+                    except:
+                        break
+                    if DEBUG:
+                        print >> sys.stderr, "Bounceping sent to", bouncedest
+
+
+    # NOTE(review): unreachable -- the 'while 1' above never falls through.
+    udpsock.close()
+
+
+
+# Serve bounce connections
+# Runs forever: every received datagram must be "host:port"; the payload
+# is forwarded (bounced) to that address. Used as the second STUN server.
+def bouncemain(serveraddr) :
+
+    # Set up the sockets
+    try :
+        udpsock = socket(AF_INET, SOCK_DGRAM)
+        udpsock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
+        udpsock.bind(serveraddr)
+
+    except error, (errno, strerror) :
+
+        # NOTE(review): 'udpsock' is unbound here if socket() itself
+        # raised -- would be a NameError.
+        if udpsock :
+            udpsock.close()
+
+        if DEBUG:
+            print >> sys.stderr, "Could not open socket: %s" % (strerror)
+
+        sys.exit(1)
+
+    # Non-blocking mode; select() below does the waiting.
+    try :
+        udpsock.setblocking(0)
+    except error :
+        pass
+
+
+    # Loop forever receiving messages and sending pings
+    while 1 :
+
+        if DEBUG:
+            print >> sys.stderr, serveraddr,  "Waiting for connection..."
+
+        try :
+            ready_to_read, ready_to_write, errors = select.select([udpsock],[],[])
+
+        except (KeyboardInterrupt, SystemExit):
+
+            if udpsock :
+                udpsock.close()
+
+            if DEBUG:
+                print >> sys.stderr, "Exiting ..."
+
+            sys.exit(1)
+
+        except select.error, (errno, strerror) :
+
+            if udpsock :
+                udpsock.close()
+
+            if DEBUG:
+                print >> sys.stderr, "I/O error: %s" % (strerror)
+
+            sys.exit(1)
+
+        for i in ready_to_read :
+
+            if DEBUG:
+                print >> sys.stderr, "Incoming connection..."
+
+
+            # Serve udp connections
+            if i == udpsock :
+
+                BUFSIZ = 1024
+                data, clientaddr = udpsock.recvfrom(BUFSIZ)
+                print >> sys.stderr, strftime("%Y/%m/%d %H:%M:%S"), "...connected from: ", clientaddr
+                if DEBUG:
+                    print >> sys.stderr, "data is: ", data
+
+                # Malformed payloads just abandon this batch of ready
+                # sockets ('break' leaves the for-loop, not the server).
+                try :
+                    host, port = data.split(":")
+
+                except (ValueError) :
+                    break
+
+                try :
+                    bouncedest = (host, int(port))
+
+                except ValueError :
+                    break
+
+                try:
+                    udpsock.sendto(data, bouncedest)
+                except:
+                    break
+                if DEBUG:
+                    print >> sys.stderr, "Bounceping sent to", bouncedest
+
+    # NOTE(review): unreachable -- the 'while 1' above never falls through.
+    udpsock.close()
+
+
+
+if __name__=="__main__" :
+
+    # Server initialization
+
+    # Expects: natserver.py <serverport> <bounceip> <bounceport>
+    if len(sys.argv) != (4) :
+        usage()
+        sys.exit(1)
+
+    bounceaddr = None
+    serveraddr = None
+
+    # Address of the companion bounce server (for NAT Test II).
+    try :
+        bounceaddr = (sys.argv[2], int(sys.argv[3]))
+
+    except ValueError, strerror :
+        if DEBUG:
+            print >> sys.stderr, "ValueError: ", strerror
+        usage()
+        sys.exit(1)
+
+    try :
+        #serveraddr = (gethostbyname(gethostname()), int(sys.argv[1]))
+        # Bind on all interfaces at the given port.
+        serveraddr = ("0.0.0.0", int(sys.argv[1]))
+
+    except ValueError, strerror :
+        if DEBUG:
+            print >> sys.stderr, "ValueError: ", strerror
+        usage()
+        sys.exit(1)
+
+    # Run the appropriate server code
+    # servemain() only returns on unexpected errors; the loop restarts it.
+    while True:
+        try:
+            if DEBUG:
+                print >> sys.stderr, strftime("%Y/%m/%d %H:%M:%S"), "Stun server started"
+            #thread.start_new_thread(servemain, (bounceaddr, serveraddr) )
+            #bouncemain(serveraddr)
+            servemain(bounceaddr, serveraddr)
+        
+        except (KeyboardInterrupt, SystemExit):
+
+            if DEBUG:
+                print >> sys.stderr, "Exiting ..."
+
+            sys.exit(1)
+
+        #except:
+            #if DEBUG:
+                #print >> sys.stderr, "Unexpected error:", sys.exc_info()[0]
+
diff --git a/instrumentation/next-share/BaseLib/Tools/superpeer.py b/instrumentation/next-share/BaseLib/Tools/superpeer.py
new file mode 100644 (file)
index 0000000..c09801e
--- /dev/null
@@ -0,0 +1,103 @@
+# Written by Arno Bakker \r
+# see LICENSE.txt for license information\r
+#\r
+# See www.tribler.org/trac/wiki/SuperpeerMode for considerations.\r
+#\r
+\r
+\r
+import sys\r
+import os\r
+import shutil\r
+import time\r
+import tempfile\r
+import random\r
+import urllib2\r
+from traceback import print_exc\r
+from threading import Condition\r
+\r
+from BaseLib.Core.API import *\r
+import BaseLib.Core.BitTornado.parseargs as parseargs\r
+from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge\r
+import BaseLib.Core.BuddyCast.buddycast as BuddyCastMod\r
+BuddyCastMod.debug = True\r
+\r
+argsdef = [('nickname', '', 'name of the superpeer'),\r
+           ('port', 7001, 'TCP+UDP listen port'),\r
+           ('permid', '', 'filename containing EC keypair'),\r
+           ('overlaylogpostfix', '', 'postfix of filename where overlay is saved to, hostname+date are prepended, new log for each day automatically, default: spPORT.log'),\r
+           ('statedir', '.Tribler','dir to save session state'),\r
+           ('installdir', '', 'source code install dir')]\r
+\r
+\r
+# Format the argument definitions as an 80-column usage string.\r
+def get_usage(defs):\r
+    return parseargs.formatDefinitions(defs,80)\r
+    \r
+def olthread_start_session():\r
+    """ This code is run by the OverlayThread """\r
+    \r
+    # Build the session config from the parsed command-line 'config'\r
+    # (module-level global set in __main__).\r
+    sscfg = SessionStartupConfig()\r
+\r
+    sscfg.set_nickname(config['nickname'])\r
+    sscfg.set_listen_port(config['port'])\r
+    sscfg.set_state_dir(config['statedir'])\r
+    if config['installdir'] != '':\r
+        sscfg.set_install_dir(config['installdir'])\r
+\r
+    # Superpeer mode: participate in BuddyCast and log the overlay traffic.\r
+    sscfg.set_buddycast(True)\r
+    sscfg.set_superpeer(True)\r
+    sscfg.set_overlay_log(config['overlaylogpostfix'])\r
+    if config['permid'] != '':\r
+        sscfg.set_permid_keypair_filename(config['permid'])\r
+    \r
+    # Disable features\r
+    sscfg.set_torrent_collecting(False)\r
+    sscfg.set_torrent_checking(False)\r
+    sscfg.set_download_help(False)\r
+    sscfg.set_dialback(False)\r
+    sscfg.set_remote_query(False)\r
+    sscfg.set_internal_tracker(False)\r
+    \r
+    # Expose the Session to __main__ (which calls session.shutdown()).\r
+    global session\r
+    session = Session(sscfg)\r
+\r
+\r
+\r
+if __name__ == "__main__":\r
+    """ This code is run by the MainThread """\r
+\r
+    config, fileargs = parseargs.parseargs(sys.argv, argsdef, presets = {})\r
+    print >>sys.stderr,"superpeer: config is",config\r
+\r
+    # Default overlay log name: sp<port>.log\r
+    if config['overlaylogpostfix'] == '':\r
+        config['overlaylogpostfix'] = 'sp'+str(config['port'])+'.log'\r
+\r
+    #\r
+    # Currently we use an in-memory database for superpeers.\r
+    # SQLite supports only per-thread in memory databases.\r
+    # As our Session initialization is currently setup, the MainThread\r
+    # creates the DB and adds the superpeer entries, and the OverlayThread\r
+    # does most DB operations. So two threads accessing the DB.\r
+    #\r
+    # To work around this I start the Session using the OverlayThread.\r
+    # Dirty, but a simple solution.\r
+    # \r
+    overlay_bridge = OverlayThreadingBridge.getInstance()\r
+    overlay_bridge.add_task(olthread_start_session,0)\r
+    \r
+    #\r
+    # NetworkThread and OverlayThread will now do their work. The MainThread\r
+    # running this here code should wait indefinitely to avoid exiting the \r
+    # process.\r
+    #\r
+    try:\r
+        while True:\r
+            # time.sleep(sys.maxint) has "issues" on 64bit architectures; divide it\r
+            # by some value (2048) to solve problem\r
+            time.sleep(sys.maxint/2048)\r
+    except:\r
+        print_exc()\r
+    \r
+    # NOTE(review): 'global' at module level is a no-op; kept as-is.\r
+    global session\r
+    session.shutdown()\r
+    # Grace period for session threads to finish the shutdown.\r
+    time.sleep(3)\r
+    
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Tools/trial_poa_server.py b/instrumentation/next-share/BaseLib/Tools/trial_poa_server.py
new file mode 100644 (file)
index 0000000..287ce02
--- /dev/null
@@ -0,0 +1,262 @@
+# Written by Njaal Borch
+# see LICENSE.txt for license information
+
+import os.path
+
+import sys
+import socket # For IPv6 override
+import select
+import threading
+
+import BaseHTTPServer
+
+import random # Do not allow all nodes access
+
+
+from BaseLib.Core.ClosedSwarm import ClosedSwarm,Tools
+from BaseLib.Core.Statistics.Status import *
+
+# Add SocketServer.ThreadingMixIn to get multithreaded
+class MyWebServer(BaseHTTPServer.HTTPServer):
+    """
+    Non-blocking, multi-threaded IPv6 enabled web server
+    """
+
+    # Prefer IPv6 when this Python build supports it.
+    if socket.has_ipv6:
+        address_family = socket.AF_INET6
+
+    # Override in case python has IPv6 but system does not
+    def __init__(self, server_address, RequestHandlerClass):
+        try:
+            BaseHTTPServer.HTTPServer.__init__(self,
+                                               server_address,
+                                               RequestHandlerClass)
+        except:
+            # Fall back to IPv4 if the IPv6 bind failed.
+            print >>sys.stderr,"Failed to use IPv6, using IPv4 instead"
+            self.address_family = socket.AF_INET
+            BaseHTTPServer.HTTPServer.__init__(self,
+                                               server_address,
+                                               RequestHandlerClass)
+    # Override that blasted blocking thing!
+    def get_request(self):
+        """Get the request and client address from the socket.
+        Override to allow non-blocking requests.
+
+        WARNING: This will make "serve_forever" and "handle_request"
+        throw exceptions and stuff! Serve_forever thus does not work!
+        """
+
+        # Use select for non-blocking IO
+        # 1-second timeout; on timeout return (None, None), which makes
+        # handle_request() fail -- callers must tolerate that (see run()).
+        if select.select([self.socket], [], [], 1)[0]:
+            return self.socket.accept()
+        else:
+            return (None, None)
+
+
+
+
+class WebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+    """
+    Handle requests
+
+    POST requests carry 'swarm_id' and 'perm_id' form fields; the
+    handler returns a serialized Proof-of-Access (POA) for the swarm.
+    """
+
+    server_version = "CS_Trial/2009_09"
+
+    def log_message(self, format, *args):
+        """
+        Override message logging - don't want reverse DNS lookups
+        or output to stderr
+
+        The first argument, FORMAT, is a format string for the
+        message to be logged.  If the format string contains
+        any % escapes requiring parameters, they should be
+        specified as subsequent arguments (it's just like
+        printf!).
+
+        The client host and current date/time are prefixed to
+        every message.
+
+        """
+
+        print format%args
+        
+    def failed(self, code, message = None):
+        """
+        Request failed, return error
+
+        Always returns False so callers can 'return self.failed(...)'.
+        """
+        
+        try:
+            if message:
+                print "Sending %d (%s)"%(code, message)
+                self.send_error(code, message)
+            else:
+                print "Sending %d "%code
+                self.send_error(code)
+
+            try: # Should this be here?
+                self.end_headers()
+            except Exception,e:
+                print >>sys.stderr, "Error sending end_headers - I guess I shouldn't do  it then"
+            
+            #self.wfile.close()
+        except Exception,e:
+            
+            # Error sending error...  Log and ingnore
+            print >>sys.stderr, "Error sending error %s, ignoring (%s)"%(code, e)
+
+            # TODO: Remove this error thingy
+            raise Exception("Could not send error")
+        
+        return False
+
+
+       
+
+    def prepareSend(self, type, size=None, response=200):
+        # Send the status line and standard headers for a reply of the
+        # given content type (and optional Content-Length).
+
+        # We're ready!
+        try:
+            self.send_response(response)
+        except Exception, e:
+            print >>sys.stderr, "Error sending response: %s"%e
+            return
+
+        #self.send_header("date", makeRFC1123time(time.time()))
+        self.send_header("server", self.server_version)
+        self.send_header("Content-Type", type)
+        if size:
+            self.send_header("Content-Length",size)
+        self.end_headers()
+
+
+    def do_POST(self):
+        """
+        Handle a POST request for a POA for the trial
+        """
+
+
+        # Don't block forever here
+        # NOTE(review): relies on the private _sock attribute of the
+        # rfile object -- Python-2 specific; verify on upgrade.
+        self.rfile._sock.settimeout(5.0)
+
+        import cgi
+        env = {}
+        env['REQUEST_METHOD'] = self.command
+        if self.headers.typeheader is None:
+            env['CONTENT_TYPE'] = self.headers.type
+        else:
+            env['CONTENT_TYPE'] = self.headers.typeheader
+        length = self.headers.getheader('content-length')
+        if length:
+            env['CONTENT_LENGTH'] = length
+
+        form = cgi.FieldStorage(environ=env, fp=self.rfile)
+
+        # 400 Bad Request if the required form fields are missing.
+        try:
+            swarm_id = form['swarm_id'].value
+            perm_id = form['perm_id'].value
+        except:
+            return self.failed(400)
+
+        # 404 if there is no key for the swarm (or access was denied).
+        try:
+            poa = self.generate_poa(swarm_id, perm_id)
+        except Exception,e:
+            print >>sys.stderr, "Missing key for swarm '%s'"%swarm_id,e
+            return self.failed(404)
+        
+        self.prepareSend("application/octet-stream", len(poa))
+        self.wfile.write(poa)
+        self.wfile.close()
+        
+    def generate_poa(self, swarm_id, perm_id):
+        """
+        Generate a POA if the swarm-id private key is available
+
+        Raises on denial or missing/bad key; returns the serialized POA.
+        """
+
+        status = Status.get_status_holder("LivingLab")
+
+        # Randomly allow 80% to be authorized...
+        if random.randint(0,100) > 80:
+            status.create_and_add_event("denied", [swarm_id, perm_id])
+            status.get_status_element("poas_failed").inc()
+            raise Exception("Randomly denied...")
+
+        # KEY_PATH is a module-level global set in __main__.
+        key_file = os.path.join(KEY_PATH, swarm_id + ".tkey")
+        if not os.path.exists(key_file):
+            raise Exception("Missing key file")
+
+        # Load keys
+        try:
+            torrent_keypair = ClosedSwarm.read_cs_keypair(key_file)
+        except Exception,e:
+            raise Exception("Bad torrent key file")
+            
+        # TODO? Sanity - check that this key matches the torrent
+
+        poa = ClosedSwarm.create_poa(swarm_id, torrent_keypair, perm_id)
+        
+        status.create_and_add_event("allowed", [swarm_id, perm_id])
+        status.get_status_element("poas_generated").inc()
+
+        return poa.serialize()
+
+class WebServer(threading.Thread):
+    # Thread wrapper around MyWebServer: handle_request() in a loop
+    # until stop() is called.
+
+    def __init__(self, port):
+        threading.Thread.__init__(self)
+        print "Starting WebServer on port %s"%port
+        self.server = MyWebServer(('', int(port)), WebHandler)
+        self.port = port
+        self.running = False
+
+    def run(self):
+        
+        self.running = True
+        
+        print "WebServer Running on port %s"%self.port
+
+        while self.running:
+            try:
+                print "Waiting..."
+                self.server.handle_request()
+            except Exception,e:
+                # "unpack non-sequence" is raised by the (None, None)
+                # return of MyWebServer.get_request() on select timeout.
+                if e.args[0] != "unpack non-sequence":
+                    print >>sys.stderr, "Error handling request",e
+                
+                # Ignore these, Just means that there was no request
+                # waiting for us
+                pass
+
+        print "Web server Stopped"
+
+    def stop(self):
+        self.running = False
+
+        # Unblock any accept() in progress, then release the port.
+        self.server.socket.shutdown(socket.SHUT_RDWR)
+
+        self.server.socket.close()
+
+
+if __name__ == "__main__":
+
+    # Directory containing the per-swarm *.tkey key files.
+    KEY_PATH = "./"    
+
+    # LivingLab statistics: report generated/denied POAs periodically.
+    status = Status.get_status_holder("LivingLab")
+    id = "poa_generator"
+    # NOTE(review): LivingLabReporter is not imported explicitly here --
+    # presumably provided by the 'from ...Status import *' above; verify.
+    reporter = LivingLabReporter.LivingLabPeriodicReporter("Living lab CS reporter", 300, id) # Report every 5 minutes 
+    status.add_reporter(reporter)
+
+    status.create_status_element("poas_generated", 0)
+    status.create_status_element("poas_failed", 0)
+    
+        
+    ws = WebServer(8080)
+    ws.start()
+
+    # Run until the operator presses ENTER.
+    raw_input("WebServer running, press ENTER to stop it")
+
+    print "Stopping server"
+    reporter.stop()
+    ws.stop()
+    
+
+    
diff --git a/instrumentation/next-share/BaseLib/TrackerChecking/TorrentChecking.py b/instrumentation/next-share/BaseLib/TrackerChecking/TorrentChecking.py
new file mode 100644 (file)
index 0000000..75a2aa3
--- /dev/null
@@ -0,0 +1,211 @@
+# written by Yuan Yuan, Jie Yang
+# see LICENSE.txt for license information
+#
+#  
+# New Tracker Checking Algorithm by Jie Yang
+# ==========================
+# 
+# Each time when a torrent checking thread starts, it uses one policy to select
+# a torrent to check. The question turns to how to set the weight of these policies.
+#
+# Policy 1: Random 1/3
+#   Randomly select a torrent to collect (last_check < 5 min ago)
+#
+# Policy 2: Oldest (unknown) first  1/3
+#   Select the non-dead torrent which has not been checked for the longest time (last_check < 5 min ago)
+#
+# Policy 3: Popular (good) first    1/3
+#   Select the non-dead most popular (3*num_seeders+num_leechers) one which has not been checked in last N seconds
+#   (The default N = 4 hours, so at most 4h/torrentchecking_interval popular peers)
+#
+#===============================================================================
+
+import sys
+import threading
+from threading import Thread
+from random import sample
+from time import time
+
+from BaseLib.Core.BitTornado.bencode import bdecode
+from BaseLib.TrackerChecking.TrackerChecking import trackerChecking
+from BaseLib.Core.CacheDB.sqlitecachedb import safe_dict
+
+
+# LAYERVIOLATION: careful: uses two threads depending on code, make sure we have DB session per thread.
+from BaseLib.Core.CacheDB.CacheDBHandler import TorrentDBHandler
+from BaseLib.Core.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker
+#from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge
+
+DEBUG = False
+
+class TorrentChecking(Thread):
+    # One-shot daemon thread: picks a torrent (by policy or explicit
+    # infohash), queries its tracker(s) and the mainline DHT, and writes
+    # the refreshed seeder/leecher/status info back to the database.
+    
+    def __init__(self, infohash=None):
+        # infohash: if given, check exactly that torrent; otherwise one
+        # is selected by a randomly chosen policy (see selectPolicy()).
+        Thread.__init__(self)
+        self.setName('TorrentChecking'+self.getName())
+        if DEBUG:
+            print >> sys.stderr, 'TorrentChecking: Started torrentchecking', threading.currentThread().getName()
+        self.setDaemon(True)
+        
+        self.infohash = infohash
+        # After this many retries an "unknown" torrent is marked dead.
+        self.retryThreshold = 10
+        # NOTE(review): 'gnThreashold' (sic) appears unused in this class.
+        self.gnThreashold = 0.9
+        self.mldhtchecker = mainlineDHTChecker.getInstance()
+        self.db_thread = None   # if it is set, use another thread to access db
+        #self.db_thread = OverlayThreadingBridge.getInstance()
+        
+    def selectPolicy(self):
+        # Uniformly pick one of the three selection policies (see the
+        # algorithm description at the top of this file).
+        policies = ["oldest", "random", "popular"]
+        return sample(policies, 1)[0]
+        
+    def readTorrent(self, torrent):
+        # Read and bdecode the .torrent file; store its metadata (minus
+        # the bulky 'info' dict) under torrent['info']. On any failure
+        # the torrent dict is returned unmodified (no 'info' key).
+        try:
+            torrent_path = torrent['torrent_path']
+            f = open(torrent_path,'rb')
+            _data = f.read()
+            f.close()
+            data = bdecode(_data)
+            assert 'info' in data
+            del data['info']
+            torrent['info'] = data
+            return torrent
+        except Exception:
+            #print_exc()
+            return torrent
+            
+    def run(self):
+        """ Gets one torrent from good or unknown list and checks it """
+        
+        try:
+            if DEBUG:
+                print >> sys.stderr, "Torrent Checking: RUN", threading.currentThread().getName()
+                
+            # The DB call may run on another thread; it signals 'event'
+            # and stores its result in return_value['torrent'].
+            event = threading.Event()
+            return_value = safe_dict()
+            return_value['event'] = event
+            return_value['torrent'] = None
+            if self.infohash is None:   # select torrent by a policy
+                policy = self.selectPolicy()
+                if self.db_thread:
+                    self.db_thread.add_task(lambda:
+                        TorrentDBHandler.getInstance().selectTorrentToCheck(policy=policy, return_value=return_value))
+                else:
+                    TorrentDBHandler.getInstance().selectTorrentToCheck(policy=policy, return_value=return_value)
+            else:   # know which torrent to check
+                if self.db_thread:
+                    self.db_thread.add_task(lambda:TorrentDBHandler.getInstance().selectTorrentToCheck(infohash=self.infohash, return_value=return_value))
+                else:
+                    TorrentDBHandler.getInstance().selectTorrentToCheck(infohash=self.infohash, return_value=return_value)
+            # Give the DB up to 60s; on timeout 'torrent' stays None.
+            event.wait(60.0)
+            
+            torrent = return_value['torrent']
+            if DEBUG:
+                print >> sys.stderr, "Torrent Checking: get value from DB:", torrent
+            
+            if not torrent:
+                return
+    
+            # Back-off: a positive ignored_times just decrements and skips
+            # the actual check this round (policy-selected torrents only).
+            if self.infohash is None and torrent['ignored_times'] > 0:
+                if DEBUG:
+                    print >> sys.stderr, 'Torrent_checking: torrent: %s' % torrent
+                kw = { 'ignored_times': torrent['ignored_times']-1 }
+                if self.db_thread:
+                    self.db_thread.add_task(lambda:
+                        TorrentDBHandler.getInstance().updateTracker(torrent['infohash'], kw))
+                else:
+                    TorrentDBHandler.getInstance().updateTracker(torrent['infohash'], kw)
+                return
+    
+            # may be block here because the internet IO
+            torrent = self.readTorrent(torrent)    # read the torrent 
+            if 'info' not in torrent:    #torrent has been deleted
+                if self.db_thread:
+                    self.db_thread.add_task(lambda:
+                        TorrentDBHandler.getInstance().deleteTorrent(torrent['infohash']))
+                else:
+                    TorrentDBHandler.getInstance().deleteTorrent(torrent['infohash'])
+                return
+            
+            # TODO: tracker checking also needs to be update
+            if DEBUG:
+                print >> sys.stderr, "Tracker Checking"
+            # Mutates 'torrent' in place with seeder/leecher/status.
+            trackerChecking(torrent)
+            
+            # Must come after tracker check, such that if tracker dead and DHT still alive, the
+            # status is still set to good
+            self.mldhtchecker.lookup(torrent['infohash'])
+            
+            self.updateTorrentInfo(torrent)            # set the ignored_times
+            
+            kw = {
+                'last_check_time': int(time()),
+                'seeder': torrent['seeder'],
+                'leecher': torrent['leecher'],
+                'status': torrent['status'],
+                'ignored_times': torrent['ignored_times'],
+                'retried_times': torrent['retried_times'],
+                #'info': torrent['info']
+                }
+            
+            if DEBUG:
+                print >> sys.stderr, "Torrent Checking: selectTorrentToCheck:", kw
+            
+            if self.db_thread:
+                self.db_thread.add_task(lambda:
+                    TorrentDBHandler.getInstance().updateTorrent(torrent['infohash'], **kw))
+            else:
+                TorrentDBHandler.getInstance().updateTorrent(torrent['infohash'], **kw)
+        finally:
+            # Per-thread DB session: close it when we opened it ourselves.
+            if not self.db_thread:
+                TorrentDBHandler.getInstance().close()
+            
+#===============================================================================
+#    def tooFast(self, torrent):
+#        interval_time = long(time()) - torrent["last_check_time"]
+#        if interval_time < 60 * 5:
+#            return True
+#        return False
+#===============================================================================
+    
+    def updateTorrentInfo(self,torrent):
+        # Adjust ignored_times/retried_times/status in place based on the
+        # outcome of the tracker check:
+        #   good    -> check again next round (no back-off)
+        #   unknown -> back off linearly; dead after retryThreshold tries
+        #   dead    -> keep counting retries up to the threshold
+        if torrent["status"] == "good":
+            torrent["ignored_times"] = 0
+        elif torrent["status"] == "unknown":
+            if torrent["retried_times"] > self.retryThreshold:    # set to dead
+                torrent["ignored_times"] = 0
+                torrent["status"] = "dead"
+            else:
+                torrent["retried_times"] += 1 
+                torrent["ignored_times"] = torrent["retried_times"]
+        elif torrent["status"] == "dead": # dead
+            if torrent["retried_times"] < self.retryThreshold:
+                torrent["retried_times"] += 1 
+                    
+    def tooMuchRetry(self, torrent):
+        # True when the torrent has exceeded the retry threshold.
+        if (torrent["retried_times"] > self.retryThreshold):
+            return True
+        return False
+
+
+if __name__ == '__main__':
+    # Manual test driver: initialize the cache DB from the state dir
+    # given as argv[1], then run one policy-selected check and one
+    # check of a fixed infohash.
+    from BaseLib.Core.CacheDB.sqlitecachedb import init as init_db, str2bin
+    configure_dir = sys.argv[1]
+    config = {}
+    config['state_dir'] = configure_dir
+    config['install_dir'] = '.'
+    config['peer_icon_path'] = '.'
+    init_db(config)
+    t = TorrentChecking()
+    t.start()
+    t.join()
+    
+    
+    infohash_str = 'TkFX5S4qd2DPW63La/VObgOH/Nc='
+    infohash = str2bin(infohash_str)
+    
+    del t
+    
+    t = TorrentChecking(infohash)
+    t.start()
+    t.join()
+    
diff --git a/instrumentation/next-share/BaseLib/TrackerChecking/TrackerChecking.py b/instrumentation/next-share/BaseLib/TrackerChecking/TrackerChecking.py
new file mode 100644 (file)
index 0000000..b01bf82
--- /dev/null
@@ -0,0 +1,161 @@
+# written by Yuan Yuan
+# see LICENSE.txt for license information
+
+# single torrent checking without Thread
+import sys
+from BaseLib.Core.BitTornado.bencode import bdecode
+from random import shuffle
+import urllib
+import socket
+import BaseLib.Core.Utilities.timeouturlopen as timeouturlopen
+from time import time
+from traceback import print_exc
+
+HTTP_TIMEOUT = 30 # seconds
+
+DEBUG = False
+
+def trackerChecking(torrent):    # entry point: synchronous (non-threaded) tracker check
+    single_no_thread(torrent)              
+        
+def single_no_thread(torrent):  # query tracker(s), set torrent seeder/leecher/status/last_check_time
+    
+    (seeder, leecher) = (-2, -2)        # default dead
+    if ( torrent["info"].get("announce-list", "") == "" ):        # no announce-list
+        try:
+            announce = torrent["info"]["announce"]                    # get the single tracker
+            (s, l) = singleTrackerStatus(torrent, announce)
+            seeder = max(seeder, s)
+            leecher = max(leecher, l)
+        except:
+            pass  # tracker error: keep the default "dead" counts
+    else:                                                # have announce-list
+        for announces in torrent["info"]["announce-list"]:
+            a_len = len(announces)
+            if (a_len == 0):                            # length = 0: empty tier, skip
+                continue
+            if (a_len == 1):                            # length = 1: single tracker in tier
+                announce = announces[0]
+                (s, l) = singleTrackerStatus(torrent, announce)
+                seeder = max(seeder, s)
+                leecher = max(leecher, l)
+            else:                                        # length > 1: try trackers in random order
+                aindex = torrent["info"]["announce-list"].index(announces)                                    
+                shuffle(announces)
+                # Arno: protect against DoS torrents with many trackers in announce list. 
+                announces = announces[:16]
+                for announce in announces:                # for each announce
+                    (s, l) = singleTrackerStatus(torrent, announce)
+                    seeder = max(seeder, s)
+                    leecher = max(leecher, l)
+                    if seeder > 0:  # good
+                        break
+                if (seeder > 0 or leecher > 0):        # move the responding announce
+                    announces.remove(announce)            # to the front of the tier
+                    announces.insert(0, announce)                    
+                    torrent["info"]["announce-list"][aindex] = announces
+#                    print "one changed"
+            if (seeder > 0):
+                break
+    if (seeder == -3 and leecher == -3):
+        pass        # if interval problem, just keep the last status
+    else:
+        torrent["seeder"] = seeder
+        torrent["leecher"] = leecher
+        if (torrent["seeder"] > 0 or torrent["leecher"] > 0):
+            torrent["status"] = "good"
+        elif (torrent["seeder"] == 0 and torrent["leecher"] == 0):
+            torrent["status"] = "unknown"
+#            torrent["seeder"] = 0
+#            torrent["leecher"] = 0
+        elif (torrent["seeder"] == -1 and torrent["leecher"] == -1):    # unknown
+            torrent["status"] = "unknown"
+#            torrent["seeder"] = -1
+#            torrent["leecher"] = -1
+        else:        # if seeder == -2 and leecher == -2, dead
+            torrent["status"] = "dead"
+            torrent["seeder"] = -2
+            torrent["leecher"] = -2
+    torrent["last_check_time"] = long(time())
+    return torrent
+
+
+def singleTrackerStatus(torrent, announce):  # scrape one tracker for this torrent's counts
+    # return (-1, -1) means the status of torrent is unknown
+    # return (-2, -2) means the status of torrent is dead
+    # return (-3, -3) means the interval problem (tracker asked for back-off)
+    info_hash = torrent["infohash"]
+    
+    if DEBUG:
+        print >>sys.stderr,"TrackerChecking: Checking",announce,"for",`info_hash`
+    
+    url = getUrl(announce, info_hash)            # whether scrape support
+    if (url == None):                            # tracker url error
+        return (-2, -2)                            # use announce instead
+    try:
+        #print 'Checking url: %s' % url
+        (seeder, leecher) = getStatus(url, info_hash)
+        
+        if DEBUG:
+            print >>sys.stderr,"TrackerChecking: Result",(seeder,leecher)
+    except:
+        (seeder, leecher) = (-2, -2)  # any scrape failure counts as dead
+    return (seeder, leecher)
+
+# generate the query URL (scrape URL when the tracker supports scraping)
+def getUrl(announce, info_hash):
+    if (announce == -1):                        # tracker url error
+        return None                                # return None
+    announce_index = announce.rfind("announce")
+    last_index = announce.rfind("/")    
+    
+    url = announce    
+    if (last_index +1 == announce_index):        # scrape support
+        url = url.replace("announce","scrape")
+    url += "?info_hash=" + urllib.quote(info_hash)
+#    print url
+    return url
+
+
+            
+def getStatus(url, info_hash):  # fetch+parse scrape reply; returns (seeder, leecher), negative codes as above
+    try:
+        resp = timeouturlopen.urlOpenTimeout(url,timeout=HTTP_TIMEOUT)
+        response = resp.read()
+        
+    except IOError:
+#        print "IOError"
+        return (-1, -1)                    # unknown (network error)
+    except AttributeError:
+#        print "AttributeError"
+        return (-2, -2)                    # dead
+    
+    try:
+        response_dict = bdecode(response)
+
+    except:
+#        print "DeCode Error "  + response
+        return (-2, -2)                    # dead (malformed bencoded reply)
+    
+    try:
+        status = response_dict["files"][info_hash]
+        seeder = status["complete"]
+        if seeder < 0:
+            seeder = 0  # clamp negative counts reported by broken trackers
+        leecher = status["incomplete"]
+        if leecher < 0:
+            leecher = 0
+        
+    except KeyError:
+#        print "KeyError "  + info_hash + str(response_dict)
+        try:
+            if response_dict.has_key("flags"): # may be interval problem        
+                if response_dict["flags"].has_key("min_request_interval"):
+#                    print "interval problem"
+                    return (-3 ,-3)
+        except:
+            pass
+#        print "KeyError "  + info_hash + str(response_dict)
+        return (-2, -2)                    # dead
+    
+    return (seeder, leecher)
diff --git a/instrumentation/next-share/BaseLib/TrackerChecking/__init__.py b/instrumentation/next-share/BaseLib/TrackerChecking/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Mac/Info.plist b/instrumentation/next-share/BaseLib/Transport/Build/Mac/Info.plist
new file mode 100644 (file)
index 0000000..a63ab02
--- /dev/null
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+       <key>CFBundleDevelopmentRegion</key>
+       <string>English</string>
+       <key>CFBundleDocumentTypes</key>
+       <array>
+      <dict>
+         <key>CFBundleTypeIconFile</key>
+         <string>SwarmPlayerDoc</string>
+         <key>CFBundleTypeMIMETypes</key>
+         <array>
+            <string>application/x-tribler-stream</string>
+         </array>
+         <key>CFBundleTypeName</key>
+         <string>Tribler Stream Meta-Info</string>
+         <key>CFBundleTypeOSTypes</key>
+         <array>
+            <string>BTMF</string>
+         </array>
+         <key>CFBundleTypeRole</key>
+         <string>Viewer</string>
+         <key>NSDocumentClass</key>
+         <string>DownloadDocument</string>
+      </dict>
+               <dict>
+                       <key>CFBundleTypeOSTypes</key>
+                       <array>
+                               <string>****</string>
+                               <string>fold</string>
+                               <string>disk</string>
+                       </array>
+                       <key>CFBundleTypeRole</key>
+                       <string>Viewer</string>
+               </dict>
+       </array>
+       <key>CFBundleExecutable</key>
+       <string>SwarmPlayer</string>
+       <key>CFBundleIconFile</key>
+       <string>swarmplayer.icns</string>
+       <key>CFBundleIdentifier</key>
+       <string>SwarmPlayer</string>
+       <key>CFBundleInfoDictionaryVersion</key>
+       <string>1.0</string>
+       <key>CFBundleName</key>
+       <string>SwarmPlayer</string>
+       <key>CFBundlePackageType</key>
+       <string>APPL</string>
+       <key>CFBundleSignature</key>
+       <string>????</string>
+</dict>
+</plist>
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Mac/setupBGapp.py b/instrumentation/next-share/BaseLib/Transport/Build/Mac/setupBGapp.py
new file mode 100644 (file)
index 0000000..829cd1e
--- /dev/null
@@ -0,0 +1,80 @@
+# Written by Riccardo Petrocco
+# see LICENSE.txt for license information
+#
+# This script builds SwarmPlayer FF plugin
+#
+#
+
+import os
+from distutils.util import get_platform
+import sys,os,platform,shutil
+
+from plistlib import Plist
+
+from setuptools import setup
+import py2app # Not a superfluous import!
+
+from BaseLib.__init__ import LIBRARYNAME
+
+
+def includedir( srcpath, dstpath = None ):
+    """ Recursive directory listing, filtering out svn files. """
+
+    total = []
+
+    cwd = os.getcwd()
+    os.chdir( srcpath )
+
+    if dstpath is None:
+        dstpath = srcpath  # default: mirror the source layout
+
+    for root,dirs,files in os.walk( "." ):
+        if '.svn' in dirs:
+            dirs.remove('.svn')  # prune .svn so walk never descends into it
+
+        for f in files:
+            total.append( (root,f) )
+
+    os.chdir( cwd )
+
+    # format: (targetdir,[file])
+    # so for us, (dstpath/filedir,[srcpath/filedir/filename])
+    return [("%s/%s" % (dstpath,root),["%s/%s/%s" % (srcpath,root,f)]) for root,f in total]
+
+def filterincludes( l, f ):
+    """ Return includes which pass filter f. """
+
+    return [(x,y) for (x,y) in l if f(y[0])]  # f tests the first source path of each entry
+
+
+# modules to include into bundle
+includeModules=["encodings.hex_codec","encodings.utf_8","encodings.latin_1","xml.sax", "email.iterators"]
+
+# ----- build the app bundle
+mainfile = os.path.join(LIBRARYNAME,'Transport','SwarmEngine.py')
+
+setup(
+    setup_requires=['py2app'],
+    name='SwarmPlayer',
+    app=[mainfile],
+    options={ 'py2app': {
+        'argv_emulation': True,
+        'includes': includeModules,
+        'excludes': ["Tkinter","Tkconstants","tcl"],  # keep Tk out of the bundle
+        'iconfile': LIBRARYNAME+'/Player/Build/Mac/tribler.icns',
+        'plist': Plist.fromFile(LIBRARYNAME+'/Transport/Build/Mac/Info.plist'),
+        'resources':
+            [LIBRARYNAME+"/readme.txt",
+             LIBRARYNAME+"/Images/SwarmPlayerIcon.ico",
+             LIBRARYNAME+"/Player/Build/Mac/TriblerDoc.icns",
+           ]
+        # add images
+        + includedir( LIBRARYNAME+"/Images" )
+
+        # add Web UI files
+        + includedir( LIBRARYNAME+"/WebUI" )
+    } }
+)
+
+
+
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/changelog b/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/changelog
new file mode 100644 (file)
index 0000000..a9c0a31
--- /dev/null
@@ -0,0 +1,5 @@
+swarmplayer (2.1.0-1ubuntu0) lucid; urgency=low
+
+  * First release
+
+ -- Arno Bakker <arno@cs.vu.nl>  Wed, 16 Jun 2010 10:09:04 +0200
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/compat b/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/compat
new file mode 100644 (file)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/control b/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/control
new file mode 100644 (file)
index 0000000..eb3f16b
--- /dev/null
@@ -0,0 +1,17 @@
+Source: swarmplayer
+Section: web
+Priority: optional
+Maintainer: Arno Bakker <arno@cs.vu.nl>
+Build-Depends: debhelper (>= 7.0.50~), mozilla-devscripts (>= 0.19~)
+Standards-Version: 3.8.4
+Homepage: http://swarmplayer.p2p-next.org/
+
+Package: xul-ext-swarmplayer
+Architecture: all
+Depends: ${misc:Depends}, python, python-wxgtk2.8, python-m2crypto, python-apsw
+Recommends: ${xpi:Recommends}
+Provides: ${xpi:Provides}
+Enhances: ${xpi:Enhances}
+Description: Adds the P2P-based tribe:// transfer protocol to Firefox, which enables the use of torrent URLs inside e.g. the video tag.
+ .
+ Homepage: http://swarmplayer.p2p-next.org/
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/rules b/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/rules
new file mode 100755 (executable)
index 0000000..43c0052
--- /dev/null
@@ -0,0 +1,21 @@
+#! /usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+DS_XPI_FILES = bgprocess chrome components skin LICENSE.txt chrome.manifest icon.png install.rdf 
+
+include /usr/share/mozilla-devscripts/xpi.mk
+
+# 'clean' is defined in the included file
+build install binary binary-arch binary-indep:
+       cp -f dist/installdir/LICENSE.txt debian/copyright
+       dh $@
+
+override_dh_auto_build:
+       xpi-pack dist/installdir swarmplayer-deb.xpi
+
+override_dh_auto_install: xpi-install
+
+.PHONY: build install binary binary-arch binary-indep
+.PHONY: override_dh_auto_build override_dh_auto_install
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/source/format b/instrumentation/next-share/BaseLib/Transport/Build/Ubuntu/source/format
new file mode 100644 (file)
index 0000000..163aaf8
--- /dev/null
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/README.txt b/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/README.txt
new file mode 100644 (file)
index 0000000..651afb1
--- /dev/null
@@ -0,0 +1,15 @@
+\r
+This directory contains the build scripts for SwarmPlayer V2 for IE8.\r
+That is, given IE8 doesn't support HTML5 we cannot use the SwarmTransport\r
+concept for this browser. Instead, to have the SwarmPlayer concept work\r
+on all platforms we use the SwarmPlugin with just Ogg/Theora+Vorbis formats\r
+as a substitute. We call this SwarmPlayer V2 for IE8.\r
+\r
+To prevent interference with the normal SwarmPlugin (that has all codecs),\r
+the SwarmPlayer/Transport suite is independent from SwarmPlugin. That is,\r
+SwarmPlayers have their own statedir (.SwarmPlayer), TCP ports, and ActiveX/\r
+COM object IDs. In particular, 98FF91C0-A3B8-11DF-8555-0002A5D5C51B is the\r
+objectID to use for the SwarmPlayer IE8.\r
+\r
+Arno, 2010-08-09.\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/SwarmPlayer_IE.inf b/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/SwarmPlayer_IE.inf
new file mode 100644 (file)
index 0000000..3baf179
--- /dev/null
@@ -0,0 +1,26 @@
+; Version number and signature of INF file.\r
+; Written by Diego Andres Rabaioli\r
+; see LICENSE.txt for license information\r
+;\r
+[version]\r
+  signature="$CHICAGO$"\r
+  AdvancedINF=2.0\r
+\r
+[Add.Code]\r
+  axvlc.dll=axvlc.dll\r
+  SwarmPlayer_2.0.0.exe=SwarmPlayer_IE_2.0.0.exe\r
+\r
+[axvlc.dll]\r
+  FileVersion=2,0,0,0\r
+  clsid={98FF91C0-A3B8-11DF-8555-0002A5D5C51B}\r
+  RegisterServer=no\r
+  Hook=runinstaller\r
+\r
+[SwarmPlayer_2.0.0.exe]\r
+  FileVersion=2,0,0,0\r
+  file-win32-x86=http://swarmplayer.p2p-next.org/download/SwarmPlayer_IE_2.0.0.exe\r
+\r
+[runinstaller]\r
+  run=%EXTRACT_DIR%\SwarmPlayer_IE_2.0.0.exe\r
+\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/heading.bmp b/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/heading.bmp
new file mode 100644 (file)
index 0000000..7bdbfcd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/heading.bmp differ
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/patentfreevlc.bat b/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/patentfreevlc.bat
new file mode 100644 (file)
index 0000000..691a225
--- /dev/null
@@ -0,0 +1,251 @@
+del liba52tospdif_plugin.dll\r
+del liba52_plugin.dll\r
+del libaccess_bd_plugin.dll\r
+del libaccess_directory_plugin.dll\r
+REM  libaccess_fake_plugin.dll\r
+REM  libaccess_file_plugin.dll\r
+REM  libaccess_ftp_plugin.dll\r
+REM  libaccess_http_plugin.dll\r
+del libaccess_mms_plugin.dll\r
+del libaccess_output_dummy_plugin.dll\r
+del libaccess_output_file_plugin.dll\r
+del libaccess_output_http_plugin.dll\r
+del libaccess_output_rtmp_plugin.dll\r
+del libaccess_output_udp_plugin.dll\r
+del libaccess_raw_plugin.dll\r
+del libaccess_rtmp_plugin.dll\r
+del libaccess_smb_plugin.dll\r
+REM  libaccess_tcp_plugin.dll\r
+REM  libaccess_udp_plugin.dll\r
+del libadjust_plugin.dll\r
+del libadpcm_plugin.dll\r
+del libaes3_plugin.dll\r
+del libaiff_plugin.dll\r
+del libalphamask_plugin.dll\r
+REM  libaout_directx_plugin.dll\r
+del libaout_file_plugin.dll\r
+del libaraw_plugin.dll\r
+del libasf_plugin.dll\r
+del libaudioscrobbler_plugin.dll\r
+del libaudio_format_plugin.dll\r
+del libau_plugin.dll\r
+del libavcodec_plugin.dll\r
+del libavformat_plugin.dll\r
+del libavi_plugin.dll\r
+del libbandlimited_resampler_plugin.dll\r
+del libbda_plugin.dll\r
+del libblendbench_plugin.dll\r
+del libblend_plugin.dll\r
+del libbluescreen_plugin.dll\r
+del libcanvas_plugin.dll\r
+del libcc_plugin.dll\r
+del libcdda_plugin.dll\r
+del libcdg_plugin.dll\r
+del libchain_plugin.dll\r
+del libclone_plugin.dll\r
+del libcmml_plugin.dll\r
+del libcolorthres_plugin.dll\r
+REM  libconverter_fixed_plugin.dll\r
+REM  libconverter_float_plugin.dll\r
+del libcroppadd_plugin.dll\r
+del libcrop_plugin.dll\r
+del libcvdsub_plugin.dll\r
+del libdeinterlace_plugin.dll\r
+del libdemuxdump_plugin.dll\r
+del libdemux_cdg_plugin.dll\r
+del libdirac_plugin.dll\r
+REM  libdirect3d_plugin.dll\r
+del libdmo_plugin.dll\r
+del libdolby_surround_decoder_plugin.dll\r
+REM  libdrawable_plugin.dll\r
+REM  libdshow_plugin.dll\r
+del libdtstospdif_plugin.dll\r
+del libdts_plugin.dll\r
+REM  libdummy_plugin.dll\r
+del libdvbsub_plugin.dll\r
+del libdvdnav_plugin.dll\r
+del libequalizer_plugin.dll\r
+del liberase_plugin.dll\r
+del libes_plugin.dll\r
+del libexport_plugin.dll\r
+del libextract_plugin.dll\r
+del libfaad_plugin.dll\r
+del libfake_plugin.dll\r
+del libflacsys_plugin.dll\r
+del libflac_plugin.dll\r
+REM  libfloat32_mixer_plugin.dll\r
+del libfolder_plugin.dll\r
+del libfreetype_plugin.dll\r
+del libgaussianblur_plugin.dll\r
+del libgestures_plugin.dll\r
+del libglobalhotkeys_plugin.dll\r
+REM  libglwin32_plugin.dll\r
+del libgnutls_plugin.dll\r
+del libgoom_plugin.dll\r
+del libgradient_plugin.dll\r
+del libgrain_plugin.dll\r
+del libgrey_yuv_plugin.dll\r
+del libh264_plugin.dll\r
+del libheadphone_channel_mixer_plugin.dll\r
+del libhotkeys_plugin.dll\r
+del libhttp_plugin.dll\r
+REM  libi420_rgb_mmx_plugin.dll\r
+REM  libi420_rgb_plugin.dll\r
+REM  libi420_ymga_mmx_plugin.dll\r
+REM  libi420_ymga_plugin.dll\r
+REM  libi420_yuy2_mmx_plugin.dll\r
+REM  libi420_yuy2_plugin.dll\r
+REM  libi422_i420_plugin.dll\r
+REM  libi422_yuy2_mmx_plugin.dll\r
+REM  libi422_yuy2_plugin.dll\r
+del libinvert_plugin.dll\r
+del libinvmem_plugin.dll\r
+del libkate_plugin.dll\r
+del liblibmpeg2_plugin.dll\r
+del liblinear_resampler_plugin.dll\r
+REM  liblogger_plugin.dll\r
+del liblogo_plugin.dll\r
+del liblpcm_plugin.dll\r
+del liblua_plugin.dll\r
+del libm4v_plugin.dll\r
+del libmagnify_plugin.dll\r
+del libmarq_plugin.dll\r
+REM  libmemcpy3dn_plugin.dll\r
+REM  libmemcpymmxext_plugin.dll\r
+REM  libmemcpymmx_plugin.dll\r
+REM  libmemcpy_plugin.dll\r
+del libmjpeg_plugin.dll\r
+del libmkv_plugin.dll\r
+del libmod_plugin.dll\r
+del libmono_plugin.dll\r
+del libmosaic_plugin.dll\r
+del libmotionblur_plugin.dll\r
+del libmotiondetect_plugin.dll\r
+del libmp4_plugin.dll\r
+del libmpc_plugin.dll\r
+del libmpeg_audio_plugin.dll\r
+del libmpgv_plugin.dll\r
+del libmsn_plugin.dll\r
+del libmux_asf_plugin.dll\r
+del libmux_avi_plugin.dll\r
+REM  libmux_dummy_plugin.dll\r
+del libmux_mp4_plugin.dll\r
+del libmux_mpjpeg_plugin.dll\r
+REM  libmux_ogg_plugin.dll\r
+del libmux_ps_plugin.dll\r
+del libmux_ts_plugin.dll\r
+del libmux_wav_plugin.dll\r
+del libnoise_plugin.dll\r
+del libnormvol_plugin.dll\r
+del libnsc_plugin.dll\r
+del libnsv_plugin.dll\r
+del libntservice_plugin.dll\r
+del libnuv_plugin.dll\r
+REM  libogg_plugin.dll\r
+REM  libopengl_plugin.dll\r
+del libosdmenu_plugin.dll\r
+del libosd_parser_plugin.dll\r
+del libpacketizer_copy_plugin.dll\r
+del libpacketizer_dirac_plugin.dll\r
+del libpacketizer_h264_plugin.dll\r
+del libpacketizer_mlp_plugin.dll\r
+del libpacketizer_mpeg4audio_plugin.dll\r
+del libpacketizer_mpeg4video_plugin.dll\r
+del libpacketizer_mpegvideo_plugin.dll\r
+del libpacketizer_vc1_plugin.dll\r
+del libpanoramix_plugin.dll\r
+del libparam_eq_plugin.dll\r
+del libplaylist_plugin.dll\r
+del libpng_plugin.dll\r
+del libpodcast_plugin.dll\r
+del libpostproc_plugin.dll\r
+del libpsychedelic_plugin.dll\r
+del libps_plugin.dll\r
+del libpuzzle_plugin.dll\r
+del libpva_plugin.dll\r
+del librawaud_plugin.dll\r
+del librawdv_plugin.dll\r
+del librawvideo_plugin.dll\r
+del librawvid_plugin.dll\r
+del librc_plugin.dll\r
+del libreal_plugin.dll\r
+del libremoteosd_plugin.dll\r
+del libripple_plugin.dll\r
+del librotate_plugin.dll\r
+del librss_plugin.dll\r
+del librtp_plugin.dll\r
+del librv32_plugin.dll\r
+del libsap_plugin.dll\r
+del libscaletempo_plugin.dll\r
+del libscale_plugin.dll\r
+del libscene_plugin.dll\r
+del libscreen_plugin.dll\r
+del libsdl_image_plugin.dll\r
+del libsharpen_plugin.dll\r
+del libshout_plugin.dll\r
+del libshowintf_plugin.dll\r
+del libsimple_channel_mixer_plugin.dll\r
+del libsmf_plugin.dll\r
+del libspatializer_plugin.dll\r
+REM  libspdif_mixer_plugin.dll\r
+del libspeex_plugin.dll\r
+del libspudec_plugin.dll\r
+del libstats_plugin.dll\r
+del libstream_filter_rar_plugin.dll\r
+del libstream_filter_record_plugin.dll\r
+del libstream_out_autodel_plugin.dll\r
+del libstream_out_bridge_plugin.dll\r
+del libstream_out_description_plugin.dll\r
+del libstream_out_display_plugin.dll\r
+del libstream_out_dummy_plugin.dll\r
+del libstream_out_duplicate_plugin.dll\r
+del libstream_out_es_plugin.dll\r
+del libstream_out_gather_plugin.dll\r
+del libstream_out_mosaic_bridge_plugin.dll\r
+del libstream_out_raop_plugin.dll\r
+del libstream_out_record_plugin.dll\r
+del libstream_out_rtp_plugin.dll\r
+del libstream_out_standard_plugin.dll\r
+del libstream_out_transcode_plugin.dll\r
+del libstream_out_transrate_plugin.dll\r
+del libsubsdec_plugin.dll\r
+del libsubsusf_plugin.dll\r
+del libsubtitle_plugin.dll\r
+del libsvcdsub_plugin.dll\r
+del libt140_plugin.dll\r
+del libtaglib_plugin.dll\r
+del libtelnet_plugin.dll\r
+del libtelx_plugin.dll\r
+REM  libtheora_plugin.dll\r
+del libtransform_plugin.dll\r
+REM  libtrivial_channel_mixer_plugin.dll\r
+REM  libtrivial_mixer_plugin.dll\r
+REM  libtrivial_resampler_plugin.dll\r
+del libts_plugin.dll\r
+del libtta_plugin.dll\r
+del libtwolame_plugin.dll\r
+del libty_plugin.dll\r
+del libugly_resampler_plugin.dll\r
+del libvc1_plugin.dll\r
+del libvcd_plugin.dll\r
+del libvisual_plugin.dll\r
+REM  libvmem_plugin.dll\r
+del libvobsub_plugin.dll\r
+del libvoc_plugin.dll\r
+del libvod_rtsp_plugin.dll\r
+REM  libvorbis_plugin.dll\r
+REM  libvout_directx_plugin.dll\r
+del libwall_plugin.dll\r
+del libwaveout_plugin.dll\r
+REM  libwave_plugin.dll\r
+del libwav_plugin.dll\r
+REM libwingdi_plugin.dll\r
+del libx264_plugin.dll\r
+del libxa_plugin.dll\r
+del libxml_plugin.dll\r
+del libxtag_plugin.dll\r
+REM  libyuvp_plugin.dll\r
+REM  libyuv_plugin.dll\r
+REM  libyuy2_i420_plugin.dll\r
+REM  libyuy2_i422_plugin.dll\r
+del libzip_plugin.dll\r
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/swarmplayer.exe.manifest b/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/swarmplayer.exe.manifest
new file mode 100644 (file)
index 0000000..62bafdb
--- /dev/null
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\r
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">\r
+       <assemblyIdentity\r
+          version="0.1.0.0"\r
+          processorArchitecture="X86"\r
+          name="Microsoft.Winweb.SwarmPlugin"\r
+          type="win32"\r
+       />\r
+       <description>SwarmPlayer plugin for Internet Explorer</description>\r
+       <dependency>\r
+          <dependentAssembly>\r
+            <assemblyIdentity\r
+              type="win32"\r
+              name="Microsoft.Windows.Common-Controls"\r
+              version="6.0.0.0"\r
+              processorArchitecture="X86"\r
+              publicKeyToken="6595b64144ccf1df"\r
+              language="*"\r
+            />\r
+          </dependentAssembly>\r
+       </dependency>\r
+</assembly>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/swarmplayer_IE_only.nsi b/instrumentation/next-share/BaseLib/Transport/Build/Win32/IE8/swarmplayer_IE_only.nsi
new file mode 100644 (file)
index 0000000..f709815
--- /dev/null
@@ -0,0 +1,174 @@
+!define PRODUCT "SwarmPlayer"\r
+!define VERSION "2.0.0"\r
+!define BG "bgprocess"\r
+\r
+\r
+!include "MUI.nsh"\r
+\r
+;--------------------------------\r
+;Configuration\r
+\r
+;General\r
+Name "${PRODUCT} ${VERSION}"\r
+OutFile "${PRODUCT}_${VERSION}.exe"\r
+\r
+;Folder selection page\r
+InstallDir "$PROGRAMFILES\${PRODUCT}"\r
\r
+;Remember install folder\r
+InstallDirRegKey HKCU "Software\${PRODUCT}" ""\r
+\r
+;\r
+; Uncomment for smaller file size\r
+;\r
+SetCompressor "lzma"\r
+;\r
+; Uncomment for quick built time\r
+;\r
+;SetCompress "off"\r
+\r
+CompletedText "Installation completed. Thank you for choosing ${PRODUCT}"\r
+\r
+BrandingText "${PRODUCT}"\r
+\r
+;--------------------------------\r
+;Modern UI Configuration\r
+\r
+!define MUI_ABORTWARNING\r
+!define MUI_HEADERIMAGE\r
+!define MUI_HEADERIMAGE_BITMAP "heading.bmp"\r
+\r
+;--------------------------------\r
+;Pages\r
+\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT "I accept"\r
+!define MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE "I decline"\r
+;   !define MUI_FINISHPAGE_RUN "$INSTDIR\swarmplayer.exe"\r
+\r
+!insertmacro MUI_PAGE_INSTFILES\r
+\r
+!insertmacro MUI_UNPAGE_CONFIRM\r
+!insertmacro MUI_UNPAGE_INSTFILES\r
+\r
+;!insertmacro MUI_DEFAULT UMUI_HEADERIMAGE_BMP heading.bmp"\r
+\r
+;--------------------------------\r
+;Languages\r
+\r
+!insertmacro MUI_LANGUAGE "English"\r
\r
+;--------------------------------\r
+;Language Strings\r
+\r
+;Description\r
+LangString DESC_SecMain ${LANG_ENGLISH} "Install ${PRODUCT}"\r
+LangString DESC_SecStart ${LANG_ENGLISH} "Create Start Menu Shortcuts"\r
+\r
+;--------------------------------\r
+;Installer Sections\r
+\r
+Section "!Main EXE" SecMain\r
+ SectionIn RO\r
+ SetOutPath "$INSTDIR"\r
+ File *.txt\r
+  ; TODO : add checkbox for IE and Fx\r
+ File activex\axvlc.dll\r
+ File activex\axvlc.dll.manifest\r
+ File *.dll\r
+ File *.dll.manifest\r
\r
+ File /r bgprocess\r
+\r
+ File /r plugins\r
+ File /r locale\r
+ ; Arno, 2010-08-10: Appears to work without.\r
+ ;File /r osdmenu\r
+ ;File /r http\r
+\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "BGProcessPath" "$INSTDIR\bgprocess\SwarmEngine.exe"\r
+ WriteRegStr HKLM "Software\${PRODUCT}" "InstallDir" "$INSTDIR"\r
\r
+ ; Register IE Plug-in\r
+ RegDLL "$INSTDIR\axvlc.dll"\r
+\r
+; Vista Registration\r
+  ; Vista detection\r
+  ReadRegStr $R0 HKLM "SOFTWARE\Microsoft\Windows NT\CurrentVersion" CurrentVersion\r
+  StrCpy $R1 $R0 3\r
+  StrCmp $R1 '6.0' lbl_vista lbl_done\r
+\r
+  ; TODO : look at that\r
+  lbl_vista:\r
+  WriteRegStr HKLM "Software\RegisteredApplications" "${PRODUCT}" "Software\Clients\Media\${PRODUCT}\Capabilities"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationName" "${PRODUCT} media player"\r
+  WriteRegStr HKLM "Software\Clients\Media\${PRODUCT}\Capabilities" "ApplicationDescription" "${PRODUCT} - Torrent videostreaming browser plugin"\r
+\r
+  lbl_done:\r
+\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "DisplayName" "${PRODUCT} (remove only)"\r
+ WriteRegStr HKEY_LOCAL_MACHINE "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}" "UninstallString" "$INSTDIR\Uninstall.exe"\r
+\r
+; Now writing to KHEY_LOCAL_MACHINE only -- remove references to uninstall from current user\r
+ DeleteRegKey HKEY_CURRENT_USER "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+; Remove old error log if present\r
+ Delete "$INSTDIR\swarmplayer.exe.log"\r
+\r
+ WriteUninstaller "$INSTDIR\Uninstall.exe"\r
+\r
+  ; Add an application to the firewall exception list - All Networks - All IP Version - Enabled\r
+  SimpleFC::AddApplication "SwarmEngine" "$INSTDIR\bgprocess\SwarmEngine.exe" 0 2 "" 1\r
+\r
+  ; Pop $0 ; return error(1)/success(0)\r
+\r
+\r
+SectionEnd\r
+\r
+Section "Startmenu Icons" SecStart\r
+   CreateDirectory "$SMPROGRAMS\${PRODUCT}"\r
+   CreateShortCut "$SMPROGRAMS\${PRODUCT}\Uninstall.lnk" "$INSTDIR\Uninstall.exe" "" "$INSTDIR\Uninstall.exe" 0\r
+SectionEnd\r
+\r
+\r
+;--------------------------------\r
+;Descriptions\r
+\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecMain} $(DESC_SecMain)\r
+!insertmacro MUI_DESCRIPTION_TEXT ${SecStart} $(DESC_SecStart)\r
+!insertmacro MUI_FUNCTION_DESCRIPTION_END\r
+\r
+;--------------------------------\r
+;Uninstaller Section\r
+\r
+Section "Uninstall"\r
+\r
+ UnRegDLL "$INSTDIR\axvlc.dll"\r
+ RMDir /r "$INSTDIR"\r
+\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "Software\Clients\Media\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\${PRODUCT}"\r
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT}"\r
+\r
+ ; Remove an application from the firewall exception list\r
+ SimpleFC::RemoveApplication "$INSTDIR\bgprocess\SwarmEngine.exe"\r
+\r
+ ; Pop $0 ; return error(1)/success(0)\r
+\r
+SectionEnd\r
+\r
+\r
+;--------------------------------\r
+;Functions Section\r
+\r
+Function .onInit\r
+  System::Call 'kernel32::CreateMutexA(i 0, i 0, t "SwarmPlayer") i .r1 ?e' \r
+\r
+  Pop $R0 \r
+\r
+  StrCmp $R0 0 +3 \r
+\r
+  MessageBox MB_OK "The installer is already running."\r
+\r
+  Abort \r
+FunctionEnd\r
diff --git a/instrumentation/next-share/BaseLib/Transport/Build/Win32/setupBGexe.py b/instrumentation/next-share/BaseLib/Transport/Build/Win32/setupBGexe.py
new file mode 100644 (file)
index 0000000..00082b7
--- /dev/null
@@ -0,0 +1,17 @@
+# Written by Diego Rabioli, Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# Run from console: "python createBGexe.py py2exe"\r
+import os\r
+\r
+from distutils.core import setup\r
+import py2exe # Not a superfluous import!\r
+\r
+from BaseLib.__init__ import LIBRARYNAME\r
+mainfile = os.path.join(LIBRARYNAME,'Transport','SwarmEngine.py')\r
+\r
+# Arno: 2009-06-09: changed from console= to make sure py2exe writes\r
+# a BackgroundProcess.exe.log\r
+#\r
+setup(windows=[mainfile]) \r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/README.txt b/instrumentation/next-share/BaseLib/Transport/README.txt
new file mode 100644 (file)
index 0000000..bae2dd2
--- /dev/null
@@ -0,0 +1,39 @@
+install dependencies:
+ sudo apt-get install python-m2crypto python-openssl python2.6-wxgtk2.8 python-apsw
+
+in one terminal:
+ bzr branch lp:~j/+junk/tribe/
+ cd tribe
+ ant
+ or for dev setup:
+  echo `pwd` > ~/.mozilla/firefox/*default/extensions/tribe@p2pnext.org
+
+restart firefox and open trib/test.html
+
+to enable debugging open about:config and set
+tribe.logging.enabled to true
+
+Arno Remarks:
+==========
+- Using the domain name "p2p-next.org" gives problems on Linux
+- To run from source, by linking ~/.mozilla/firefox/..../tribe@p2pnext.org to Tribler/Transport
+  you must add a symbolic link in the bgprocess dir that links to Tribler. Or use
+  a different bgprocessd.
+  
+- The xulrunner that comes with Ubuntu lucid gives problems, I manually installed
+  1.9.1.7 which does work:
+       
+  wget http://releases.mozilla.org/pub/mozilla.org/xulrunner/releases/1.9.1.7/source/xulrunner-1.9.1.7.source.tar.bz2
+  gtar -xvjf xulrunner-1.9.1.7.source.tar.bz2
+  sudo apt-get build-dep xulrunner-1.9.2
+  cd mozilla-1.9.1/
+  .../Tribler/Transport/lucid-configure-xulrunner191.sh
+  make 
+  make install
+  Make coffee :-(
+  
+- With xpitransmakedeb.sh you can create a .deb that installs SwarmTransport
+  as a FX extension. Required software:
+     devscripts
+     mozilla-devscripts
+     
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/Transport/SwarmEngine.py b/instrumentation/next-share/BaseLib/Transport/SwarmEngine.py
new file mode 100644 (file)
index 0000000..c953256
--- /dev/null
@@ -0,0 +1,27 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+# This is the main file for the SwarmPlayer V2, the transport protocol for \r
+# use with HTML5 can be found in Transport/SwarmEngine.py (Sharing code with \r
+# SwarmPlugin and SwarmPlayer v1 (standalone player) confusing the code a bit).\r
+#\r
+#\r
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r
+# This is the SwarmEngine.py for the SwarmTransport which currently self \r
+# destructs when the browser quits.\r
+#\r
+# So there are two SwarmEngine.py's\r
+#\r
+\r
+from BaseLib.Plugin.BackgroundProcess import run_bgapp\r
+\r
+# Disjunct from SwarmPlayer 1.0 and SwarmPlugin\r
+I2I_LISTENPORT = 62063\r
+BG_LISTENPORT = 8622\r
+VIDEOHTTP_LISTENPORT = 6877\r
+\r
+def start():\r
+    run_bgapp("SwarmPlayer","2.1.0",I2I_LISTENPORT,BG_LISTENPORT,VIDEOHTTP_LISTENPORT,killonidle=True)\r
+\r
+if __name__ == '__main__':\r
+    start()\r
diff --git a/instrumentation/next-share/BaseLib/Transport/__init__.py b/instrumentation/next-share/BaseLib/Transport/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Transport/bgprocess/swarmengined b/instrumentation/next-share/BaseLib/Transport/bgprocess/swarmengined
new file mode 100644 (file)
index 0000000..3af5a80
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+import sys
+import os
+import getpass
+
+logfilename = "/tmp/swarmplay-"+getpass.getuser()+".log"
+f = open(logfilename,"w")
+sys.stderr = f
+
+tribler_root = os.path.abspath(os.path.dirname(__file__))
+os.chdir(tribler_root)
+
+sys.path.insert(0, tribler_root)
+import BaseLib.Plugin.BackgroundProcess
+import BaseLib.Transport.SwarmEngine
+
+BaseLib.Plugin.BackgroundProcess.DEBUG = False
+if len(sys.argv) > 1 and sys.argv[1] == 'debug':
+   BaseLib.Plugin.BackgroundProcess.DEBUG = True
+
+BaseLib.Transport.SwarmEngine.start()
diff --git a/instrumentation/next-share/BaseLib/Transport/chrome.manifest b/instrumentation/next-share/BaseLib/Transport/chrome.manifest
new file mode 100644 (file)
index 0000000..e8ba848
--- /dev/null
@@ -0,0 +1,8 @@
+content     tribe chrome/content/
+skin        tribe wiki skin/wiki/
+
+# Firefox
+overlay        chrome://browser/content/browser.xul chrome://tribe/content/tribe_status_bar.xul
+
+
+
diff --git a/instrumentation/next-share/BaseLib/Transport/chrome/content/tribe_status_bar.js b/instrumentation/next-share/BaseLib/Transport/chrome/content/tribe_status_bar.js
new file mode 100644 (file)
index 0000000..69fd845
--- /dev/null
@@ -0,0 +1,184 @@
+\r
+/*\r
+  TribeStatusBar - functions for the SwarmPlayer status bar\r
+\r
+  Written by Riccardo Petrocco\r
+  see LICENSE.txt for license information\r
+*/\r
+
+// TODO make async requests using ajax
+
+var TribeStatusBar = {
+       // Install a timeout handler to install the interval routine
+
+  startup: function()
+  {
+    this.refreshInformation();
+    window.setInterval(this.refreshInformation, 1000);
+    this.tribeChannel = null;
+  },
+
+
+  // Called periodically to refresh traffic information
+  refreshInformation: function()
+  {
+
+    var httpRequest = null;
+    var fullUrl = "http://127.0.0.1:6877/webUI?&{%22method%22:%22get_speed_info%22}";
+    var tribeBar = this;
+
+    function infoReceived()
+    {
+
+           var tribePanel = document.getElementById('tribestatusbar');
+           var output = httpRequest.responseText;
+
+               
+           if (output.length)
+           {
+                   var resp = JSON.parse(output);
+
+                   if(resp.success) {
+                     
+                     if (tribePanel.src != "chrome://tribe/skin/swarmplugin.png") {
+                   
+                       tribePanel.src = "chrome://tribe/skin/swarmplugin.png";
+                       //tribePanel.onclick = openWebUI;
+                       tribePanel.onclick = openAndReuseTab;
+                   tribePanel.tooltipText="Click here to access the SwarmPlayer Web Interface"
+                 }
+                 
+                     tribePanel.label = "Down: " + parseInt(resp.downspeed) + " KB/s, Up: " + parseInt(resp.upspeed) + " KB/s";
+        }                              
+               
+
+           }
+
+    }
+    
+    function openWebUI()
+        {
+          var win = Components.classes['@mozilla.org/appshell/window-mediator;1'].getService(Components.interfaces.nsIWindowMediator).getMostRecentWindow('navigator:browser'); 
+          win.openUILinkIn('http://127.0.0.1:6877/webUI', 'tab');
+        }
+        
+    function openAndReuseTab() 
+        {
+          url = "http://127.0.0.1:6877/webUI";
+          var wm = Components.classes["@mozilla.org/appshell/window-mediator;1"]
+                             .getService(Components.interfaces.nsIWindowMediator);
+          var browserEnumerator = wm.getEnumerator("navigator:browser");
+
+          // Check each browser instance for our URL
+          var found = false;
+          while (!found && browserEnumerator.hasMoreElements()) {
+            var browserWin = browserEnumerator.getNext();
+            var tabbrowser = browserWin.gBrowser;
+
+            // Check each tab of this browser instance
+            var numTabs = tabbrowser.browsers.length;
+            for (var index = 0; index < numTabs; index++) {
+              var currentBrowser = tabbrowser.getBrowserAtIndex(index);
+              if (url == currentBrowser.currentURI.spec) {
+
+                // The URL is already opened. Select this tab.
+                tabbrowser.selectedTab = tabbrowser.tabContainer.childNodes[index];
+
+                // Focus *this* browser-window
+                browserWin.focus();
+
+                found = true;
+                break;
+              }
+            }
+          }
+
+          // Our URL isn't open. Open it now.
+          if (!found) {
+            var recentWindow = wm.getMostRecentWindow("navigator:browser");
+            if (recentWindow) {
+              // Use an existing browser window
+              recentWindow.delayedOpenTab(url, null, null, null, null);
+            }
+            else {
+              // No browser windows are open, so open a new one.
+              window.open(url);
+            }
+          }
+      }
+
+    
+    function restartBG()
+    {
+
+      TribeStatusBar.startBG();
+
+    }
+    
+    function restoreBar()
+    {
+           var tribePanel = document.getElementById('tribestatusbar');
+
+      if (tribePanel.src != "chrome://tribe/skin/swarmplugin_grey.png") {    
+          tribePanel.src = "chrome://tribe/skin/swarmplugin_grey.png";
+             tribePanel.onclick=restartBG;
+             tribePanel.label = " ";
+                 tribePanel.tooltipText="SwarmPlayer: Sharing is disabled. Click here to start sharing"
+                   
+                 TribeStatusBar.tribeChannel = null;
+      }
+      
+    }
+
+    //TODO remove
+    function reqTimeout()
+    {
+        httpRequest.abort();
+        return;
+        // Note that at this point you could try to send a notification to the
+        // server that things failed, using the same xhr object.
+    }
+    
+    try 
+    {
+        httpRequest = new XMLHttpRequest();
+        httpRequest.open("GET", fullUrl, true);
+        httpRequest.onload = infoReceived;
+        httpRequest.onerror = restoreBar;
+        httpRequest.send(null);
+        // Timeout to abort in 5 seconds
+        //var reqTimeout = setTimeout(reqTimeout(),1000);
+        setTimeout(function()
+            {
+                httpRequest.abort();
+                return;
+            }
+            ,1000);
+    }
+    catch( err )
+    {
+        aMsg = ("*** StatusBar : " + err.description);\r
+        Cc["@mozilla.org/consoleservice;1"].getService(Ci.nsIConsoleService).logStringMessage(aMsg);\r
+        dump(aMsg);
+    }
+  },
+  
+  startBG: function() {
+
+    if (this.tribeChannel == null) { 
+      var tribeChannel = Components.classes['@p2pnext.org/tribe/channel;1'].getService().wrappedJSObject;
+                                       
+      this.tribeChannel = tribeChannel;
+                                       
+    }
+    
+    if (!tribeChannel.init) {
+      tribeChannel.startBackgroundDaemon();
+    }
+    
+  },
+  
+}
+
+
+window.addEventListener("load", function(e) { TribeStatusBar.startup(); }, false);
diff --git a/instrumentation/next-share/BaseLib/Transport/chrome/content/tribe_status_bar.xul b/instrumentation/next-share/BaseLib/Transport/chrome/content/tribe_status_bar.xul
new file mode 100644 (file)
index 0000000..4a585cd
--- /dev/null
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!DOCTYPE overlay >
+<overlay id="tribe_status_bar_overlay"
+  xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
+
+<script type="application/javascript"
+   src="chrome://tribe/content/tribe_status_bar.js"/>
+
+<!-- Firefox -->
+    <statusbar id="status-bar">
+           <statusbarpanel 
+               class="statusbarpanel-iconic-text"
+               id="tribestatusbar"
+               src="chrome://tribe/skin/swarmplugin_grey.png"
+                   label=""
+                   tooltiptext="SwarmPlayer: Sharing is disabled. Click here to start sharing"
+                   onclick="TribeStatusBar.startBG()"
+           />
+
+
+    </statusbar>
+
+</overlay>
diff --git a/instrumentation/next-share/BaseLib/Transport/components/SwarmTransport.js b/instrumentation/next-share/BaseLib/Transport/components/SwarmTransport.js
new file mode 100644 (file)
index 0000000..d321086
--- /dev/null
@@ -0,0 +1,54 @@
+// -*- coding: utf-8 -*-
+// vi:si:et:sw=2:sts=2:ts=2
+/*
+  JavaScript global constructor swarmTransport
+  
+  Written by Jan Gerber
+  see LICENSE.txt for license information
+ */
+
+Components.utils.import("resource://gre/modules/XPCOMUtils.jsm");
+
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+
+function SwarmTransport() {
+}
+
+SwarmTransport.prototype =
+{
+  classDescription: "swarmTransport",
+  classID: Components.ID("3dfea7b2-52e6-467f-b2c6-19fd6d4596bf"),
+  contractID: "@p2pnext.org/tribe/swarmTransport;1",
+  QueryInterface: XPCOMUtils.generateQI(
+    [Ci.tribeISwarmTransport,
+     Ci.nsISecurityCheckedComponent,
+     Ci.nsISupportsWeakReference,
+     Ci.nsIClassInfo]),
+  _xpcom_factory : SwarmTransportFactory,
+  _xpcom_categories : [{
+    category: "JavaScript global constructor",
+    entry: "swarmTransport"
+  }],
+  version: 0.1,
+} 
+
+var SwarmTransportFactory =
+{
+  createInstance: function (outer, iid)
+  {
+    if (outer != null)
+      throw Components.results.NS_ERROR_NO_AGGREGATION;
+
+    if (!iid.equals(Ci.nsIProtocolHandler) &&
+        !iid.equals(Ci.nsISupports) )
+      throw Components.results.NS_ERROR_NO_INTERFACE;
+
+    return (new SwarmTransport()).QueryInterface(iid);
+  }
+};
+
+function NSGetModule(compMgr, fileSpec) {
+  return XPCOMUtils.generateModule([SwarmTransport]);
+}
+
diff --git a/instrumentation/next-share/BaseLib/Transport/components/TribeChannel.js b/instrumentation/next-share/BaseLib/Transport/components/TribeChannel.js
new file mode 100644 (file)
index 0000000..acc4b2c
--- /dev/null
@@ -0,0 +1,280 @@
+// -*- coding: utf-8 -*-\r
+// vi:si:et:sw=2:sts=2:ts=2\r
+/*\r
+  TribeChannel - Torrent video for <video>\r
+\r
+  Written by Jan Gerber, Riccardo Petrocco\r
+  see LICENSE.txt for license information\r
+ */\r
+\r
+Components.utils.import("resource://gre/modules/XPCOMUtils.jsm");\r
+\r
+const Cc = Components.classes;\r
+const Ci = Components.interfaces;\r
+\r
+var tribeLoggingEnabled = true;\r
+\r
+function LOG(aMsg) {\r
+  if (tribeLoggingEnabled)\r
+  {\r
+    aMsg = ("*** Tribe : " + aMsg);\r
+    Cc["@mozilla.org/consoleservice;1"].getService(Ci.nsIConsoleService).logStringMessage(aMsg);\r
+    dump(aMsg);\r
+  }\r
+}\r
+\r
+\r
+function TribeChannel() {\r
+  this.wrappedJSObject = this;\r
+  this.prefService = Cc["@mozilla.org/preferences-service;1"].getService(Ci.nsIPrefBranch).QueryInterface(Ci.nsIPrefService);\r
+  try {\r
+    tribeLoggingEnabled = this.prefService.getBoolPref("tribe.logging.enabled");\r
+  } catch (e) {}\r
+\r
+}\r
+\r
+TribeChannel.prototype =\r
+{\r
+  classDescription: "Tribe channel",\r
+  classID: Components.ID("68bfe8e9-c7ec-477d-a26c-2391333a7a24"),\r
+  contractID: "@p2pnext.org/tribe/channel;1",\r
+  QueryInterface: XPCOMUtils.generateQI([Ci.tribeIChannel,\r
+                                         Ci.nsIChannel,\r
+                                         Ci.nsISupports]),\r
+  _xpcom_factory : TribeChannelFactory,\r
+  init: false,\r
+  backend: 'python',\r
+  running: false,\r
+  torrent_url: '',\r
+  setTorrentUrl: function(url) {\r
+    this.torrent_url = url;\r
+    \r
+    if (url.lastIndexOf('@')-url.lastIndexOf('/') == 41) // Format /root hash@xcontentdur\r
+       this.backend = 'swift';\r
+    else\r
+       this.backend = 'python';\r
+  },\r
+  shutdown: function() {\r
+    LOG("shutdown called\n"); \r
+    var msg = 'SHUTDOWN\r\n';\r
+    this.outputStream.write(msg, msg.length);\r
+\r
+    //this.outputStream.close();\r
+    //this.inputStream.close();\r
+    this.transport.close(Components.results.NS_OK);\r
+  },\r
+  asyncOpen: function(aListener, aContext)\r
+  {\r
+    var _this = this;\r
+    if(this.init) {\r
+      LOG('asyncOpen called again\n');\r
+      throw Components.results.NS_ERROR_ALREADY_OPENED;\r
+    }\r
+    this.init = true;\r
+    var socketTransportService = Cc["@mozilla.org/network/socket-transport-service;1"].getService(Ci.nsISocketTransportService);\r
+    \r
+    var hostIPAddr = "127.0.0.1";\r
+    var hostPort = "62063"; // Arno, 2010-08-10: SwarmPlayer independent from SwarmPlugin\r
+    if (this.backend == 'swift')\r
+       hostPort = "62481"; // dummy hack coexistence\r
+    \r
+    try {\r
+      hostIPAddr = this.prefService.getCharPref("tribe.host.ipaddr");\r
+    } catch (e) {}\r
+\r
+    try {\r
+      hostPort = this.prefService.getCharPref("tribe.host.port");\r
+    } catch (e) {}\r
+\r
+    this.transport = socketTransportService.createTransport(null, 0, hostIPAddr, hostPort, null);\r
+    // Alright to open streams here as they are non-blocking by default\r
+    this.outputStream = this.transport.openOutputStream(0,0,0);\r
+    this.inputStream = this.transport.openInputStream(0,0,0);\r
+\r
+       /* Arno, 2010-06-15: Let player inform BG process about capabilities\r
+          to allow sharing of BGprocess between SwarmTransport and SwarmPlugin\r
+          (the latter has pause capability)\r
+        */\r
+    var msg = 'SUPPORTS VIDEVENT_START\r\n';\r
+    msg = msg + 'START ' + this.torrent_url + '\r\n'; // concat, strange async interface\r
+    this.outputStream.write(msg, msg.length);\r
+\r
+    var dataListener = {\r
+      onStartRequest: function(request, context) {},\r
+      onStopRequest: function(request, context, status) {\r
+      \r
+        if(status == Components.results.NS_ERROR_CONNECTION_REFUSED) {\r
+               \r
+          LOG("onStopRequest" + _this.running );\r
+          if (_this.backend == 'swift' && _this.running == true)\r
+                 return;\r
+          \r
+          _this.startBackgroundDaemon();\r
+          _this.init=false;\r
+          _this.running=true;\r
+          var timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);\r
+          timer.initWithCallback(function() { _this.asyncOpen(aListener, aContext) },\r
+                                 1000, Ci.nsITimer.TYPE_ONE_SHOT);\r
+\r
+          // swift backend\r
+          if (_this.backend == 'swift')\r
+          {\r
+                 // TODO: concurrency between swift starting and this HTTP req\r
+                 var hashidx = _this.torrent_url.indexOf('/')+1;\r
+                 var video_url = 'http://127.0.0.1:8080/' ;\r
+                 video_url = video_url + _this.torrent_url.substr(hashidx,_this.torrent_url.length-hashidx);\r
+                 this.onPlay(video_url);\r
+          }\r
+        }\r
+        else \r
+        {\r
+          LOG('BackgroundProcess closed Control connection\n');\r
+          this.onBGError();\r
+        }\r
+      },\r
+      onDataAvailable: function(request, context, inputStream, offset, count) {\r
+        var sInputStream = Cc["@mozilla.org/scriptableinputstream;1"].createInstance(Ci.nsIScriptableInputStream);\r
+        sInputStream.init(inputStream);\r
+\r
+        var s = sInputStream.read(count).split('\r\n');\r
+        \r
+        for(var i=0;i<s.length;i++) {\r
+          var cmd = s[i];\r
+          if (cmd.substr(0,4) == 'PLAY') {\r
+            var video_url = cmd.substr(5);\r
+            this.onPlay(video_url);\r
+            break;\r
+          }\r
+          if (cmd.substr(0,5) == "ERROR") {\r
+            LOG('ERROR in BackgroundProcess\n');\r
+            this.onBGError();\r
+            break;\r
+          }\r
+        }\r
+      },\r
+      onBGError: function() {\r
+            // Arno: It's hard to figure out how to throw an exception here\r
+            // that causes FX to fail over to alternative <source> elements\r
+            // inside the <video> element. The hack that appears to work is\r
+            // to create a Channel to some URL that doesn't exist.\r
+            //\r
+            var fake_video_url = 'http://127.0.0.1:6877/createxpierror.html';\r
+            var ios = Cc["@mozilla.org/network/io-service;1"].getService(Ci.nsIIOService);\r
+            var video_channel = ios.newChannel(fake_video_url, null, null);\r
+            video_channel.asyncOpen(aListener, aContext);\r
+      },\r
+      onPlay: function(video_url) {\r
+          LOG('PLAY !!!!!! '+video_url+'\n');\r
+          var ios = Cc["@mozilla.org/network/io-service;1"].getService(Ci.nsIIOService);\r
+          var video_channel = ios.newChannel(video_url, null, null);\r
+          video_channel.asyncOpen(aListener, aContext);\r
+          //video_channel.onShutdown(_this.shutdown);\r
+          //cleanup if window is closed\r
+          var windowMediator = Cc["@mozilla.org/appshell/window-mediator;1"].getService(Ci.nsIWindowMediator);\r
+          var nsWindow = windowMediator.getMostRecentWindow("navigator:browser");\r
+          nsWindow.content.addEventListener("unload", function() { _this.shutdown() }, false);\r
+      },\r
+    };\r
+    var pump = Cc["@mozilla.org/network/input-stream-pump;1"].createInstance(Ci.nsIInputStreamPump);\r
+    pump.init(this.inputStream, -1, -1, 0, 0, false);\r
+    pump.asyncRead(dataListener, null);\r
+  },\r
+  startBackgroundDaemon: function() {\r
+    var osString = Cc["@mozilla.org/xre/app-info;1"]\r
+                     .getService(Components.interfaces.nsIXULRuntime).OS;  \r
+    var bgpath = "";\r
+    if (this.backend == 'python')\r
+    {\r
+        if (osString == "WINNT")\r
+            bgpath = 'SwarmEngine.exe';\r
+        else if (osString == "Darwin")\r
+            bgpath = "SwarmPlayer.app/Contents/MacOS/SwarmPlayer";\r
+        else\r
+            bgpath = 'swarmengined';\r
+\r
+    }\r
+    else\r
+    {\r
+           // swift backend\r
+        if (osString == "WINNT")\r
+            bgpath = 'swift.exe';\r
+        else if (osString == "Darwin")\r
+            bgpath = "SwarmPlayer.app/Contents/MacOS/Swift"; // guess\r
+        else\r
+            bgpath = 'swift';\r
+           var urlarg = this.torrent_url.substr(0,this.torrent_url.indexOf('/'));\r
+    }\r
+   \r
+    function runBackgroundDaemon(file) {\r
+\r
+      // Arno, 2010-06-16: Doesn't work on Ubuntu with /usr/share/xul-ext* install      \r
+      try {\r
+          file.permissions = 0755;\r
+      } catch (e) {}\r
+      var process = Cc["@mozilla.org/process/util;1"].createInstance(Ci.nsIProcess);\r
+      process.init(file);\r
+      var args = [];\r
+      if (this.backend == 'python')\r
+      {\r
+          if (tribeLoggingEnabled && osString != "Darwin")\r
+            args.push('debug');\r
+      }\r
+      else\r
+      {\r
+             // swift backend\r
+             args.push('-t');\r
+             args.push(urlarg);\r
+             args.push('-g');\r
+             args.push('0.0.0.0:8080');\r
+             args.push('-w');\r
+             // debugging on\r
+             //if (tribeLoggingEnabled && osString != "Darwin")\r
+             //{\r
+           //    args.push('-D');\r
+           //    args.push('log.log'); //dummy argument?\r
+            // }\r
+      }\r
+      process.run(false, args, args.length);\r
+    }\r
+    try {\r
+      var em = Cc["@mozilla.org/extensions/manager;1"].getService(Ci.nsIExtensionManager);\r
+      \r
+      var file = em.getInstallLocation('tribe@p2pnext.org')\r
+                   .getItemFile('tribe@p2pnext.org', 'bgprocess/'+bgpath);\r
+      runBackgroundDaemon(file);\r
+    } catch(e) {\r
+      Components.utils.import("resource://gre/modules/AddonManager.jsm");\r
+      AddonManager.getAddonByID('tribe@p2pnext.org', function(addon) {\r
+        if (addon.hasResource('bgprocess')) {\r
+          var resource = addon.getResourceURI('bgprocess');\r
+          var file = resource.QueryInterface(Ci.nsIFileURL).file.QueryInterface(Ci.nsILocalFile);\r
+          file.appendRelativePath(bgpath);\r
+          runBackgroundDaemon(file);\r
+        }\r
+      });\r
+    }\r
+  },\r
+} \r
+\r
+var TribeChannelFactory =\r
+{\r
+  createInstance: function (outer, iid)\r
+  {\r
+    if (outer != null)\r
+      throw Components.results.NS_ERROR_NO_AGGREGATION;\r
+\r
+    if (!iid.equals(Ci.tribeIChannel) &&\r
+        !iid.equals(Ci.nsIChannel) &&\r
+        !iid.equals(Ci.nsISupports) )\r
+      throw Components.results.NS_ERROR_NO_INTERFACE;\r
+\r
+    var tc =  new TribeChannel();\r
+    var tcid = tc.QueryInterface(iid);\r
+    return tcid;\r
+  }\r
+};\r
+\r
+function NSGetModule(compMgr, fileSpec) {\r
+  return XPCOMUtils.generateModule([TribeChannel]);\r
+}\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/components/TribeProtocolHandler.js b/instrumentation/next-share/BaseLib/Transport/components/TribeProtocolHandler.js
new file mode 100644 (file)
index 0000000..ba2e7ec
--- /dev/null
@@ -0,0 +1,97 @@
+// -*- coding: utf-8 -*-
+// vi:si:et:sw=2:sts=2:ts=2
+/*
+  TribeProtocolHandler - Torrent video for <video>
+  
+  Written by Jan Gerber, Riccardo Petrocco
+  see LICENSE.txt for license information
+ */
+
+Components.utils.import("resource://gre/modules/XPCOMUtils.jsm");
+
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+
+var tribeLoggingEnabled = false;
+
+function LOG(aMsg) {
+  if (tribeLoggingEnabled)
+  {
+    aMsg = ("*** Tribe : " + aMsg);
+    Cc["@mozilla.org/consoleservice;1"].getService(Ci.nsIConsoleService).logStringMessage(aMsg);
+    dump(aMsg);
+  }
+}
+
+
+function TribeProtocol() {
+  this.prefService = Cc["@mozilla.org/preferences-service;1"].getService(Ci.nsIPrefBranch).QueryInterface(Ci.nsIPrefService);
+  try {
+    tribeLoggingEnabled = this.prefService.getBoolPref("tribe.logging.enabled");
+  } catch (e) {}
+
+}
+
+TribeProtocol.prototype =
+{
+  classDescription: "Tribe protocol",
+  classID: Components.ID("bcb8d306-66cf-4358-8473-807981ffe365"),
+  contractID: "@mozilla.org/network/protocol;1?name=tribe",
+  QueryInterface: XPCOMUtils.generateQI([Ci.nsIProtocolHandler,
+                                         Ci.nsISupports]),
+  _xpcom_factory : TribeProtocolFactory,
+  scheme: "tribe",
+  defaultPort: -1,
+  protocolFlags: Ci.nsIProtocolHandler.URI_NORELATIVE |
+             Ci.nsIProtocolHandler.URI_NOAUTH |
+             Ci.nsIProtocolHandler.URI_LOADABLE_BY_ANYONE,
+
+  allowPort: function(port, scheme)
+  {
+    return false;
+  },
+
+  newURI: function(spec, charset, baseURI)
+  {
+    var uri = Cc["@mozilla.org/network/simple-uri;1"].createInstance(Ci.nsIURI);
+    uri.spec = spec;
+    return uri;
+  },
+
+  newChannel: function(input_uri)
+  {
+    // aURI is a nsIUri, so get a string from it using .spec
+    var key = input_uri.spec;
+
+    // strip away the kSCHEME: part
+    var torrent_url = key.substring(key.indexOf("://") + 3, key.length);    
+    // the URL will not be encoded since we will not be able to decode it afterwards
+    //torrent_url = encodeURI(torrent_url);
+    LOG('\nopening: '+torrent_url+'\n');
+
+    var channel = Cc["@p2pnext.org/tribe/channel;1"].createInstance(Ci.tribeIChannel);
+    channel.setTorrentUrl(torrent_url);
+    return channel;
+  },
+
+} 
+
+var TribeProtocolFactory =
+{
+  createInstance: function (outer, iid)
+  {
+    if (outer != null)
+      throw Components.results.NS_ERROR_NO_AGGREGATION;
+
+    if (!iid.equals(Ci.nsIProtocolHandler) &&
+        !iid.equals(Ci.nsISupports) )
+      throw Components.results.NS_ERROR_NO_INTERFACE;
+
+    return (new TribeProtocol()).QueryInterface(iid);
+  }
+};
+
+function NSGetModule(compMgr, fileSpec) {
+  return XPCOMUtils.generateModule([TribeProtocol]);
+}
+
diff --git a/instrumentation/next-share/BaseLib/Transport/icon.png b/instrumentation/next-share/BaseLib/Transport/icon.png
new file mode 100644 (file)
index 0000000..5db0cdd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Transport/icon.png differ
diff --git a/instrumentation/next-share/BaseLib/Transport/ie8.html b/instrumentation/next-share/BaseLib/Transport/ie8.html
new file mode 100644 (file)
index 0000000..1ce3893
--- /dev/null
@@ -0,0 +1,95 @@
+<html>\r
+<head><title>SwarmPlayer V2 IE8 Page 1</title></head>\r
+\r
+<body>\r
+\r
+\r
+<h1>SwarmPlayer V2 IE8 Page 1</h1>\r
+\r
+<object classid="clsid:98FF91C0-A3B8-11DF-8555-0002A5D5C51B"\r
+        width="380" height="320" id="vlc" events="True" target=''>\r
+<param name="Src" value="http://www.cs.vu.nl/~arno/vod/bunny480pogg.tstream" />\r
+<param name="ShowDisplay" value="True" />\r
+<param name="Loop" value="False" />\r
+<param name="AutoPlay" value="True" />\r
+</object>\r
+<br>\r
+NSSA reports: <em><p id="p2pstatusline"></p></em>\r
+\r
+\r
+<script language="Javascript">\r
+var vlc = document.getElementById("vlc");\r
+\r
+vlc.log.verbosity = 3;\r
+\r
+function debugonclick()\r
+{\r
+    newwindow2=window.open('','name','height=400,width=640,scrollbars=1,resizable=1');\r
+    var tmp = newwindow2.document;\r
+    tmp.write('<html><head><title>popup</title>');\r
+    tmp.write('</head><body><p>this is once again a popup.</p>');\r
+\r
+    tmp.write("Count "+document.vlc.log.messages.count);\r
+\r
+    var iter = document.vlc.log.messages.iterator();\r
+    while(iter.hasNext)\r
+    {\r
+        msg = iter.next();\r
+        tmp.write(msg.message+"<br>");\r
+    }\r
+\r
+    tmp.write('</body></html>');\r
+    tmp.close();\r
+\r
+}\r
+\r
+    \r
+function updatep2pstatus()\r
+{\r
+    line = document.vlc.input.p2pstatus\r
+    var p2pstatusline = document.getElementById("p2pstatusline");\r
+       p2pstatusline.innerHTML = line\r
+}\r
+\r
+setInterval(updatep2pstatus,100)\r
+\r
+\r
+function seek(percentage)\r
+{\r
+       document.vlc.input.position = percentage\r
+}\r
+\r
+function switchtorrent(url)\r
+{\r
+       document.vlc.playlist.stop();\r
+       document.vlc.playlist.clear();\r
+       document.vlc.playlist.items.clear();\r
+       while (document.vlc.playlist.items.count != 0)\r
+               ;\r
+       \r
+       document.vlc.input.set_p2ptarget(url);\r
+}\r
+\r
+\r
+</script>\r
+\r
+\r
+<input type=button value="Play" onClick='document.vlc.playlist.playItem(0);'>\r
+<input type=button value="Pause" onClick='document.vlc.playlist.togglePause();'>\r
+<input type=button value="Stop" onclick='document.vlc.playlist.stop();'>\r
+<input type=button value="Debug" onclick='debugonclick();'>\r
+<input type=button value="Seek 25%" onclick='seek(.25);'>\r
+<input type=button value="Seek 50%" onclick='seek(.5);'>\r
+\r
+<input type=button value="Switch 2" onClick='switchtorrent("http://www.cs.vu.nl/~arno/vod/html5/treeOfLife/treeOfLife.ogv.tstream");'>\r
+<input type=button value="Switch 3" onClick='switchtorrent("http://www.cs.vu.nl/~arno/vod/html5/rdtv/RDTV_ep2_5min.tstream");'>\r
+\r
+<br/>\r
+\r
+\r
+<hr>\r
+\r
+\r
+</body>\r
+</html>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/install.rdf b/instrumentation/next-share/BaseLib/Transport/install.rdf
new file mode 100644 (file)
index 0000000..60001fb
--- /dev/null
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+     xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+  <Description about="urn:mozilla:install-manifest">
+         <em:id>tribe@p2pnext.org</em:id>
+         <em:version>2.1.0</em:version>
+         <em:type>2</em:type>
+
+    <!-- Firefox -->
+    <em:targetApplication>
+      <Description>
+               <em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
+       <em:minVersion>3.5</em:minVersion>
+       <em:maxVersion>4.9.9</em:maxVersion>
+      </Description>
+    </em:targetApplication>            
+
+    <!-- Front End MetaData -->
+    <em:name>SwarmPlayer</em:name>
+    <em:description>The P2P-based tribe:// transfer protocol, enables use of torrent URLs inside e.g. the video tag.</em:description>
+    <em:creator>p2pnext.org</em:creator>
+    <em:homepageURL>http://www.p2pnext.org/</em:homepageURL>
+<!--
+    <em:iconURL>chrome://p2pnext/content/icon.png</em:iconURL>
+    <em:updateURL>http://p2pnext.org/update.rdf</em:updateURL>
+    <em:updateKey>
+    </em:updateKey>
+-->
+  </Description>
+</RDF>
+
diff --git a/instrumentation/next-share/BaseLib/Transport/live-jip.html b/instrumentation/next-share/BaseLib/Transport/live-jip.html
new file mode 100644 (file)
index 0000000..e72a5df
--- /dev/null
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\r
+<html xmlns="http://www.w3.org/1999/xhtml"><head>\r
+\r
+\r
+\r
+<title>HTML5 Live P2P Vide Demo</title>\r
+<meta http-equiv="Content-Type" content="text/html; charset=us-ascii">\r
+</head>\r
+<body>\r
+\r
+\r
+\r
+<h2>HTML5 Live P2P Video Demo</h2>\r
+\r
+<h3>Original Stream</h3>\r
+\r
+\r
+<script>\r
+ function failed(e) {\r
+   // video playback failed - show a message saying why\r
+   switch (e.target.error.code) {\r
+     case e.target.error.MEDIA_ERR_ABORTED:\r
+       alert('You aborted the video playback.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_NETWORK:\r
+       alert('A network error caused the video download to fail part-way.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_DECODE:\r
+       alert('The video playback was aborted due to a corruption problem or because the video used features your browser did not support.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_SRC_NOT_SUPPORTED:\r
+       alert('The video could not be loaded, either because the server or network failed or because the format is not supported.');\r
+       break;\r
+     default:\r
+       alert('An unknown error occurred, code:'+e.target.error.code);\r
+       break;\r
+   }\r
+ }\r
+</script>\r
+\r
+<video src="http://modulix.org:8000/libre.ogg" controls="controls" autoplay="autoplay" "></video>\r
+\r
+<h3>P2P Rebroadcast Stream</h3>\r
+\r
+<video src="tribe://http://jip.cs.vu.nl:7764/file?name=modulix-libre-live-p2p.ogg" controls="controls" autoplay="autoplay" onerror="failed(event)"></video>\r
+\r
+</body></html>\r
diff --git a/instrumentation/next-share/BaseLib/Transport/lucid-configure-xulrunner191.sh b/instrumentation/next-share/BaseLib/Transport/lucid-configure-xulrunner191.sh
new file mode 100755 (executable)
index 0000000..e99c963
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/sh -x
+./configure --prefix=$HOME/pkgs/xulrunner-1.9.1.7 --enable-application=xulrunner \
+       --enable-safe-browsing \
+       --with-user-appdir=.mozilla \
+       --without-system-jpeg \
+       --with-system-zlib=/usr \
+       --with-system-bz2=/usr \
+       --disable-javaxpcom \
+       --disable-crashreporter \
+       --disable-elf-dynstr-gc \
+       --disable-installer \
+       --disable-strip \
+       --disable-strip-libs \
+       --disable-install-strip \
+       --disable-updater \
+       --enable-optimize \
+       --enable-libnotify \
+       --with-distribution-id=com.ubuntu 
+
diff --git a/instrumentation/next-share/BaseLib/Transport/port-to-other-browsers.txt b/instrumentation/next-share/BaseLib/Transport/port-to-other-browsers.txt
new file mode 100644 (file)
index 0000000..9141adc
--- /dev/null
@@ -0,0 +1,79 @@
+\r
+                  ==== Adding Custom URL Protocols to IE/Safari/Chrome ===\r
+\r
+\r
+Win32 Documentation:\r
+====================\r
+\r
+Custom URL protocols can be added to Windows by using Asynchronous Pluggable Protocols (APP):\r
+       \r
+    http://msdn.microsoft.com/en-us/library/aa767916%28VS.85%29.aspx\r
+    http://support.microsoft.com/?id=kb;en-us;303740\r
+    http://support.microsoft.com/kb/280522\r
+    http://social.msdn.microsoft.com/Forums/en-US/ieextensiondevelopment/thread/5edc4348-0752-490d-aa43-cd89d495858b\r
+    \r
+There is also Pluggable Protocol support specific to .Net, but I don't think that is usable.\r
+\r
+    http://msdn.microsoft.com/en-us/library/1f6c88af%28vs.71%29.aspx\r
+    http://support.microsoft.com/kb/812409    \r
+\r
+Lots of people are using APP, some examples:\r
+\r
+    http://www.codeproject.com/kb/aspnet/AspxProtocol.aspx  (C# example)\r
+    http://www.codeproject.com/KB/IP/DataProtocol.aspx      (C++ example)\r
+    http://gears.googlecode.com/svn/trunk/third_party/passthru_app/\r
+    http://www.blackfishsoftware.com/blog/don/passthroughapp_bho_toolbar_intercepting_requests_responses\r
+    \r
+\r
+\r
+\r
+Experience with Win32 Sample:\r
+=============================\r
+\r
+I played around with the code for this example:\r
+\r
+     http://www.codeproject.com/kb/aspnet/AspxProtocol.aspx\r
+\r
+which adds a echo: protocol and an aspx: protocol to Windows. I made the following notes:\r
+\r
+- Win7\r
+\r
+  * Open in VS 2008, Select Release and Build.\r
+  * Run install.cmd from bin\Release\r
+\r
+  >>> echo: works for IE8 and IE9 \r
+\r
+- Virgin XP\r
+\r
+  * Install with .Net 3.5 SP1 and SDK for SP1\r
+\r
+  * IE8 recognizes echo\r
+  * Chrome doesn't do anything on click\r
+  * Safari says: "Safari can't open "echo:Linked1Clicked" because Microsoft Windows doesn't recognize\r
+    Internet addresses starting with "echo:".\r
+\r
+  * Doing regasm /codebase doesn't help.\r
+\r
+So although APPs should extend Windows and any Windows app should be able to use them, Safari and Chrome\r
+do not.\r
+\r
+\r
+Safari:\r
+=======\r
+\r
+I found the following Web page on custom URL protocol handlers for Mac:\r
+\r
+   http://developer.apple.com/mac/library/documentation/Cocoa/Reference/Foundation/Classes/NSURLProtocol_Class/Reference/Reference.html\r
+   http://developer.apple.com/mac/library/samplecode/SpecialPictureProtocol/Introduction/Intro.html#//apple_ref/doc/uid/DTS10003816\r
+\r
+apparently also releated:\r
+   \r
+   http://developer.apple.com/mac/library/documentation/Cocoa/Conceptual/URLLoadingSystem/URLLoadingSystem.html#//apple_ref/doc/uid/10000165i\r
+\r
+IE9 Preview 4:\r
+==============\r
+\r
+As mentioned above it works with APPs. Regarding container formats it appears to support just MP4 at\r
+the moment (test with H.264 and AAC content). MPEG-TS doesn't work. \r
+\r
+So live P2P wouldn't work on IE9p4.\r
diff --git a/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin.png b/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin.png
new file mode 100644 (file)
index 0000000..f64e032
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin.png differ
diff --git a/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey.png b/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey.png
new file mode 100644 (file)
index 0000000..6c576dc
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey.png differ
diff --git a/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey2.png b/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey2.png
new file mode 100644 (file)
index 0000000..0decc17
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Transport/skin/wiki/swarmplugin_grey2.png differ
diff --git a/instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki.png b/instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki.png
new file mode 100644 (file)
index 0000000..f2fac2c
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki.png differ
diff --git a/instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki_grey.png b/instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki_grey.png
new file mode 100644 (file)
index 0000000..04db1b1
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Transport/skin/wiki/wiki_grey.png differ
diff --git a/instrumentation/next-share/BaseLib/Transport/swift.html b/instrumentation/next-share/BaseLib/Transport/swift.html
new file mode 100644 (file)
index 0000000..a572a88
--- /dev/null
@@ -0,0 +1,50 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\r
+    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\r
+\r
+<html xmlns="http://www.w3.org/1999/xhtml">\r
+<head>\r
+<title>swift HTML5 Demo</title>\r
+<meta http-equiv="Content-Type" content="text/html; charset=us-ascii" />\r
+</head>\r
+\r
+<body>\r
+\r
+\r
+<h2>HTML5 swift backend</h2>\r
+\r
+<h3>Video here</h3>\r
+\r
+<script>\r
+ function failed(e) {\r
+   // video playback failed - show a message saying why\r
+   switch (e.target.error.code) {\r
+     case e.target.error.MEDIA_ERR_ABORTED:\r
+       alert('arno: You aborted the video playback.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_NETWORK:\r
+       alert('arno: A network error caused the video download to fail part-way.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_DECODE:\r
+       alert('arno: The video playback was aborted due to a corruption problem or because the video used features your browser did not support.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_SRC_NOT_SUPPORTED:\r
+       alert('arno: The video could not be loaded, either because the server or network failed or because the format is not supported.');\r
+       break;\r
+     default:\r
+       alert('An unknown error occurred, code:'+e.target.error.code);\r
+       break;\r
+   }\r
+ }\r
+</script>\r
+\r
+<!-- RD TV -->\r
+<video src="tribe://127.0.0.1:20000/60bcdfd1c2c42d252b536f0bf43b53cc75ff4f75@2561" controls autoplay onerror="failed(event)"></video>\r
+\r
+<!-- Tree of Life -->\r
+<!-- video src="tribe://127.0.0.1:20001/19cbe88150f7744e1fee6bd42f09c796a16929b1@2722" controls autoplay onerror="failed(event)"></video -->\r
+\r
+\r
+</body>\r
+</html>\r
+\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/test-nofallback.html b/instrumentation/next-share/BaseLib/Transport/test-nofallback.html
new file mode 100644 (file)
index 0000000..141225b
--- /dev/null
@@ -0,0 +1,292 @@
+<html>\r
+<head>\r
+<link rel="stylesheet" type="text/css" href="trial.css">\r
+<title>Wikipedia P2P Video Streaming Test</title>\r
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\r
+<style type="text/css">\r
+<!--\r
+body {\r
+        background-color: #FFFFFF;\r
+}\r
+.style4 {\r
+        font-size: 24pt;\r
+        color: #333333;\r
+}\r
+.style7 {color: #666666; font-style: italic;}\r
+-->\r
+</style>\r
+</head>\r
+<body>\r
+<table>\r
+<tr>\r
+  <td><a href="http://www.p2p-next.org/"><img src="images/p2p-next_logo.png" border=0></a></td>\r
+  <td class="header">Wikipedia P2P Video Streaming Test</td>\r
+  <td><a href="http://www.p2p-next.org/"><img src="images/p2p-next_logo.png" border=0></a></td>\r
+</tr>\r
+</table>\r
+<center>\r
+<table class="container">\r
+<tr><td valign="top">\r
+<table>\r
+\r
+\r
+<!-- Step 1 -->\r
+<script type="text/javascript">\r
+\r
+if (navigator.appName == "Netscape")\r
+{\r
+document.write('<tr>');\r
+document.write('  <td  class="nr"><img src="images/01_120px.png"  height="120"></td>');\r
+document.write('  <td><span class="title">Install SwarmTransport for Firefox</span>');\r
+document.write('    <span class="text">');\r
+document.write('    <ol type="a">');\r
+document.write('    <li> Open the <a href="http://www.tribler.org/trac/wiki/WikimediaCooperation">XPI for your platform</a> with Firefox');\r
+document.write('    <li> Restart Firefox, and revisit this page.');\r
+document.write('    </ol>');\r
+document.write('    For IE, visit this page using IE, or look in PageSource');\r
+document.write('    </span>');\r
+document.write('  </td>');\r
+document.write('</tr>');\r
+}\r
+else\r
+{\r
+document.write('<tr>');\r
+document.write('  <td  class="nr"><img src="images/01_120px.png"  height="120"></td>');\r
+document.write('  <td><span class="title">Install SwarmPlugin for Internet Explorer</span>');\r
+document.write('    <span class="text">');\r
+document.write('    <ol type="a">');\r
+document.write('    <li> Run the <a href="http://www.tribler.org/trac/wiki/WikimediaCooperation">SwarmPlugin-Ogg installer</a> for IE');\r
+document.write('    <li> Revisit this page.');\r
+document.write('    </ol>');\r
+document.write('    For Firefox, visit this page using Firefox, or look in PageSource');\r
+document.write('    </span>');\r
+document.write('  </td>');\r
+document.write('</tr>');\r
+}\r
+</script>\r
+\r
+\r
+<!-- Step 2 -->\r
+<tr>\r
+  <td class="nr"><img src="images/02_120px.png"  height="120" align="baseline"></td>\r
+  <td><span class="title">Watch these Wikipedia videos via P2P</span>\r
+    <span class="text"> </a> \r
+</span>\r
+<p>\r
+\r
+\r
+\r
+<!-- NextShare Tstream Lookup Service -->\r
+<script src="http://wikipedia.p2p-next.org/tlookup.js"></script>\r
+\r
+\r
+<!-- NextShare Common Javascript Code -->\r
+<script>\r
+\r
+// Code for video tag error reporting \r
+function failed(e) {\r
+   // video playback failed - show a message saying why\r
+   switch (e.target.error.code) {\r
+     case e.target.error.MEDIA_ERR_ABORTED:\r
+       alert('You aborted the video playback.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_NETWORK:\r
+       alert('A network error caused the video download to fail part-way.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_DECODE:\r
+       alert('The video playback was aborted due to a corruption problem or because the video used features your browser did not support.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_SRC_NOT_SUPPORTED:\r
+       alert('The video could not be loaded, either because the server or network failed or because the format is not supported.');\r
+       break;\r
+     default:\r
+       alert('An unknown error occurred, code:'+e.target.error.code);\r
+       break;\r
+   }\r
+ }\r
+\r
+// Code for IE8 SwarmPlugin\r
+function debugonclick(vlcobj)\r
+{\r
+    newwindow2=window.open('','name','height=400,width=640,scrollbars=1,resizable=1');\r
+    var tmp = newwindow2.document;\r
+    tmp.write('<html><head><title>popup</title>');\r
+    tmp.write('</head><body><p>SwarmPlugin Debugging Popup</p>');\r
+\r
+    tmp.write("Number of log messages "+vlcobj.log.messages.count);\r
+\r
+    var iter = vlcobj.log.messages.iterator();\r
+    while(iter.hasNext)\r
+    {\r
+        msg = iter.next();\r
+        tmp.write(msg.message+"<br>");\r
+    }\r
+\r
+    tmp.write('</body></html>');\r
+    tmp.close();\r
+\r
+}\r
+\r
+\r
+function seek(vlcobj,percentage)\r
+{\r
+    vlcobj.input.position = percentage\r
+}\r
+\r
+\r
+function writeplugin(vlcid,url)\r
+{\r
+       // 1. Write plugin HTML\r
+       writeobject(vlcid,url);\r
+       \r
+       // 2. Write statusline HTML\r
+       writestatusline(vlcid);\r
+       \r
+       // 3. Write controls HTML\r
+       document.write('<p class="center">');\r
+       writecontrols(vlcid);\r
+       document.write('</p>');\r
+       \r
+       // 4. Periodically retrieve p2pstatus from plugin and show on page\r
+       setInterval(function() { updatestatusline(vlcid); },100);\r
+}\r
+\r
+function writeobject(vlcid,url)\r
+{\r
+       document.write('<object classid="clsid:1800B8AF-4E33-43C0-AFC7-894433C13538" ');\r
+       //document.write('codebase="http://trial.p2p-next.org/download/SwarmPlugin_IE_1.0.5.cab"');\r
+       document.write('width="320" height="240" id="'+vlcid+'" name="'+vlcid+'" events="True" target="">');\r
+       document.write('<param name="Src" value="'+url+'" />');\r
+       document.write('<param name="ShowDisplay" value="True" />');\r
+       document.write('<param name="Loop" value="False" />');\r
+       document.write('<param name="AutoPlay" value="True" />');\r
+       document.write('<param name="Toolbar" value="True" />');\r
+       document.write('</object>');\r
+}\r
+\r
+function writestatusline(vlcid)\r
+{\r
+    document.write('<em><p id="'+vlcid+'p2pstatusline">Statusline</p></em>');\r
+}\r
+\r
+function updatestatusline(vlcid)\r
+{\r
+       vlcobj = document.getElementById(vlcid);\r
+    if (vlcobj.input != null)\r
+    {\r
+        line = vlcobj.input.p2pstatus\r
+        var p2pstatusline = document.getElementById(vlcid+"p2pstatusline");\r
+        p2pstatusline.innerHTML = line\r
+    }\r
+}\r
+\r
+function writecontrols(vlcid)\r
+{\r
+       vlcfullid = 'document.'+vlcid\r
+       document.write('<input type=button value="Play" onClick="');\r
+                                                 document.write(vlcid+'.playlist.play();">');\r
+       document.write('<input type=button value="Pause" onClick="');\r
+                                                                                         document.write(vlcid+'.playlist.togglePause();">');\r
+       document.write('<input type=button value="Stop" onclick="');\r
+                                                 document.write(vlcid+'.playlist.stop();">');\r
+       document.write('<input type=button value="Debug" onclick="debugonclick(');\r
+                                                                                         document.write(vlcid+');">');\r
+       document.write('<input type=button value="Seek 25%" onclick="seek(');\r
+                                                                                         document.write(vlcid+',.25);">');\r
+       document.write('<input type=button value="Seek 50%" onclick="seek(');\r
+                                                                                         document.write(vlcid+',.5);">');\r
+       document.write('<input type=button value="Fullscreen" onClick="');\r
+                                                                                         document.write(vlcid+'.video.toggleFullscreen();">');\r
+}\r
+\r
+function writep2pvideotag(videourl,posterurl)\r
+{\r
+    if (typeof swarmTransport != 'undefined')\r
+    {\r
+           var tribeurl = "tribe://"+httpseed2tstream(videourl);\r
+           document.write('<video controls="controls" onerror="failed(event)" autoplay="autoplay" poster="'+posterurl+'">');\r
+           document.write('  <source src="'+tribeurl+'"/>');\r
+           //document.write('  <source src="'+videourl+'"/>');\r
+           document.write('</video>');\r
+    }\r
+    else\r
+    {\r
+       // No SwarmTransport installed\r
+       document.write('NO SWARMTRANSPORT');\r
+           document.write('<video controls="controls" onerror="failed(event)" poster="'+posterurl+'">');\r
+           document.write('  <source src="'+videourl+'"/>');\r
+           document.write('</video>');\r
+    }\r
+}\r
+\r
+</script>\r
+\r
+<table border="1">\r
+\r
+<tr>\r
+<!-- Video 1 -->\r
+<td>\r
+\r
+<!-- NextShare: Generate video code for FX or IE -->\r
+<script type="text/javascript">\r
+if (navigator.appName == "Netscape")\r
+{\r
+    //\r
+    // FX: use HTML5 video tag and NextShare SwarmTransport\r
+    //\r
+    var posterurl = "http://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Welcome_to_globallives_2.0.ogv/seek%3D2-Welcome_to_globallives_2.0.ogv.jpg";\r
+    var videourl = "http://upload.wikimedia.org/wikipedia/commons/c/c3/Welcome_to_globallives_2.0.ogv";\r
+    writep2pvideotag(videourl,posterurl);\r
+}\r
+else\r
+{\r
+    //\r
+    // IE8: Fallback to NextShare SwarmPlugin\r
+    //\r
+    writeplugin("vlc1","http://tracker1.p2p-next.org:6970/file?info_hash=%3E7%F1%7F%F9%C7%20%9D%BE%3D%96%10%1B%7BK%3Bs%9C%06%7E");\r
+}\r
+</script>\r
+\r
+</td>\r
+\r
+<td><a href="http://globallives.org" rel="nofollow">GlobalLives.org</a> is a \r
+collaboratively built video library of human life experience that reshapes how \r
+we as both producers and viewers conceive of cultures, nations and people \r
+outside of our own communities. More \r
+<a href="http://globallives.org/about/" rel="nofollow">about global lives</a>.\r
+</td>\r
+</tr>\r
+\r
+\r
+\r
+\r
+<!-- End of 2-column table of videos -->\r
+</table>\r
+\r
+</tr>\r
+\r
+\r
+\r
+\r
+<!-- Step 3 -->\r
+<tr>\r
+  <td class="nr"><img src="images/03_120px.png"  height="120" lowsrc="images/03.png"></td>\r
+  <td><p class="title">Statistics</p>\r
+    <p class="text">Visit the <a href="http://tracker1.p2p-next.org:6970/">NextShare tracker</a> to see some statistics.</p>\r
+</tr>\r
+</table>\r
+</td>\r
+\r
+\r
+</tr>\r
+</table>\r
+<br>\r
+<table class="footer">\r
+<tr>\r
+  <td> Home |  <a href="legal.html">Legal</a> </td>\r
+</tr>\r
+</table>\r
+</center>\r
+</body>\r
+</html>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Transport/tribeIChannel.idl b/instrumentation/next-share/BaseLib/Transport/tribeIChannel.idl
new file mode 100644 (file)
index 0000000..7fe4ba0
--- /dev/null
@@ -0,0 +1,8 @@
+#include "nsISupports.idl"
+
+[scriptable, uuid(bcb8d306-66cf-4358-8473-807981ff7366)]
+interface tribeIChannel : nsISupports
+{
+  void setTorrentUrl(in AString url);
+};
+
diff --git a/instrumentation/next-share/BaseLib/Transport/tribeISwarmTransport.idl b/instrumentation/next-share/BaseLib/Transport/tribeISwarmTransport.idl
new file mode 100644 (file)
index 0000000..0dbf862
--- /dev/null
@@ -0,0 +1,8 @@
+#include "nsISupports.idl"
+
+[scriptable, uuid(946ef9dc-af29-466d-89a7-541f62d914a1)]
+interface tribeISwarmTransport : nsISupports
+{
+  readonly attribute string version;
+};
+
diff --git a/instrumentation/next-share/BaseLib/Transport/wikipedia.html b/instrumentation/next-share/BaseLib/Transport/wikipedia.html
new file mode 100644 (file)
index 0000000..8fc7c12
--- /dev/null
@@ -0,0 +1,361 @@
+<html>\r
+<head>\r
+<link rel="stylesheet" type="text/css" href="trial.css">\r
+<title>Wikipedia P2P Video Streaming Test</title>\r
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\r
+<style type="text/css">\r
+<!--\r
+body {\r
+        background-color: #FFFFFF;\r
+}\r
+.style4 {\r
+        font-size: 24pt;\r
+        color: #333333;\r
+}\r
+.style7 {color: #666666; font-style: italic;}\r
+-->\r
+</style>\r
+</head>\r
+<body>\r
+<table>\r
+<tr>\r
+  <td><a href="http://www.p2p-next.org/"><img src="images/p2p-next_logo.png" border=0></a></td>\r
+  <td class="header">Wikipedia P2P Video Streaming Test</td>\r
+  <td><a href="http://www.p2p-next.org/"><img src="images/p2p-next_logo.png" border=0></a></td>\r
+</tr>\r
+</table>\r
+<center>\r
+<table class="container">\r
+<tr><td valign="top">\r
+<table>\r
+\r
+\r
+<!-- Step 1 -->\r
+<script type="text/javascript">\r
+\r
+if (navigator.appName == "Netscape")\r
+{\r
+document.write('<tr>');\r
+document.write('  <td  class="nr"><img src="images/01_120px.png"  height="120"></td>');\r
+document.write('  <td><span class="title">Install SwarmTransport for Firefox</span>');\r
+document.write('    <span class="text">');\r
+document.write('    <ol type="a">');\r
+document.write('    <li> Open the <a href="http://www.tribler.org/trac/wiki/WikimediaCooperation">XPI for your platform</a> with Firefox');\r
+document.write('    <li> Restart Firefox, and revisit this page.');\r
+document.write('    </ol>');\r
+document.write('    For IE, visit this page using IE, or look in PageSource');\r
+document.write('    </span>');\r
+document.write('  </td>');\r
+document.write('</tr>');\r
+}\r
+else\r
+{\r
+document.write('<tr>');\r
+document.write('  <td  class="nr"><img src="images/01_120px.png"  height="120"></td>');\r
+document.write('  <td><span class="title">Install SwarmPlugin for Internet Explorer</span>');\r
+document.write('    <span class="text">');\r
+document.write('    <ol type="a">');\r
+document.write('    <li> Run the <a href="http://www.tribler.org/trac/wiki/WikimediaCooperation">SwarmPlugin-Ogg installer</a> for IE');\r
+document.write('    <li> Revisit this page.');\r
+document.write('    </ol>');\r
+document.write('    For Firefox, visit this page using Firefox, or look in PageSource');\r
+document.write('    </span>');\r
+document.write('  </td>');\r
+document.write('</tr>');\r
+}\r
+</script>\r
+\r
+\r
+<!-- Step 2 -->\r
+<tr>\r
+  <td class="nr"><img src="images/02_120px.png"  height="120" align="baseline"></td>\r
+  <td><span class="title">Watch these Wikipedia videos via P2P</span>\r
+    <span class="text"> </a> \r
+</span>\r
+<p>\r
+\r
+\r
+<!-- NextShare Tstream Lookup Service -->\r
+<script src="http://wikipedia.p2p-next.org/tlookup.js"></script>\r
+\r
+\r
+<!-- NextShare Common Javascript Code -->\r
+<script>\r
+\r
+// Code for video tag error reporting \r
+function failed(e) {\r
+   // video playback failed - show a message saying why\r
+   switch (e.target.error.code) {\r
+     case e.target.error.MEDIA_ERR_ABORTED:\r
+       alert('You aborted the video playback.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_NETWORK:\r
+       alert('A network error caused the video download to fail part-way.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_DECODE:\r
+       alert('The video playback was aborted due to a corruption problem or because the video used features your browser did not support.');\r
+       break;\r
+     case e.target.error.MEDIA_ERR_SRC_NOT_SUPPORTED:\r
+       alert('The video could not be loaded, either because the server or network failed or because the format is not supported.');\r
+       break;\r
+     default:\r
+       alert('An unknown error occurred, code:'+e.target.error.code);\r
+       break;\r
+   }\r
+ }\r
+\r
+// Code for IE8 SwarmPlugin\r
+function debugonclick(vlcobj)\r
+{\r
+    newwindow2=window.open('','name','height=400,width=640,scrollbars=1,resizable=1');\r
+    var tmp = newwindow2.document;\r
+    tmp.write('<html><head><title>popup</title>');\r
+    tmp.write('</head><body><p>SwarmPlugin Debugging Popup</p>');\r
+\r
+    tmp.write("Number of log messages "+vlcobj.log.messages.count);\r
+\r
+    var iter = vlcobj.log.messages.iterator();\r
+    while(iter.hasNext)\r
+    {\r
+        msg = iter.next();\r
+        tmp.write(msg.message+"<br>");\r
+    }\r
+\r
+    tmp.write('</body></html>');\r
+    tmp.close();\r
+\r
+}\r
+\r
+\r
+function seek(vlcobj,percentage)\r
+{\r
+    vlcobj.input.position = percentage\r
+}\r
+\r
+\r
+function writeplugin(vlcid,url)\r
+{\r
+       // 1. Write plugin HTML\r
+       writeobject(vlcid,url);\r
+       \r
+       // 2. Write statusline HTML\r
+       writestatusline(vlcid);\r
+       \r
+       // 3. Write controls HTML\r
+       document.write('<p class="center">');\r
+       writecontrols(vlcid);\r
+       document.write('</p>');\r
+       \r
+       // 4. Periodically retrieve p2pstatus from plugin and show on page\r
+       setInterval(function() { updatestatusline(vlcid); },100);\r
+}\r
+\r
+function writeobject(vlcid,url)\r
+{\r
+       document.write('<object classid="clsid:1800B8AF-4E33-43C0-AFC7-894433C13538" ');\r
+       //document.write('codebase="http://trial.p2p-next.org/download/SwarmPlugin_IE_1.0.5.cab"');\r
+       document.write('width="320" height="240" id="'+vlcid+'" name="'+vlcid+'" events="True" target="">');\r
+       document.write('<param name="Src" value="'+url+'" />');\r
+       document.write('<param name="ShowDisplay" value="True" />');\r
+       document.write('<param name="Loop" value="False" />');\r
+       document.write('<param name="AutoPlay" value="True" />');\r
+       document.write('<param name="Toolbar" value="True" />');\r
+       document.write('</object>');\r
+}\r
+\r
+function writestatusline(vlcid)\r
+{\r
+    document.write('<em><p id="'+vlcid+'p2pstatusline">Statusline</p></em>');\r
+}\r
+\r
+function updatestatusline(vlcid)\r
+{\r
+       vlcobj = document.getElementById(vlcid);\r
+    if (vlcobj.input != null)\r
+    {\r
+        line = vlcobj.input.p2pstatus\r
+        var p2pstatusline = document.getElementById(vlcid+"p2pstatusline");\r
+        p2pstatusline.innerHTML = line\r
+    }\r
+}\r
+\r
+function writecontrols(vlcid)\r
+{\r
+       vlcfullid = 'document.'+vlcid\r
+       document.write('<input type=button value="Play" onClick="');\r
+                                                 document.write(vlcid+'.playlist.play();">');\r
+       document.write('<input type=button value="Pause" onClick="');\r
+                                                                                         document.write(vlcid+'.playlist.togglePause();">');\r
+       document.write('<input type=button value="Stop" onclick="');\r
+                                                 document.write(vlcid+'.playlist.stop();">');\r
+       document.write('<input type=button value="Debug" onclick="debugonclick(');\r
+                                                                                         document.write(vlcid+');">');\r
+       document.write('<input type=button value="Seek 25%" onclick="seek(');\r
+                                                                                         document.write(vlcid+',.25);">');\r
+       document.write('<input type=button value="Seek 50%" onclick="seek(');\r
+                                                                                         document.write(vlcid+',.5);">');\r
+       document.write('<input type=button value="Fullscreen" onClick="');\r
+                                                                                         document.write(vlcid+'.video.toggleFullscreen();">');\r
+}\r
+\r
+function writep2pvideotag(videourl,posterurl)\r
+{\r
+    if (typeof swarmTransport != 'undefined')\r
+    {\r
+           var tribeurl = "tribe://"+httpseed2tstream(videourl);\r
+           document.write('<video controls="controls" onerror="failed(event)" preload="none" poster="'+posterurl+'">');\r
+           document.write('  <source src="'+tribeurl+'"/>');\r
+           document.write('  <!-- source src="'+videourl+'"/ -->');\r
+           document.write('</video>');\r
+    }\r
+    else\r
+    {\r
+       // No SwarmTransport installed\r
+       document.write('NO SWARMTRANSPORT');\r
+           document.write('<video controls="controls" onerror="failed(event)" poster="'+posterurl+'">');\r
+           document.write('  <source src="'+videourl+'"/>');\r
+           document.write('</video>');\r
+    }\r
+}\r
+</script>\r
+\r
+\r
+<table border="1">\r
+\r
+<tr>\r
+<!-- Video 1 -->\r
+<td>\r
+\r
+<!-- NextShare: Generate video code for FX or IE -->\r
+<script type="text/javascript">\r
+if (navigator.appName == "Netscape")\r
+{\r
+    //\r
+    // FX: use HTML5 video tag and NextShare SwarmTransport\r
+    //\r
+    var posterurl = "http://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Welcome_to_globallives_2.0.ogv/seek%3D2-Welcome_to_globallives_2.0.ogv.jpg";\r
+    var videourl = "http://upload.wikimedia.org/wikipedia/commons/c/c3/Welcome_to_globallives_2.0.ogv";\r
+    writep2pvideotag(videourl,posterurl);\r
+}\r
+else\r
+{\r
+       //\r
+       // IE8: Fallback to NextShare SwarmPlugin\r
+       //\r
+       writeplugin("vlc1","http://tracker1.p2p-next.org:6970/file?info_hash=%3E7%F1%7F%F9%C7%20%9D%BE%3D%96%10%1B%7BK%3Bs%9C%06%7E");\r
+}\r
+</script>\r
+\r
+</td>\r
+\r
+<td><a href="http://globallives.org" rel="nofollow">GlobalLives.org</a> is a \r
+collaboratively built video library of human life experience that reshapes how \r
+we as both producers and viewers conceive of cultures, nations and people \r
+outside of our own communities. More \r
+<a href="http://globallives.org/about/" rel="nofollow">about global lives</a>.\r
+</td>\r
+</tr>\r
+\r
+\r
+\r
+\r
+<tr>\r
+<!-- Video 2 -->\r
+<td>\r
+\r
+<!-- NextShare: Generate video code for FX or IE -->\r
+<script type="text/javascript">\r
+if (navigator.appName == "Netscape")\r
+{\r
+    //\r
+    // FX: use HTML5 video tag and NextShare SwarmTransport\r
+    //\r
+    var posterurl = "http://upload.wikimedia.org/wikipedia/commons/thumb/b/bd/Elephants_Dream.ogg/seek%3D13-Elephants_Dream.ogg.jpg";\r
+    var videourl = "http://upload.wikimedia.org/wikipedia/commons/b/bd/Elephants_Dream.ogg";\r
+    writep2pvideotag(videourl,posterurl);\r
+}\r
+else\r
+{\r
+       //\r
+       // IE8: Fallback to NextShare SwarmPlugin\r
+       //\r
+       writeplugin("vlc2","http://tracker1.p2p-next.org:6970/file?info_hash=%ED%5D%D9%81%5E%04%0A%12%07%CE/%40%7C0d%8F%7C%15%29%CB");\r
+}\r
+</script>\r
+\r
+</td>\r
+<td><a href="http://www.elephantsdream.org/" rel="nofollow">Elephants Dream</a> \r
+is the world's first open movie, made entirely with open source graphics \r
+software such as <a href="http://en.wikipedia.org/wiki/Blender">Blender</a>, \r
+and with all production files freely available to use however you please, \r
+under a <a href="http://en.wikipedia.org/wiki/Creative_Commons">Creative \r
+Commons</a> license. This clip subtitles have been translated into many languages.\r
+</td>\r
+\r
+</tr>\r
+\r
+<tr>\r
+<!-- Video 3 -->\r
+<td>\r
+\r
+<!-- NextShare: Generate video code for FX or IE -->\r
+<script type="text/javascript">\r
+if (navigator.appName == "Netscape")\r
+{\r
+       //\r
+       // FX: use HTML5 video tag and NextShare SwarmTransport\r
+       //\r
+       var posterurl = "http://upload.wikimedia.org/wikipedia/commons/thumb/d/d6/Yochai_Benkler_-_On_Autonomy%2C_Control_and_Cultureal_Experience.ogg/mid-Yochai_Benkler_-_On_Autonomy%2C_Control_and_Cultureal_Experience.ogg.jpg";\r
+       var videourl = "http://upload.wikimedia.org/wikipedia/commons/d/d6/Yochai_Benkler_-_On_Autonomy%2C_Control_and_Cultureal_Experience.ogg";  \r
+    writep2pvideotag(videourl,posterurl);\r
+}\r
+else\r
+{\r
+       //\r
+       // IE8: Fallback to NextShare SwarmPlugin\r
+       //\r
+       writeplugin("vlc3","http://tracker1.p2p-next.org:6970/file?info_hash=%BCy%E5%B0%A8%0EK%E1i%00%82%94%7B%0FoH%3Dn%FAZ");\r
+}\r
+</script>\r
+\r
+</td>\r
+<td><a href="http://en.wikipedia.org/wiki/Steal_This_Film">Steal This Film</a> \r
+is a film series documenting the movement against intellectual property produced\r
+by <a href="http://en.wikipedia.org/wiki/The_League_of_Noble_Peers">The League \r
+of Noble Peers</a> and released via the \r
+<a href="http://en.wikipedia.org/wiki/BitTorrent">BitTorrent</a> peer-to-peer \r
+protocol. \r
+</td>\r
+\r
+</tr>\r
+\r
+<!-- End of 2-column table of videos -->\r
+</table>\r
+\r
+</tr>\r
+\r
+\r
+\r
+\r
+<!-- Step 3 -->\r
+<tr>\r
+  <td class="nr"><img src="images/03_120px.png"  height="120" lowsrc="images/03.png"></td>\r
+  <td><p class="title">Statistics</p>\r
+    <p class="text">Visit the <a href="http://tracker1.p2p-next.org:6970/">NextShare tracker</a> to see some statistics.</p>\r
+</tr>\r
+</table>\r
+</td>\r
+\r
+\r
+</tr>\r
+</table>\r
+<br>\r
+<table class="footer">\r
+<tr>\r
+  <td> Home |  <a href="legal.html">Legal</a> </td>\r
+</tr>\r
+</table>\r
+</center>\r
+</body>\r
+</html>\r
+\r
diff --git a/instrumentation/next-share/BaseLib/UPnP/__init__.py b/instrumentation/next-share/BaseLib/UPnP/__init__.py
new file mode 100644 (file)
index 0000000..512a836
--- /dev/null
@@ -0,0 +1,193 @@
+# Written by Ingar Arntzen, Norut
+# see LICENSE.txt for license information
+
+"""
+This implements a UPnP package facilitating implementation of
+python UPnPServers and UPnPClients (ControlPoints).
+
+The module implements one extensible UPnPServer and one
+extensible UPnPClient. They are independent, so both may be 
+run without the other. However, if they are both run at the same
+time, the UPnPClient will discover the UPnPServer, as expected.
+
+A reference to the UPnPServer may be obtained by invoking
+- get_server()
+A reference to the UPnPClient may be obtained by invoking
+- get_client()
+
+Both the UPnPServer and the UPnPClient are implemented in
+an event-based fashion. One benefit of this is that they may
+share the same thread (event loop == TaskRunner). 
+This thread may be started and stopped by invoking:
+- start()
+- stop()
+
+Limitations and Weaknesses
+
+Generally
+- Nested devices not supported.
+
+UPnPServer
+- UPnPService implementations can not be generated from xml
+  service descriptions.
+- Only boolean, int and string data types supported.
+
+UPnPClient: 
+- Could have a browser interface.
+- ServiceStubs are not disabled after SSDP device is removed.
+- Does not discover added services.
+- Only boolean, int, string and unsigned int supported.
+- Action invocations are only available in synchronized form.
+
+"""
+import threading
+from common import get_logger
+
+_LOGGER = None
+#_LOGGER = get_logger()
+
+SERVER_PRODUCT_NAME = "NextShare"
+SERVER_ROOT_DEVICE_CONFIG = {
+    'device_type' : "Basic",
+    'device_version': 1,
+    'name': "NextShare",
+    'device_domain': 'p2p-next.org',
+    'manufacturer': "P2P Next",
+    'manufacturer_url': 'http://p2p-next.org/',
+    'model_description': 'NextShare',
+    'model_name': 'Model 1',
+    'model_number': '1.0',
+    'model_url': 'http://p2p-next.org/',
+    'serial_number': '123456',    
+    'upc': '012345678910',
+    }
+
+##############################################
+# UPNP
+##############################################
+
+class _UPnP:
+    """
+    UPnP class holds instances of TaskRunner, UPnPServer 
+    and UPnPClient.
+    """
+
+    def __init__(self):
+        self._task_runner = None
+        self._task_runner_thread = None
+        self._upnp_server = None
+        self._upnp_client = None
+
+        # Real close methods
+        self._real_client_close = None
+        self._real_server_close = None
+
+    def _get_task_runner(self):
+        """Get referance to TaskRunner instance"""
+        if self._task_runner == None:
+            from BaseLib.UPnP.common import TaskRunner
+            self._task_runner = TaskRunner()        
+        return self._task_runner
+
+    def start(self, stop_event):
+        """Starts the Task Runner Thread"""
+        if self._task_runner_thread == None:
+            task_runner = self._get_task_runner()
+            run = lambda: task_runner.run_forever(stop_event=stop_event)
+            self._task_runner_thread = threading.Thread(
+                group=None, 
+                target=run,
+                name= "TaskRunnerThread")
+            self._task_runner_thread.setDaemon(True)
+            self._task_runner_thread.start()
+            
+    def is_running(self):
+        """Return true if the TaskRunner is executing in a thread."""
+        if self._task_runner_thread != None and \
+                self._task_runner_thread.is_alive(): 
+            return True
+        else: 
+            return False
+    def stop(self):
+        """
+        Stops both client and server, 
+        before stopping the Task Runner Thread.
+        """
+        self._wrap_client_close()
+        self._wrap_server_close()
+        if self._task_runner:
+            self._task_runner.stop()            
+            self._task_runner = None
+            self._task_runner_thread.join()
+            self._task_runner_thread = None
+                
+    def get_upnp_server(self):
+        """Get a referance to the UPnPServer instance."""
+        if self._upnp_server == None:
+            task_runner = self._get_task_runner()
+            from BaseLib.UPnP.upnpserver import UPnPServer
+            self._upnp_server = UPnPServer(task_runner, 
+                                           SERVER_PRODUCT_NAME, 
+                                           SERVER_ROOT_DEVICE_CONFIG,
+                                           logger=_LOGGER)     
+            # Wrap close method
+            self._real_server_close = self._upnp_server.close
+            self._upnp_server.close = self._wrap_server_close
+        return self._upnp_server
+
+    def get_upnp_client(self):
+        """Get a referance to the UPnPClient intance."""
+        if self._upnp_client == None:
+            task_runner = self._get_task_runner()
+            from BaseLib.UPnP.upnpclient import UPnPClient
+            self._upnp_client = UPnPClient(task_runner, 
+                                           logger=_LOGGER)     
+            # Wrap close method
+            self._real_client_close = self._upnp_client.close
+            self._upnp_client.close = self._wrap_client_close
+        return self._upnp_client
+
+    def _wrap_client_close(self):
+        """Internal method: Replaces the close() method of the 
+        UPnPClient instance."""
+        if self._upnp_client != None:
+            self._upnp_client = None
+            self._real_client_close()
+
+    def _wrap_server_close(self):
+        """Internal method: Replaces the close() method of the 
+        UPnPServer instance."""
+        if self._upnp_server != None:
+            self._upnp_server = None
+            self._real_server_close()
+
+_INSTANCE = _UPnP()
+
+##############################################
+# PUBLIC API 
+##############################################
+
+def start(stop_event=None):
+    """Starts the UPnPServer and/or UPnPClient."""
+    _INSTANCE.start(stop_event)
+
+def stop():
+    """Stops the UPnPServer and/or UPnPClient."""
+    _INSTANCE.stop()
+
+def get_server():
+    """Get referance to UPnPServer."""
+    return _INSTANCE.get_upnp_server() 
+    
+def get_client():
+    """Get referance to UPnPClient."""
+    return _INSTANCE.get_upnp_client()
+
+def is_running():
+    """Check if UPnPServer and/or UPnPClient is running."""
+    return _INSTANCE.is_running()
+
+import atexit
+atexit.register(stop)
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/__init__.py b/instrumentation/next-share/BaseLib/UPnP/common/__init__.py
new file mode 100644 (file)
index 0000000..4cfba91
--- /dev/null
@@ -0,0 +1,9 @@
+"""
+This package contains common utility logic for the UPnP package.
+"""
+
+from taskrunner import TaskRunner
+from upnplogger import get_logger
+from objectconsole import ObjectConsole
+from asynchHTTPserver import AsynchHTTPServer
+from asynchHTTPclient import AsynchHTTPClient
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/asynchHTTPclient.py b/instrumentation/next-share/BaseLib/UPnP/common/asynchHTTPclient.py
new file mode 100644 (file)
index 0000000..67866be
--- /dev/null
@@ -0,0 +1,624 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements the client side of a non-blocking,
+http request-response exchange, supported by a TaskRunner.
+A blocking interface is also provided on top of the non-blocking one.
+
+The implementation also sets up the connection in a non-blocking
+manner. This essentially makes it a
+connect-request-response protocol.
+"""
+
+import socket
+import errno
+import exceptions
+import os
+import threadhotel
+
+##############################################
+# BLOCKING HTTP CLIENT
+##############################################
+
+class SynchHTTPClient:
+
+    """
+    This class wraps the AsynchHTTPClient to provide 
+    a traditional blocking API.
+    """
+    FAIL, OK = "FAIL", "OK"
+
+    def __init__(self, asynchhttpclient):
+        self._threadhotel = threadhotel.ThreadHotel()
+        self._asynchclient = asynchhttpclient
+
+    def request(self, host, port, request_data):
+        """
+        Returns tuple (status, reply).
+        - Status indicates whether the request failed or succeded.
+        - If Status is FAIL, the reply explains what went wrong.
+        - Reply is tuple of: (header, body)
+        - If Status is OK, the reply includes the http response.
+        - Reply is tuple of: (error, comment)
+        """
+        rid = self._asynchclient.get_request_id()
+        self._threadhotel.reserve(rid)
+        self._asynchclient.request(rid, host, port, request_data,
+                self._abort_handler, self._response_handler, timeout=10)
+        return self._threadhotel.wait_reply(rid)
+
+    def _abort_handler(self, rid, error, comment):
+        """Abort handler."""
+        reply = (error, comment)
+        self._threadhotel.wakeup(rid, SynchHTTPClient.FAIL, reply)
+
+    def _response_handler(self, rid, header, body):
+        """Response handler."""
+        reply = (header, body)
+        self._threadhotel.wakeup(rid, SynchHTTPClient.OK, reply)
+
+
+
+##############################################
+#  NON-BLOCKING  HTTP CLIENT
+##############################################
+
+_LOG_TAG = "HTTPClient"
+
+class AsynchHTTPClient:
+
+    """
+    This class runs non-blocking asynchronous http requests 
+    to multiple HTTP servers at once. Specify a_handler or r_handler
+    for asynchrounous upcalls. If not, the httpClient supports
+    fire-and-forget semantics (from an external point of view). 
+    Internally, the httpClient will not forget a request until it has
+    either timeout out, aborted due to failure or succeeded.
+    """
+
+    def __init__(self, task_runner, logger=None):
+        self._task_runner = task_runner
+        self._request_id = 0
+        self._requests = {} # requestID: (request, aHandler, rHandler) 
+        # Logging
+        self._log_tag = _LOG_TAG
+        self._logger = logger
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+
+    def get_request_id(self):
+        """Generate new request id."""
+        self._request_id += 1
+        return self._request_id
+
+    def request(self, rid, host, port, request_data,
+                a_handler=None, r_handler=None, timeout=10):
+        """
+        Issue a new http request.
+
+        host, port -- web server.
+        request_data -- string data including both header and body.
+        a_handler(error, message) -- handler to be invoked if request aborts.
+        r_handler(header, body) -- handler to be invoked with response.
+        """
+        request = HTTPRequest(self._task_runner, rid, recv_timeout=timeout)
+        request.set_abort_handler(self._handle_abort)
+        request.set_response_handler(self._handle_response)
+        self._requests[rid] = (request, a_handler, r_handler)
+        request.dispatch(host, port, request_data)
+        self._log("Request Dispatched [%d]" % rid)
+        return rid
+
+    def close(self):
+        """Stop all requests and close their sockets."""
+        for tup in self._requests.values():
+            request = tup[0]
+            request.close()
+
+    ##############################################
+    # PRIVATE HANDLERS
+    ##############################################
+
+    def _handle_response(self, rid, header, body):
+        """Dispatches responses by invoking given r_handler."""
+        self._log("Response Received [%d]" % rid)
+        request = self._requests[rid][0]
+        r_handler = self._requests[rid][2]
+        del self._requests[rid]
+        request.close()
+        if r_handler:
+            r_handler(rid, header, body)
+
+    def _handle_abort(self, rid, error, why):
+        """Dispatches aborts by invoking given a_handler."""
+        self._log("HTTP Request Aborted [%d]" % rid)
+        request = self._requests[rid][0]
+        a_handler = self._requests[rid][1]
+        del self._requests[rid]
+        request.close()
+        if a_handler:
+            a_handler(rid, error, why)
+
+    ##############################################
+    # PRIVATE UTILITY
+    ##############################################
+
+    def _log(self, msg):
+        """Logger."""
+        if self._logger:
+            self._logger.log(self._log_tag, msg)
+
+
+##############################################
+# HTTP REQUEST
+##############################################
+
+class HTTPRequestError(exceptions.Exception): 
+    """Error associated with the request response protocol."""
+    pass
+
+class HTTPRequest:
+
+    """
+    This implements a single non-blocking connect-request-response 
+    protocol from an HTTPClient to a HTTPServer.
+    For now, this class does not support sequential requests-responses on the
+    same connection. Neither does it support instance reuse.
+    """
+    STATE_INIT = 0
+    STATE_CONNECT_STARTED = 1
+    STATE_CONNECT_OK = 2
+    STATE_SEND_STARTED = 3
+    STATE_SEND_OK = 4
+    STATE_RECV_STARTED = 5
+    STATE_RECV_OK = 6
+    STATE_DONE = 7
+
+    def __init__(self, task_runner, request_id, 
+                 recv_timeout=10, conn_timeout=1, 
+                 conn_attempts=3, logger=None):
+        self._task_runner = task_runner
+        self._request_id = request_id
+        # Create Socket
+        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self._sock.setblocking(False)
+        # Protocol State
+        self._state = HTTPRequest.STATE_INIT
+        # Request Data
+        self._request_data = None
+        self._bytes_sent = 0
+        # Response Data
+        self._response_data = ""
+        self._recv_count = 0
+        # Tasks
+        self._conn_task = None
+        self._conn_to_task = None
+        self._send_task = None
+        self._recv_task = None
+        self._recv_to_task = None
+        # Connect Attempts
+        self._connect_attempts = 0
+        self._max_connect_attempts = conn_attempts
+        # Send
+        self._bytes = 0
+        self._send_count = 0
+        # Recv
+        self._header = ""
+        self._body = ""
+        self._length = 0
+        self._delimiter = '\r\n\r\n'
+        # Timeouts
+        self._recv_to = recv_timeout
+        self._conn_to = conn_timeout
+        # Handler Upcalls
+        self._recv_handler = lambda requestID, hdr, body:None
+        self._abort_handler = lambda requestID, error, comment : None
+        # Logging
+        self._logger = logger
+        self._log_tag = "Request [%d]" % self._request_id        
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+
+    def dispatch(self, host, port, request_data):
+        """Dispatch a new request."""
+        if self._state !=  HTTPRequest.STATE_INIT:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        self._request_data = request_data
+        self._connect_start(host, port)
+
+    def set_response_handler(self, handler):
+        """Register a response handler."""
+        self._recv_handler = handler
+
+    def set_abort_handler(self, handler):
+        """Register an abort handler."""
+        self._abort_handler = handler
+
+    def close(self):
+        """Cleanup the request-response protocol and close the socket."""
+        if self._conn_task : 
+            self._conn_task.cancel()
+        if self._conn_to_task : 
+            self._conn_to_task.cancel()
+        if self._send_task : 
+            self._send_task.cancel()
+        if self._recv_task : 
+            self._recv_task.cancel()
+        if self._recv_to_task: 
+            self._recv_to_task.cancel()
+        self._state = HTTPRequest.STATE_DONE
+        if self._sock: 
+            try:
+                self._sock.close()
+            except socket.error:
+                pass
+            self._sock = None
+
+    ##############################################
+    # PRIVATE UTILITY
+    ##############################################
+
+    def _log(self, msg):
+        """Logging."""
+        if self._logger: 
+            self._logger.log(self._log_tag, msg)
+
+
+    def _get_content_length(self):
+        """Extract body length from HTTP header."""
+        lines = self._header.split('\r\n')
+        if not lines : 
+            return
+        for line in lines[1:]:            
+            if len(line.strip()) > 0:
+                elem_name, elem_value = line.split(":", 1)
+                if elem_name.lower() == 'content-length':
+                    return int(elem_value.strip())
+        else: return 0
+    def _http_header_ok(self):
+        """Check that received data is a valid HTTP header."""
+        if len(self._header) > 4 and self._header[:4] == "HTTP":
+            return True
+        else:
+            return False
+
+    def _do(self, method, args=()):
+        """Shorthand for add_task."""
+        return self._task_runner.add_task(method, args)
+    def _do_write(self, file_descriptor, method):
+        """Shorthand for add_write."""
+        return self._task_runner.add_write_task(file_descriptor, method)
+    def _do_read(self, file_descriptor, method):
+        """Shorthand for add_read."""
+        return self._task_runner.add_read_task(file_descriptor, method)
+    def _do_to(self, timeout, method):
+        """Shorthand for add_delay."""
+        return self._task_runner.add_delay_task(timeout, method)
+
+    ##############################################
+    # PRIVATE PROTOCOL METHODS
+    ##############################################
+
+    def _connect_start(self, host, port):
+        """Start non-blocking connect."""
+        self._log("Connect Start")
+        error = self._sock.connect_ex((host, port))
+        if error != errno.EINPROGRESS:
+            self._abort(error, "Non-Blocking Connect Failed")
+            return
+        self._state = HTTPRequest.STATE_CONNECT_STARTED
+        self._conn_task = self._do_write(self._sock.fileno(), 
+                                         self._handle_connect_ok)
+        self._conn_to_task = self._do_to(self._conn_to, 
+                                         self._handle_connect_to)
+
+    def _handle_connect_ok(self):
+        """
+        Handler successful connect. 
+        
+        In fact, certain unsuccessful connects may not be detected 
+        before write is attempted on the socket. 
+        """
+        self._log("Connect OK")
+        if self._state != HTTPRequest.STATE_CONNECT_STARTED:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        self._state = HTTPRequest.STATE_CONNECT_OK
+        self._conn_task.cancel()
+        self._conn_to_task.cancel()
+        # Start sending the Request
+        self._do(self._send)
+
+    def _handle_connect_to(self):
+        """Handle connect timeout."""
+        self._log("Connect Timeout")
+        if self._state != HTTPRequest.STATE_CONNECT_STARTED:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        self._connect_attempts += 1
+        if self._connect_attempts >= self._max_connect_attempts:
+            # Abort
+            self._conn_task.cancel()
+            self._abort(errno.ETIME, "Connect Timeout")
+        else:
+            # Try again
+            self._conn_to_task = self._do_to(self._conn_to, 
+                                             self._handle_connect_to)
+
+    def _send(self):
+        """
+        Start sending a request.
+        Or continue sending a partially sent request.
+        """
+        self._send_count += 1
+        first_attempt = True if self._send_count == 1 else False
+        if first_attempt and self._state != HTTPRequest.STATE_CONNECT_OK:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        elif not first_attempt and \
+                self._state != HTTPRequest.STATE_SEND_STARTED:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        if first_attempt: 
+            self._state = HTTPRequest.STATE_SEND_STARTED
+            self._log("Send Started")
+        else: self._log("Send Continue")
+        
+        # (Continue) Send
+        try:
+            bytes_sent = self._sock.send(self._request_data[self._bytes:])
+        except socket.error, why:
+            if why[0] == errno.EAGAIN:
+                # Send on full buffer
+                # Continue sending again once the socket becomes writeable
+                self._send_continue()
+                return
+            else:
+                # Typically EPIPE: Broken Pipe or ECONNREFUSED 
+                if self._send_task: 
+                    self._send_task.cancel()
+                self._abort(why[0], "Exception on Send")
+                return
+
+        # Send Operation returned naturally
+        if bytes > 0:
+            # Something was sent
+            self._bytes += bytes_sent
+            if self._bytes >= len(self._request_data):
+                # The complete message was sent
+                self._state = HTTPRequest.STATE_SEND_OK
+                self._task_runner.add_task(self._handle_send_ok)
+                return
+            else: 
+                # Message only partially sent
+                self._send_continue()
+                return
+        else:
+            # 0 bytes sent => error
+            if self._send_task : 
+                self._send_task.cancel()
+            msg = "Sent 0 bytes, yet fd was writeable and no exception occurred"
+            self._abort(errno.EPIPE, msg)
+
+    def _send_continue(self):
+        """Register new write task after request was only partially sent."""
+        # Register a new Write Task
+        if not self._send_task:
+            self._send_task = self._do_write(self._sock.fileno(), 
+                                             self._send)
+
+    def _handle_send_ok(self):
+        """Handle completely sent request."""
+        self._log("Send OK")
+        if self._state != HTTPRequest.STATE_SEND_OK:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        # Cancel Send Task
+        if self._send_task: 
+            self._send_task.cancel()
+        # Start waiting for the response
+        self._recv_task = self._do_read(self._sock.fileno(), self._recv)
+        # Define new Receive Timeout
+        self._recv_to_task = self._do_to(self._recv_to, 
+                                         self._handle_recv_to)
+                
+    def _recv(self):
+        """
+        Start receiveing the response.
+        Or continue to receive a partially received response.
+        """
+        self._recv_count += 1
+        first_attempt = True if self._recv_count == 1 else False
+        if first_attempt and self._state != HTTPRequest.STATE_SEND_OK:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        elif not first_attempt and \
+                self._state != HTTPRequest.STATE_RECV_STARTED:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        if first_attempt: 
+            self._state = HTTPRequest.STATE_RECV_STARTED
+            self._log("Recv Started")
+        else:
+            self._log("Recv Continue")
+
+        # Recv a chunk
+        try:
+            data = self._sock.recv(1024)
+        except socket.error, why:
+            if why[0] == errno.EAGAIN:
+                # EAGAIN: Enter/stay in writeset               
+                self._recv_continue()
+                return
+            else:
+                # EPIPE: Broken Pipe.
+                if self._recv_task:
+                    self._recv_task.cancel()
+                self._abort(why[0], "Exception on Recv")
+                return
+        
+        # Recv completed naturally
+        if data:
+            self._response_data += data        
+            # Parse HTTP response
+            if not self._header:
+                tokens = self._response_data.split(self._delimiter, 1)
+                if len(tokens) == 1:
+                    # Header Not Complete
+                    self._recv_continue()
+                    return
+                else:
+                    # Header Complete
+                    self._header = tokens[0]
+                    if not self._http_header_ok():
+                        self._abort(errno.EBADMSG, "Not HTTP Header")
+                        return
+                    else:
+                        # Header complete and OK
+                        self._length = len(self._header) + \
+                            len(self._delimiter) + self._get_content_length()
+            
+            if self._header:
+                # Header is received, entire body may not be received
+                if len(self._response_data) < self._length:
+                    # Entire body not received
+                    self._recv_continue()
+                else:
+                    # Entire response received (may be too long)
+                    if len(self._response_data) > self._length:
+                        # Truncate
+                        self._response_data = self._response_data[:self._length]
+                    # Entire response received (correct length)
+                    # Cancel Tasks
+                    self._recv_task.cancel()
+                    self._recv_to_task.cancel()
+                    self._state = HTTPRequest.STATE_RECV_OK
+                    self._do(self._handle_recv_ok)
+        else:
+            # Did not block, but received no data => error
+            self._recv_task.cancel()
+            msg = "Recv 0 bytes, yet fd was readable and no exception occurred"
+            self._abort(errno.EPIPE, msg)
+
+    def _recv_continue(self):
+        """Register read task to continue to receive a 
+        partially received response."""
+        # Make sure a ReadTask is registered.
+        if not self._recv_task:
+            self._recv_task = self._do_read(self._sock.fileno(), 
+                                            self._recv)
+
+    def _handle_recv_to(self):
+        """Handle receive timeout."""
+        self._log("Receive Timeout")
+        if self._state != HTTPRequest.STATE_SEND_OK:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        # Cancel RecvTask
+        if self._recv_task: 
+            self._recv_task.cancel()
+        self._abort(errno.ETIME, "Receive Timeout")
+
+    def _handle_recv_ok(self):
+        """Handle completely received response."""
+        self._log("Receive OK")
+        if self._state != HTTPRequest.STATE_RECV_OK:
+            raise HTTPRequestError, "Illegal Operation given protocol State"
+        # Upcall
+        body = self._response_data[len(self._header) + len(self._delimiter):]
+        self._state = HTTPRequest.STATE_DONE
+        args = (self._request_id, self._header, body)
+        self._do(self._recv_handler, args)
+
+    def _abort(self, error, comment):
+        """Abort this request-response protocol."""
+        fmt = "Abort [%d] %s '%s' (%s)" 
+        self._log(fmt % (error, errno.errorcode[error], \
+                             os.strerror(error), comment))
+        self._state = HTTPRequest.STATE_DONE
+        args = (self._request_id, error, comment)
+        self._do(self._abort_handler, args)
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Manual test driver: exercises both the asynchronous and the
+    # blocking HTTP client against a hard-coded test server.
+    # Usage: asynchHTTPclient.py [port] [home]
+
+    class _MockLogger:
+        """Mock-up Logger."""
+        def log(self, tag, msg):
+            """Log to stdout."""
+            print tag, msg
+
+    LOGGER = _MockLogger()
+
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    import sys
+
+    TR = taskrunner.TaskRunner()
+
+    # Hard-coded developer test hosts (LAN vs office machine).
+    HOME = "192.168.1.235"
+    WORK = "193.156.106.130"
+
+    HOST = WORK
+
+    if len(sys.argv) > 1:
+        PORT = int(sys.argv[1])        
+    else:
+        PORT = 44444
+
+    # Optional second argument 'home' switches to the HOME host.
+    if len(sys.argv) > 2 and sys.argv[2] == 'home':
+        HOST = HOME
+
+    HTTP_REQUEST = "NOTIFY /path HTTP/1.1\r\nContent-length:0\r\n\r\n"
+
+    def response_handler(rid, header, body):
+        """Response handler."""
+        print rid
+        print header
+        print "----------"
+        print body
+
+    def abort_handler(rid, error, comment):
+        """Abort handler."""
+        fmt = "Abort [%d] %s '%s' (%s) [%d]" 
+        print fmt % (error, errno.errorcode[error], 
+                     os.strerror(error), comment, rid)
+
+
+    class _TestSynchHTTPClient:
+        """Runnable test class for Synchronous HTTPClient."""
+
+        def __init__(self, asynch_httpclient):
+            self._synch_httpclient = SynchHTTPClient(asynch_httpclient)
+
+        def run(self):
+            """Run the blocking connect-request-response protocol."""
+            status, reply = self._synch_httpclient.request(HOST, 
+                                                           PORT, HTTP_REQUEST)
+            if status == SynchHTTPClient.OK:
+                header, body = reply
+                print header
+                print body
+            elif status == SynchHTTPClient.FAIL:
+                error, msg = reply
+                print error, msg
+
+    # Test Asynch HTTP Client
+    ASYNCH = AsynchHTTPClient(TR, logger=LOGGER)
+    RID = ASYNCH.get_request_id()
+    ASYNCH.request(RID, HOST, PORT, 
+                   HTTP_REQUEST, abort_handler, response_handler)
+
+    # Test Synch HTTP Client: runs in its own thread because request()
+    # blocks, while the TaskRunner event loop occupies the main thread.
+    import threading
+    SYNCH = _TestSynchHTTPClient(ASYNCH)
+    THREAD = threading.Thread(target=SYNCH.run)
+    THREAD.start()
+
+    try:
+        TR.run_forever()
+    except KeyboardInterrupt:
+        pass
+
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/asynchHTTPserver.py b/instrumentation/next-share/BaseLib/UPnP/common/asynchHTTPserver.py
new file mode 100644 (file)
index 0000000..4a82817
--- /dev/null
@@ -0,0 +1,185 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements a base class for a non-blocking HTTP Server. 
+"""
+
+import BaseHTTPServer
+import socket
+
+##############################################
+# ASYNCH REQUEST HANDLER
+##############################################
+
+class AsynchHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+    """Base Request Handler for asynchronous (non-blocking)
+    HTTPServer implementation.
+
+    Disables the base class's default logging and restricts handle()
+    to a single request so it never blocks waiting for further
+    keep-alive messages."""
+
+    def log_request(self, code="", size=0):
+        """Override to avoid default logging."""
+        pass
+
+    def log_error(self, format_, *args):
+        """Override to avoid default logging."""
+        pass
+
+    def handle(self):
+        """
+        Override to make sure handle only attempts to read
+        a single message. The default implementation tries to read
+        multiple messages if possible, given that the HTTP/1.1 protocol
+        is used. The default implementation of handle is thus potentially
+        blocking - something that does not go well with the non-blocking
+        implementation of this Web Server.
+        """
+        self.handle_one_request()
+
+    def setup(self):
+        """Initialise Request Handler.
+
+        Sets HTTP/1.1 on the instance after base-class setup; the
+        attribute is only consulted later, in handle_one_request()."""
+        BaseHTTPServer.BaseHTTPRequestHandler.setup(self)
+        self.protocol_version = 'HTTP/1.1'
+
+
+
+##############################################
+# ASYNCH HTTP SERVER
+##############################################
+
+_LOG_TAG = "HTTPServer"
+
+class AsynchHTTPServer(BaseHTTPServer.HTTPServer):
+    """Base implementation of asynchronous (non-blocking)
+    HTTP Server."""
+
+    def __init__(self, task_runner, port, request_handler_class, 
+                 logger=None):
+
+        if not issubclass(request_handler_class, AsynchHTTPRequestHandler):
+            msg = "Given RequestHandlerClass not" + \
+            "subclass of AsynchHTTPRequestHandler."""
+            raise AssertionError, msg
+
+        # Task Runner
+        self._task_runner = task_runner
+
+        # Base Class
+        try:
+            # Default Port
+            BaseHTTPServer.HTTPServer.__init__(self, ('', port), 
+                                          request_handler_class)
+        except socket.error:
+            # Any Port
+            BaseHTTPServer.HTTPServer.__init__(self, ('', 0), 
+                                          request_handler_class)
+
+        # Host Port
+        self._host = socket.gethostbyname(socket.gethostname())
+        self._port = self.server_address[1]
+
+        # Logging
+        self._logger = logger
+        self._log_tag = _LOG_TAG
+
+        # Clients
+        self._client_list = [] # [(sock, addr, task)]
+
+        # Register Tasks with TaskRunner
+        self._conn_task = self._task_runner.add_read_task(self.fileno(),
+                                                 self.handle_connect)
+
+    ##############################################
+    # PRIVATE UTILITY
+    ##############################################
+
+    def startup(self):
+        """Startup HTTPServer."""
+        self.log("START Port %d" % self._port)               
+
+    def handle_connect(self):
+        """
+        Accept new tcp connection.
+        The file descriptor associated with the HTTPServer socket
+        is assumed to be readable.
+        """
+        sock, addr = self.socket.accept()
+        handler = lambda:self.handle_request_noblock(sock, addr)
+        task = self._task_runner.add_read_task(sock.fileno(), handler)
+        self._client_list.append((sock, task))
+
+    def handle_request_noblock(self, client_sock, client_address):
+        """
+        Non-blocking request handler.
+        Assumes the given client_sock is readable.
+        Inspired by implementation of _handle_request_noblock() 
+        from SocketServer.BaseServer. Skip GetRequest which involves 
+        accept is already done in handle_connect()
+        """
+        if self.verify_request(client_sock, client_address):
+            try:
+                self.process_request(client_sock, client_address)
+            except:
+                self.handle_error(client_sock, client_address)
+                self.close_request(client_sock)
+        # Task Cleanup
+        for tup in self._client_list:
+            sock, task = tup
+            if sock == client_sock:
+                task.cancel()
+                self._client_list.remove(tup)
+
+
+    def log(self, msg):
+        """Logging."""
+        if self._logger:
+            self._logger.log(self._log_tag, msg)
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+
+    def get_port(self):
+        """Return accept port of HTTPServer."""
+        return self._port
+
+    def get_host(self):
+        """Return host ip of HTTPServer."""
+        return self._host
+
+    def close(self):
+        """Close HTTP Server."""
+        self.log("CLOSE")
+        self._conn_task.cancel()
+        for sock, task in self._client_list:
+            task.cancel()
+            sock.close()
+        self.server_close()
+
+
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == "__main__":
+
+    class _Mock_Logger:
+        """Mock Logger object."""
+        def log(self, tag, msg):
+            """Log to standard out."""
+            print tag, msg
+
+    # Run a standalone server on port 44444 (or an ephemeral port if
+    # that is taken) until interrupted with Ctrl-C.
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    TR = taskrunner.TaskRunner()
+    SERVER = AsynchHTTPServer(TR, 
+                              44444, 
+                              AsynchHTTPRequestHandler, 
+                              logger=_Mock_Logger())
+    try:
+        TR.run_forever()
+    except KeyboardInterrupt:
+        print
+    SERVER.close()
+    TR.stop()
+        
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/objectconsole.py b/instrumentation/next-share/BaseLib/UPnP/common/objectconsole.py
new file mode 100644 (file)
index 0000000..19eeb12
--- /dev/null
@@ -0,0 +1,85 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements a generic console interface that can
+be attached to any runnable python object.
+"""
+
+import code
+import __builtin__
+import threading
+import exceptions
+
+##############################################
+# OBJECT CONSOLE
+##############################################
+
+class ConsoleError(exceptions.Exception):
+    """Error associated with the console."""
+    pass
+
+class ObjectConsole:
+
+    """
+    This class runs a python console in the main thread, and starts 
+    a given Object in a second thread.
+    
+    The Object is assumed to implement at least two methods, run() and stop().
+    - The run() method is the entry point for the thread.
+    - The stop() method is used by the main thread to request that the
+    object thread does a controlled shutdown and returns from the run method.
+    If the worker thread does not return from run() within 2 seconds after stop()
+    has been invoked, the console terminates the object thread more aggressively.
+    
+    AttributeNames of Object listed in the provided namespace will be 
+    included in the console namespace. 
+    """
+    TIMEOUT = 2
+
+
+    def __init__(self, object_, name_space=None, run='run', 
+                 stop='stop', name=""):
+
+        self._object = object_
+        self._object_run = getattr(object_, run)
+        self._object_stop = getattr(object_, stop)        
+        self._thread = threading.Thread(group=None, 
+                                        target=self._object_run, 
+                                        name="ObjectThread")
+
+        # Configure Console Namespace
+        self._name_space = {}
+        self._name_space['__builtiname_space__'] = __builtin__
+        self._name_space['__name__'] = __name__
+        self._name_space['__doc__'] = __doc__
+        self._name_space['help'] = self._usage
+
+        if name_space and isinstance(name_space, type({})):
+            self._name_space.update(name_space)
+
+        self._app_name_space = name_space
+        self._app_name = name
+        self._usage()
+
+
+    def _usage(self):
+        """Print usage information."""
+        print "\nConsole:", self._app_name
+        for key in self._app_name_space.keys():
+            print "- ", key
+        print "-  help"
+
+    def run(self):
+        """Starts the given runnable object in a thread and 
+        then starts the console."""
+        self._thread.start()
+        try:
+            code.interact("", None, self._name_space)
+        except KeyboardInterrupt:
+            pass
+        self._object_stop()
+        self._thread.join(ObjectConsole.TIMEOUT)
+        if self._thread.isAlive():
+            raise ConsoleError, "Worker Thread still alive"
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/taskrunner.py b/instrumentation/next-share/BaseLib/UPnP/common/taskrunner.py
new file mode 100644 (file)
index 0000000..af9c378
--- /dev/null
@@ -0,0 +1,532 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+The taskrunner module implements a light-weight framework
+for asynchronous (delayed) execution of non-blocking Tasks.
+"""
+
+__all__ = ['TaskRunner']
+
+import select
+import time
+import threading
+import exceptions
+
+##############################################
+# TASK HANDLE
+##############################################
+
+class TaskHandle:
+    """
+    A Task Handle is returned to clients after having
+    registered a Task with the TaskRunner.
+    
+    Its only purpose is to enable clients to cancel a Task,
+    without exposing the internal Task objects to clients.
+    """
+    
+    def __init__(self, task_runner, task_id):
+        self._task_runner = task_runner
+        self.tid = task_id
+
+    def cancel(self):
+        """Cancel a Task within the TaskRunner."""
+        self._task_runner.cancel_task(self.tid)
+
+
+##############################################
+# TASK
+##############################################
+
+class _Task:
+
+    """
+    A Task can be executed by the TaskRunner a given number
+    of times.
+
+    task_runner -- reference to TaskRunner object.
+    limit -- maximum number of executions for this Task.
+    method -- a method object to be executed.
+    args -- a tuple containing method arguments.
+
+    If limit is -1 (TaskRunner.TASK_NO_LIMIT) the number of
+    executions is unlimited.
+    """
+
+    def __init__(self, task_runner, limit, method, args):
+        self._counter = 0
+        self._cancelled = False
+        self._task_runner = task_runner
+        self._limit = limit
+        self._method = method
+        self._args = args
+        self.name = method.__name__
+        self.tid = None
+        self._task_runner.register_task(self) # initialises self.tid
+        
+    def cancel(self):
+        """Causes the Task to never be executed."""
+        self._cancelled = True
+
+    def execute(self):
+        """Execute the Task once, if still permitted.
+
+        The pre-check gates this execution; the post-check decides
+        whether the Task may run again later -- the return value
+        reflects the post-check, and a Task that may not run again is
+        also deregistered from the TaskRunner."""
+        success = self._execute_ok()
+        if success:
+            self._counter += 1
+            self._method(*self._args)
+        success = self._execute_ok()
+        if not success:
+            self._task_runner.cancel_task(self.tid)
+        return success
+
+    def _execute_ok(self):
+        """Checks if Task may be executed one more time. Returns bool."""
+        if self._cancelled : 
+            return False
+        elif (self._limit == TaskRunner.TASK_NO_LIMIT): 
+            return True
+        elif (self._counter + 1 <= self._limit): 
+            return True
+        return False    
+
+    def __str__(self):
+        # Debug representation: "<Class> [tid] name [count/limit]".
+        if self._limit == TaskRunner.TASK_NO_LIMIT:
+            return "%s [%s] %s[%s/INF]" % (self.__class__.__name__, 
+                                             self.tid, self.name, self._counter)
+        else:
+            return "%s [%d] %s [%d/%d]" % (self.__class__.__name__, 
+                                            self.tid, self.name, 
+                                            self._counter, self._limit) 
+
+
+        
+##############################################
+# READ TASK
+##############################################
+
+class _ReadTask(_Task):
+
+    """
+    A ReadTask is executed after its associated filedescriptor
+    has become readable.
+    """
+    def __init__(self, task_runner, limit, file_descriptor, method, args):
+        _Task.__init__(self, task_runner, limit, method, args)
+        self._file_descriptor = file_descriptor
+        self._task_runner.enter_rd_set(self)
+    def fileno(self):
+        """Return file descriptor of IoTask."""
+        return self._file_descriptor
+    def cancel(self):
+        """Cancel ReadTask."""
+        self._task_runner.leave_rd_set(self) 
+        _Task.cancel(self)
+
+##############################################
+# WRITE TASK
+##############################################
+
+class _WriteTask(_Task):
+
+    """
+    A WriteTask is executed after its associated filedescriptor
+    has become writeable.
+    """
+    def __init__(self, task_runner, limit, file_descriptor, method, args):
+        _Task.__init__(self, task_runner, limit, method, args)
+        self._file_descriptor = file_descriptor
+        self._task_runner.enter_wr_set(self)
+    def fileno(self):
+        """Return file descriptor of IoTask."""
+        return self._file_descriptor
+    def cancel(self):
+        """Cancel WriteTask."""
+        self._task_runner.leave_wr_set(self)
+        _Task.cancel(self)
+
+
+##############################################
+# DELAY TASK
+##############################################
+
+class _DelayTask(_Task):
+
+    """
+    Delay Tasks are to be executed at a given point in time.
+
+    limit -- max number of executions
+    delay -- amount of time from now to expiry
+    period -- interval between executions
+    """
+
+    def __init__(self, task_runner, limit, delay, period, method, args):
+        _Task.__init__(self, task_runner, limit, method, args)
+        self._created = time.time()
+        self._delay = delay
+        self._period = period
+        self.register_timeout()
+
+    def execute(self):
+        """Overrides execute to register a new timeout just after the
+        execution.  No timeout is scheduled once execute() reports the
+        Task as no longer runnable."""
+        success = _Task.execute(self)
+        if success:
+            self.register_timeout()
+        return success
+
+    def register_timeout(self):
+        """Register a new timeout with task_runner.
+
+        The expiry is anchored at creation time (created + delay +
+        counter*period), so the schedule does not drift with execution
+        latency."""
+        expiry = self._created + self._delay + self._counter*self._period 
+        self._task_runner.register_timeout(expiry, self)
+            
+
+
+##############################################
+# TIMEOUT
+##############################################
+
+class _Timeout:
+    
+    """
+    A Timeout represents a point in time where the
+    attached Task is due for execution.
+    
+    expiry -- absolute point in time ( time.time() )
+    task -- the Task
+    """
+
+    def __init__(self, expiry, task):
+        self.expiry = expiry
+        self.task = task
+    def __eq__(self, other):
+        return self.expiry == other.expiry
+    def __lt__(self, other):
+        return self.expiry < other.expiry
+    def __gt__(self, other):
+        return self.expiry > other.expiry
+    def __le__(self, other):
+        return self.expiry <= other.expiry
+    def __ge__(self, other):
+        return self.expiry >= other.expiry
+    def __ne__(self, other):
+        return self.expiry != other.expiry
+
+##############################################
+# DELAY TASK HEAP
+##############################################
+
+import heapq
+
+class _DelayTaskHeap:
+
+    """
+    Heap of Timeouts. Timeouts are kept sorted with respect to
+    expiry. Next Timeout first.
+    """
+
+    def __init__(self):
+        self._array = []
+
+    def push(self, expiry, task):
+        """Register a timeout for Task."""
+        timeout = _Timeout(expiry, task)
+        heapq.heappush(self._array, timeout)
+
+    def poll(self):
+        """Polls the TimeoutHeap to see if any Delayed Tasks
+        are due for execution."""
+        task_list = []
+        time_stamp = time.time()
+        while self._array and self._array[0].expiry < time_stamp:
+            timeout = heapq.heappop(self._array)
+            task_list.append(timeout.task)
+        return task_list
+
+##############################################
+# TASK RUNNER ERROR
+##############################################
+
+class TaskRunnerError (exceptions.Exception): 
+    """Error associated with running the TaskRunner. """
+    pass
+
+
+##############################################
+# TASK RUNNER
+##############################################
+
+class TaskRunner:
+
+    """
+    TaskRunner runs Tasks asynchronously (delayed).
+    
+    It supports four types of Tasks.
+    Task -- simple task for immediate execution.
+    ReadTask -- Task to be executed after its file descriptor became readable.
+    WriteTask -- Task to be executed after its file descriptor became writeable.
+    DelayTask -- Task to be executed at one (or multiple) specified points in time.
+    """
+
+    TASK_NO_LIMIT = -1
+
+    def __init__(self):
+        # Read Set
+        self._rd_set = []
+        # Write Set
+        self._wr_set = []
+        # Timeout Heap
+        self._delay_task_heap = _DelayTaskHeap()
+        # TaskQueue
+        self._task_queue = []
+        # Stop Flag
+        self._internal_stop_event = threading.Event()
+        self._external_stop_event = None
+        # Task ID
+        self._task_id = 0L
+        # Task Map
+        self._task_map = {}
+        # Task Runner Lock protecting critical sections.
+        # Conservative locking is implemented, using only
+        # one lock for all critical sections. 
+        self._lock = threading.Lock()
+        # Bad filedescriptor exists in read and/or writeset
+        self._bad_fd = False
+
+    ##############################################
+    # UTILITY (Used only by internal Tasks)
+    ##############################################
+
+    def enter_rd_set(self, io_task):
+        """Add filedescriptor of IoTask to read set."""
+        if not io_task in self._rd_set:
+            self._rd_set.append(io_task)
+            return True
+        else : return False
+
+    def leave_rd_set(self, io_task):
+        """Remove filedescriptor of IoTask from read set."""
+        if io_task in self._rd_set:
+            self._rd_set.remove(io_task)
+            return True
+        else : return False
+
+    def enter_wr_set(self, io_task):
+        """Add filedescriptor of IoTask to write set."""
+        if not io_task in self._wr_set:
+            self._wr_set.append(io_task)
+            return True
+        else : return False
+
+    def leave_wr_set(self, io_task):
+        """Remove filedescriptor of IoTask from write set."""
+        if io_task in self._wr_set:
+            self._wr_set.remove(io_task)
+            return True
+        else : return False
+
+    def register_timeout(self, expiry, task):
+        """Register a Timeout for a Delayed Task."""
+        self._delay_task_heap.push(expiry, task)
+
+    def register_task(self, task):
+        """Stores reference to Task internally."""
+        self._task_id += 1
+        task.tid = self._task_id
+        self._task_map[self._task_id] = task
+        return task.tid
+
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+
+    def add_task(self, method, args=()):
+        """Add Task. Returns TaskHandle."""
+        self._lock.acquire()
+        task = _Task(self, 1, method, args)
+        self._task_queue.append(task)
+        handle = TaskHandle(self, task.tid)
+        self._lock.release()
+        return handle
+
+    def add_read_task(self, file_descriptor, method, args=()):
+        """Add Read Task. Returns TaskHandle."""
+        self._lock.acquire()
+        task = _ReadTask(self, TaskRunner.TASK_NO_LIMIT, 
+                         file_descriptor, method, args)
+        handle = TaskHandle(self, task.tid)
+        self._lock.release()
+        return handle
+
+    def add_write_task(self, file_descriptor, method, args=()):
+        """Add Write Taks. Returns TaskHandle."""
+        self._lock.acquire()
+        task = _WriteTask(self, TaskRunner.TASK_NO_LIMIT, 
+                          file_descriptor, method, args)
+        handle = TaskHandle(self, task.tid)
+        self._lock.release()
+        return handle
+
+    def add_delay_task(self, delay, method, args=()):
+        """Add Delay Task. Returns TaskHandle."""
+        self._lock.acquire()
+        task = _DelayTask(self, 1, delay, 0, method, args)
+        handle = TaskHandle(self, task.tid)
+        self._lock.release()
+        return handle
+
+    def add_periodic_task(self, period, method, args=(), 
+                          delay=0, limit=None):
+        """Add Periodic Task. Returns TaskHandle."""
+        if limit == None: 
+            limit = TaskRunner.TASK_NO_LIMIT
+        self._lock.acquire()
+        task = _DelayTask(self, limit, delay, period, method, args)
+        handle = TaskHandle(self, task.tid)
+        self._lock.release()
+        return handle
+
+    def cancel_task(self, task_id):
+        """
+        Causes Task to be invalidated so that it 
+        will not be exectuted.
+        """
+        self._lock.acquire()
+        task = self._task_map.get(task_id, None)
+        if task:
+            task.cancel()
+            del self._task_map[task.tid]
+        self._lock.release()
+
+
+    ##############################################
+    # EXECUTION API
+    ##############################################
+        
+    def run_forever(self, frequency=.1, stop_event=None):
+        """Run the TaskRunner until it is stopped."""
+        self._external_stop_event = stop_event
+        while not self.is_stopped():
+            self.run_once(frequency)
+
+    def run_batch(self, limit=None, timeout=0):
+        """
+        Run the TaskRunner until it has got no more to do, or limit is reached.
+        timeout -- if no tasks are available immediately, block max timeout seconds
+        limit -- maximum number of tasks to execute (limit >= 1, None means no limit)
+        """
+        if limit <= 0:
+            limit = 1
+        count = 0
+        did_something = True
+        while not self.is_stopped() and did_something:
+            if count == 0:
+                did_something = self.run_once(timeout=timeout)
+            else:
+                did_something = self.run_once(timeout=0)
+            if did_something:
+                count += 1
+                if limit != None and count >= limit:
+                    return
+
+    def is_stopped(self):
+        """Returns true if the TaskRunner has been requested to stop."""
+        if self._internal_stop_event.is_set(): 
+            return True
+        if self._external_stop_event != None:
+            if self._external_stop_event.is_set():
+                return True
+        return False
+
+    def run_once(self, timeout=0):
+        """Run at most a single Task within the TaskRunner."""
+
+        if self.is_stopped(): 
+            return False
+
+        self._lock.acquire()
+
+        # Poll Timeouts
+        if not self._task_queue:
+            d_list = self._delay_task_heap.poll()
+            if d_list: 
+                self._task_queue += d_list
+
+        # Poll File Descriptors
+        if not self._task_queue:
+            # release lock before potentially blocking on select.
+            self._lock.release()
+            try:
+                lists = select.select(self._rd_set, self._wr_set, 
+                                      self._rd_set + self._wr_set, timeout)
+            except select.error:
+                # A bad file descriptor in readset and/or writeset
+                # This could happen if a read/write task is cancelled
+                # from another thread - and then the socket is closed
+                # immediately afterwards. This only-once type of error 
+                # can safely be ignored.
+                # However, if a socket is closed but the task is not cancelled,
+                # this is an error, and the programmer should be signaled.
+                if self._bad_fd == True:
+                    msg = "Read/Write Task with Bad File Descriptor."
+                    raise TaskRunnerError, msg
+                self._bad_fd = True
+                return False
+            else:
+                # Reset bad_fd flag is no error occured.
+                self._bad_fd = False
+
+
+            r_list, w_list, e_list = lists
+            if e_list: 
+                for task in e_list:
+                    print "Error", task
+                    self.cancel_task(task.tid)
+
+            self._lock.acquire()
+            if r_list: 
+                self._task_queue += r_list
+            if w_list: 
+                self._task_queue += w_list
+        
+        # Execute at most one Task
+        if self._task_queue:
+            task = self._task_queue.pop(0)
+            # Release lock before executing a task.
+            self._lock.release() 
+            return task.execute()
+        else:
+            self._lock.release()
+            return False
+        
+    def stop(self):
+        """Requests that the TaskRunner stops itself."""
+        self._internal_stop_event.set()
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    def tick(): 
+        """Tick Task."""
+        print "Tick", time.time()
+
+    def tack():
+        """Tack Task.""" 
+        print "Tack", time.time()
+
+    def tock(): 
+        """Tock Task."""
+        print "Tock", time.time()
+
+    # Demo: tick every second (unlimited), tack once after 2 seconds,
+    # tock every 3 seconds starting after 0.5s, at most 4 times.
+    TASK_RUNNER = TaskRunner()
+    TASK_RUNNER.add_periodic_task(1, tick)
+    TASK_RUNNER.add_delay_task(2, tack)
+    TASK_RUNNER.add_periodic_task(3, tock, delay=.5, limit=4)
+    try:
+        TASK_RUNNER.run_forever()
+    except KeyboardInterrupt:
+        pass
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/threadhotel.py b/instrumentation/next-share/BaseLib/UPnP/common/threadhotel.py
new file mode 100644 (file)
index 0000000..e0bca4d
--- /dev/null
@@ -0,0 +1,98 @@
+#############################################################
+# Author: Ingar M. Arntzen
+#############################################################
+
+"""
+This implements a thread hotel where threads block for asynchronous replies 
+after having made an non-blocking request. The thread hotel is typically used
+to build a blocking API on top of a non-blocking.
+"""
+
+import threading
+
+#############################################################
+# THREAD HOTEL
+#############################################################
+
+class ThreadHotel:
+
+    """Threads wait in the Thread Hotel until the reply associated with a given
+    request ID is available."""
+
+    class Room:
+        """Each thread is assigned his own room."""
+        def __init__(self):
+            self._bed = threading.Event()
+            self._reply = None
+            self._status = None
+        def goto_bed(self):
+            """Goto sleep."""
+            self._bed.wait()
+        def wakeup_call(self):
+            """Wake up sleeping thread."""
+            self._bed.set()
+        def get_reply(self):
+            """Get the asynchronous reply."""
+            return (self._status, self._reply)
+        def set_reply(self, status, reply):
+            """Leave the asynchronous reply."""
+            self._reply = reply
+            self._status = status
+
+    def __init__(self):
+        self._rooms = {}
+        self._lock = threading.Lock()
+
+    def _get_room(self, request_id):
+        """Get a room given a request_id. 
+        The request id is assumed to be unique. """
+        if not self._rooms.has_key(request_id):
+            self._rooms[request_id] = ThreadHotel.Room()
+        return self._rooms[request_id]
+        
+    def _leave_room(self, request_id):
+        """Leave room after having been woke up."""
+        del self._rooms[request_id]
+
+    def reserve(self, request_id):
+        """Reserve a room before the asynchronous request is
+        actually carried out."""
+        self._lock.acquire()
+        self._get_room(request_id)
+        self._lock.release()
+
+    def wait_reply(self, request_id):
+        """Wait for a reply given a unique request id."""
+        self._lock.acquire()
+        room = self._get_room(request_id)
+        status, reply = room.get_reply()
+        # Reply is available -> Return immedately
+        if not reply:
+            # Wait
+            self._lock.release()
+            room.goto_bed()            
+            # Wake up
+            self._lock.acquire()
+            status, reply = room.get_reply()
+        # Leave with Reply
+        self._leave_room(request_id)
+        self._lock.release()
+        return status, reply    
+
+    def wakeup(self, request_id, status, reply):
+        """Deliver reply for given request id, thereby waking up
+        sleeping thread."""
+        if self._rooms.has_key(request_id):
+            self._lock.acquire()
+            room = self._rooms[request_id]
+            room.set_reply(status, reply)
+            # Wake up potential visitor.
+            room.wakeup_call()
+            self._lock.release()
+            return True
+        else: return False
+
+    def is_waiting(self, request_id):
+        """Is threre a thread waiting for a given request id?"""
+        return self._rooms.has_key(request_id)
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/upnplogger.py b/instrumentation/next-share/BaseLib/UPnP/common/upnplogger.py
new file mode 100644 (file)
index 0000000..6a47f4d
--- /dev/null
@@ -0,0 +1,24 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""Logging module for UPnP Server and UPnPClient."""
+
+import sys
+
+class _UPnPLogger:
+    """UPnPLogger takes two tags and a msg.
+    First tag denotes the origin - UPnPServer or UPnPClient.
+    Second tag denotes the particular module within the given origin."""
+    def __init__(self):
+        pass
+
+    def log(self, tag1, tag2, msg):
+        """Logs to stderr; both tags left-justified to 12 columns."""
+        sys.stderr.write(tag1.ljust(12) + tag2.ljust(12) + msg + "\n")
+        sys.stderr.flush()
+
+# Module-level singleton shared by all callers of get_logger().
+_INSTANCE = _UPnPLogger()
+
+def get_logger():
+    """Get reference to the shared logger instance."""
+    return _INSTANCE
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/upnpmarshal.py b/instrumentation/next-share/BaseLib/UPnP/common/upnpmarshal.py
new file mode 100644 (file)
index 0000000..43360f9
--- /dev/null
@@ -0,0 +1,139 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements marshalling and unmarshalling 
+between string values and python objects, in
+accordance with the UPnP specification.
+"""
+
+import types
+import exceptions
+
+class MarshalError(exceptions.Exception): 
+    """
+    Error associated with marshalling and unmarshalling.
+
+    Raised by the loads*/dumps* helpers below when a value, python
+    type, or UPnP type string is not supported, or when string data
+    cannot be converted.
+    """
+    pass
+
+##############################################
+# LOADS
+##############################################
+
def loads(type_, data):
    """Load string data and return a value of the given python type.

    type_ -- a python type object (int, a string type, or bool)
    data  -- string representation of the value

    Raises MarshalError for unsupported types or unparsable booleans.
    """
    if type_ == types.IntType:
        return int(data)
    elif type_ in types.StringTypes:
        return str(data)
    elif type_ == types.BooleanType:
        # Accept the boolean spellings the UPnP peers are known to send.
        if data in ['1', 'true', 'True', 'yes']:
            return True
        elif data in ['0', 'false', 'False', 'no']:
            return False
        else:
            # raise E(msg) instead of the legacy "raise E, msg" form:
            # identical behavior, valid in Python 2, and forward
            # compatible.
            raise MarshalError("Loads: Boolean failed %s" % data)
    else:
        raise MarshalError("Loads: Unsupported Type %s" % type_)
+
def loads_data_by_upnp_type(upnp_type_string, data_string):
    """Loads string data into a python value given a string definition
    of the UPnP data type.

    Supported UPnP types: boolean, int, string, ui1, ui2, ui4.
    Raises MarshalError for unsupported type strings or invalid
    boolean data.
    """
    if upnp_type_string == 'boolean':
        if data_string in ['1', 'true', 'True', 'yes']:
            return True
        elif data_string in ['0', 'false', 'False', 'no']:
            return False
        else:
            # Bug fix: the original fell through and implicitly
            # returned None for unrecognized boolean spellings; raise
            # like every other failure path in this module instead.
            raise MarshalError("Loads: Boolean failed %s" % data_string)
    elif upnp_type_string == 'int':
        return int(data_string)
    elif upnp_type_string == 'string':
        return str(data_string)
    elif upnp_type_string == 'ui1':
        # Unsigned 1 byte integer
        return int(data_string) & 0xFF
    elif upnp_type_string == 'ui2':
        # Unsigned 2 byte integer
        return int(data_string) & 0xFFFF
    elif upnp_type_string == 'ui4':
        # Unsigned 4 byte integer (the original comment wrongly said
        # "1 byte"; the mask below is clearly 32-bit)
        return int(data_string) & 0xFFFFFFFF
    else:
        raise MarshalError("Loads: Unsupported Type %s" % upnp_type_string)
+
+
+##############################################
+# DUMPS
+##############################################
+
def dumps_by_upnp_type(upnp_type_string, value):
    """Dumps a python value into a unicode string according to
    upnp_type_string.

    The value's python type must agree with the UPnP type string,
    otherwise MarshalError is raised.  Strings are wrapped in a CDATA
    section so markup survives embedding in soap/xml documents.
    """
    if isinstance(value, types.BooleanType) and upnp_type_string == "boolean":
        return u'1' if value == True else u'0'
    elif isinstance(value, types.StringTypes) and upnp_type_string == 'string':
        return unicode("<![CDATA[%s]]>" % value)
    elif isinstance(value, types.IntType) and upnp_type_string == 'ui1':
        return unicode(value & 0xFF)
    elif isinstance(value, types.IntType) and upnp_type_string == 'ui2':
        return unicode(value & 0xFFFF)
    elif isinstance(value, types.IntType) and upnp_type_string == 'ui4':
        return unicode(value & 0xFFFFFFFF)
    elif isinstance(value, types.IntType) and upnp_type_string == 'int':
        return unicode(value)
    else:
        # Modernized from the legacy "raise E, msg" statement form;
        # behavior unchanged, syntax valid in Python 2 as well.
        raise MarshalError("Dumps: Unsupported Type %s" % str(value))
+    
+
def dumps(value):
    """Dump a typed python value (bool, string, or int) to a unicode
    string.

    Booleans become u'1'/u'0'; strings are wrapped in a CDATA section;
    ints are rendered in decimal.  Raises MarshalError for any other
    type.  Note: bool is checked first because it is a subtype of int.
    """
    if isinstance(value, types.BooleanType):
        return u'1' if value == True else u'0'
    elif isinstance(value, types.StringTypes):
        return unicode("<![CDATA[%s]]>" % value)
    elif isinstance(value, types.IntType):
        return unicode(value)
    else:
        # Modernized from the legacy "raise E, msg" statement form.
        raise MarshalError("Dumps: Unsupported Type %s" % str(value))
+
+##############################################
+# DATATYPES
+##############################################
+
def dumps_data_type(python_type):
    """Converts a python type object to a UPnP data-type string,
    according to the UPnP specification.

    Raises MarshalError for types other than bool, int and str.
    """
    if python_type == types.BooleanType:
        return u'boolean'
    elif python_type == types.IntType:
        return u'int'
    elif python_type == types.StringType:
        return u'string'
    else:
        # Modernized from the legacy "raise E, msg" statement form.
        raise MarshalError("Dumps Datatype: Unsupported Type %s"
                           % str(python_type))
+
def loads_python_type(type_string):
    """Converts a UPnP variable type string to a python type object.

    All integer flavors ('int', 'ui1', 'ui2', 'ui4') map to IntType.
    Raises MarshalError for unsupported type strings.
    """
    if type_string == 'boolean':
        return types.BooleanType
    elif type_string in ['int', 'ui1', 'ui2', 'ui4']:
        return types.IntType
    elif type_string == u'string':
        return types.StringType
    else:
        # Modernized from the legacy "raise E, msg" statement form.
        raise MarshalError("Loads Datatype: Unsupported Type %s"
                           % type_string)
+
+
+    
+
+##############################################
+# MAIN
+##############################################
+
+# Placeholder entry point: this module has no self-test.
+if __name__ == '__main__':
+    pass
+    
+    
diff --git a/instrumentation/next-share/BaseLib/UPnP/common/upnpsoap.py b/instrumentation/next-share/BaseLib/UPnP/common/upnpsoap.py
new file mode 100644 (file)
index 0000000..076069e
--- /dev/null
@@ -0,0 +1,338 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements parsing and creation of soap messages
+specified by the UPnP specification, action requests, action responses,
+error responses and event messages.
+"""
+import xml.dom.minidom as minidom
+
+# Numeric soap/UPnP action error codes mapped to their standard
+# descriptions (presumably following the UPnP control specification's
+# error-code table -- verify against the spec before extending).
+ERROR_CODES = {
+    401: "Invalid Action",
+    402: "Invalid Args",
+    501: "Action Failed",
+    600: "Argument Value Invalid",
+    601: "Argument Value Out of Range",
+    602: "Optional Action Not Implemented",
+    603: "Out of Memory",
+    604: "Human Intervention Required",
+    605: "String Argument Too Long",
+    606: "Action Not Authorized",
+    607: "Signature Failure",
+    608: "Signature Missing",
+    609: "Not Encrypted",
+    610: "Invalid Sequence",
+    611: "Invalid Control URL",
+    612: "No Such Session",
+}
+
+
+def _get_element_data(element):
+    """Get all data contained in element."""
+    data = ""
+    if element != None:
+        for node in element.childNodes:
+            if node.nodeType == node.TEXT_NODE:
+                data += str(node.data)
+            elif node.nodeType == node.ELEMENT_NODE:
+                data += str(node.toxml())
+            elif node.nodeType == node.CDATA_SECTION_NODE:
+                data += str(node.data)
+    return data
+
+##############################################
+# PARSE ACTION REQUEST
+##############################################
+
+class _ActionRequest:
+
+    """This implements parsing of action requests made by
+    UPnP control points. The soap message corresponds to
+    the body of the HTTP POST request.
+
+    Instances are reusable: parse() populates the state, the get_*
+    accessors read it, and reset() clears it for the next message.
+    """
+
+    def __init__(self):
+        # Parsed DOM document; None until parse() has succeeded.
+        self._doc = None
+        self._action_name = ""
+        self._ns = ""
+        self._args = []
+
+    def parse(self, xmldata):
+        """This parses the xmldata and makes the included information
+        easily accessible.  Returns True on success, False if the
+        document lacks the expected envelope/body/action structure."""
+        try:
+            # Newlines are stripped so pretty-printed input does not
+            # produce whitespace text nodes between the elements.
+            doc = minidom.parseString(xmldata.replace('\n', ''))
+            envelope = doc.documentElement
+            body = envelope.firstChild
+            action_element = body.firstChild
+            args = []
+            for arg_element in action_element.childNodes:
+                data = _get_element_data(arg_element)
+                args.append((str(arg_element.tagName), data))
+        except (TypeError, AttributeError):
+            return False
+
+        # Only commit state once the whole document parsed cleanly.
+        self._doc = doc
+        self._action_name = action_element.localName
+        self._ns = action_element.namespaceURI
+        self._args = args
+        return True
+        
+    def get_action_name(self):
+        """Retrieve the name of the requested UPnP action.
+        Returns None before a successful parse()."""
+        if self._doc: 
+            return self._action_name
+
+    def get_namespace(self):
+        """Retrieve the namespace of the requested UPnP action,
+        i.e. the service type it refers to.
+        Returns None before a successful parse()."""
+        if self._doc: 
+            return self._ns
+
+    def get_arguments(self):
+        """Retrieve the in-arguments associated with 
+        the UPnP action request.
+        Returns None before a successful parse()."""
+        if self._doc: 
+            return self._args
+
+    def reset(self):
+        """Reset so that a new soap message may be parsed by the
+        same instance.  Unlinks the DOM document to free it."""
+        if self._doc:
+            self._doc.unlink()
+            self._doc = None
+        self._action_name = ""
+        self._ns = ""
+        self._args = []
+
+
+# Module-level singleton parser shared by all callers.
+# NOTE(review): parse_action_request() mutates this shared instance,
+# so concurrent calls from multiple threads would interleave state --
+# confirm callers are single-threaded.
+_INSTANCE = _ActionRequest()
+
+def parse_action_request(data):
+    """This function parses the soap xml request and
+    returns three values. The function hides the details of 
+    implementation.
+
+    action -- name of action
+    ns -- namespace (i.e. service type)
+    args -- action in-arguments.
+
+    Returns None if the request cannot be parsed.
+    """
+    success = _INSTANCE.parse(data)
+    if not success: 
+        return None
+    else:
+        action_name = _INSTANCE.get_action_name()
+        name_space = _INSTANCE.get_namespace()
+        args = _INSTANCE.get_arguments()
+        # reset() frees the DOM document and readies the singleton
+        # for the next request.
+        _INSTANCE.reset()
+        return action_name, name_space, args
+
+
+##############################################
+# CREATE ACTION REQUEST
+##############################################
+
# Soap envelope template for action requests; the newlines are
# collapsed below so the wire form is a single line.
ACTION_REQUEST_FMT = """<?xml version="1.0"?>
<s:Envelope 
s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding" 
xmlns:s="http://schemas.xmlsoap.org/soap/envelope">
<s:Body>
<u:%s 
xmlns:u="%s">
%s
</u:%s>
</s:Body>
</s:Envelope>
"""
ACTION_REQUEST_FMT = ACTION_REQUEST_FMT.replace('\n', '')

ARG_FMT = "<%s>%s</%s>"

def create_action_request(service_type, action_name, args):
    """Create a soap action request document.  Returns string.

    service_type -- namespace placed on the action element
    action_name  -- name of the UPnP action
    args         -- iterable of (arg_name, arg_value) pairs
    """
    body = "".join(ARG_FMT % (name, value, name) for name, value in args)
    return ACTION_REQUEST_FMT % (action_name, service_type,
                                 body, action_name)
+
+
+##############################################
+# CREATE ACTION RESPONSE
+##############################################
+
+# Soap envelope template for action responses; newlines are collapsed
+# below so the wire form is a single line.
+ACTION_RESPONSE_FMT = u"""<?xml version="1.0"?>
+<s:Envelope 
+s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding" 
+xmlns:s="http://schemas.xmlsoap.org/soap/envelope">
+<s:Body>
+<u:%sResponse 
+xmlns:u="%s">
+%s
+</u:%sResponse>
+</s:Body>
+</s:Envelope>"""
+
+ACTION_RESPONSE_FMT = ACTION_RESPONSE_FMT.replace('\n', '')
+
+RESULT_FMT = u"<%s>%s</%s>"
+
+def create_action_response(name_space, action_name, result_list):
+    """This function creates a soap xml action response,
+    given three parameters.
+
+    name_space -- namespace (i.e. upnp service type)
+    action_name -- name of upnp action
+    result_list -- list of result values, i.e. (argName, argValue) - tuples
+
+    Returns a unicode string.
+    """
+    name_space = unicode(name_space)
+    action_name = unicode(action_name)
+    data_str = ""
+    for (result_name, result_value) in result_list:
+        result_name = unicode(result_name)
+        result_value = unicode(result_value)
+        data_str += RESULT_FMT % (result_name, result_value, result_name)
+    return ACTION_RESPONSE_FMT % (action_name, name_space, data_str, action_name)
+
+
+##############################################
+# PARSE ACTION RESPONSE
+##############################################
def parse_action_response(xmldata):
    """Parse a soap action response document.

    Returns a dict with keys 'action_name', 'service_type' and
    'arguments' (list of (name, data) tuples), or None if the document
    does not have the expected envelope/body/action structure.
    """
    try:
        # Newlines are stripped so indentation does not show up as
        # whitespace text nodes between the structural elements.
        doc = minidom.parseString(xmldata.replace('\n', ''))
        if doc is None:
            return None
        # envelope -> body -> action element
        action_elem = doc.documentElement.firstChild.firstChild
        arguments = []
        for arg_elem in action_elem.childNodes:
            arguments.append((str(arg_elem.tagName),
                              _get_element_data(arg_elem)))
    except (TypeError, AttributeError):
        return None

    return {
        # localName is "<action>Response"; strip the 8-char suffix.
        'action_name': str(action_elem.localName[:-8]),
        'service_type': str(action_elem.namespaceURI),
        'arguments': arguments,
    }
+
+
+##############################################
+# ERROR RESPONSE
+##############################################
+
+
+# Soap fault template for UPnP error responses.  faultcode/faultstring
+# are fixed ("s:Client"/"UPnPError"); the specific error is carried in
+# the UPnPError detail element (errorCode/errorDescription).
+ERROR_RESPONSE_FMT = u"""<?xml version="1.0" ?>
+<s:Envelope 
+s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding" 
+xmlns:s="http://schemas.xmlsoap.org/soap/envelope">
+<s:Body>
+<s:Fault>
+<faultcode>s:Client</faultcode>
+<faultstring>UPnPError</faultstring>
+<detail>
+<UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
+<errorCode>%s</errorCode>
+<errorDescription>%s</errorDescription>
+</UPnPError>
+</detail>
+</s:Fault>
+</s:Body>
+</s:Envelope>
+"""
+ERROR_RESPONSE_FMT = ERROR_RESPONSE_FMT.replace('\n', '')
+
+
+def create_error_response(error_code, error_description):
+    """This function creates a soap xml UPnP error response.
+
+    error_code -- numeric UPnP error code (see ERROR_CODES)
+    error_description -- human readable description
+
+    Returns a unicode string.
+    """
+    error_code = unicode(error_code)
+    error_description = unicode(error_description)
+    return ERROR_RESPONSE_FMT % (error_code, error_description)
+
+
+##############################################
+# CREATE EVENT MESSAGE
+##############################################
+
+# UPnP event message templates: a propertyset containing one
+# e:property element per evented state variable.
+_EVENT_MSG_XML_FMT = u"""<?xml version="1.0"?>
+<e:propertyset xmlns:e="urn:schemas-upnp-org:event-1-0">
+%s</e:propertyset>"""
+_EVENT_MSG_PROP_FMT = u"<e:property>\n<%s>%s</%s>\n</e:property>\n"
+
+def create_event_message(variables):
+    """This function creates a soap xml UPnP event message.
+
+    variables -- list of recently updated state variables, as
+    (name, data) tuples.
+
+    Returns a unicode string.
+    """
+    data_str = ""
+    for name, data in variables:
+        name = unicode(name)
+        data = unicode(data)
+        data_str += _EVENT_MSG_PROP_FMT % (name, data, name)
+    return _EVENT_MSG_XML_FMT % data_str
+
+
+##############################################
+# PARSE EVENT MESSAGE
+##############################################
+
+def parse_event_message(xmldata):
+    """This parses the xmldata and makes the included information
+    easily accessible.
+
+    Returns a list of (variable_name, data) tuples.
+
+    NOTE(review): unlike parse_action_response(), malformed input is
+    not caught here -- minidom.parseString raises on bad xml (it never
+    returns None, so the None-check below is effectively dead code).
+    Confirm callers handle the exception.
+    """
+    doc = minidom.parseString(xmldata.replace('\n', ''))
+    if doc == None: 
+        return None    
+    property_set_elem = doc.documentElement
+    tuples = []
+    for property_elem in property_set_elem.childNodes:
+        var_elem = property_elem.firstChild
+        data = _get_element_data(var_elem)
+        tuples.append((str(var_elem.tagName), data))
+    return tuples
+
+        
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Ad-hoc smoke test (Python 2 script): round-trips each of the
+    # parse/create helpers above and prints the results.
+
+    REQUEST_XML = """<?xml version="1.0"?>
+<s:Envelope 
+s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" 
+xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
+<s:Body>
+<ns0:SetTarget xmlns:ns0="urn:schemas-upnp-org:service:SwitchPower:1">
+<newTargetValue>True</newTargetValue>
+</ns0:SetTarget>
+</s:Body>
+</s:Envelope>"""
+
+    REQUEST_XML = REQUEST_XML.replace('\n', '')
+    # Parsed twice on purpose: demonstrates the singleton parser is
+    # reusable after reset().
+    print parse_action_request(REQUEST_XML)
+    print parse_action_request(REQUEST_XML)
+
+
+    SERVICE_TYPE = "urn:schemas-upnp-org:service:ServiceType:1"
+    ACTION_NAME = "GetTarget"
+    RESULT_LIST = [('result1', 'True'), ('result2', '4')]
+    RESPONSE_XML = create_action_response(SERVICE_TYPE, 
+                                          ACTION_NAME, RESULT_LIST)
+    print RESPONSE_XML
+    print create_error_response("501", "Action not implemented")
+
+    VARIABLES = [('var1', 'jalla'), ('var2', 'palla')]
+    EVENT_MSG = create_event_message(VARIABLES)
+    print EVENT_MSG
+    print parse_event_message(EVENT_MSG)
+
+    print create_action_request(SERVICE_TYPE, 
+                                ACTION_NAME, VARIABLES)
+
+    print parse_action_response(RESPONSE_XML)
diff --git a/instrumentation/next-share/BaseLib/UPnP/services/__init__.py b/instrumentation/next-share/BaseLib/UPnP/services/__init__.py
new file mode 100644 (file)
index 0000000..2509378
--- /dev/null
@@ -0,0 +1,5 @@
+"""This package contains implementations of UPnP Services."""
+
+from urlservice import URLService
+from switchpower import SwitchPower
+from bookmarkservice import BookmarkService
diff --git a/instrumentation/next-share/BaseLib/UPnP/services/bookmarkservice.py b/instrumentation/next-share/BaseLib/UPnP/services/bookmarkservice.py
new file mode 100644 (file)
index 0000000..66299bb
--- /dev/null
@@ -0,0 +1,78 @@
+# Written by Ingar Arntzen, Norut
+# see LICENSE.txt for license information
+
+"""
+This module implements a Bookmark Service on the local
+network, exported as a UPnP Service.
+"""
+
+import types
+from BaseLib.UPnP.upnpserver import UPnPService
+
+# Service id used when the caller does not supply one.
+_DEFAULT_SERVICE_ID = "Bookmarks"
+
+# Xml templates used by get(): the full document and one list item.
+_XML_DOC = '<?xml version="1.0"?>\n<bookmarks>\n%s</bookmarks>'
+_XML_ITEM = '<item>%s</item>\n'
+
+
+##############################################
+# BOOKMARK SERVICE
+##############################################
+
+class BookmarkService(UPnPService):
+
+    """
+    This class implements a Bookmark Service.
+    It is exported on the local network as an UPnP Service.
+
+    Essentially the service maintains a list of bookmarks.
+
+    Two actions:
+    - GET(): return the complete list as xml data.
+    - POST() : append a new url to the list
+
+    Events:
+    - UPDATE
+    
+    """
+    
+    def __init__(self, service_id=None, service_version=1):
+        if service_id == None:
+            service_id = _DEFAULT_SERVICE_ID
+        UPnPService.__init__(self, service_id, service_version)
+
+        # In-memory bookmark list; not persisted anywhere in this file.
+        self._bookmarks = []
+
+        # Define Event Variable
+        self._update_event = self.define_evented_variable("UPDATE",
+                                                          types.BooleanType, False)
+
+        # Define Actions
+        self.define_action(self.get, 
+                           out_args=[("BOOKMARKS", types.StringType)],
+                           name="GET")
+        self.define_action(self.post,
+                           in_args=[("BOOKMARK", types.StringType)],
+                           name="POST")
+
+    def post(self, bookmark):
+        """Append a new bookmark (url string) to the list and notify
+        subscribers via the UPDATE evented variable."""
+        self._bookmarks.append(bookmark)
+        self._on_update()
+
+    def get(self):
+        """Get the xml string representation of the entire list."""
+        items = ""
+        for bookmark in self._bookmarks:
+            items += _XML_ITEM % bookmark
+        return _XML_DOC % items
+
+    def _on_update(self):
+        """
+        Internal method: Toggles the value of update_event, 
+        in order to notify listeners. Used whenever the list of 
+        bookmarks is updated.
+        """
+        if self._update_event.get() == True:
+            self._update_event.set(False)
+        else:
+            self._update_event.set(True)
diff --git a/instrumentation/next-share/BaseLib/UPnP/services/switchpower.py b/instrumentation/next-share/BaseLib/UPnP/services/switchpower.py
new file mode 100644 (file)
index 0000000..2e0ab20
--- /dev/null
@@ -0,0 +1,58 @@
+# Written by Ingar Arntzen, Norut
+# see LICENSE.txt for license information
+
+"""This module implements a SwitchPower UPnP Server."""
+
+import types
+from BaseLib.UPnP.upnpserver import UPnPService
+
+##############################################
+# SWITCH POWER SERVICE
+##############################################
+
+class SwitchPower(UPnPService):
+
+    """This class implements a simple SwitchPower service.
+    The supported actions essentially allow clients to switch power
+    on and off on a virtual device."""
+
+    def __init__(self, service_id):
+        UPnPService.__init__(self, service_id, 'SwitchPower', 
+                             service_version=1)
+        boolean = types.BooleanType
+
+        # Define EventVariables
+        # Status is evented: subscribers are notified when it changes.
+        self._status = self.define_evented_variable("Status", 
+                                                    boolean, False)
+
+        # Define Actions
+        self.define_action(self.get_status, 
+                           out_args=[("ResultStatus", boolean )],
+                           name="GetStatus")
+        self.define_action(self.get_target, 
+                           out_args=[("RetTargetValue", boolean)],
+                           name="GetTarget")
+        self.define_action(self.set_target, 
+                           in_args=[("NewTargetValue", boolean)],
+                           name="SetTarget")
+
+        # Service State
+        # Target is plain (non-evented) state, unlike Status above.
+        self._target = False
+
+    def get_status(self):
+        """Get the power status of the switch."""
+        self.log("GetStatus %s" % self._status.get())
+        return self._status.get()
+
+    def get_target(self):
+        """Get the target power status for the switch."""
+        self.log("GetTarget %s" % self._target)
+        return self._target
+
+    def set_target(self, new_value):
+        """Set the target power status for the switch.
+        This also sets the status similarly at some point (immediately)."""
+        self.log("SetTarget %s" % new_value)
+        self._target = new_value
+        # Could delay this one
+        self._status.set(new_value)
diff --git a/instrumentation/next-share/BaseLib/UPnP/services/urlservice.py b/instrumentation/next-share/BaseLib/UPnP/services/urlservice.py
new file mode 100644 (file)
index 0000000..c349b45
--- /dev/null
@@ -0,0 +1,42 @@
+# Written by Ingar Arntzen, Norut
+# see LICENSE.txt for license information
+
+"""This module implements a URL UPnP Service."""
+
+import types
+from BaseLib.UPnP.upnpserver import UPnPService
+
+# Initial value for the evented URL variable.
+DEFAULT_URL = "http://vg.no"
+
+
+##############################################
+# URL SERVICE
+##############################################
+
+class URLService(UPnPService):
+
+    """This class implements a simple URL service: a single evented
+    string variable URL with GetURL/SetURL actions."""
+
+    def __init__(self, service_id):
+        UPnPService.__init__(self, service_id, 'URLService', 
+                             service_version=1)
+
+        # Define Evented Variable
+        self._url = self.define_evented_variable("URL", types.StringType, 
+                                                 DEFAULT_URL)
+
+        # Define Actions
+        self.define_action(self.get_url,
+                           out_args=[("URL", types.StringType)],
+                           name="GetURL")
+        self.define_action(self.set_url,
+                           in_args=[("URL", types.StringType)],
+                           name="SetURL")        
+
+    def get_url(self):
+        """Get the URL."""
+        return self._url.get()
+
+    def set_url(self, new_url):
+        """Set the URL."""
+        self._url.set(new_url)
diff --git a/instrumentation/next-share/BaseLib/UPnP/ssdp/__init__.py b/instrumentation/next-share/BaseLib/UPnP/ssdp/__init__.py
new file mode 100644 (file)
index 0000000..3889f11
--- /dev/null
@@ -0,0 +1,4 @@
+"""SSDP package implementing a SSDPServer and a SSDPClient.
+Both SSDPServer and SSDPClient are daemon processes and they
+share some functionality. This is implemented in SSDPDaemon, which
+they both inherit."""
diff --git a/instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpclient.py b/instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpclient.py
new file mode 100644 (file)
index 0000000..36d08b8
--- /dev/null
@@ -0,0 +1,269 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements the SSDP daemon of a UPnP control point.
+"""
+import time
+import uuid as uuid_module
+import ssdpmessage
+import ssdpdaemon
+
+# Log tag for this module.  NOTE(review): appears unused in this file
+# -- verify against the base class before removing.
+_LOG_TAG = "SSDPClient"
+
+##############################################
+# SSDP CLIENT DAEMON
+##############################################
+
+class SSDPClient(ssdpdaemon.SSDPDaemon):
+
+    """
+    This implements the SSDP daemon of UPnP control points.
+    
+    This class is implemented in a non-blocking, event-based manner.
+    Execution is outsourced to the given task_runner.
+    """
+
+    def __init__(self, task_runner, logger=None):
+
+        ssdpdaemon.SSDPDaemon.__init__(self, task_runner, logger)
+
+        # Devices
+        self._ssdp_devices = {} # uuid : SSDPDevice
+
+        # Event Handlers
+        # No-op defaults until set_add_handler/set_remove_handler
+        # install real callbacks.
+        self._add_handler = lambda uuid: None
+        self._remove_handler = lambda uuid: None
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+
+
+    def set_add_handler(self, handler):
+        """Add handler is executed whenever a device is added.
+        The handler is invoked as handler(uuid, location)."""
+        self._add_handler = handler
+
+    def set_remove_handler(self, handler):
+        """Remove handler is executed whenever a device is removed.
+        The handler is invoked as handler(uuid)."""
+        self._remove_handler = handler
+
+    def get_ssdp_device(self, uuid):
+        """Given a uuid, get reference to the local representation of a
+        remote SSDP root device."""
+        return self._ssdp_devices.get(uuid, None)
+
+    def search(self, target="upnp:rootdevice"):
+        """Multicast a SSDP search for root devices on the local network. """
+        msg = ssdpmessage.SearchMessage()
+        msg.init(max_delay=3, st=target)
+        self.multicast(msg)
+
+    def close(self):
+        """Close sockets and cancel tasks."""
+        ssdpdaemon.SSDPDaemon.close(self)
+        for device in self._ssdp_devices.values():
+            device.close()
+
+    ##############################################
+    # OVERRIDE HANDLERS
+    ##############################################
+
+    def startup(self):
+        """Extending Startup by adding Search."""
+        ssdpdaemon.SSDPDaemon.startup(self)
+        self.search()
+
+    def handle_search(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Search message.
+        A control point does not answer searches, so it is ignored."""
+        self.log("IGNORE %s from %s" % (msg.type, sock_addr))
+
+    def handle_reply(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Reply message."""
+        self._handle_announce_or_reply(msg, sock_addr)
+
+    def handle_announce(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Announce message."""
+        self._handle_announce_or_reply(msg, sock_addr)
+
+    def _handle_announce_or_reply(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Announce message
+        or a SSDP Reply message."""
+        # uuid
+        tokens = msg.usn.split(":")
+        if len(tokens) != 5:
+            # use only those announce messages that has a specific
+            # structure :
+            # "uuid:<uuid>::upnp:rootdevice"
+            self.log("IGNORE %s [%s]" % (msg.type, sock_addr[0]))
+            return
+        uuid = uuid_module.UUID(tokens[1])
+        # renew
+        if self._ssdp_devices.has_key(uuid):
+            self._renew_device(uuid, msg.max_age)
+        # new
+        else:
+            # target
+            # NOTE(review): 'target' is only bound for Reply and
+            # Announce messages; any other message type reaching this
+            # point would raise NameError below -- confirm the caller
+            # dispatch guarantees only these two types arrive here.
+            if isinstance(msg, ssdpmessage.ReplyMessage):
+                target = msg.st
+            elif isinstance(msg, ssdpmessage.AnnounceMessage):
+                target = msg.nt
+            ssdp_device = SSDPDevice(self.task_runner,
+                                     uuid, msg.max_age, 
+                                     msg.location, target, 
+                                     msg.osversion, 
+                                     msg.productversion)
+            self._add_device(ssdp_device)
+
+    def handle_unannounce(self, msg, sock_addr):
+        """Handles the receipt of a SSDP UnAnnounce message."""
+        # Handle UnAnnounces for root devices exclusively.
+        # usn = "uuid:73721e4e-0a84-4985-97e2-974b2c50323b"
+        tokens = msg.usn.split(":")
+        if len(tokens) != 2:
+            self.log("IGNORE %s [%s]" % (msg.type, sock_addr[0]))
+            return
+        uuid = uuid_module.UUID(tokens[1])
+        self._remove_device(uuid)
+
+
+    ##############################################
+    # PRIVATE UTILITY
+    ##############################################
+       
+    def _handle_expiry(self, uuid):
+        """A device has expired, causing it to be removed."""
+        self._remove_device(uuid)
+
+    def _add_device(self, ssdp_device):
+        """Add new SSDP root device."""
+        uuid = ssdp_device.uuid
+        self._ssdp_devices[uuid] = ssdp_device
+        ssdp_device.set_expiry_handler(self._handle_expiry)
+        self.log("ADD [%d] %s" % (ssdp_device.max_age, uuid))
+        # Publish Event ADD
+        # NOTE(review): the default add handler (lambda uuid: None)
+        # takes one parameter, but it is scheduled here with two
+        # arguments (uuid, location) -- this raises TypeError unless
+        # set_add_handler() was called first.  Confirm and align.
+        self.task_runner.add_task(self._add_handler, 
+                                  args=(uuid,ssdp_device.location))
+
+    def _renew_device(self, uuid, max_age):
+        """Receive announce from already known device."""
+        self._ssdp_devices[uuid].alive(max_age)
+        self.log("ALIVE [%d] %s" % (max_age, uuid))
+        
+    def _remove_device(self, uuid):
+        """Remove device."""
+        if self._ssdp_devices.has_key(uuid):
+            del self._ssdp_devices[uuid]
+            self.log("REMOVE %s" % (uuid))
+            # Publish Event REMOVE            
+            self.task_runner.add_task(self._remove_handler, 
+                                      args=(uuid,))
+
+
+##############################################
+# SSDP DEVICE
+##############################################
+
+class SSDPDevice:
+
+    """This represents a local view of a remote SSDP root device."""
+
+    def __init__(self, task_runner, 
+                 uuid, max_age, location, search_target, 
+                 os_version, product_version):
+
+        self.uuid = uuid
+        self.location = location
+        self.search_target = search_target
+        self.os_version = os_version
+        self.product_version = product_version
+        self.max_age = max_age
+        # Absolute time (time.time()) at which the device times out.
+        self.expiry = None
+        self._expired = False
+
+        self._task_runner = task_runner
+        # No-op until set_expiry_handler() installs a real callback.
+        self._expiry_handler = lambda uuid: None
+        self._task = None
+        self._new_timeout(max_age)
+
+    # Private Methods
+
+    def _new_timeout(self, max_age):
+        """Register a new liveness timeout for device."""
+        # Cancel old timeout.
+        if self._task:
+            self._task.cancel()
+        # Update expire
+        self.expiry = time.time() + max_age
+        # Create new timeout
+        self._task = self._task_runner.add_delay_task(
+            max_age, self._handle_timeout)
+        
+    def _handle_timeout(self):
+        """Timeout handler: mark expired and notify the owner."""
+        self._expired = True
+        self._expiry_handler(self.uuid)
+
+    # Public Methods
+
+    def set_expiry_handler(self, handler):
+        """Set handler to be executed whenever device has been
+        timed out without any signs of liveness."""
+        self._expiry_handler = handler
+
+    def alive(self, max_age):
+        """Invoked whenever a signal is received that 
+        suggests that the remote device is live and well.
+
+        NOTE(review): _expired is not reset to False here, so a device
+        that once timed out keeps reporting is_alive() == False even
+        after renewal -- confirm intended (the client removes expired
+        devices rather than renewing them).
+        """
+        self._new_timeout(max_age)
+
+    def is_alive(self):
+        """Check if device is alive (local view)."""
+        return not self._expired
+
+    def close(self):
+        """Cancel timeout task associated with device, if any. """
+        if self._task:
+            self._task.cancel()
+        
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == "__main__":
+    
+    # Ad-hoc manual test (Python 2 script): run an SSDPClient on the
+    # local network and print ADD/REMOVE events until interrupted.
+
+    class TestClient:
+        """TestClient wraps SSDPClient to add some event handlers."""
+
+        def __init__(self, ssdp_client):
+            self._ssdp_client = ssdp_client
+            self._ssdp_client.set_add_handler(self.add_handler)
+            self._ssdp_client.set_remove_handler(self.remove_handler)
+
+        def add_handler(self, uuid, location):
+            """Executed when device with given uuid has been added."""
+            print "ADD %s %s" % (uuid, location)
+
+        def remove_handler(self, uuid):
+            """Executed when device with given uuid has been removed."""
+            print "REMOVE %s" % uuid
+
+    class MockLogger:
+        """Mockup Logger object."""
+        def __init__(self):
+            pass
+        def log(self, log_tag, msg):
+            """Log to std out."""
+            print log_tag, msg
+
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    TR = taskrunner.TaskRunner()
+    CLIENT = SSDPClient(TR, MockLogger())
+    TEST = TestClient(CLIENT)
+    TR.add_task(CLIENT.startup)
+    try:
+        TR.run_forever()
+    except KeyboardInterrupt:
+        print
+        CLIENT.close()
new file mode 100644 (file)
index 0000000..285208f
--- /dev/null
@@ -0,0 +1,223 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements a base SSDP Daemon, 
+part of the UPnP architecture.
+"""
+import socket
+import struct
+import ssdpmessage
+
+_MCAST_HOST = '239.255.255.250'  # standard SSDP multicast group (UPnP)
+_MCAST_PORT = 1900               # standard SSDP port
+_MCAST_TTL = 4                   # multicast time-to-live for outgoing packets
+_LOG_TAG = "SSDP"                # tag used for all log output of this module
+
+##############################################
+# SSDP DAEMON
+##############################################
+
+class SSDPDaemon:
+
+    """
+    This implements the base SSDP daemon, part of the UPnP architecture.
+    
+    This class is implemented in a non-blocking, event-based manner.
+    Execution is outsourced to the given task_runner.
+
+    Two UDP sockets are used: one bound to an ephemeral port for
+    unicast send/receive and multicast send, and one bound to the
+    SSDP port (1900) that has joined the SSDP multicast group.
+    Subclasses override the handle_* hooks to implement client or
+    server behaviour.
+    """
+
+    def __init__(self, task_runner, logger=None):
+
+        # TaskRunner
+        self.task_runner = task_runner
+        
+        # Logger
+        self._logger = logger
+        self._log_tag = _LOG_TAG
+
+        # Socket (unicast send/recv + multicast send)
+        self._sock = socket.socket(socket.AF_INET, 
+                                   socket.SOCK_DGRAM)
+        self._sock.setsockopt(socket.SOL_SOCKET, 
+                              socket.SO_REUSEADDR, 1)
+        self._sock.setsockopt(socket.SOL_SOCKET, 
+                              socket.SO_BROADCAST, 1)
+        # Bind to an ephemeral port; the actual port is read back below.
+        self._sock.bind(('', 0))
+
+        # Socket Multicast Recv
+        self._mcast_recv_sock = socket.socket(socket.AF_INET, 
+                                              socket.SOCK_DGRAM,
+                                              socket.IPPROTO_UDP)
+
+        self._mcast_recv_sock.setsockopt(socket.SOL_SOCKET, 
+                                         socket.SO_REUSEADDR, 1)
+        try:
+            self._mcast_recv_sock.setsockopt(socket.SOL_SOCKET, 
+                                             socket.SO_REUSEPORT, 1)
+        except AttributeError:
+            pass # Some systems don't support SO_REUSEPORT
+
+        # Join the SSDP multicast group on all interfaces.
+        mreq = struct.pack("4sl", socket.inet_aton(_MCAST_HOST), 
+                           socket.INADDR_ANY)
+        self._mcast_recv_sock.setsockopt(socket.IPPROTO_IP, 
+                                         socket.IP_ADD_MEMBERSHIP, mreq)
+        self._mcast_recv_sock.setsockopt(socket.IPPROTO_IP, 
+                                         socket.IP_MULTICAST_TTL, _MCAST_TTL)
+        # NOTE(review): on Linux SOL_IP == IPPROTO_IP, so the following
+        # TTL setsockopt repeats the one above -- confirm whether it is
+        # needed for some other platform before removing.
+        self._mcast_recv_sock.setsockopt(socket.SOL_IP, 
+                                         socket.IP_MULTICAST_TTL, _MCAST_TTL)
+        self._mcast_recv_sock.setsockopt(socket.SOL_IP, 
+                                         socket.IP_MULTICAST_LOOP, True)
+
+        self._mcast_recv_sock.bind(('', _MCAST_PORT))
+
+        # Host & Port
+        # NOTE(review): gethostbyname(gethostname()) may resolve to
+        # 127.0.0.1 on some hosts, which would defeat the self-message
+        # filter in _handle_multicast -- TODO confirm.
+        self._host = socket.gethostbyname(socket.gethostname())
+        self._port = self._sock.getsockname()[1]
+
+
+        # Register Tasks for Execution
+        # Both sockets are polled by the task runner; the handlers below
+        # are invoked whenever a datagram is ready to be read.
+        self._rd_task_1 = self.task_runner.add_read_task(self._sock.fileno(), 
+                                                 self._handle_unicast)
+        self._rd_task_2 = self.task_runner.add_read_task(
+            self._mcast_recv_sock.fileno(), 
+            self._handle_multicast)
+
+    ##############################################
+    # PUBLIC PROTOCOL OPERATIONS
+    ##############################################
+
+    def startup(self):
+        """Startup: log the ports this daemon listens on."""
+        fmt = "START Port %d and Port %d (Recv Local Multicast)"
+        self.log( fmt % (self._port, _MCAST_PORT))
+
+    def log(self, msg):
+        """Log msg via the configured logger object, if any."""
+        if self._logger:
+            self._logger.log(self._log_tag, msg)
+
+    def multicast(self, msg):
+        """Multicast a SSDP message (no-op after close())."""
+        if self._sock != None:
+            self.log("MULTICAST %s" % msg.type)
+            self._sock.sendto(msg.dumps(), (_MCAST_HOST, _MCAST_PORT))
+
+    def unicast(self, msg, sock_addr):
+        """Unicast a SSDP message to sock_addr (no-op after close())."""
+        if self._sock != None:
+            self.log("UNICAST %s to %s" % (msg.type, str(sock_addr)))
+            self._sock.sendto(msg.dumps(), sock_addr)
+
+    def get_sock(self):
+        """Return unicast receive socket."""
+        return self._sock
+    
+    def get_mcast_sock(self):
+        """Return multicast receive socket."""
+        return self._mcast_recv_sock
+
+    def get_port(self):
+        """Return SSDP port (the ephemeral unicast port)."""
+        return self._port
+
+    def is_closed(self):
+        """Returns true if SSDPDaemon has been closed."""
+        return True if self._sock == None else False
+
+    def close(self):
+        """Close the SSDP daemon: cancel read tasks and close sockets."""
+        self._rd_task_1.cancel()
+        self._rd_task_2.cancel()
+        if self._mcast_recv_sock:
+            self._mcast_recv_sock.close()
+            self._mcast_recv_sock = None
+        if self._sock:
+            self._sock.close()
+            self._sock = None
+        self.log("CLOSE")
+
+    ##############################################
+    # MESSAGE HANDLERS
+    ##############################################
+
+    def _handle_multicast(self):
+        """Handles the receipt of a multicast SSDP message."""
+        # 1500 bytes covers a typical Ethernet MTU-sized datagram.
+        res = self._mcast_recv_sock.recvfrom(1500)
+        if res:
+            data, sock_addr = res
+            # Ignore Multicast Messages from Self
+            # (messages sent from our own unicast socket are looped back).
+            if sock_addr != (self._host, self._port): 
+                self._handle_message(data, sock_addr)
+
+    def _handle_unicast(self):
+        """Handles the receipt of a unicast SSDP message."""
+        res = self._sock.recvfrom(1500)
+        if res:
+            data, sock_addr = res
+            self._handle_message(data, sock_addr)
+
+    def _handle_message(self, data, sock_addr):
+        """Handles the receipt of both multicast and unicast SSDP 
+        messages.
+
+        Parses the raw datagram and dispatches to the handle_* hook
+        matching the message type. Re-raises parse errors after
+        printing the offending data."""
+        try:
+            msg = ssdpmessage.message_loader(data)
+        except Exception, error: 
+            print "Exception Handle Message %s\n%s\n" % (error, data)
+            raise
+
+        # Handle Message
+        if isinstance(msg, ssdpmessage.SearchMessage):
+            self.handle_search(msg, sock_addr)
+        elif isinstance(msg, ssdpmessage.AnnounceMessage):
+            self.handle_announce(msg, sock_addr)
+        elif isinstance(msg, ssdpmessage.UnAnnounceMessage):
+            self.handle_unannounce(msg, sock_addr)
+        elif isinstance(msg, ssdpmessage.ReplyMessage):
+            self.handle_reply(msg, sock_addr)
+
+    def handle_search(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Search message.
+        To be overridden by subclass."""
+        pass        
+
+    def handle_reply(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Reply message.
+        To be overridden by subclass."""
+        pass
+
+    def handle_announce(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Announce message.
+        To be overridden by subclass."""
+        pass
+
+    def handle_unannounce(self, msg, sock_addr):
+        """Handles the receipt of a SSDP UnAnnounce message.
+        To be overridden by subclass."""
+        pass
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Manual smoke test: run the base daemon and log raw SSDP traffic
+    # seen on the local network until interrupted with Ctrl-C.
+
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    TR = taskrunner.TaskRunner()
+
+    class _MockLogger:
+        """Mock Logger object."""
+        def log(self, log_tag, msg):
+            """Log to std out. """
+            print log_tag, msg
+
+    DAEMON = SSDPDaemon(TR, _MockLogger())
+    TR.add_task(DAEMON.startup)
+    try:
+        TR.run_forever()
+    except KeyboardInterrupt:
+        print
+    TR.stop()
+    DAEMON.close()
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpmessage.py b/instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpmessage.py
new file mode 100644 (file)
index 0000000..07fd5c6
--- /dev/null
@@ -0,0 +1,277 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements the creation and parsing of messages
+required by the SSDP protocol, part of the UPnP architecture.
+"""
+
+##############################################
+# UPNP MESSAGE LOADER
+##############################################
+
+# Message Loader
+def message_loader(data):
+    """
+    Inflates a python SSDP message object from given string. 
+
+    Dispatches on the HTTP start-line (NOTIFY / M-SEARCH / 200 OK)
+    and on the NTS header for NOTIFY messages. Raises Exception if
+    the message type cannot be recognized.
+    """
+    # Only the header section (before the first blank line) is parsed.
+    header = data.split('\r\n\r\n')[0]
+    lines = header.split("\r\n")
+
+    # Put Header Elements in a Map
+    # HTTP header names are case-insensitive, hence lower-cased keys.
+    map_ = {}
+    for line in lines[1:]:
+        if len(line.strip()) > 0:
+            elem_name, elem_value = line.split(":", 1)
+            map_[elem_name.strip().lower()] = elem_value.strip()
+    
+    # Message Type
+    lowercase_firstline = lines[0].lower()
+    if lowercase_firstline.startswith('notify'):
+        if map_['nts'].find("ssdp:alive") >= 0:
+            msg = AnnounceMessage()
+        elif map_['nts'].find("ssdp:byebye") >= 0:
+            msg = UnAnnounceMessage()
+        else:
+            raise Exception, "Unrecognized Message Type"
+
+    elif lowercase_firstline.startswith('http/1.1 200 ok'):        
+        msg = ReplyMessage()
+    elif lowercase_firstline.startswith('m-search'):
+        msg = SearchMessage()
+    else:
+        raise Exception, "Unrecognized Message Type"
+
+    # Let the concrete message class pull its fields from the header map.
+    msg.loads(map_)
+    
+    return msg
+
+
+##############################################
+# SSDP MESSAGES
+##############################################
+
+class SearchMessage:
+
+    """This implements a SSDP Search (M-SEARCH) message."""
+
+    # NOTE(review): UPnP Device Architecture 1.0 specifies the MAN
+    # header value quoted, i.e. MAN: "ssdp:discover" -- some devices
+    # may reject the unquoted form; confirm before changing the wire
+    # format, since peers currently interoperate with this daemon.
+    FMT  = """M-SEARCH * HTTP/1.1\r
+Host: 239.255.255.250:1900\r
+MAN: ssdp:discover\r
+MX: %(max_delay)d\r
+ST: %(st)s\r\n\r\n"""
+
+    def __init__(self):
+        self.type = "SSDP:Search"
+        # Max seconds a responder may delay its reply (MX header).
+        self.max_delay = 10
+        # Search target (ST header); "ssdp:all" matches everything.
+        self.st = "ssdp:all"
+    
+    def init(self, max_delay=10, st="ssdp:all"):
+        """Initialise"""
+        self.max_delay = max_delay
+        self.st = st
+
+    def loads(self, hdr_elements):
+        """Inflate SSDP message from header-element map.
+        Raises KeyError if 'mx' or 'st' headers are missing."""
+        self.max_delay = int(hdr_elements['mx'])
+        self.st = hdr_elements['st']
+
+    def dumps(self):
+        """Dump SSDP message to string."""
+        return SearchMessage.FMT % self.__dict__
+
+    def __str__(self):
+        return self.dumps()
+
+
+
+class ReplyMessage:
+
+    """This implements a SSDP Reply message (200 OK to an M-SEARCH)."""
+
+    FMT = """HTTP/1.1 200 OK\r
+Cache-Control: max-age=%(max_age)d\r
+EXT: \r
+Location: %(location)s\r
+Server: %(osversion)s UPnP/1.0 %(productversion)s\r
+ST: %(st)s\r
+USN: %(usn)s\r\n\r\n"""
+
+    def __init__(self):
+        self.type = "SSDP:Reply"        
+        self.max_age = 1800      # seconds the advertisement stays valid
+        self.location = ""       # URL of the device description document
+        self.st = ""             # search target this reply matches
+        self.osversion = ""      # OS part of the Server header
+        self.productversion = "" # product part of the Server header
+        self.usn = ""            # unique service name
+
+    def init(self, max_age=1800, location="", st="", 
+             osversion="", productversion="", usn=""):
+        """Initialise"""
+        self.max_age = max_age
+        self.location = location
+        self.st = st
+        self.osversion = osversion
+        self.productversion = productversion
+        self.usn = usn
+
+    def dumps(self):
+        """Dump SSDP message to string."""
+        return ReplyMessage.FMT % self.__dict__
+
+    def loads(self, hdr_elements):
+        """Inflate SSDP message from header-element map.
+        Raises KeyError if a required header is missing."""
+        value = hdr_elements['cache-control'].split("=", 1)[1]
+        self.max_age = int(value)
+        self.location = hdr_elements['location']
+        # Split the Server header into OS and product parts.
+        # NOTE(review): if " UPnP/1.0 " is absent, find() returns -1 and
+        # the slicing below silently produces wrong values -- confirm
+        # whether malformed Server headers occur in practice.
+        delimiter = " UPnP/1.0 "
+        server = hdr_elements['server']
+        offset = server.find(delimiter)        
+        self.st = hdr_elements['st']
+        self.osversion = server[:offset]
+        self.productversion = server[offset + len(delimiter):]
+        self.usn = hdr_elements['usn']
+
+    def __str__(self):
+        return self.dumps()
+
+
+
+class AnnounceMessage:
+
+    """This implements a SSDP Announce message (NOTIFY ssdp:alive)."""
+
+    FMT = """NOTIFY * HTTP/1.1\r
+Host: 239.255.255.250:1900\r
+Cache-Control: max-age=%(max_age)d\r
+Location: %(location)s\r
+Server: %(osversion)s UPnP/1.0 %(productversion)s\r
+NTS: ssdp:alive\r
+NT: %(nt)s\r
+USN: %(usn)s\r\n\r\n"""
+
+    def __init__(self):
+        self.type = "SSDP:Announce"        
+        self.max_age = 1800      # seconds the advertisement stays valid
+        self.location = ""       # URL of the device description document
+        self.nt = ""             # notification type
+        self.osversion = ""      # OS part of the Server header
+        self.productversion = "" # product part of the Server header
+        self.usn = ""            # unique service name
+
+    def init(self, max_age=1800, location="", nt="", 
+             osversion="", productversion="", usn=""):
+        """Initialise"""
+        self.max_age = max_age
+        self.location = location
+        self.nt = nt
+        self.osversion = osversion
+        self.productversion = productversion
+        self.usn = usn        
+
+    def dumps(self):
+        """Dump SSDP message to string."""
+        return AnnounceMessage.FMT % self.__dict__
+        
+    def loads(self, hdr_elements):
+        """Inflate SSDP message from header-element map.
+        Raises KeyError if a required header is missing."""
+        value = hdr_elements['cache-control'].split("=", 1)[1]
+        self.max_age = int(value)
+        self.location = hdr_elements['location']
+        # Split the Server header into OS and product parts (same
+        # delimiter-based parse as ReplyMessage.loads, with the same
+        # caveat for malformed Server headers).
+        delimiter = " UPnP/1.0 "
+        server = hdr_elements['server']
+        offset = server.find(delimiter)        
+        self.osversion = server[:offset]
+        self.productversion = server[offset + len(delimiter) :]
+        self.usn = hdr_elements['usn']
+        self.nt = hdr_elements['nt']
+
+    def __str__(self):
+        return self.dumps()
+
+
+
+
+class UnAnnounceMessage:
+    
+    """This implements a SSDP UnAnnounce message (NOTIFY ssdp:byebye)."""
+
+    FMT = """NOTIFY * HTTP/1.1\r
+Host: 239.255.255.250:1900\r
+NTS: ssdp:byebye\r
+NT: %(nt)s\r
+USN: %(usn)s\r\n\r\n"""
+
+    def __init__(self):
+        self.type = "SSDP:UnAnnounce"        
+        self.nt = ""   # notification type
+        self.usn = ""  # unique service name
+
+    def init(self, nt="", usn=""):
+        """Initialise"""
+        self.nt = nt
+        self.usn = usn
+
+    def dumps(self):
+        """Dump SSDP message to string."""
+        return UnAnnounceMessage.FMT % self.__dict__
+
+    def loads(self, hdr_elements):
+        """Inflate SSDP message from header-element map.
+        Raises KeyError if 'nt' or 'usn' headers are missing."""
+        self.nt = hdr_elements['nt']
+        self.usn = hdr_elements['usn']
+
+    def __str__(self):
+        return self.dumps()
+
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == "__main__":
+
+    # Round-trip smoke tests: dump each message type to its wire format
+    # and re-parse it with message_loader, printing both forms.
+
+    # Test SearchMessage
+    SEARCH_MSG = SearchMessage()
+    SEARCH_MSG.init(10)
+    S = SEARCH_MSG.dumps()
+    print S
+    print message_loader(S)
+
+
+    # Test ReplyMessage
+    REPLY_MSG = ReplyMessage()
+    REPLY_MSG.init(location="http://10.0.0.138:80/IGD.xml", 
+                  osversion="SpeedTouch 510 4.0.0.9.0",
+                  productversion="DG233B00011961",
+                  usn="uuid:UPnP-SpeedTouch510::urn:schemas-upnp-org:" \
+                       + "service:WANPPPConnection:1")    
+    S = REPLY_MSG.dumps()
+    print S
+    print message_loader(S)
+
+    # Test AnnounceMessage
+    ANNOUNCE_MSG = AnnounceMessage()
+    ANNOUNCE_MSG.init(location="http://10.0.0.138:80/IGD.xml", 
+                     osversion="SpeedTouch 510 4.0.0.9.0",
+                     productversion="(DG233B00011961)",
+                     usn="uuid:UPnP-SpeedTouch510::urn:schemas-upnp-org:" \
+                          + "service:WANPPPConnection:1",
+                     nt="urn:schemas-upnp-org:service:WANPPPConnection:1")
+    S = ANNOUNCE_MSG.dumps()
+    print S
+    print message_loader(S)
+
+
+    # Test UnAnnounceMessage
+    UNANNOUNCE_MSG = UnAnnounceMessage()
+    USN = "uuid:UPnP-SpeedTouch510::urn:schemas-upnp-org:" \
+        + "service:WANPPPConnection:1"
+    NT = "urn:schemas-upnp-org:service:WANPPPConnection:1"
+    UNANNOUNCE_MSG.init( usn=USN, nt=NT)
+                     
+    S = UNANNOUNCE_MSG.dumps()
+    print S
+    print message_loader(S)
diff --git a/instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpserver.py b/instrumentation/next-share/BaseLib/UPnP/ssdp/ssdpserver.py
new file mode 100644 (file)
index 0000000..48ffe5f
--- /dev/null
@@ -0,0 +1,283 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements the SSDP Server daemon, 
+part of the UPnP architecture.
+"""
+import ssdpmessage
+import ssdpdaemon
+
+# SSDP Message Configuration
+
+# Templates for the (target, usn) pairs advertised per UPnP entity.
+# Each template is expanded against a device's/service's __dict__.
+
+# Special advertisement for the root device.
+_ROOT_CONF_TEMPL =  { 
+    'target':  "upnp:rootdevice", 
+    'usn': "uuid:%(uuid)s::upnp:rootdevice",
+    }
+
+# Per-device advertisement keyed on the device uuid alone.
+_DEVICE_CONF_TEMPL_1 = {
+    'target': "uuid:%(uuid)s",
+    'usn': "uuid:%(uuid)s",
+    }
+
+# Per-device advertisement keyed on the device type urn.
+_DEVICE_CONF_TEMPL_2 = {
+    'target':  "urn:%(device_domain)s:device:" + \
+        "%(device_type)s:%(device_version)s", 
+    'usn': "uuid:%(uuid)s::urn:%(device_domain)s:device:" + \
+        "%(device_type)s:%(device_version)s",
+    }
+
+# Per-service advertisement keyed on the service type urn.
+_SERVICE_CONF_TEMPL = {
+    'target':  "urn:schemas-upnp-org:service:" + \
+        "%(service_type)s:%(service_version)s", 
+    'usn': "uuid:%(uuid)s::urn:schemas-upnp-org:service:" + \
+        "%(service_type)s:%(service_version)s",
+    }
+
+_MAX_DELAY = 4    # MX value advertised in searches (seconds)
+_MAX_AGE = 1800   # advertised cache lifetime (seconds)
+# Re-announce at 90% of max-age so peers never see us expire.
+_REANNOUNCE_AGE = _MAX_AGE * 0.9
+_SSDP_SERVER_CONFIG = {
+    'max_delay': _MAX_DELAY,
+    'max_age': _MAX_AGE,
+    }
+
+_LOG_TAG = "SSDPServer"
+
+
+def _create_msg_config(config_template, kwargs):
+    """Create a single message config dict from a template and 
+    some keywords.
+
+    Returns a dict with 'target' and 'usn' keys, produced by expanding
+    the template's %-placeholders against the kwargs mapping."""
+    return {
+        'target': config_template['target'] % kwargs,
+        'usn': config_template['usn'] % kwargs
+        }
+
+def _create_msg_configs(root_device):
+    """Create all message configs for all devices and services.
+
+    Returns a list of {'target', 'usn'} dicts: one special entry for the
+    root device, two per device (uuid + device-type urn), and one per
+    service."""
+    # Expect rootdevice to be the root of a dictionary hierarchy,
+    # representing the nested organisation of devices and services.
+    # Create 1 special message config for root device
+    configs = [_create_msg_config(_ROOT_CONF_TEMPL, root_device.__dict__)]
+    # Walk the device/service hierarchy (incl. rootdevice)
+    device_queue = [root_device]
+    while device_queue:
+        device = device_queue.pop()
+        # Iterate recursively over all devices (top-down/breath-first)        
+        # NOTE(review): list.pop() removes from the end, so this is
+        # actually depth-first; harmless since every device is visited,
+        # but the comment above is misleading.
+        device_queue += device.get_devices()
+        # Create two messages configs per device
+        conf_1 = _create_msg_config(_DEVICE_CONF_TEMPL_1, device.__dict__)
+        conf_2 = _create_msg_config(_DEVICE_CONF_TEMPL_2, device.__dict__)
+        configs += [conf_1, conf_2]
+        # Create one message config per service in device
+        # todo : should really only create one message per service type
+        for service in device.get_services():
+            # Service templates need the owning device's uuid.
+            service.uuid = device.uuid
+            conf = _create_msg_config(_SERVICE_CONF_TEMPL, service.__dict__)
+            configs.append(conf)
+    return configs
+
+
+def _initialise_message(ssdp_config, msg):
+    """Utility method for initialising SSDP messages with common data.
+
+    Fills in location, os/product version and max-age from ssdp_config;
+    the caller sets the per-advertisement fields (st/nt, usn) itself.
+    Returns the same msg object for convenience."""
+    msg.init (
+        location=ssdp_config['location'], 
+        osversion=ssdp_config['osversion'],
+        productversion = ssdp_config['productversion'],
+        max_age=ssdp_config['max_age']
+        )
+    return msg
+
+
+##############################################
+# SSDP SERVER
+##############################################
+
+class SSDPServer(ssdpdaemon.SSDPDaemon):
+
+    """
+    This implements the SSDP server deamon, part of the UPnP architecture.
+    
+    This class is implemented in a non-blocking, event-based manner.
+    Execution is outsourced to the given task_runner.
+    """
+
+    def __init__(self, task_runner, logger=None):
+        ssdpdaemon.SSDPDaemon.__init__(self, task_runner, logger)
+        # Service Manager
+        self._sm = None
+        # Announce Timeout Task
+        self._timeout_task = None
+
+        self._root_device = None
+        self._config = None
+
+    def set_service_manager(self, service_manager):
+        """The service manger initialises SSDPServer with a
+        reference to itself."""
+        self._sm = service_manager
+
+    ##############################################
+    # PRIVATE PROTOCOL OPERATIONS
+    ##############################################
+
+    def startup(self):
+        """Extends superclass startup  when taskrunner starts."""
+        ssdpdaemon.SSDPDaemon.startup(self)
+        # RootDevice
+        self._root_device = self._sm.get_root_device()
+        # Config
+        self._config = _SSDP_SERVER_CONFIG
+        self._config['location'] = self._sm.get_description_url()
+        self._config['osversion'] = self._sm.get_os_version()
+        self._config['productversion'] = self._sm.get_product_version()
+        # Initial Announce
+        self.announce()
+
+    ##############################################
+    # OVERRIDE HANDLERS 
+    ##############################################
+
+    def handle_search(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Search message."""
+
+        # Create Reply Message
+        reply_message = ssdpmessage.ReplyMessage()
+        _initialise_message(self._config, reply_message)
+
+        # Decide the number of reply messages, and their configuration.
+
+        if msg.st == "ssdp:all":
+            # Reply messages for all devices and services
+            configs = _create_msg_configs(self._root_device) 
+        elif msg.st == "upnp:rootdevice":
+            # Reply only single special message for root device
+            configs = [_create_msg_config(_ROOT_CONF_TEMPL, \
+                                          self._root_device.__dict__)]
+        else:
+            device_type = msg.st.split(':')[-2]
+            self.log("IGNORE %s %s [%s]" % (msg.type, 
+                                            device_type, sock_addr[0]))
+            return
+        
+        device_type = msg.st.split(':')[-2]
+        self.log("RECEIVE %s %s [%s]" % (msg.type, 
+                                         device_type, sock_addr[0]))
+
+        # Send Replies
+        for conf in configs:
+            reply_message.st = conf['target']
+            reply_message.usn = conf['usn']
+            self.unicast(reply_message, sock_addr)
+
+    def _handle_reply(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Reply message."""
+        self.log("IGNORE %s from %s" % (msg.type, sock_addr))
+
+    def _handle_announce(self, msg, sock_addr):
+        """Handles the receipt of a SSDP Announce message."""
+
+        self.log("IGNORE %s from %s" % (msg.type, sock_addr))
+
+    def _handle_unannounce(self, msg, sock_addr):
+        """Handles the receipt of a SSDP UnAnnounce message."""
+        self.log("IGNORE %s from %s" % (msg.type, sock_addr))
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+        
+    def announce(self):
+        """Multicast SSDP announce messages."""
+        # Reset timeout task for next announce
+        if self._timeout_task:
+            self._timeout_task.cancel()
+        # Register new announce timeout
+        self._timeout_task = self.task_runner.add_delay_task(
+            _REANNOUNCE_AGE, 
+            self.announce
+            )
+
+        msg = ssdpmessage.AnnounceMessage()
+        _initialise_message(self._config, msg)
+        for conf in _create_msg_configs(self._root_device):
+            msg.nt = conf['target']
+            msg.usn = conf['usn']
+            self.multicast(msg)
+
+    def unannounce(self):
+        """Multicast SSDP unannounce messages."""
+        msg = ssdpmessage.UnAnnounceMessage()
+        for conf in _create_msg_configs(self._root_device):
+            msg.nt = conf['target']
+            msg.usn = conf['usn']
+            self.multicast(msg)
+               
+    def close(self):
+        """Close the SSDP Server deamon. Send unannounce messages."""
+        self.unannounce()
+        ssdpdaemon.SSDPDaemon.close(self)
+       
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Manual smoke test: run the SSDP server with mock device/service
+    # objects and log traffic until interrupted with Ctrl-C.
+
+    import uuid
+    class MockRootDevice:
+        """Mockup root device."""
+        def __init__(self):
+            self.uuid = uuid.uuid1()
+            self.device_domain = "schemas-upnp-org"
+            self.device_type = "Basic"
+            self.device_version = 1
+        def get_devices(self):
+            """Get mock devices."""
+            return []
+        def get_services(self):
+            """Get mock services."""
+            return []
+
+    class MockServiceManager:
+        """Mock up service manager."""
+        def __init__(self):
+            pass
+        def get_root_device(self):
+            """Get mock root device."""
+            return MockRootDevice()
+        def get_description_url(self):
+            """Get mock description URL."""
+            return "http://192.168.1.235:44444/description.xml"
+        def get_os_version(self):
+            """Get mock os version."""
+            return "linux 1.0"
+        def get_product_version(self):
+            """Get mock product version."""
+            return "product 1.0"
+        def set_ssdp_port(self, port):
+            """Set mock Port."""
+            pass
+
+    class MockLogger:
+        """MockLogger object."""
+        def __init__(self):
+            pass
+        def log(self, log_tag, msg):
+            """Log to std out."""
+            print log_tag, msg
+
+    
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    TR = taskrunner.TaskRunner()
+    SM = MockServiceManager()
+    SERVER = SSDPServer(TR, logger=MockLogger())
+    SERVER.set_service_manager(SM)
+    TR.add_task(SERVER.startup)
+    try:
+        TR.run_forever()
+    except KeyboardInterrupt:
+        print
+    TR.stop()
+    SERVER.close()
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpclient/__init__.py b/instrumentation/next-share/BaseLib/UPnP/upnpclient/__init__.py
new file mode 100644 (file)
index 0000000..2ad293a
--- /dev/null
@@ -0,0 +1,3 @@
+"""UPnPClient package."""
+
+from upnpclient import UPnPClient
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpclient/httpserver.py b/instrumentation/next-share/BaseLib/UPnP/upnpclient/httpserver.py
new file mode 100644 (file)
index 0000000..d150497
--- /dev/null
@@ -0,0 +1,104 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""This module implements a HTTP Server for the UPnP Client 
+(UPnP Control Point)."""
+
+import urlparse
+import socket
+import uuid
+import BaseLib.UPnP.common.upnpsoap as upnpsoap
+import BaseLib.UPnP.common.asynchHTTPserver as httpserver
+
+##############################################
+# REQUEST HANDLER
+##############################################
+
+class _RequestHandler(httpserver.AsynchHTTPRequestHandler):
+    """HTTP Request Handler for UPnP Client.
+
+    Handles GENA event NOTIFY requests of the form
+    /<device_uuid>/<service_id> and forwards the parsed notification
+    to the server (and from there to the UPnPClient)."""
+
+    def do_NOTIFY(self):
+        """Respond to NOTIFY request.
+
+        (Method name follows the BaseHTTPRequestHandler do_<METHOD>
+        dispatch convention.) Replies 200 on a well-formed path,
+        500 otherwise."""
+        url = urlparse.urlparse(self.path)
+        path = url.path.strip('/')
+        tokens = path.split('/')
+        if len(tokens) == 2: 
+
+            # Parse Request
+            device_uuid = uuid.UUID(tokens[0])
+            service_id = tokens[1]
+
+            # Sid
+            # Subscription id arrives as "uuid:<sid>"; keep only the uuid.
+            sid = None
+            if self.headers.has_key('sid'):
+                tokens =  self.headers['sid'].split(':')
+                if len(tokens) == 2:
+                    sid = uuid.UUID(tokens[1])
+
+            # Seq                
+            # Event sequence number; -1 when the SEQ header is absent.
+            seq = int(self.headers.get('seq', '-1'))
+
+            # Body
+            body_bytes = int(self.headers.get('content-length', '0'))
+            body = self.rfile.read(body_bytes) 
+            var_list = upnpsoap.parse_event_message(body)
+
+            # Process Notification
+            self.server.handle_notification(device_uuid, service_id, 
+                                            sid, seq, var_list)
+
+            # Log
+            msg = "NOTIFY %s [%s]" % (service_id, self.client_address[0])
+            self.server.log(msg)
+
+            # Send Response
+            # Peer may already have gone away; ignore socket errors.
+            try:
+                self.send_response(200)
+                self.end_headers()
+                self.request.close()
+            except socket.error:
+                pass
+
+        else:
+            # NOTE(review): a malformed path is answered with 500; 404
+            # might be more conventional -- confirm peers' expectations
+            # before changing.
+            try:
+                self.send_response(500)
+                self.end_headers()
+                self.request.close()
+            except socket.error:
+                pass
+
+
+
+##############################################
+# HTTP SERVER
+##############################################
+
+_HTTP_PORT = 44445  # fixed listen port for GENA event callbacks
+
+class HTTPServer(httpserver.AsynchHTTPServer):
+    """HTTP Server for the UPnP Client.
+
+    Receives GENA event notifications on _HTTP_PORT and forwards them
+    to the owning UPnPClient instance."""
+
+    def __init__(self, upnp_client, task_runner, logger=None):
+
+        httpserver.AsynchHTTPServer.__init__(self, task_runner,
+                                             _HTTP_PORT,
+                                             _RequestHandler, logger=logger)
+
+        self._upnp_client = upnp_client
+        # Base callback URL advertised to devices when subscribing.
+        self._base_event_url = "http://%s:%d/" % (self.get_host(), 
+                                                  self.get_port())
+
+
+    def startup(self):
+        """Extending Startup."""
+        httpserver.AsynchHTTPServer.startup(self)
+        self.log("URL %s" % self._base_event_url)
+
+    def handle_notification(self, device_uuid, service_id, sid, seq, var_list):
+        """Notification forwarded to UPnPClient."""
+        self._upnp_client.handle_notification(device_uuid, service_id, 
+                                               sid, seq, var_list)
+
+    def get_base_event_url(self):
+        """Get base event URL."""
+        return self._base_event_url
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpclient.py b/instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpclient.py
new file mode 100644 (file)
index 0000000..7a2f13d
--- /dev/null
@@ -0,0 +1,479 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""UPnP Client (Control Point).""" 
+
+
+import urlparse
+import BaseLib.UPnP.ssdp.ssdpclient as ssdpclient
+import BaseLib.UPnP.common.asynchHTTPclient as httpclient
+import xmldescriptionparser as xmlparser
+import upnpservicestub
+import httpserver
+
+_HTTP_GET_REQUEST_FMT = """GET %s HTTP/1.1\r
+HOST: %s:%s\r
+ContentLength: 0\r\n\r\n"""
+
+_HTTP_200_RESPONSE = "HTTP/1.1 200 OK"
+
+_LOG_TAG = "UPnPClient"
+
+class _Logger:
+    """Internal Logger presented to internal modules."""
+    def __init__(self, logger):
+        self._logger = logger
+    def log(self, log_tag, msg):
+        """
+        UPnPClient logtag is atted to log info from 
+        internal modules."""
+        if self._logger:
+            self._logger.log(_LOG_TAG, log_tag, msg)
+
+##############################################
+# SERVICE STUB CACHE
+##############################################
+
+class ServiceStubCache:
+
+    """
+    A ServiceStub is uniquely defined by device_uuid and
+    service_id. This cash holds a list of unique service stub instances. 
+    """
+
+    def __init__(self):
+        self._stubs = []
+
+    def exists(self, device_uuid, service_id):
+        """Returns true if given stub identified by device_uuid and service_id
+        is available in the cache."""
+        return True if self._get_index(device_uuid, service_id) > -1 else False
+
+    def lookup(self, device_uuid, service_id):
+        """Lookup ServiceStub in Cache."""
+        index = self._get_index(device_uuid, service_id)
+        if index == None:
+            return None
+        else:
+            return self._stubs[index]
+
+    def insert(self, stub):
+        """
+        Insert Service Stub in place if Service Stub 
+        already exists in cache.
+        """
+        index = self._get_index(stub.get_device_uuid(), stub.get_service_id())
+        if index == None:
+            self._stubs.append(stub)
+        else:
+            self._stubs.remove(index)
+            self._stubs.insert(index, stub)
+
+    def remove(self, device_uuid, service_id):
+        """Remove Service Stub from cache."""
+        index = self._get_index(device_uuid, service_id)
+        if index != None:
+            self._stubs.remove(index)
+
+    def _get_index(self, device_uuid, service_id):
+        """
+        Get the index of Service Stub with matching 
+        device_uuid and service_id.
+        If none exists return None. Else return index.
+        """
+        for i in range(len(self._stubs)):
+            if self._stubs[i].get_device_uuid() != device_uuid: 
+                continue
+            if self._stubs[i].get_service_id() != service_id: 
+                continue
+            return i
+        return None
+            
+
+
+##############################################
+# UPNP CLIENT
+##############################################
+
+class UPnPClient:
+    
+    """UPnP Client (Control Point) keeps an update view of the local network,
+    in terms of visible UPnP devices and services. UPnP client also provides 
+    stub implementeation for remote services, through which actions and events
+    are communicated."""
+    
+    def __init__(self, task_runner, logger=None):
+
+        # Logging
+        self.logger = _Logger(logger)
+
+        # Task Runner
+        self.task_runner = task_runner
+
+        # HTTP Server
+        self._https = httpserver.HTTPServer(self, task_runner, 
+                                            logger=self.logger)
+
+        # SSDP Client
+        self._ssdpc = ssdpclient.SSDPClient(task_runner, logger=self.logger)
+        self._ssdpc.set_add_handler(self._handle_ssdpc_add_device)
+        self._ssdpc.set_remove_handler(self._handle_ssdpc_remove_device)
+
+        # Non-blocking HTTP Client
+        self._asynch_httpc = httpclient.AsynchHTTPClient(task_runner)
+
+        # Blocking HTTP Client
+        self.synch_httpc = httpclient.SynchHTTPClient(self._asynch_httpc)
+
+        # Pending Non-blocking HTTP Requests
+        self._pending = {} # rid: uuid
+
+        # UPnPDevice Specifications
+        self._device_map = {} # uuid:{}
+
+        # Service Stubs (cache)
+        self._stub_cache = ServiceStubCache()
+
+        # Startup by TaskRunner
+        self.task_runner.add_task(self.startup)
+    
+    def startup(self):
+        """Startup UPnP Client, by starting internal modules http server and
+        ssdpclient."""
+        self._https.startup()
+        self._ssdpc.startup()
+
+    def search(self):
+        """Submit a new search for devices. Non-blocking."""
+        self.task_runner.add_task(self._ssdpc.search)
+        
+    ##############################################
+    # PUBLIC API
+    ##############################################
+
+    def get_base_event_url(self):
+        """Get URL where notifications from remote services will be accepted."""
+        return self._https.get_base_event_url()
+
+    def get_device_uuids(self):
+        """Get uuids of all known devices."""
+        return self._device_map.keys()
+
+    def get_service_types(self):
+        """Get list of unique service types of live services discovered by the 
+        UPnPClient."""
+        list_ = []
+        for d_uuid, s_id, s_type in self._get_all_services():
+            if not s_type in list_:
+                list_.append(s_type)
+        return list_
+
+    def get_service_ids(self):
+        """Get all service ids of live services discovered by the UPnPClient."""
+        list_ = []
+        for d_uuid, s_id, s_type in self._get_all_services():
+            if not s_id in list_:
+                list_.append(s_id)
+        return list_
+    
+    def get_device(self, uuid):
+        """Given uuid.UUID return device representation (dictionary) - 
+        if such a device has been discovered."""
+        return self._device_map.get(uuid, None)
+
+    def get_services_by_type(self, service_type):
+        """Get all service stubs of live services, given service type."""
+        # TODO : By using the non-blocking HTTPClient the
+        # underlying http requests could be made in paralell.
+        stub_list = []
+        for d_uuid, s_id, s_type in self._get_all_services():
+            if s_type == service_type:
+                stub = self._get_service_stub(d_uuid, s_id)
+                if stub:
+                    stub_list.append(stub)
+        return stub_list
+            
+    def get_services_by_short_id(self, short_service_id):
+        """Get all service stubs of live services, given short service id."""
+        service_id = "urn:upnp-org:serviceId:" + short_service_id
+        return  self.get_services_by_id(service_id)
+
+    def get_services_by_id(self, service_id):
+        """Get all service stubs of live services, given service id."""
+        # TODO : By using the non-blocking HTTPClient the
+        # underlying http requests could be made in paralell.
+        # Alternatively, do the common thing first and then only
+        # if that fails, do the uncommon thing.
+        stub_list = []
+        for d_uuid, s_id, s_type in self._get_all_services():
+            if s_id == service_id:
+                stub = self._get_service_stub(d_uuid, s_id)
+                if stub:
+                    stub_list.append(stub)
+        return stub_list
+
+    def get_service(self, device_uuid, service_id):
+        """Get service stub uniquely defined by device_uuid 
+        (uuid.UUID) and full service_id"""
+        return self._get_service_stub(device_uuid, service_id)
+
+    def close(self):
+        """Close UPnPClient."""
+        self._https.close()
+        self._ssdpc.close()
+        self._asynch_httpc.close()
+
+
+    ##############################################
+    # PRIVATE UTILITY
+    ##############################################
+
+    def _get_all_services(self):
+        """Return all services know to UPnPClient. Return tuples of
+        device_uuid, service_id, service_type."""
+        tuples = []
+        for device in self._device_map.values():            
+            for service in device['services']:
+                tuples.append((device['uuid'], service['serviceId'], 
+                               service['serviceType']))
+        return tuples
+
+    def _get_cached_stub(self, device_uuid, service_id):
+        """Get service stub instance from cache."""
+        # Check Device Map
+        if not self._device_map.has_key(device_uuid):
+            return None
+        # Check Cache
+        return self._stub_cache.lookup(device_uuid, service_id)
+
+    def _get_device_and_service(self, device_uuid, service_id):
+        """Get device description (dictionary) and service description
+        (dictionary)."""
+        # Check Device Map
+        if not self._device_map.has_key(device_uuid):
+            return None, None
+        # Check if Device has Service with given service_id
+        service = None
+        device = self._device_map[device_uuid]
+        for service_dict in device['services']:
+            if service_dict['serviceId'] == service_id:
+                service = service_dict
+                break
+        return device, service
+        
+    def _get_service_stub(self, device_uuid, service_id):
+        """Get service stub. If necessary, download service
+        description, parse it and instantiate service stub."""
+        # Check Cache
+        stub = self._get_cached_stub(device_uuid, service_id)
+        if stub != None:
+            return stub
+
+        # Get device description and service description from device description
+        device, service = self._get_device_and_service(device_uuid, service_id)
+        if service == None:
+            return None
+
+        # Fetch Service Description and build ServiceStub (Blocking)
+        url = urlparse.urlparse(service['SCPDURL'])
+        http_request = _HTTP_GET_REQUEST_FMT % (url.path, 
+                                                url.hostname, url.port)
+        status, reply = self.synch_httpc.request(url.hostname, 
+                                                  url.port, http_request)
+        xml_data = ""
+        if status == httpclient.SynchHTTPClient.OK:
+            header, xml_data = reply
+            if not header[:len(_HTTP_200_RESPONSE)] == _HTTP_200_RESPONSE:
+                return None
+        elif status == httpclient.SynchHTTPClient.FAIL:
+            return None
+
+        # Parse XML Data.
+        service_spec = xmlparser.parse_service_description(xml_data)
+
+        # Create Service Stub
+        stub = upnpservicestub.UPnPServiceStub(self, device, 
+                                               service, service_spec)
+        self._stub_cache.insert(stub)
+        return stub
+
+
+    ##############################################
+    # PRIVATE HANDLERS
+    ##############################################
+
+    def _handle_ssdpc_add_device(self, uuid, location):
+        """A new device has been added by the SSDP client."""
+         # Check Location
+        url = urlparse.urlparse(location)
+        if (url.hostname == None): 
+            return
+        if (url.port == None): 
+            return        
+        # Dispatch Request Device Description
+        # The UPnP specification specifies that path is sent
+        # in the first line of the request header, as opposed 
+        # to the full location. Still, at least one 3'rd party 
+        # implementation expects the full location. Therefore 
+        # we send two requests to be sure. 
+        # TODO: only send the second if the first fails.
+        request_1 = _HTTP_GET_REQUEST_FMT % (url.path, url.hostname, url.port)
+        request_2 = _HTTP_GET_REQUEST_FMT % (location, url.hostname, url.port)
+        rid_1 = self._asynch_httpc.get_request_id()
+        rid_2 = self._asynch_httpc.get_request_id()
+
+        self._asynch_httpc.request(rid_1, url.hostname, url.port, 
+                                   request_1, self._handle_httpc_abort, 
+                                   self._handle_httpc_response)
+        self._asynch_httpc.request(rid_2, url.hostname, url.port, 
+                                   request_2, self._handle_httpc_abort, 
+                                   self._handle_httpc_response)
+
+        # Pending
+        self._pending[rid_1] = (uuid, location)
+        self._pending[rid_2] = (uuid, location)
+
+    def _handle_ssdpc_remove_device(self, uuid):
+        """A device has been removed by the SSDP."""
+       # Check if a request happens to be pending
+        found_rids = []
+        for rid, (uuid_, loc) in self._pending.items():
+            if uuid_ == uuid: 
+                found_rids.append(rid)
+        if found_rids:
+            for rid in found_rids:
+                del self._pending[rid]
+        # Remove from deviceMap
+        if self._device_map.has_key(uuid):
+            del self._device_map[uuid]
+
+    def _handle_httpc_response(self, rid, header, body):
+        """A http response is received by the 
+        asynchronous http client."""
+        uuid, location = self._pending[rid]
+        del self._pending[rid]
+
+        if self._device_map.has_key(uuid):
+            # Second response
+            return
+
+        # Check 200 OK
+        if header[:len(_HTTP_200_RESPONSE)] == _HTTP_200_RESPONSE:
+            device = xmlparser.parse_device_description(body, location)
+            # Check that announce uuid matches uuid from device description
+            if (uuid == device['uuid']):        
+                self._device_map[uuid] = device
+
+    def _handle_httpc_abort(self, rid, error, msg):
+        """The asynchronous http client reports the abort of a http request. """
+        del self._pending[rid]
+
+    def handle_notification(self, device_uuid, service_id, sid, seq, var_list):
+        """Httpserver delivers an event notification. UPnP delegates it to
+        the appropriate stub."""
+        stub = self._get_cached_stub(device_uuid, service_id)
+        stub.notify(sid, seq, var_list)
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == "__main__":
+
+    import BaseLib.UPnP.common.upnplogger as upnplogger
+    LOGGER = upnplogger.get_logger()
+    
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    TR = taskrunner.TaskRunner()
+    CLIENT = UPnPClient(TR, logger=LOGGER)
+    
+    import threading
+    import time
+    import exceptions
+    import traceback
+
+    def test_reflection(client):
+        """Test reflection-like api."""
+        for uuid in client.get_all_device_uuids():
+            print client.get_device(uuid)
+        print client.get_all_service_types()
+        print client.get_all_service_ids()
+
+    def test_service_id(client):
+        """Test service_id api."""
+        list_ = []
+        list_ += client.get_services_by_short_id("URLService")
+        list_ += client.get_services_by_short_id("MySwitchPower")
+        list_ += client.get_services_by_short_id("Dimming:1")
+        list_ += client.get_services_by_short_id("SwitchPower:1")
+        return list_
+
+    def test_service_type(client):
+        """Test service_type api."""
+        list_ = []
+        type_1 = "urn:schemas-upnp-org:service:URLService:1"
+        type_2 = "urn:schemas-upnp-org:service:SwitchPower:1"
+        type_3 = "urn:schemas-upnp-org:service:Dimming:1"
+        list_ += client.get_services_by_type(type_1)
+        list_ += client.get_services_by_type(type_2)
+        list_ += client.get_services_by_type(type_3)
+        return list_
+
+    def print_stub_list(list_):
+        """Print the given list of stubs."""
+        for stub in list_:
+            for sv_name in stub.get_sv_names():
+                print stub.get_sv_def(sv_name)
+            for action_name in stub.get_action_names():
+                print stub.get_action_def(action_name)
+
+    def event_handler(sv_name, seq, obj):
+        """Simple event handler."""
+        LOGGER.log("TEST", "", "Event %s %d %s" % (sv_name, seq, str(obj)))
+
+    def test_swp_action(client):
+        """Test SwitchPower action api."""
+        services = client.get_services_by_short_id("MySwitchPower")
+        #services = client.get_services_by_short_id("SwitchPower:1")
+        if not services: 
+            return
+        swp_service = services[0]
+
+        swp_service.subscribe(event_handler)
+        swp_service.renew()
+        swp_service.action("SetTarget", [True])
+        swp_service.action("GetStatus")
+        swp_service.unsubscribe(event_handler)
+
+    class Test:
+        """Tester."""
+        def __init__(self, client):
+            self.client = client
+
+        def run(self):
+            """Run testeer."""
+            LOGGER.log("TEST", "", "Start")
+            time.sleep(4)
+            #test_reflection(self.client)
+            #stub_list = test_service_id(self.client)
+            #stub_list = test_service_type(self.client)
+            #print_stub_list(stub_list)
+            test_swp_action(self.client)
+            time.sleep(4)
+            LOGGER.log("TEST", "", "Stop")
+
+
+    TEST = Test(CLIENT)
+    THREAD = threading.Thread(target=TEST.run)
+    THREAD.setDaemon(True)
+    THREAD.start()
+    try:
+        TR.run_forever()
+    except KeyboardInterrupt, w:
+        print
+    except exceptions.Exception, e:
+        traceback.print_exc()
+    CLIENT.close()
+    TR.stop()
+    THREAD.join()
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpclientconsole.py b/instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpclientconsole.py
new file mode 100644 (file)
index 0000000..35d2126
--- /dev/null
@@ -0,0 +1,68 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements a console interface to a UPnP Client.
+"""
+
+##############################################
+# UPNP CLIENT CONSOLE
+##############################################
+
+from upnpclient import UPnPClient
+from BaseLib.UPnP.common.objectconsole import ObjectConsole
+from BaseLib.UPnP.common.taskrunner import TaskRunner
+
+class UPnPClientConsole:
+
+    """This class wraps ObjectConsole to implement a 
+    custom console for UPnP Client (Control Point)."""
+
+    def __init__(self):
+
+        # Task Runner (drives the client; executed by _run below)
+        self._task_runner = TaskRunner()
+
+        # UPnP Client      
+        self._client = UPnPClient(self._task_runner)
+        
+        # Console Namespace: the subset of the client API exposed
+        # to the interactive console user.
+        name_space = {}
+        name_space['get_device_uuids'] = self._client.get_device_uuids
+        name_space['get_service_types'] = self._client.get_service_types
+        name_space['get_service_ids'] = self._client.get_service_ids
+        name_space['get_device'] = self._client.get_device
+        name_space['get_services_by_type'] = self._client.get_services_by_type
+        name_space['get_services_by_id'] = self._client.get_services_by_id
+        name_space['get_services_by_short_id'] = \
+            self._client.get_services_by_short_id
+        name_space['get_service'] = self._client.get_service
+        name_space['search'] = self._client.search
+
+        # ObjectConsole invokes _run/_stop around the interactive session.
+        self._console = ObjectConsole(self, name_space, 
+                                      run="_run", 
+                                      stop="_stop", name="UPnP Client")
+
+    def _run(self):
+        """Run the TaskRunner."""
+        self._task_runner.run_forever()
+
+    def _stop(self):
+        """Stop UPnPClient and TaskRunner."""
+        self._client.close()
+        self._task_runner.stop()
+
+    def run(self):
+        """Runs the UPnP Console."""
+        self._console.run()
+
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    UPnPClientConsole().run()
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpservicestub.py b/instrumentation/next-share/BaseLib/UPnP/upnpclient/upnpservicestub.py
new file mode 100644 (file)
index 0000000..9e46696
--- /dev/null
@@ -0,0 +1,601 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""UPnP Service Stub implements a local stub for a remote UPnPService. """
+
+import time
+import exceptions
+import types
+import urlparse
+import uuid
+import BaseLib.UPnP.common.upnpmarshal as upnpmarshal
+import BaseLib.UPnP.common.upnpsoap as upnpsoap
+import BaseLib.UPnP.common.asynchHTTPclient as httpclient
+
+_HTTP_200_RESPONSE = "HTTP/1.1 200 OK"
+
+def _parse_sub_response_header(response_header):
+    """Parse Subscription response header. Response header is
+    assumed to be a string."""
+    if response_header == None:
+        return None
+    lines = response_header.split("\r\n")
+    if lines[0] != _HTTP_200_RESPONSE:
+        return None
+    header_map = {}
+    for line in lines[1:]:
+        if len(line.strip()) > 0:
+            elem_name, elem_value = line.split(":", 1)
+            header_map[elem_name.strip().lower()] = elem_value.strip()    
+    duration = _get_duration(header_map)
+    sid = _get_sid(header_map)
+    return sid, duration
+
+def _get_duration(header_map):
+    """
+    Get the Subscription duration (int) from 
+    header_map (dictionary).
+    """
+    if header_map.has_key('timeout'):
+        duration = header_map['timeout'].split('-')[-1]
+        if duration == 'infinite': 
+            return 0
+        else: 
+            return int(duration)
+    else : 
+        return None
+
+def _get_sid(header_map):
+    """
+    Get the Subscription ID (uuid.UUID) from 
+    header_map (dictionary).
+    """
+    if header_map.has_key('sid'):
+        return uuid.UUID(header_map['sid'].split(':')[-1])
+    else : 
+        return None
+
+
+##############################################
+# ACTION REQUEST HTTP HEADER
+##############################################
+
+_ACTION_REQUEST_HDR_FMT = """POST %s HTTP/1.1\r
+HOST: %s:%d\r
+Content-Length: %d\r
+Content-Type: text/xml; charset="utf-8"\r
+SOAPACTION: "%s#%s"\r\n\r\n"""
+
+def _get_action_request_hdr(url, length, service_type, action_name):
+    """Return action request http header as string."""
+    return _ACTION_REQUEST_HDR_FMT % (url.path, 
+                                      url.hostname, 
+                                      url.port, length, 
+                                      service_type, 
+                                      action_name)
+
+##############################################
+# SUBSCRIBE REQUEST HTTP HEADER
+##############################################
+
+_SUBSCRIBE_REQUEST_HDR_FMT = """SUBSCRIBE %s HTTP/1.1\r
+HOST: %s:%d\r
+CALLBACK: <%s>\r
+NT: upnp:event\r
+TIMEOUT: Second-%d\r\n\r\n"""
+
+def _get_subscription_request_hdr(url, callback_url, seconds):
+    """Return subscription request http header as string."""
+    return _SUBSCRIBE_REQUEST_HDR_FMT % (url.path, url.hostname, 
+                                         url.port, callback_url, seconds)
+
+
+##############################################
+# RENEW REQUEST HTTP HEADER
+##############################################
+
+_RENEW_REQUEST_HDR_FMT = """SUBSCRIBE %s HTTP/1.1\r
+HOST: %s:%d\r
+SID: uuid:%s\r
+TIMEOUT: Second-%d\r\n\r\n"""
+
+def _get_renew_request_hdr(url, sid, seconds):
+    """Return subscription renewal request http header as string."""
+    return _RENEW_REQUEST_HDR_FMT % (url.path, url.hostname, 
+                                     url.port, sid, seconds)
+
+
+##############################################
+# UNSUBSCRIBE REQUEST HTTP HEADER
+##############################################
+
+_UNSUBSCRIBE_REQUEST_HDR_FMT = """UNSUBSCRIBE %s HTTP/1.1\r
+HOST: %s:%d\r
+SID: uuid:%s\r\n\r\n"""
+
+def _get_unsubscribe_request_hdr(url, sid):
+    """Return unsubscribe request http header as string."""
+    return _UNSUBSCRIBE_REQUEST_HDR_FMT % (url.path, 
+                                           url.hostname, url.port, sid)
+
+
+
+##############################################
+# ACTION ERROR
+##############################################
+
+class ActionError (exceptions.Exception): 
+    """Error associated with invoking actions on a remote UPnP 
+    Service. """
+    # Raised by ActionWrapper.__call__ when a remote action invocation fails.
+    pass
+
+
+##############################################
+# ACTION WRAPPER
+##############################################
+
+class ActionWrapper:
+    """
+    Convenience wrapper of action invokations.
+    This allows the actions to appear as named methods
+    of the ServiceStub.
+    res = stub.action_name(parameters)
+    """
+    def __init__(self, stub, action_name):
+        self._stub = stub
+        self._action_name = action_name
+
+    def __call__(self, *args):
+        """Callable object refers to action method of stub."""
+        res = self._stub.action(self._action_name, list(args))
+        if res == None:
+            raise ActionError, "Some Error"
+        elif len(res) == 0:
+            return None
+        elif len(res) == 1:
+            return res[0]
+        else:
+            return tuple(res)
+
+##############################################
+# SUBSCRIPTION
+##############################################
+
+class Subscription:
+    """
+    A Service Stub may hold a single subscription for events
+    from the remote service. This is the local representation
+    of the state of that subscription."""
+    def __init__(self):
+        self._sid = None
+        self._expiry = None
+
+    def is_valid(self):
+        """Is subscription currently valid?"""
+        if self._sid != None and self._expiry != None:
+            if self._expiry < time.time():
+                return True
+        return False
+
+    def cancel(self):
+        """Cancel subscription."""
+        self._sid = None
+        self._expiry = None
+
+    def set(self, sid, duration):
+        """Update subscription by setting new sid/duration."""
+        self._sid = sid
+        self._expiry = time.time() + duration
+
+    def get_sid(self):
+        """Get Subscription ID."""
+        return self._sid
+
+    def get_expiry(self):
+        """Get expiry timestamp."""
+        return self._expiry
+
+
+##############################################
+# EVENT DEF
+##############################################
+
+class EventDef:
+    """Event Definition."""
+    def __init__(self, service_stub, sv_def):
+        self._sv_def = sv_def
+        self._service_stub = service_stub
+
+    FMT = "EventDef: %s -> %s\n\tPyType(%s), UPnPType(%s)"
+    def __str__(self):
+        """String representation."""
+        return EventDef.FMT % (
+            self._service_stub.get_short_service_id(),
+            self._sv_def['name'], self._sv_def['pyType'], 
+            self._sv_def['upnpType'])
+        
+
+##############################################
+# SV DEF
+##############################################
+
+class SvDef:
+
+    """State Variable Definition."""
+    def __init__(self, service_stub, sv_def):
+        self._sv_def = sv_def
+        self._service_stub = service_stub
+
+    def get_name(self):
+        """Get name of State Variable."""
+        return self._sv_def['name']
+
+    def get_upnp_type(self):
+        """"Get UPnP type of State Variable (string)."""
+        return self._sv_def['upnpType']
+
+    def get_python_type(self):
+        """Get python type<object> of State Variable."""
+        return self._sv_def['pyType']
+
+    def get_default_value(self):
+        """Get default value of State Variable."""
+        return self._sv_def['defaultValue']
+
+    def is_evented(self):
+        """Return true if State Variable is evented."""
+        return self._sv_def['sendEvents']
+
+    FMT = "SvDef: %s -> %s\n\tPyType(%s), "
+    FMT += "UPnPType(%s), Default(%s), Evented(%s)\n"
+    def __str__(self):
+        """String representation."""
+        return SvDef.FMT % (self._service_stub.get_short_service_id(),
+                            self.get_name(), self.get_python_type(), 
+                            self.get_upnp_type(), str(self.get_default_value()),
+                            str(self.is_evented()))
+
+
+##############################################
+# ACTION DEF
+##############################################
+
+class ActionDef:
+    """Action Definition. Referenset to input arguments and output
+    results."""
+    
+    def __init__(self, service_stub, action_def):
+        self._action_def = action_def
+        self._service_stub = service_stub
+
+    def get_name(self):
+        """Get name of Action Definition."""
+        return self._action_def['name']
+
+    def get_inargs(self):
+        """Get list of input arguments of Action Definition. Tuples of
+        (name, pyType and upnpType)."""
+        return [(arg['name'], arg['rsv']['pyType'], arg['rsv']['upnpType'])\
+                    for arg in self._action_def['inargs']]
+
+    def get_outargs(self):
+        """Get list of result arguments of Action Definition. Tuples of
+        (name, pyType and upnpType)."""
+        return [(arg['name'], arg['rsv']['pyType'], arg['rsv']['upnpType']) \
+                    for arg in self._action_def['outargs']]
+
+    FMT = "ActionDef: %s -> %s\n\tInArgs: %s\n\tOutArgs: %s\n" 
+    def __str__(self):
+        """String representation of Action Definition."""
+        return ActionDef.FMT % \
+            (self._service_stub.get_short_service_id(), 
+             self.get_name(), self.get_inargs(), self.get_outargs())
+
+
+##############################################
+# UPNP SERVICE STUB
+##############################################
+
+class UPnPServiceStub:
+
+    """UPnPServiceStub is a stub that allows easy interaction with 
+    remote UPnPService."""
+
+    def __init__(self, upnp_client, device, service, service_spec):
+        """Initialise stub state and define the state variables and
+        actions listed in the parsed xml service description."""
+        # Device is dictionary containing device description
+        # originating from xml device description.
+        # Service is dictionary containing specific service specification
+        # originating from xml device description.
+        # Service Spec is dictionary containing service specification
+        # from xml service description.
+        self._upnp_client = upnp_client
+        self._synch_httpc = upnp_client.synch_httpc
+        self._device = device
+        self._service = service
+        self._sv_def_map = {} # name: svdef
+        self._action_def_map = {} # actionName: actiondef
+        self._subscription = Subscription()
+        self._base_callback_url = self._upnp_client.get_base_event_url()
+        self._notify_handlers = []
+
+        # Populate definition maps from the service description.
+        for sv_spec in service_spec['stateVariables']:
+            self._define_state_variable(sv_spec)
+
+        for action_spec in service_spec['actions']:
+            self._define_action(action_spec)
+
+
+    ##############################################
+    # PRIVATE UTILITY
+    ##############################################
+
+    def _define_state_variable(self, sv_spec):
+        """Define a state variable for the stub. Called as a result of parsing
+        the xml service description. A variable whose spec cannot be
+        marshalled is skipped."""
+        try:
+            sv_def = {}
+            sv_def['name'] = name = sv_spec['name']
+            sv_def['upnpType'] = upnp_type = str(sv_spec['dataType'])
+            sv_def['pyType'] = upnpmarshal.loads_python_type(upnp_type)
+            dvalue = sv_spec['defaultValue']
+            if  dvalue != None:
+                dvalue = upnpmarshal.loads_data_by_upnp_type(upnp_type, dvalue)
+            sv_def['defaultValue'] = dvalue
+            sv_def['sendEvents'] = upnpmarshal.loads(types.BooleanType, 
+                                                     sv_spec['sendEvents'])
+            self._sv_def_map[name] = sv_def    
+        except upnpmarshal.MarshalError, why:
+            # NOTE(review): error goes to stdout rather than the logger;
+            # the variable is silently dropped from the stub.
+            print why
+            return
+    
+    def _define_action(self, action_spec): 
+        """Define an action for the stub. Called as a result of parsing
+        the xml service description."""
+        # Set references to sv_def: replace each argument's 'rsv' name
+        # with the corresponding state-variable definition dict.
+        for arg_spec in action_spec['inargs'] + action_spec['outargs']:
+            stv = self._sv_def_map[arg_spec['rsv']]
+            arg_spec['rsv'] = stv
+        self._action_def_map[action_spec['name']] = action_spec
+
+    def _create_action_request(self, action_name, inargs):
+        """Build action request as string given name and input arguments."""
+        # Check inargs
+        if not self._action_def_map.has_key(action_name):
+            return None, None
+        action_def = self._action_def_map[action_name]
+        if not len(inargs) == len(action_def['inargs']):
+            return None, None
+
+        # Convert inargs from python objects to (name, value) strings 
+        args = [] # (name, data)
+        for i in range(len(inargs)):
+            inargdef = action_def['inargs'][i]
+            stv = inargdef['rsv']
+            # Dump python value according to upnptype.
+            data = upnpmarshal.dumps_by_upnp_type(stv['upnpType'], inargs[i])
+            args.append((inargdef['name'], data))
+
+        # Create HTTP SOAP Request
+        service_type = self.get_service_type()
+        xmldata = upnpsoap.create_action_request(
+            service_type, action_name, args)
+        if xmldata == None: 
+            return None, None
+        url = urlparse.urlparse(self._service['controlURL'])        
+        header = _get_action_request_hdr(url, len(xmldata), 
+                                         service_type, action_name)
+        return action_def, header + xmldata
+
+    def _http_request(self, url, http_request):
+        """Blocking HTTP Request. Return HTTP Response (header, body)."""
+        status, reply = self._synch_httpc.request(url.hostname, 
+                                                  url.port, http_request)
+        if status == httpclient.SynchHTTPClient.OK:
+            header, body = reply
+            if header[:len(_HTTP_200_RESPONSE)] == _HTTP_200_RESPONSE:
+                return header, body
+            else: return None, None
+        elif status == httpclient.SynchHTTPClient.FAIL:
+            return None, None
+    
+    def _parse_action_response(self, action_def, xml_data):
+        """Parse xml action response response. Return out arguments."""
+
+        # Parse Response XML
+        dictionary = upnpsoap.parse_action_response(xml_data)
+
+        # Check Response
+        if dictionary['service_type'] != self.get_service_type():
+            return None
+        if dictionary['action_name'] != action_def['name']:
+            return None        
+        if len(dictionary['arguments']) != len(action_def['outargs']):
+            return None
+
+        # Convert result arguments (name, data) to python objects.
+        outargs = []
+        for i in range(len(action_def['outargs'])):
+            outargdef = action_def['outargs'][i]
+            stv = outargdef['rsv']
+            if outargdef['name'] != dictionary['arguments'][i][0]:
+                return None
+            data = dictionary['arguments'][i][1]
+            obj = upnpmarshal.loads_data_by_upnp_type(
+                stv['upnpType'], data)
+            outargs.append(obj)
+        return outargs
+
+    def notify(self, sid, seq, var_list):
+        """Event notification delivered by UPnPClient."""
+        for var_name, data in var_list:
+            if self._sv_def_map.has_key(var_name):
+                stv = self._sv_def_map[var_name]
+                obj = upnpmarshal.loads_data_by_upnp_type(
+                    stv['upnpType'], data)
+                for handler in self._notify_handlers:
+                    handler(var_name, int(seq), obj)
+
+    def _subscribe(self, opname, seconds=1800):
+        """Common logic for Subscribe, Renew and Unsubscribe."""
+        url = urlparse.urlparse(self._service['eventSubURL'])
+        # Create Request
+        if opname == "subscribe":
+            request = _get_subscription_request_hdr(url, 
+                                                    self.get_callback_url(), 
+                                                    seconds)
+        elif opname == "renew":
+            request = _get_renew_request_hdr(url, 
+                                             self._subscription.get_sid(), 
+                                             seconds)
+        elif opname == "unsubscribe":
+            request = _get_unsubscribe_request_hdr(url, 
+                                                   self._subscription.get_sid())
+        response_header = self._http_request(url, request)[0]
+        # Parse Response
+        res = _parse_sub_response_header(response_header)
+        if res == None:
+            return False
+        if opname == "unsubscribe":
+            self._subscription.cancel()
+            return True
+        elif opname in ["subscribe", "renew"]:
+            sid, duration = res
+            if sid != None and duration != None:
+                self._subscription.set(sid, duration)
+                return True
+            else: 
+                return False
+        return False
+
+
+
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+
+    def get_service_id(self):
+        """Return full service id of remote UPnPService."""
+        return self._service['serviceId']
+
+    def get_short_service_id(self):
+        """Return short service id of remote UPnPService."""
+        return  self._service['serviceId'].split(":", 3)[3]
+
+    def get_service_type(self):
+        """Return service type of remote UPnPService."""
+        return self._service['serviceType']
+
+    def get_device_uuid(self):
+        """Return uuid of parent device of remote UPnPService."""
+        return self._device['uuid']
+
+    def get_action_names(self):
+        """Return action names supported by remote UPnPService."""
+        return self._action_def_map.keys()
+
+    def get_callback_url(self):
+        """Get the URL where UPnPClient expects event notifications
+        to be delivered.(String)"""
+        return self._base_callback_url + "%s/%s" % (self.get_device_uuid(), 
+                                                    self.get_service_id())
+
+    def get_sv_names(self):
+        """Get names of state variables. (List of Strings)"""
+        return self._sv_def_map.keys()
+
+    def get_event_names(self):
+        """Get names of evented state variables."""
+        names = []
+        for sv_def in self._sv_def_map.values():
+            if sv_def['sendEvents']:
+                names.append(sv_def['name'])
+        return names
+
+    def get_event_def(self, event_name):
+        """Get Event Definition given name."""
+        if event_name in self.get_event_names():
+            return EventDef(self, self._sv_def_map[event_name])
+
+    def get_sv_def(self, sv_name):
+        """Get state variable definition given name."""
+        if self._sv_def_map.has_key(sv_name):
+            return SvDef(self, self._sv_def_map[sv_name])
+
+    def get_action_def(self, action_name):
+        """Get action definition given name. """
+        if self._action_def_map.has_key(action_name):            
+            return ActionDef(self, self._action_def_map[action_name])
+
+    def get_action(self, action_name):
+        """Get callable action object given name."""
+        if action_name in self.get_action_names():
+            return ActionWrapper(self, action_name)
+
+    def action(self, action_name, inargs=None):
+        """Invoked to carry out action against remote UPnPService. Blocking."""
+        if inargs == None:
+            inargs = []
+        action_def, request = self._create_action_request(action_name, inargs)
+        if request == None:
+            return None
+        url = urlparse.urlparse(self._service['controlURL'])
+        body = self._http_request(url, request)[1]
+        if body == None:
+            return None
+        outargs = self._parse_action_response(action_def, body)
+        if outargs == None:
+            return None
+        self.log("Action %s %s %s" % (action_def['name'], inargs, outargs))
+        return  outargs
+
+    def log(self, msg):
+        """Utility log object."""
+        msg = "%s %s" % (self.get_short_service_id(), msg)
+        self._upnp_client.logger.log("SERVICE",  msg)
+
+    def subscribe(self, handler):
+        """Subscribe to events from remote UPnPService. Blocking."""
+        if handler in self._notify_handlers:
+            return True
+        else:
+            self._notify_handlers.append(handler)
+        # Check validity
+        if self._subscription.is_valid():
+            return True
+        else:
+            # Remote Subscribe
+            res = self._subscribe("subscribe", seconds=1800)
+            if res:
+                self.log("Subscribe " + self.get_short_service_id())
+            return res
+
+    def renew(self):
+        """Renew subscription to events from remote UPnPService. Blocking."""
+        res = self._subscribe("renew", seconds=1800)
+        if res:
+            self.log("Renew " + self.get_short_service_id())
+        return res
+
+    def unsubscribe(self, handler):
+        """Unsubscribe from remote UPnPService. Blocking. """
+        if handler in self._notify_handlers:
+            self._notify_handlers.remove(handler)
+        if len(self._notify_handlers) == 0:
+            # Remove Unsubscribe
+            res = self._subscribe("unsubscribe")
+            if res:
+                self.log("UnSubscribe " + self.get_short_service_id())
+            return res
+        else:
+            return True
+
+    def __getattr__(self, action_name):
+        """Return callable action object when stub object is qualified with
+        action name.
+        res = stub.action_name(input_params)
+        """
+        if action_name in self.get_action_names():
+            return ActionWrapper(self, action_name)
+        else :
+            raise AttributeError, action_name
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpclient/xmldescriptionparser.py b/instrumentation/next-share/BaseLib/UPnP/upnpclient/xmldescriptionparser.py
new file mode 100644 (file)
index 0000000..bb5c112
--- /dev/null
@@ -0,0 +1,353 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements parsing of UPnP Device Descriptions
+and UPnP Service Descriptions.
+"""
+import xml.dom.minidom as minidom
+import urlparse
+import uuid
+
+##############################################
+#  PRIVATE UTILITY FUNCTIONS
+##############################################
+
def _get_subelement_data(element, subelement_tagname):
    """Return the text data of the named subelement of element, or
    None when no such subelement exists."""
    subelement = _get_subelement(element, subelement_tagname)
    if subelement:
        return _get_element_data(subelement)
    return None
+
+def _get_subelement(element, subelement_tagname):
+    """Get an element, given a parent element and a subelement tagname."""
+    subelements = element.getElementsByTagName(subelement_tagname)
+    if not subelements: 
+        return None
+    else : 
+        return subelements[0]
+
+def _get_element_data(element):
+    """Parse the data of the given element."""
+    text = ""
+    for node in element.childNodes:
+        if node.nodeType == node.TEXT_NODE:
+            text += node.data
+        if node.nodeType == node.CDATA_SECTION_NODE:
+            text += node.data
+    return str(text)
+
def _get_absolute_url(base_url_string, url_string):
    """Join a (possibly relative) URL with an absolute base URL.

    When no base is given, url_string is returned only if it is itself
    absolute; otherwise None.  ('is None' replaces '== None'.)
    """
    if base_url_string is None:
        if _is_absolute_url(url_string):
            return url_string
        return None
    return urlparse.urljoin(base_url_string, url_string)
+
def _is_absolute_url(url_string):
    """Decide whether url_string is an absolute http URL.

    NOTE(review): this requires an *explicit* port -- a plain
    'http://host/path' (implicit port 80) is rejected.  UPnP
    description URLs normally carry an explicit port, but confirm this
    strictness is intended before reusing the helper elsewhere.
    """
    url = urlparse.urlparse(url_string)
    ret = True
    if url.scheme != "http": 
        ret = False
    if url.port == None: 
        ret = False
    if len(url.netloc) == 0 : 
        ret = False
    return ret
+
+
+
+##############################################
+#  PUBLIC PARSERS
+##############################################
+
def parse_device_description(xml_description, base_url):
    """Parse a UPnP device description document.

    Returns a dictionary of device properties, or None on error."""
    parser = _DeviceDescriptionParser()
    return parser.parse(xml_description, base_url)
+
def parse_service_description(xml_description):
    """Parse a UPnP service description document.

    Returns a dictionary with 'stateVariables' and 'actions', or None
    on error."""
    parser = _ServiceDescriptionParser()
    return parser.parse(xml_description)
+
+##############################################
+#  DEVICE DESCRIPTION PARSER
+##############################################
+
class _DeviceDescriptionParser:
    """
    Parses the xml description of a upnp device (rootdevice) into a
    plain dictionary.

    Does not parse sub-devices.
    """
    def __init__(self):
        pass

    def parse(self, xmldata, base_url):
        """
        Parse the xml description of a upnp device (rootdevice).
        -> Input is device description xml-data plus the URL it was
           fetched from (fallback base URL).
        <- Output is a dictionary with all relevant information, or
           None when the document is malformed or mandatory fields
           (deviceType, UDN) are missing or invalid.
        """
        try:
            doc = minidom.parseString(xmldata)
        except (TypeError, AttributeError, ExpatError):
            # ExpatError is what minidom raises for syntactically
            # invalid XML; the original caught only TypeError and
            # AttributeError and crashed on malformed documents.
            return None
        if doc is None:
            return None

        root_elem = doc.documentElement
        device = {}

        # URLBase: explicit element wins, else the fetch URL.
        device['URLBase'] = _get_subelement_data(root_elem, 'URLBase')
        if device['URLBase'] is None:
            device['URLBase'] = str(base_url)

        # Device Element
        device_elem = _get_subelement(root_elem, 'device')
        if not device_elem:
            return None

        # deviceType: 'urn:<domain>:device:<shortType>:<version>'
        data = _get_subelement_data(device_elem, "deviceType")
        if not data:
            return None
        tokens = data.split(':')
        if len(tokens) != 5:
            return None
        device['deviceType'] = data
        device['deviceDomain'] = tokens[1]
        device['deviceTypeShort'] = tokens[3]
        device['deviceVersion'] = tokens[4]

        # UDN & UUID, e.g. uuid:40a69722-4160-11df-9a88-00248116b859
        data = _get_subelement_data(device_elem, 'UDN')
        if not data:
            return None
        tokens = data.split(':')
        if len(tokens) != 2:
            return None
        try:
            device['uuid'] = uuid.UUID(tokens[1])
        except ValueError:
            # Malformed uuid string; the original let this escape.
            return None
        device['UDN'] = data

        # Optional descriptive fields (None when absent).
        device['name'] = _get_subelement_data(device_elem,
                                              'friendlyName')
        device['manufacturer'] = _get_subelement_data(device_elem,
                                                      'manufacturer')
        device['manufacturerURL'] = _get_subelement_data(device_elem,
                                                         'manufacturerURL')
        device['modelName'] = _get_subelement_data(device_elem,
                                                   'modelName')
        device['modelDescription'] = _get_subelement_data(device_elem,
                                                          'modelDescription')
        device['modelURL'] = _get_subelement_data(device_elem,
                                                  'modelURL')
        device['serialNumber'] = _get_subelement_data(device_elem,
                                                      'serialNumber')
        device['UPC'] = _get_subelement_data(device_elem, 'UPC')
        url_str = _get_subelement_data(device_elem, 'presentationURL')
        if url_str:
            device['presentationURL'] = _get_absolute_url(device['URLBase'],
                                                          url_str)

        # Services: each entry gets type, id, and absolute SCPD /
        # control / eventing URLs (resolved against URLBase).
        device['services'] = []
        service_list_elem = _get_subelement(device_elem, 'serviceList')
        if service_list_elem:
            service_elems = service_list_elem.getElementsByTagName('service')
            for service_elem in service_elems:
                data_str = {}
                data_str['serviceType'] = _get_subelement_data(service_elem,
                                                               'serviceType')
                data_str['serviceId'] = _get_subelement_data(service_elem,
                                                             'serviceId')
                url_str = _get_subelement_data(service_elem, 'SCPDURL')
                data_str['SCPDURL'] = _get_absolute_url(device['URLBase'],
                                                        url_str)
                url_str = _get_subelement_data(service_elem, 'controlURL')
                data_str['controlURL'] = _get_absolute_url(device['URLBase'],
                                                           url_str)
                url_str = _get_subelement_data(service_elem, 'eventSubURL')
                data_str['eventSubURL'] = _get_absolute_url(device['URLBase'],
                                                            url_str)
                device['services'].append(data_str)

        return device
+
+
+##############################################
+#  SERVICE  DESCRIPTION PARSER
+##############################################
+
class _ServiceDescriptionParser:
    """
    Parses the xml description of a upnp service into a plain
    dictionary with 'stateVariables' and 'actions' lists.
    """
    def __init__(self):
        pass

    def parse(self, xmldata):
        """
        Parse the xml description of a upnp service.
        -> Input is service description xml-data.
        <- Output is a dictionary with all relevant information, or
           None on malformed input.

        Action arguments get an 'rsv' key (related state variable
        name) only when that variable is declared, and are sorted into
        'inargs'/'outargs' by their direction element.
        """
        try:
            doc = minidom.parseString(xmldata)
        except (TypeError, AttributeError, ExpatError):
            # ExpatError covers syntactically invalid XML; the original
            # caught only TypeError/AttributeError and crashed.
            return None
        if doc is None:
            return None

        root_elem = doc.documentElement
        service = {}

        # State Variables
        service['stateVariables'] = []
        sv_table_elem = _get_subelement(root_elem, 'serviceStateTable')
        if sv_table_elem is None:
            # serviceStateTable is mandatory; the original crashed with
            # AttributeError when it was missing.
            return None
        for sv_elem in sv_table_elem.getElementsByTagName('stateVariable'):
            stv = {}
            stv['name'] = _get_subelement_data(sv_elem, 'name')
            stv['dataType'] = _get_subelement_data(sv_elem, 'dataType')
            stv['defaultValue'] = _get_subelement_data(sv_elem, 'defaultValue')
            # sendEvents defaults to "yes" per the UPnP architecture;
            # the original raised KeyError when the attribute was absent.
            stv['sendEvents'] = sv_elem.getAttribute('sendEvents') or "yes"
            service['stateVariables'].append(stv)

        # Actions
        service['actions'] = []
        action_table_elem = _get_subelement(root_elem, 'actionList')
        if action_table_elem:
            for action_elem in action_table_elem.getElementsByTagName('action'):
                action = {}
                action['name'] = _get_subelement_data(action_elem, 'name')
                action['inargs'] = []
                action['outargs'] = []
                # Arguments
                arg_list_elem = _get_subelement(action_elem, 'argumentList')
                if arg_list_elem:
                    arg_elems = arg_list_elem.getElementsByTagName('argument')
                    for arg_elem in arg_elems:
                        arg = {}
                        arg['name'] = _get_subelement_data(arg_elem, 'name')
                        # Only reference state variables that are
                        # actually declared in the state table above.
                        rsv_name = _get_subelement_data(arg_elem,
                                                        'relatedStateVariable')
                        for stv in service['stateVariables']:
                            if rsv_name == stv['name']:
                                arg['rsv'] = rsv_name
                                break
                        arg['direction'] = _get_subelement_data(arg_elem,
                                                                'direction')
                        if arg['direction'] == 'in':
                            action['inargs'].append(arg)
                        elif arg['direction'] == 'out':
                            action['outargs'].append(arg)
                service['actions'].append(action)

        return service
+
+
+##############################################
+# MAIN
+##############################################
+
if __name__ == '__main__':

    # Smoke test: parse a sample device and service description and
    # dump the resulting dictionaries.  print() works in py2 and py3
    # (the original used py2-only print statements).
    DEVICE_XML = """<?xml version="1.0"?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>http://193.156.106.130:44444/</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>Basic</friendlyName>
<manufacturer>Manufacturer</manufacturer>
<manufacturerURL>http://manufacturer.com</manufacturerURL>
<modelName>Model 1</modelName>
<modelDescription>Model description</modelDescription>
<modelURL>http://manufacturer.com/model_1</modelURL>
<serialNumber>123456</serialNumber>
<UDN>uuid:40a69722-4160-11df-9a88-00248116b859</UDN>
<UPC>012345678912</UPC>
<presentationURL>presentation.html</presentationURL>
<serviceList>
<service>
<serviceType>urn:schemas-upnp-org:service:SwitchPower:1</serviceType>
<serviceId>urn:upnp-org:serviceId:MySwitchPower</serviceId>
<SCPDURL>services/MySwitchPower/description.xml</SCPDURL>
<controlURL>services/MySwitchPower/control</controlURL>
<eventSubURL>services/MySwitchPower/events</eventSubURL>
</service>
</serviceList>
</device>
</root>
"""
    SERVICE_XML = """<?xml version="1.0"?>
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<actionList>
<action>
<name>SetTarget</name>
<argumentList>
<argument>
<name>NewTargetValue</name>
<relatedStateVariable>Status</relatedStateVariable>
<direction>in</direction>
</argument>
</argumentList>
</action>
<action>
<name>GetStatus</name>
<argumentList>
<argument>
<name>ResultStatus</name>
<relatedStateVariable>Status</relatedStateVariable>
<direction>out</direction>
</argument>
</argumentList>
</action>
<action>
<name>GetTarget</name>
<argumentList>
<argument>
<name>RetTargetValue</name>
<relatedStateVariable>Status</relatedStateVariable>
<direction>out</direction>
</argument>
</argumentList>
</action>
</actionList>
<serviceStateTable>
<stateVariable sendEvents="yes">
<name>Status</name>
<dataType>boolean</dataType>
<defaultValue>0</defaultValue>
</stateVariable>
</serviceStateTable>
</scpd>
"""

    print(parse_device_description(DEVICE_XML, "http://vg.no/"))

    print(parse_service_description(SERVICE_XML))
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpconsole.py b/instrumentation/next-share/BaseLib/UPnP/upnpconsole.py
new file mode 100644 (file)
index 0000000..38097a6
--- /dev/null
@@ -0,0 +1,76 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This class implements a console interface to both UPnPServer 
+and UPnPClient.
+"""
+##############################################
+# UPNP CONSOLE
+##############################################
+
+from BaseLib.UPnP.common import TaskRunner
+from BaseLib.UPnP.common import ObjectConsole
+from BaseLib.UPnP.upnpserver import UPnPServer
+from BaseLib.UPnP.upnpclient import UPnPClient
+from BaseLib.UPnP.services import SwitchPower, URLService
+from BaseLib.UPnP.services import BookmarkService
+from BaseLib.UPnP import SERVER_PRODUCT_NAME
+from BaseLib.UPnP import SERVER_ROOT_DEVICE_CONFIG
+
class UPnPConsole:

    """This class wraps ObjectConsole to implement a
    custom UPnP console."""

    def __init__(self):

        # One task runner drives both the server and the client.
        self._task_runner = TaskRunner()
        self._server = UPnPServer(self._task_runner, 
                            SERVER_PRODUCT_NAME,
                            SERVER_ROOT_DEVICE_CONFIG)
        self._client = UPnPClient(self._task_runner)

        # Add a couple of services
        self._server.add_service(SwitchPower("SwitchPower"))
        self._server.add_service(URLService("URLService"))
        self._server.add_service(BookmarkService())

        # Console Namespace: 'S' and 'C' become the interactive
        # handles for the server and client objects.
        namespace = {}
        namespace['S'] = self._server
        namespace['C'] = self._client

        # ObjectConsole calls _run/_stop around its interactive loop.
        self._console = ObjectConsole(self, namespace,
                                      run="_run",
                                      stop="_stop",
                                      name="UPnP")


    def _run(self):
        """Run the TaskRunner."""
        self._task_runner.run_forever()

    def _stop(self):
        """
        Internal: Stop the UPnPClient, UPnPServer and the 
        TaskRunner.
        """
        self._client.close()
        self._server.close()
        self._task_runner.stop()

    def run(self):
        """Runs the UPnP Console.  Blocks until the console exits."""
        self._console.run()
+
+
+
+##############################################
+# MAIN
+##############################################
+
# Launch an interactive UPnP console when executed as a script.
if __name__ == '__main__':
    UPnPConsole().run()
+
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/__init__.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/__init__.py
new file mode 100644 (file)
index 0000000..2be2c22
--- /dev/null
@@ -0,0 +1,7 @@
+"""
+This package contains the implementation of an 
+extensible non-blocking UPnPServer.
+"""
+from upnpserver import UPnPServer
+from upnpservice import UPnPService
+from upnpdevice import UPnPDevice
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/httpserver.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/httpserver.py
new file mode 100644 (file)
index 0000000..e621b81
--- /dev/null
@@ -0,0 +1,301 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""This module implements a HTTP Server for the UPnP Server."""
+
+import re
+import types
+import socket
+import urlparse
+import BaseLib.UPnP.common.upnpsoap as upnpsoap
+import BaseLib.UPnP.common.asynchHTTPserver as httpserver
+
+##############################################
+# REQUEST HANDLER
+##############################################
+
+class _RequestHandler(httpserver.AsynchHTTPRequestHandler):
+    """Request Handler for UPnP Server HTTP Server."""
+
+    def do_GET(self):
+        """Respond to a GET request"""
+        # In case of full url, parse to get to path
+        url = urlparse.urlparse(self.path)
+        
+        # Expect Paths
+        # /devices/devicename/presentation.html
+        # /devices/devicename/description.xml
+        # /services/serviceID/presentation.html
+        # /services/serviceID/description.xml
+        #
+        # or special paths
+        # /description.xml
+        # /presentation.html
+
+        path = url.path.strip('/')
+        body = None
+        tokens = path.split('/')
+        
+        # Special Root Device Description Path
+        if path == self.server.service_manager.get_description_path():
+            root_device = self.server.service_manager.get_root_device()
+            body = root_device.get_xml_description()
+            content_type = "text/xml"
+        # Special Root Device Presentation Path            
+        elif path == self.server.service_manager.get_presentation_path():
+            root_device = self.server.service_manager.get_root_device()
+            body = root_device.get_html_description()
+            content_type = "text/html"
+        # Other Requests.
+        elif len(tokens) == 3:
+            type_, name = tokens[:2]
+            
+            if type_ == 'devices':
+                object_ = self.server.service_manager.get_device(name)
+            elif type_ == 'services':
+                object_ = self.server.service_manager.get_service(name)
+                
+            if object:
+                if path == object_.description_path:
+                    body = object_.get_xml_description()
+                    content_type = 'text/xml'        
+                elif path == object_.presentation_path:
+                    body = object_.get_html_description()
+                    content_type = 'text/html'
+                
+        try:
+            if body:                
+                # Log
+                msg = "GET %s [%s]" % (path, self.client_address[0])
+                self.server.log(msg)
+
+                self.send_response(200)
+                self.send_header('content-length', str(len(body)))
+                self.send_header('content-type', content_type)
+                if self.headers.has_key('accept-language'):
+                    self.send_header('content-language', 
+                                     self.headers['accept-language'])
+                else:
+                    self.send_header('content-language', 'xml:lang="en"')
+                self.send_header('date', self.date_time_string())
+                self.end_headers()
+                self.wfile.write(body)
+                self.request.close()
+            else:
+                self.send_error(404)
+        except socket.error, error:
+            self.server.log("SocketError %s" % error)
+
+
+    def do_POST(self):
+        """Responds to POST request."""
+        # In case of full url, parse to get to path
+        url = urlparse.urlparse(self.path)
+        # Expect Path
+        # /services/serviceID/control
+        path = url.path.strip('/')
+        tokens = path.split('/')
+        if len(tokens) == 3:
+            service_id = tokens[1]
+         
+            # Parse Header
+            body_bytes = int(self.headers['content-length'])
+            soapaction = self.headers['soapaction'].strip('"')
+            [name_space, action_name] = soapaction.split("#")
+            service = name_space.split(":")[2]
+
+            # Body (SoapXML)   
+            body = self.rfile.read(body_bytes)
+            res = upnpsoap.parse_action_request(body)
+            if res:
+                action = res[0]
+                args = res[2]
+
+                # Log
+                msg = "POST %s %s [%s]" % (path, action, 
+                                           self.client_address[0])
+                self.server.log(msg)
+
+                service = self.server.service_manager.get_service(service_id)
+                if service : 
+                    result_list = service.invoke_action(action_name, args)
+
+                if isinstance(result_list, types.ListType):
+                    # Reply
+                    result_body = upnpsoap.create_action_response(name_space, 
+                    action_name, result_list)
+                    self.send_response(200)
+                else:
+                    # Error
+                    result_body = upnpsoap.create_error_response("501", 
+                    "Operation Not supported")
+                    self.send_response(500)
+
+                self.send_header('Content-Length', str(len(result_body)))
+                self.send_header('Content-Type', 'text/xml; charset="utf-8"')
+                self.send_header('DATE', self.date_time_string())
+                self.send_header('EXT','')
+                self.send_header('SERVER', self.server.get_server_header())
+                self.end_headers()
+                self.wfile.write(result_body)
+                self.request.close()
+                return
+
+        self.server.log("ERROR Post %s %s" % (path, 
+                                                   self.client_address[0]))
+        self.send_response(500)
+        self.end_headers()
+        self.request.close()
+    
+    def do_SUBSCRIBE(self):        
+        """Responds to SUBSCRIBE request."""
+        # In case of full url, parse to get to path
+        url = urlparse.urlparse(self.path)
+        # Expect path
+        # /services/service_id/events
+        # or full URL
+        # http://host:port/services/service_id/events
+        path = url.path.strip('/')
+        tokens = path.split('/')[-2:]
+
+        error = 500
+
+        if len(tokens) == 2:
+            service_id = tokens[0]
+            path = "/%s/%s" % tuple(tokens)
+
+            # Service
+            service = self.server.service_manager.get_service(service_id)
+
+            # Requested Duration of Subscription
+            if self.headers.has_key('timeout'):
+                duration = self.headers['timeout'].split('-')[-1]
+                if duration == 'infinite': 
+                    duration = 0
+                else: duration = int(duration)
+            else : duration = 0        
+
+            # Subscribe
+            if self.headers.has_key('nt'):
+                # Callback URLs
+                callback_urls = re.findall('<.*?>', self.headers['callback'])
+                callback_urls = [url.strip('<>') for url in callback_urls]
+                # Subscribe
+                sid, duration = service.subscribe(callback_urls, duration)
+                # Log
+                msg = "SUBSCRIBE %s [%s]" % (path, self.client_address[0])
+                self.server.log(msg)
+
+            # Renew
+            elif self.headers.has_key('sid'):
+                sid = self.headers['sid'].split(':')[-1]
+                # Renew
+                duration = service.renew(sid, duration)
+                # Log
+                msg = "RENEW %s %s" % (path, self.client_address[0])
+                self.server.log(msg)
+
+            if sid and duration:
+                # Respond
+                self.send_response(200)
+                self.send_header('server', self.server.get_server_header())
+                self.send_header('sid', 'uuid:%s' % sid)
+                self.send_header('timeout', 'Second-%s' % duration)
+                self.send_header('content-length', 0)
+                self.end_headers()
+                self.wfile.flush()
+                self.request.close()
+                return
+            else :
+                error = 412 # Precondition failed
+
+        
+        msg = "ERROR [%d] Subscribe %s %s" % (error, path, 
+                                         self.client_address[0])
+        self.server.log(msg)
+        self.send_response(error)
+        self.end_headers()
+
+    def do_UNSUBSCRIBE(self):
+        """Responds to UNSUBSCRIBE request."""
+        # In case of full url, parse to get to path
+        url = urlparse.urlparse(self.path)
+        # Expect path
+        # /services/service_id/events
+        # or full URL
+        # http://host:port/services/service_id/events
+        path = url.path.strip('/')
+        tokens = path.split('/')[-2:]
+        if len(tokens) == 2:
+            service_id = tokens[0]
+            path = "/%s/%s" % tuple(tokens)
+            # SID
+            sid = self.headers['sid'].split(':')[-1]
+            # UnSubscribe
+            service = self.server.service_manager.get_service(service_id)
+            service.unsubscribe(sid)
+            # Log
+            msg = "UNSUBSCRIBE %s [%s]" % (path, self.client_address[0])
+            self.server.log(msg)
+            # Protect against Control Point closing connection early.
+            try:
+                self.send_response(200)
+                self.end_headers()
+                self.request.close()
+            except socket.error: 
+                pass
+        else:
+            msg = "ERROR Unsubscribe %s %s" % (path, 
+            self.client_address[0])
+            self.server.log(msg)
+            self.send_response(500)
+            self.end_headers()
+            self.request.close()
+
+    def do_NOTIFY(self):
+        """Responds to NOTIFY request. Just for testing."""
+        msg = "NOTIFY [%s] %s" % (self.client_address[0], self.path)
+        self.server.log(msg)
+        data = "test"
+        try:
+            self.send_response(200)
+            self.send_header('content-length', len(data))
+            self.end_headers()
+            self.wfile.write(data)
+            self.request.close()
+        except socket.error:  # peer may close the connection early
+            pass
+
+##############################################
+# HTTP SERVER
+##############################################
+
+_HTTP_PORT = 44444
+
+class HTTPServer(httpserver.AsynchHTTPServer):
+    """HTTP Server for the UPnP Server (descriptions, control, eventing)."""
+
+    def __init__(self, task_runner, logger=None):
+
+        httpserver.AsynchHTTPServer.__init__(self, task_runner, _HTTP_PORT,
+                                             _RequestHandler, logger)
+
+        # Service Manager (injected later via set_service_manager)
+        self.service_manager = None
+
+    def set_service_manager(self, service_manager):
+        """Initialise with reference to service manager."""
+        self.service_manager = service_manager
+
+    def get_server_header(self):
+        """Get SERVER header for UPnP Server."""
+        # Server Header: "<os version> UPnP/1.0 <product version>"
+        server_fmt = '%s UPnP/1.0 %s' 
+        return server_fmt % (self.service_manager.get_os_version(), 
+                             self.service_manager.get_product_version())
+
+    def startup(self):
+        """Extend startup of superclass by logging the presentation URL."""
+        httpserver.AsynchHTTPServer.startup(self)
+        self.log("URL %s" % self.service_manager.get_presentation_url())
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/servicemanager.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/servicemanager.py
new file mode 100644 (file)
index 0000000..0c63ac2
--- /dev/null
@@ -0,0 +1,151 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""This module implements the servicemanager of the UPnP server.
+The service manager manages all devices and services. Its reference
+is given to all the other modules, so servicemanager is also used to
+hold some global state. """
+
+import socket
+import platform
+
+_SSDP_PORT = 44443
+_HTTP_PORT = 44444
+
+##############################################
+# SERVICE MANAGER
+##############################################
+
+class ServiceManager:
+
+    """
+    Holds devices and services, identified by deviceName and
+    serviceid, respectively.
+    Also holds some global state (host, url paths, os/product
+    versions) that it makes available to the other server modules.
+    This is the core of the UPnP Service implementation.
+    todo : ServiceManager should also implement a hierarchical name
+    space where devices are internal nodes and services are leaves.
+    """
+
+    def __init__(self, task_runner, ssdp_server, http_server, 
+                 event_dispatcher, root_device,
+                 product_name, logger=None):
+
+        self._task_runner = task_runner
+        self._ssdp_server = ssdp_server
+        self._http_server = http_server
+        self._event_dispatcher = event_dispatcher
+
+        self._services = {}  # service_id -> service
+        self._root_device = root_device
+        self._host = socket.gethostbyname(socket.gethostname())
+        self._description_path = "description.xml"
+        self._presentation_path = "presentation.html"
+        self._os_version = platform.platform()
+        self._product_version = product_name
+        self._logger = logger
+        
+        self._root_device.set_service_manager(self)
+        self._ssdp_server.set_service_manager(self)
+        self._http_server.set_service_manager(self)
+
+    def startup(self):
+        """Startup http server and ssdp server."""
+        self._http_server.startup()
+        self._ssdp_server.startup()
+
+    def get_http_port(self):
+        """Return HTTP port used by UPnP server."""
+        return self._http_server.get_port()
+
+    def get_ssdp_port(self):
+        """Return SSDP port used by UPnP server."""
+        return self._ssdp_server.get_port()
+
+    def get_base_url(self):
+        """Return base url for UPnP server."""
+        return "http://%s:%d/" % (self.get_host(), self.get_http_port())
+
+    def get_description_path(self):
+        """Return description path for UPnP server."""
+        return self._description_path
+
+    def get_presentation_path(self):
+        """Return presentation path for UPnP server."""
+        return self._presentation_path
+
+    def get_description_url(self):
+        """Return description url for UPnP server."""
+        return self.get_base_url() + self._description_path
+
+    def get_presentation_url(self):
+        """Return presentation url for UPnP server."""
+        return self.get_base_url() + self._presentation_path
+
+    def get_host(self):
+        """Return host (local IP address) for UPnP server."""
+        return self._host
+
+    def get_os_version(self):
+        """Return OS version for UPnP server."""
+        return self._os_version
+
+    def get_product_version(self):
+        """Return product name/version for UPnP server."""
+        return self._product_version
+
+    def get_logger(self):
+        """Return the global logger for UPnP server."""
+        return self._logger
+
+    def set_root_device(self, device):
+        """Register a device as root."""
+        device.set_service_manager(self)
+        self._root_device = device
+
+    def get_root_device(self):
+        """Returns the root device."""
+        return self._root_device
+
+    def get_device(self, name):
+        """Get device by name; only the root device is known (else None)."""
+        if self._root_device.name == name:
+            return self._root_device
+
+    def add_service(self, service):
+        """Add a new service to the UPnP Server."""
+        service.set_service_manager(self)
+        self._services[service.service_id] = service
+
+    def get_service(self, service_id):
+        """Get service by service_id (None if not registered)."""
+        return self._services.get(service_id, None)
+
+    def get_service_ids(self):
+        """Return a list of service ids."""
+        return self._services.keys()
+
+    def get_task_runner(self):
+        """Get task runner."""
+        return self._task_runner
+
+    def get_event_dispatcher(self):
+        """Get event dispatcher."""
+        return self._event_dispatcher
+
+    def get_devices_of_device(self, device):
+        """Get subdevices of a device (hierarchy unimplemented: always [])."""
+        return []
+
+    def get_services_of_device(self, device):
+        """Get services contained within a device (root owns them all)."""
+        if device == self._root_device:
+            return self._services.values()
+        else: return []
+
+    def close(self):
+        """Close service manager: close every service, then the root device."""
+        for service in self._services.values():
+            service.close()
+        self._root_device.close()
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpdevice.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpdevice.py
new file mode 100644 (file)
index 0000000..e7758ae
--- /dev/null
@@ -0,0 +1,366 @@
+# Written by Ingar Arntzen, Norut
+# see LICENSE.txt for license information
+
+"""
+This module implements a UPnP device.  The implementation
+takes care of automatic production of xml and html descriptions
+of the device.
+"""
+import uuid
+import exceptions
+
+##############################################
+# HTML
+##############################################
+
+_HTML_FMT = "<html>\n<header></header>\n<body>\n%s</body>\n</html>"
+_HTML_BODY_FMT = "<h1>RootDevice : %s</h1>\n%s"
+_HTML_SERVICE_LIST_FMT = "<h2>Services</h2><ol>\n%s</ol>\n"
+
+_HTML_DEVICE_TYPE_FMT = "DeviceType : urn:%s:device:%s:%s<br>\n"
+_HTML_DEVICE_NAME_FMT = "Name : %s<br>\n"
+_HTML_MANUFACTURER_FMT = "Manufacturer : %s<br>\n"
+_HTML_MANUFACTURER_URL_FMT = "ManufacturerURL : %s<br>\n"
+_HTML_MODEL_NAME_FMT = "ModelName : %s<br>\n"
+_HTML_MODEL_NUMBER_FMT = "ModelNumber : %s<br>\n"
+_HTML_MODEL_DESCRIPTION_FMT = "ModelDescription : %s<br>\n"
+_HTML_MODEL_URL_FMT = "ModelURL : %s<br>\n"
+_HTML_SERIAL_NUMBER_FMT = "SerialNumber : %s<br>\n"
+_HTML_DEVICE_UUID_FMT = "UDN : uuid:%s<br>\n"
+_HTML_UPC_FMT = "UPC : %s<br>\n"
+_HTML_PRESENTATION_FMT = "PresentationURL : <a href=%s>%s</a><br>\n"
+_HTML_DESCRIPTION_FMT = "DescriptionURL : <a href=%s>%s</a><br>\n"
+
+_HTML_SERVICE_FMT = "<li><h3>%s</h3><br>\n%s</li>\n"
+_HTML_SERVICE_TYPE_FMT = "ServiceType : urn:schemas-upnp-org:" + \
+    "service:%s:%s<br>\n"
+_HTML_SERVICE_ID_FMT = "ServiceID : urn:upnp-org:serviceId:%s<br>\n"
+_HTML_SERVICE_DESCRIPTION_URL_FMT = "SCPDURL : <a href=%s>%s</a><br>\n"
+_HTML_SERVICE_CONTROL_URL_FMT = "ControlURL : %s<br>\n"
+_HTML_SERVICE_EVENT_URL_FMT = "EventSubURL : %s<br>\n"
+
+def _device_entries_tohtml(device):
+    """Produce html for all attributes of a device."""
+    str_ = []
+    str_.append( _HTML_DEVICE_TYPE_FMT % (device.device_domain, 
+                                       device.device_type, 
+                                       device.device_version))
+    str_.append( _HTML_DEVICE_NAME_FMT % device.name)
+    if device.manufacturer != None:
+        str_.append( _HTML_MANUFACTURER_FMT % device.manufacturer)
+    if device.manufacturer_url != None:
+        str_.append( _HTML_MANUFACTURER_URL_FMT % device.manufacturer_url)
+    if device.model_name != None:
+        str_.append( _HTML_MODEL_NAME_FMT % device.model_name)
+    if device.model_number != None:
+        str_.append( _HTML_MODEL_NUMBER_FMT % device.model_number)
+    if device.model_description != None:
+        str_.append( _HTML_MODEL_DESCRIPTION_FMT % device.model_description)
+    if device.model_url != None:
+        str_.append( _HTML_MODEL_URL_FMT % device.model_url)
+    if device.serial_number != None:
+        str_.append( _HTML_SERIAL_NUMBER_FMT % device.serial_number) 
+    str_.append( _HTML_DEVICE_UUID_FMT % device.uuid)
+    if device.upc != None:
+        str_.append( _HTML_UPC_FMT % device.upc)
+    url = device.get_presentation_url()
+    str_.append( _HTML_PRESENTATION_FMT % (url, url ))
+    url = device.get_description_url()
+    str_.append( _HTML_DESCRIPTION_FMT % (url, url ))
+    return "".join(str_)
+
+def _service_list_tohtml(services):
+    """Produce html (an <ol> list) for all services contained in a device."""
+    if len(services) > 0:
+        list_ = []  # one html <li> entry per service
+        for service in services:
+            str_ = ""
+            str_ += _HTML_SERVICE_TYPE_FMT % (service.service_type, 
+                                           service.service_version)
+            str_ += _HTML_SERVICE_ID_FMT % service.service_id  
+            url = service.base_url + service.description_path
+            str_ += _HTML_SERVICE_DESCRIPTION_URL_FMT % (url, url)
+            str_ += _HTML_SERVICE_CONTROL_URL_FMT % \
+                (service.base_url + service.control_path)
+            str_ += _HTML_SERVICE_EVENT_URL_FMT % \
+                (service.base_url + service.event_path)
+            list_.append(_HTML_SERVICE_FMT % (service.service_id, str_))
+        return _HTML_SERVICE_LIST_FMT % "".join(list_)
+    else : return ""  # no services -> empty fragment
+
+def _device_tohtml(device):
+    """Produce the complete html description page for a device."""
+    entries = _device_entries_tohtml(device)  # attribute rows
+    service_list = _service_list_tohtml(device.get_services())  # <ol> list
+    body = _HTML_BODY_FMT % (device.name, entries + service_list)
+    return _HTML_FMT % body
+    
+
+##############################################
+# XML
+##############################################
+
+_DEVICE_DESCRIPTION_FMT = """<?xml version="1.0"?>
+<root xmlns="urn:schemas-upnp-org:device-1-0">
+<specVersion>
+<major>1</major>
+<minor>0</minor>
+</specVersion>
+<URLBase>%s</URLBase>
+%s
+</root>"""
+
+_DEVICE_FMT = "<device>\n%s</device>"
+_DEVICE_LIST_FMT = "<deviceList>\n%s</deviceList>\n"
+_SERVICE_LIST_FMT = "<serviceList>\n%s</serviceList>\n"
+
+_DEVICE_TYPE_FMT = "<deviceType>urn:%s:device:%s:%s</deviceType>\n"
+_DEVICE_NAME_FMT = "<friendlyName>%s</friendlyName>\n"
+_MANUFACTURER_FMT = "<manufacturer>%s</manufacturer>\n"
+_MANUFACTURER_URL_FMT = "<manufacturerURL>%s</manufacturerURL>\n"
+_MODEL_NAME_FMT = "<modelName>%s</modelName>\n"
+_MODEL_NUMBER_FMT = "<modelNumber>%s</modelNumber>\n"
+_MODEL_DESCRIPTION_FMT = "<modelDescription>%s</modelDescription>\n"
+_MODEL_URL_FMT = "<modelURL>%s</modelURL>\n"
+_SERIAL_NUMBER_FMT = "<serialNumber>%s</serialNumber>\n"
+_DEVICE_UUID_FMT = "<UDN>uuid:%s</UDN>\n"
+_UPC_FMT = "<UPC>%s</UPC>\n"
+_PRESENTATION_FMT = "<presentationURL>%s</presentationURL>\n"
+
+
+_SERVICE_FMT = "<service>\n%s</service>\n"
+_SERVICE_TYPE_FMT = "<serviceType>urn:schemas-upnp-org:service:" + \
+    "%s:%s</serviceType>\n"
+_SERVICE_ID_FMT = "<serviceId>urn:upnp-org:serviceId:%s</serviceId>\n"
+_SERVICE_DESCRIPTION_URL_FMT = "<SCPDURL>%s</SCPDURL>\n"
+_SERVICE_CONTROL_URL_FMT = "<controlURL>%s</controlURL>\n"
+_SERVICE_EVENT_URL_FMT = "<eventSubURL>%s</eventSubURL>\n"
+
+
+def _device_entries_toxml(device):
+    """Produce xml description for device attributes."""
+    str_ = []
+    str_.append( _DEVICE_TYPE_FMT % (device.device_domain, 
+                                  device.device_type, device.device_version))
+    str_.append( _DEVICE_NAME_FMT % device.name)
+    if device.manufacturer != None:
+        str_.append( _MANUFACTURER_FMT % device.manufacturer)
+    if device.manufacturer_url != None:
+        str_.append( _MANUFACTURER_URL_FMT % device.manufacturer_url)
+    if device.model_name != None:
+        str_.append( _MODEL_NAME_FMT % device.model_name)
+    if device.model_number != None:
+        str_.append( _MODEL_NUMBER_FMT % device.model_number)
+    if device.model_description != None:
+        str_.append( _MODEL_DESCRIPTION_FMT % device.model_description)
+    if device.model_url != None:
+        str_.append( _MODEL_URL_FMT % device.model_url)
+    if device.serial_number != None:
+        str_.append( _SERIAL_NUMBER_FMT % device.serial_number) 
+    str_.append( _DEVICE_UUID_FMT % device.uuid)
+    if device.upc != None:
+        str_.append( _UPC_FMT % device.upc)
+    str_.append( _PRESENTATION_FMT % (device.get_presentation_url()))
+    return "".join(str_)
+
+def _device_list_toxml(devices):
+    """Produce xml for devices contained within a device."""
+    if len(devices) > 0:
+        str_ = ""
+        for device in devices:
+            str_ += device.to_xml()
+        return _DEVICE_LIST_FMT % str_
+    else : return ""
+
+def _service_list_toxml(services):
+    """Produce xml (<serviceList>) for services contained within a device."""
+    if len(services) > 0:
+        list_ = []  # one <service> element per service
+        for service in services:
+            str_ = ""
+            str_ += _SERVICE_TYPE_FMT % (service.service_type, 
+                                      service.service_version)
+            str_ += _SERVICE_ID_FMT % service.service_id                
+            str_ += _SERVICE_DESCRIPTION_URL_FMT % (service.description_path)
+            str_ += _SERVICE_CONTROL_URL_FMT % (service.control_path)
+            str_ += _SERVICE_EVENT_URL_FMT % (service.event_path)
+            list_.append(_SERVICE_FMT % str_)
+        return _SERVICE_LIST_FMT % "".join(list_)
+    else : return ""  # no services -> empty fragment
+
+
+##############################################
+# UPNP DEVICE
+##############################################
+
+class UPnPDeviceError(exceptions.Exception):
+    """Error associated with a UPnP Device."""
+    pass
+
+class UPnPDevice:
+
+    """
+    This implements the internal representation of a UPNP Device.    
+    The representation is used to generate the XML description,
+    and to add or remove services.
+    The given service manager implements the hierarchical namespace
+    for devices and services.
+    """
+
+    def __init__(self, device_config=None):
+        self._sm = None
+        self._is_root = False
+
+        # Initialse Device from config.
+        if device_config == None:
+            device_config = {}
+        self.name = device_config.get('name', None)
+        self.device_type = device_config.get('device_type', None)
+        self.device_version = device_config.get('device_version', 1)
+        self.device_domain = device_config.get('device_domain', 
+                                               'schemas-upnp-org')
+        self.manufacturer = device_config.get('manufacturer', None)
+        self.manufacturer_url = device_config.get('manufacturer_url', None)
+        self.model_name = device_config.get('model_name', None)
+        self.model_number = device_config.get('model_number', None)
+        self.model_description = device_config.get('model_description', None)
+        self.model_url = device_config.get('model_url', None)
+        self.serial_number = device_config.get('serial_number', None)
+        self.uuid = uuid.uuid1()
+        self.upc = device_config.get('upc', None)
+
+        self.base_url = ""
+        self.presentation_path = ""
+        self.description_path = ""
+
+
+    def set_service_manager(self, service_manager):
+        """Initialise device with reference to service manager."""
+        self._sm = service_manager
+        self.base_url = self._sm.get_base_url()
+        self.presentation_path = "devices/%s/presentation.html" % self.name 
+        self.description_path = "devices/%s/description.xml" % self.name
+
+    def set_is_root(self, value):
+        """Initialise device with is_root flag."""
+        self._is_root = value
+
+    def is_root(self):
+        """Checks if device is root."""
+        return self._is_root
+
+    def is_valid(self):
+        """Checks if device object has been properly initialised."""
+        return (self.device_type != None and self.name != None
+                and self.uuid != None and self.base_url != None 
+                and self._sm != None)
+
+    def get_services(self):
+        """Returns services which are included in this device."""
+        return self._sm.get_services_of_device(self)
+
+    def get_devices(self):
+        """Returns devices which are included in this device."""
+        return self._sm.get_devices_of_device(self) 
+
+    def to_xml(self):
+        """Returns the xml description of this device."""
+        if self.is_valid():
+            device_entries_xml = _device_entries_toxml(self) 
+            service_list_xml = _service_list_toxml(self.get_services())
+            device_list_xml = _device_list_toxml(self.get_devices())
+            return _DEVICE_FMT % (device_entries_xml + \
+                                      service_list_xml + device_list_xml)
+        else: 
+            msg = "Can not generate XML description . Invalid Device"
+            raise UPnPDeviceError, msg
+
+    def get_presentation_url(self):
+        """Returns the presentation URL (html) for this device."""
+        if self.is_root(): 
+            return self._sm.get_presentation_path()
+        else: 
+            return self.base_url + self.presentation_path
+
+    def get_description_url(self):
+        """Returns the description URL (xml) for this device."""
+        if self.is_root(): 
+            return self._sm.get_description_path()
+        else: 
+            return self.base_url + self.description_path
+
+    def get_xml_description(self):
+        """
+        Returns xml description wrapped in a valid xml document.
+        Should only be invoked on the root device.
+        """
+        if self.is_valid():
+            return _DEVICE_DESCRIPTION_FMT % (self.base_url, self.to_xml())
+        else:
+            msg = "Can not generate XML description. Invalid Device"
+            raise UPnPDeviceError, msg
+
+    def get_html_description(self):
+        """Return html description of device."""
+        return _device_tohtml(self)
+
+    def close(self):
+        """Close this device."""
+        pass
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    DEVICE_CONF = {  # sample config exercising every supported field
+        'device_type' : "MediaServer",
+        'device_version': 1,
+        'device_domain': 'schemas-upnp-org',
+        'name': "MediaServer",
+        'manufacturer': "Manufacturer",
+        'manufacturer_url': 'http://manufacturer.com',
+        'model_description': 'Model description',
+        'model_name': 'Model 1',
+        'model_number': '123456',
+        'model_url': 'http://manufacturer.com/model_1',
+        'serial_number': '123456',    
+        'uuid' : "3dd705c2-1c8a-11df-80c7-00248116b859",
+        'upc': 'universial product code',
+        }
+
+
+    class MockService:
+        """Mock Service exposing the attributes the generators read."""
+        def __init__(self):
+            self.service_type = "ContentDirectory"
+            self.service_version = 1
+            self.service_id =  "ContentDirectory"
+            self.description_path = "%s/description.xml" % self.service_id
+            self.control_path = "%s/control" % self.service_id
+            self.event_path = "%s/events" % self.service_id
+            self.base_url = "http://myhost:4444/"
+
+    class MockServiceManager:
+        """Mock Service Manager with a single mock service."""
+        def __init__(self):
+            self._mock_service = MockService()
+        def get_services_of_device(self, device):
+            """Mock method."""
+            return [self._mock_service]
+        def get_devices_of_device(self, device):
+            """Mock method."""
+            return []
+        def get_base_url(self):
+            """Mock method."""
+            return 'http://myhost:4444/'
+
+
+    SM = MockServiceManager()
+
+    DEVICE = UPnPDevice(DEVICE_CONF)
+    DEVICE.set_service_manager(SM)
+
+    print DEVICE.get_xml_description()  # smoke test: xml generator
+    print DEVICE.get_html_description()  # smoke test: html generator
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpeventdispatcher.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpeventdispatcher.py
new file mode 100644 (file)
index 0000000..14ac0e4
--- /dev/null
@@ -0,0 +1,109 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""This module implements a non-blocking event dispatcher for
+the UPnP server."""
+
+import urlparse
+import BaseLib.UPnP.common.asynchHTTPclient as httpclient
+import BaseLib.UPnP.common.upnpsoap as upnpsoap
+
+HTTP_NOTIFY_HEADER = u"""NOTIFY %(path)s HTTP/1.1\r
+HOST: %(host)s:%(port)d\r
+CONTENT-TYPE: text/xml\r
+CONTENT-LENGTH: %(length)s\r
+NT: upnp:event\r
+NTS: upnp:propchange\r
+SID: uuid:%(sid)s\r
+SEQ: %(key)s\r\n\r\n"""
+
+_LOG_TAG = "HTTPClient"
+
+##############################################
+# EVENT DISPATCHER
+##############################################
+
+class EventDispatcher:
+    """
+    Event Dispatcher wraps non-blocking httpClient to
+    provide a non-blocking event mechanism for the UPnP server.
+
+    Used by the service layer to push GENA NOTIFY event messages to
+    subscribers without blocking the task runner.
+    """
+
+    def __init__(self, task_runner, logger=None):
+        # Task runner shared with the rest of the UPnP server.
+        self._tr = task_runner
+        # Non-blocking HTTP client used to deliver NOTIFY requests.
+        self._httpclient = httpclient.AsynchHTTPClient(task_runner)
+
+        # Logging
+        self._logger = logger
+
+    ##############################################
+    # PRIVATE UTILITY
+    ##############################################
+
+    def _log(self, msg):
+        """Logging."""
+        if self._logger:
+            self._logger.log(_LOG_TAG, msg)
+
+    ##############################################
+    # PUBLIC API
+    ##############################################
+        
+    def dispatch(self, sid, event_key, callback_url, variables):
+        """Dispatch a new UPnP event message.
+
+        sid -- subscription id (uuid), event_key -- integer sequence
+        number (interpolated with %d below, so it must be an int),
+        callback_url -- subscriber callback URL,
+        variables -- list of (name, data) string tuples.
+        """
+        # Generate Soap Body
+        # variables [(name, data)]
+        body = upnpsoap.create_event_message(variables)
+        # Create Notify Header
+        url = urlparse.urlparse(callback_url)
+        dict_ = {
+            # NOTE(review): geturl() reproduces the *full* URL, not just
+            # the path component -- confirm subscribers accept the
+            # absolute-URI request-line form here.
+            'path': url.geturl(),
+            'host': url.hostname,
+            'port': url.port,
+            'sid': sid,
+            'length': len(body),
+            'key': event_key,
+            }
+        header = HTTP_NOTIFY_HEADER % dict_
+        # Dispatch Event Message
+        rid = self._httpclient.get_request_id()
+        self._httpclient.request(rid, url.hostname, url.port, header + body)
+        self._log("NOTIFY %s [%d]" %(url.hostname, event_key))
+
+    def close(self):
+        """Closes Event Dispatcher along with its internal HTTP client."""
+        self._log('CLOSE')
+        self._httpclient.close()
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Manual smoke test: dispatch one NOTIFY to a hard-coded callback
+    # URL and run the task runner until interrupted (Ctrl-C).
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    TASK_RUNNER = taskrunner.TaskRunner()
+    
+    # Parameters
+    import uuid
+    SID = uuid.uuid1()
+    KEY = 1
+    # Two alternative test targets; only WORK_URL is actually used.
+    WORK_URL = "http://193.156.106.130:44444/events"
+    HOME_URL = "http://192.168.1.235:44444/events"
+    VARIABLES = [(u'arg1', u'data1'), (u'arg2', u'data2')]
+
+    class MockLogger:
+        """MockLogger."""
+        def log(self, log_tag, msg):
+            """Log to std out."""
+            print log_tag, msg
+
+    # Event Dispatcher
+    EVD = EventDispatcher(TASK_RUNNER, logger=MockLogger())
+    EVD.dispatch(SID, KEY, WORK_URL, VARIABLES)
+    try:
+        TASK_RUNNER.run_forever()
+    except KeyboardInterrupt:
+        print
+    EVD.close()
+        
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpserver.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpserver.py
new file mode 100644 (file)
index 0000000..8c2f9fe
--- /dev/null
@@ -0,0 +1,134 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""This module implements a UPnP Server."""
+
+from BaseLib.UPnP.ssdp.ssdpserver import SSDPServer
+import upnpdevice
+from httpserver import HTTPServer
+from servicemanager import ServiceManager
+from upnpeventdispatcher import EventDispatcher
+
+DEFAULT_ROOT_DEVICE_CONFIG = {
+    'device_type' : "Basic",
+    'device_version': 1,
+    'name': "Basic",
+    'device_domain': 'schemas-upnp-org',
+    'manufacturer': "Manufacturer",
+    'manufacturer_url': 'http://manufacturer.com',
+    'model_description': 'Model description',
+    'model_name': 'Model 1',
+    'model_number': '1.0',
+    'model_url': 'http://manufacturer.com/model_1',
+    'serial_number': '123456',    
+    'upc': '012345678912',
+}
+
+_LOG_TAG = "UPnPServer"
+
+class _Logger:
+    """Internal Logger presented to modules within the UPnP Server."""
+    def __init__(self, logger):
+        self._logger = logger
+    def log(self, log_tag, msg):
+        """Log module tag and msg. UPnPServer logtag is added."""
+        if self._logger:
+            self._logger.log(_LOG_TAG, log_tag, msg)
+
+
+##############################################
+# UPNP SERVER
+##############################################
+
+class UPnPServer:
+
+    """This class implements an extensible UPnP Server.
+
+    Wires together the SSDP server (discovery), HTTP server
+    (description/control/eventing), event dispatcher (notifications)
+    and the service manager (the core), all driven by one task runner.
+    """
+
+    def __init__(self, task_runner, product_name,
+                 root_device_config=None, logger=None):
+        
+        # TaskRunner
+        self._tr = task_runner
+
+        # Logger
+        self._logger = _Logger(logger)
+
+        # Initalise Root Device
+        if not root_device_config:
+            self._root_device_config = DEFAULT_ROOT_DEVICE_CONFIG
+        else:
+            self._root_device_config = root_device_config
+        device = upnpdevice.UPnPDevice(self._root_device_config)
+        device.set_is_root(True)
+        
+        # Event Dispatcher (HTTP Client)
+        self._ed = EventDispatcher(self._tr, logger=self._logger)
+
+        # HTTP Server
+        self._http_server = HTTPServer(self._tr, logger=self._logger)
+
+        # SSDP Server
+        self._ssdp_server = SSDPServer(self._tr, logger=self._logger)
+
+        # ServiceManager (The Core)
+        self._sm = ServiceManager(self._tr, self._ssdp_server,
+                                  self._http_server, self._ed, device,
+                                  product_name, logger=self._logger)
+
+        # Export Service Manager API
+        # (bound methods re-exported as this server's public API)
+        self.add_service = self._sm.add_service
+        self.get_service = self._sm.get_service
+        self.get_service_ids = self._sm.get_service_ids
+        
+        # Export Internals
+        self.get_root_device = self._sm.get_root_device
+
+        # Startup done by TaskRunner
+        self._tr.add_task(self._sm.startup)
+
+    def get_presentation_url(self):
+        """Return absolute presentation URL of the root device."""
+        root = self.get_root_device()
+        return root.base_url + root.get_presentation_url()
+
+    def announce(self):
+        """Causes underlying SSDPServer to re-announce itself."""
+        self._tr.add_task(self._ssdp_server.announce)
+
+    def close(self):
+        """Close the UPnP server."""
+        # Shut down core first, then the transports it depends on.
+        self._sm.close()
+        self._ssdp_server.close()
+        self._http_server.close()
+        self._ed.close()
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Manual smoke test: start a full UPnP server with three demo
+    # services and run until interrupted (Ctrl-C).
+    import BaseLib.UPnP.common.taskrunner as taskrunner
+    TR = taskrunner.TaskRunner()
+
+    import BaseLib.UPnP.common.upnplogger as upnplogger
+    LOGGER = upnplogger.get_logger()
+
+    SERVER = UPnPServer(TR, "Product 1.0", logger=LOGGER)
+
+    from BaseLib.UPnP.services.switchpower import SwitchPower
+    from BaseLib.UPnP.services.urlservice import URLService
+    from BaseLib.UPnP.services import BookmarkService
+    SERVICE_1 = SwitchPower("MySwitchPower")
+    SERVICE_2 = URLService("URLService")
+    SERVICE_3 = BookmarkService()
+    SERVER.add_service(SERVICE_1)
+    SERVER.add_service(SERVICE_2)
+    SERVER.add_service(SERVICE_3)
+    try:
+        TR.run_forever()
+    except KeyboardInterrupt:
+        print
+    SERVER.close()
+    TR.stop()
+    
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpserverconsole.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpserverconsole.py
new file mode 100644 (file)
index 0000000..c39d728
--- /dev/null
@@ -0,0 +1,90 @@
+# Written by Ingar Arntzen
+# see LICENSE.txt for license information
+
+"""
+This module implements a console interface to a UPnP server.
+"""
+
+##############################################
+# UPNP SERVER CONSOLE
+##############################################
+
+from upnpserver import UPnPServer
+from BaseLib.UPnP.services import SwitchPower, URLService
+from BaseLib.UPnP.services import BookmarkService
+from BaseLib.UPnP.common.objectconsole import ObjectConsole
+from BaseLib.UPnP.common.taskrunner import TaskRunner
+
+class UPnPServerConsole:
+
+    """This class wraps ObjectConsole to implement a 
+    custom console for UPnP server.
+
+    Builds a UPnPServer with three demo services and exposes the root
+    device, the services and a few server methods in the interactive
+    console namespace.
+    """
+
+    def __init__(self):
+        
+        # Task Runner
+        self._task_runner = TaskRunner()
+
+        # UPnP Server        
+        self._server = UPnPServer(self._task_runner, "Product 1.0")
+        
+        # Switch Power Service
+        service_id = "SwitchPower"
+        service = SwitchPower(service_id)
+        self._server.add_service(service)
+
+        # URL Service
+        service_id_2 = "URLService"
+        service_2 = URLService(service_id_2)
+        self._server.add_service(service_2)
+
+        # Bookmark Service
+        service_3 = BookmarkService()
+        self._server.add_service(service_3)
+
+        # Console Namespace
+        # (names available interactively inside the console)
+        name_space = {}
+        root_device = self._server.get_root_device()
+        name_space[root_device.name] = root_device
+        name_space[service_id] = service
+        name_space[service_id_2] = service_2
+        name_space[service_3.get_short_service_id()] = service_3
+        name_space['add_service'] = self._server.add_service
+        name_space['get_service'] = self._server.get_service
+        name_space['get_services'] = self._server.get_service_ids
+        name_space['announce'] = self._server.announce
+
+        # ObjectConsole invokes _run/_stop on our behalf.
+        self._console = ObjectConsole(self, name_space, 
+                                      run="_run", 
+                                      stop="_stop", name="UPnP Server")
+
+    def _run(self):
+        """Start TaskRunner."""
+        self._task_runner.run_forever()
+
+    def _stop(self):
+        """Stop UPnPServer and TaskRunner"""
+        self._server.close()
+        self._task_runner.stop()
+
+
+    def run(self):
+        """Runs the UPnP Console (blocks until the console exits)."""
+        self._console.run()
+
+
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Entry point: build and run the interactive UPnP server console.
+    UPnPServerConsole().run()
+
+    
+    
+
+
+
+
diff --git a/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpservice.py b/instrumentation/next-share/BaseLib/UPnP/upnpserver/upnpservice.py
new file mode 100644 (file)
index 0000000..d463262
--- /dev/null
@@ -0,0 +1,575 @@
+# Written by Ingar Arntzen, Norut
+# see LICENSE.txt for license information
+
+"""
+This module implements a UPnP Service. 
+
+This involves a base class intended for development of new services.
+The baseclass hides complexity related to producing UPnP Service
+description in both XML and HTML format. It also hides complexity 
+related to the placement of a service within a device hierarchy.
+"""
+import types
+import uuid
+import exceptions
+import BaseLib.UPnP.common.upnpmarshal as upnpmarshal
+
+class ActionError (exceptions.Exception): 
+    """Error associated with invoking actions on a UPnP Server. """
+    # Raised by _Action.execute and UPnPService.invoke_action below.
+    pass
+
+##############################################
+# XML FMT
+##############################################
+
+_SERVICE_DESCRIPTION_FMT = """<?xml version="1.0"?>
+<scpd xmlns="urn:schemas-upnp-org:service-1-0">
+<specVersion>
+<major>1</major>
+<minor>0</minor>
+</specVersion>
+<actionList>
+%s</actionList>
+<serviceStateTable>
+%s</serviceStateTable>
+</scpd>"""
+
+_ACTION_FMT = """<action>
+<name>%s</name>
+<argumentList>
+%s
+</argumentList>
+</action>
+"""
+
+_ARGUMENT_FMT = """<argument>
+<name>%s</name>
+<relatedStateVariable>%s</relatedStateVariable>
+<direction>%s</direction>
+</argument>"""
+
+_EVENTED_VARIABLE_FMT = """<stateVariable sendEvents="yes">
+<name>%s</name>
+<dataType>%s</dataType>
+<defaultValue>%s</defaultValue>
+</stateVariable>
+"""
+
+_ARG_VARIABLE_FMT = """<stateVariable sendEvents="no">
+<name>%s</name>
+<dataType>%s</dataType>
+</stateVariable>
+"""
+
+def _service_description_toxml(service):  
+    """This function produces the UPnP XML service description.
+
+    Renders the service's evented variables and actions into the SCPD
+    document structure (actionList + serviceStateTable).
+    """
+
+    svs_str = ""
+    # Evented Variables
+    for evar in service.get_evented_variables():
+        data_type = upnpmarshal.dumps_data_type(evar.the_type)
+        default_value = upnpmarshal.dumps(evar.default_value)
+        args = (evar.the_name, data_type, default_value)
+        svs_str += _EVENTED_VARIABLE_FMT % args
+
+    actions_str = ""
+    arg_counter = 0
+
+    # One state variable per type (event variables of arguments)
+    # NOTE(review): keyed by python type, so if two evented variables
+    # share a type, every argument of that type is related to the
+    # first variable encountered -- confirm this is intended.
+    unique_variables = {} # type : variable name
+    for evar in service.get_evented_variables():
+        if not unique_variables.has_key(evar.the_type):
+            unique_variables[evar.the_type] = evar.the_name
+        
+    # Arguments
+    for action in service.get_actions():
+        args_str = ""
+        for arg in action.in_arg_list + action.out_arg_list:            
+
+            # Check if argument can be related to event variable
+            if unique_variables.has_key(arg.the_type):
+                related_variable_name = unique_variables[arg.the_type]
+            else:
+                arg_counter += 1
+                # Synthesise an A_ARG_TYPE_* state variable, as is
+                # conventional for argument-only types.
+                related_variable_name = "A_ARG_TYPE_%d" % arg_counter
+                unique_variables[arg.the_type] = related_variable_name
+                # New State Variable
+                data_type = upnpmarshal.dumps_data_type(arg.the_type)
+                svs_str += _ARG_VARIABLE_FMT % (related_variable_name, 
+                                                data_type)
+
+            # New Argument 
+            direction = 'in' if isinstance(arg, _InArg) else 'out'
+            args_str += _ARGUMENT_FMT % (arg.the_name, 
+                                         related_variable_name, direction)
+        # Action
+        actions_str += _ACTION_FMT % (action.name, args_str)
+
+    return _SERVICE_DESCRIPTION_FMT % (actions_str, svs_str)
+
+    
+
+##############################################
+# UPNP SERVICE
+##############################################
+
+class UPnPService:
+
+    """
+    This implements a base class for all UPnP Services.
+
+    New services should extend this class.
+    The base class hides complexity related to production
+    of XML service descriptions as well as HTTP descriptions.
+    The base class also hides complexity related to placement
+    in the UPnP device hierarchy.
+    """
+
+    def __init__(self, service_id, service_type, service_version=1):
+        self.service_manager = None
+
+        self._actions = {} # actionName : Action
+        self._events = {} # eventName : Event
+        self._subs = {} # callbackURL : Subscriptions
+        
+        # Initialise
+        self.service_type = service_type
+        self.service_version = service_version
+        self.service_id =  service_id
+
+        self.base_url = ""
+        self.description_path = ""
+        self.control_path = ""
+        self.event_path = ""
+        self._logger = None
+
+    def set_service_manager(self, service_manager):
+        """Initialise UPnP service with reference to service manager."""
+        self.service_manager = service_manager
+        self.base_url = self.service_manager.get_base_url()
+        self.description_path = "services/%s/description.xml" % self.service_id
+        self.control_path = "services/%s/control" % self.service_id
+        self.event_path = "services/%s/events" % self.service_id
+        # Logging
+        self._logger = self.service_manager.get_logger()
+        
+    def is_valid(self):
+        """Check if service is valid."""
+        return (self.service_type != None and self.service_id != None
+                and self.base_url != None and self.service_manager != None)
+
+    def get_short_service_id(self):
+        """Return short service id."""
+        return self.service_id
+
+    def get_service_id(self):
+        """Return full service id."""
+        return "urn:upnp-org:serviceId:%s" % self.service_id
+
+    def get_service_type(self):
+        """Return service type."""
+        fmt = "urn:schemas-upnp-org:service:%s:%s"
+        return  fmt % (self.service_type, self.service_version)
+
+    def get_xml_description(self):
+        """Returns xml description of service."""
+        return _service_description_toxml(self)
+
+    def close(self):
+        """Close UPnP service safely."""
+        for sub in self._subs.values():
+            sub.close()
+
+    ##############################################
+    # LOG API
+    ##############################################
+
+    def log(self, msg):
+        """Logger."""
+        if self._logger:
+            self._logger.log("SERVICE", "%s %s" % (self.service_id, msg))
+
+    ##############################################
+    # SUBSCRIBE / NOTIFY API
+    ##############################################
+
+    def notify(self, evented_variables):
+        """Notify all subscribers of updated event variables."""
+        self._remove_expired_subscriptions()
+        # Dispatch Event Messages to all subscribers  
+        # of the given serviceid.
+        # Make sure all stateVariables are evented variables.
+        for sub in self._subs.values():
+            sub.notify(evented_variables)
+
+    def subscribe(self, callback_urls, requested_duration):
+        """Process new subscription request."""
+        # requested duration == 0 => infinite
+        self._remove_expired_subscriptions()
+        # For the moment, just accept a single callbackUrl
+        # Subscriber defined by callbackUrl
+        callback_url = callback_urls[0]        
+        if self._subs.has_key(callback_url):
+            # Subscriber already exists
+            return (None, None)
+        else:
+            # Add new Subscriber
+            sub = _Subscription(self, callback_url, requested_duration)
+            self._subs[callback_url] = sub
+            # Dispatch Initial Event Message
+            sub.initial_notify()
+            return (sub.sid, sub.duration)
+    
+    def renew(self, sid_str, requested_duration):
+        """Request to renew an existing subscription."""
+        # requested duration == 0 => infinite
+        for sub in self._subs.values():
+            if str(sub.sid) == sid_str:
+                return sub.renew(requested_duration)
+        else: return None
+        
+    def unsubscribe(self, sid_str):
+        """Request to unsubscribe an existing subscription."""
+        sub = None
+        for sub in self._subs.values():
+            if str(sub.sid) == sid_str: 
+                break 
+        if sub:
+            sub.cancel()
+            del self._subs[sub.callback_url]
+            return True
+        else: 
+            return False
+
+    def _remove_expired_subscriptions(self):
+        """Scans subscriptions and removes invalidated."""
+        for url, sub in self._subs.items()[:]:
+            if sub.is_expired: 
+                del self._subs[url]
+
+
+    ##############################################
+    # ACTION API
+    ##############################################
+    
+    def define_action(self, method, in_args=None, out_args=None, 
+                      name=None):
+        """Define an action that the service implements. 
+        Used by subclass."""
+        if not in_args:
+            in_args = []
+        if not out_args:
+            out_args = []
+        if not name:
+            action_name = method.__name__
+        else:
+            action_name = name
+        # In/Out Args must be tuples of (name, type<?>)        
+        in_args = [_InArg(t[0], t[1]) for t in in_args]
+        out_args = [_OutArg(t[0], t[1]) for t in out_args]
+        action = _Action(action_name, method, in_args, out_args)
+        self._actions[action_name] = action
+
+    def invoke_action(self, action_name, in_args):
+        """Invoke and action that the service implements.
+        Used by httpserver as part of UPnP control interface."""
+        # in_args is assumed to be tuple of (name, data) all unicode string.
+        try:
+            if not self._actions.has_key(action_name): 
+                raise ActionError, "Action Not Supported"
+            else:
+                action = self._actions[action_name]            
+                return action.execute(in_args)
+        except ActionError, why:
+            print why
+
+    def get_actions(self):
+        """Returns all actions that the service implements."""
+        return self._actions.values()
+
+
+    ##############################################
+    # EVENTED VARIABLE API
+    ##############################################
+
+    def define_evented_variable(self, event_name, the_type, default_value):
+        """Define an evented variable for the service. Used by subclass."""
+        evar = _EventedVariable(self, event_name, the_type, default_value)
+        self._events[event_name] = evar
+        return evar
+
+    def get_evented_variable(self, event_name):
+        """Return evented variable given name."""
+        return self._events.get(event_name, None)
+
+    def get_evented_variables(self):
+        """Return all evented variables defined by the service."""
+        return self._events.values()
+
+    def set_evented_variables(self, list_):
+        """
+        Update a list of state variables at once.
+        Input will be a list of tuples [(eventedVariable, newValue)]
+        The method avoids sending one notification to every subscriber,
+        for each state variable. Instead, a single subscriber receives
+        one eventMessage containing all the updated state Variables 
+        in this list.
+        """
+        # Update Values
+        changed_variables = []
+        for evar, new_value in list_:
+            changed = evar.set(new_value, notify_ok=False)
+            if changed: 
+                changed_variables.append(evar)
+        # notify all in one batch
+        self.notify(changed_variables)
+
+
+##############################################
+# EVENTED VARIABLE
+##############################################
+
+class _EventedVariable:
+
+    """This class defines an evented variable. The class hides
+    complexity related to event notification."""
+
+    def __init__(self, service, the_name, the_type, default_value):
+        self._service = service
+        self.the_name = the_name
+        if type(the_type) == types.TypeType: 
+            self.the_type = the_type
+        else: 
+            msg = "Argument 'the_type' is not actually a python type."
+            raise TypeError,  msg
+        self._value = default_value
+        self.default_value = default_value
+
+    def set(self, new_value, notify_ok=True):
+        """Set a new value for the evented variable. If the value
+        is different from the old value, notifications will be generated."""
+        if type(new_value) != self.the_type:
+            msg = "Argument 'the_type' is not actually a python type."
+            raise TypeError, msg
+        if new_value != self._value:
+            # Update Value
+            self._value = new_value
+            # Notify
+            if notify_ok:
+                self._service.notify([self])
+            return True
+        else : return False
+
+    def get(self):
+        """Get the value of an evented variable."""
+        return self._value
+
+
+##############################################
+# ARGUMENT
+##############################################
+
+class _Argument :
+
+    """The class defines an argument by holding a type and
+    and argument name."""
+    def __init__(self, the_name, the_type):
+        self.the_name = the_name   # argument name (string)
+        self.the_type = the_type   # python type object
+        
+class _InArg(_Argument): 
+    """The class defines an input argument by holding a type and
+    and argument name. Direction marker only; adds no behaviour."""
+    pass
+
+class _OutArg(_Argument): 
+    """The class defines an output argument (result value) by 
+    holding a type and and argument name. Direction marker only."""
+    pass
+
+##############################################
+# ACTION
+##############################################
+
+class _Action:
+
+    """This class represents an action implemented by the 
+    service."""
+    
+    def __init__(self, name, method, in_arg_list, out_arg_list):
+        self.name = name
+        self.method = method
+        self.in_arg_list = in_arg_list 
+        self.out_arg_list = out_arg_list 
+
+    def execute(self, in_args):
+        """Execute the action."""
+        # in_args is assumed to be tuple of (name, data) all unicode string.
+        # the tuple is supposed to be ordered according to in_arg_list
+        if len(in_args) != len(self.in_arg_list): 
+            raise ActionError, "Wrong number of input arguments"
+        typed_args = []
+        for i in range(len(in_args)):
+            name, data = in_args[i]
+            in_arg = self.in_arg_list[i]
+            if name != in_arg.the_name: 
+                raise ActionError, "Wrong name/order for input argument"
+            try:
+                value = upnpmarshal.loads(in_arg.the_type, data)
+            except upnpmarshal.MarshalError, why:
+                raise ActionError, why
+            typed_args.append(value)
+
+        # Execute
+        try:
+            result = self.method(*typed_args)
+        except TypeError, why:
+            raise ActionError, "Method Execution Failed (%s)" % why
+        
+        # Result is eiter a single value (incl. None) or a tuple of values.
+        # Make it into a list in both cases.
+        if result == None:
+            result = []
+        elif result == types.TupleType:
+            result = list(result)
+        else:
+            result = [result]
+
+        # Check that result holds the correct number of values
+        if len(result) != len(self.out_arg_list): 
+            raise ActionError, "Wrong number of Results"
+        # Check that each value has the correct type
+        # Also convert python type objects to string representations. 
+        # Construct out_args list of tuples [(name, data), ...]
+        out_args = []
+        for i in range(len(result)):
+            out_arg = self.out_arg_list[i]
+            value = result[i]
+            if not isinstance(value, out_arg.the_type):
+                raise ActionError, "Result is wrong type."
+            else:
+                try:
+                    data = upnpmarshal.dumps(value)
+                except upnpmarshal.MarshalError, why:
+                    raise ActionError, why
+                out_args.append((out_arg.the_name, data))
+        return out_args
+
+
+##############################################
+# SUBSCRIPTION
+##############################################
+
+class NotifyError (exceptions.Exception): 
+    """Error associated with event notification."""
+    # Raised by _Subscription.notify / initial_notify on marshal errors.
+    pass
+
+class _Subscription:
+    
+    """This class represents a subscription made to the service,
+    for notification whenever one of its evented variables is updated."""
+    
+    def __init__(self, service, callback_url, requested_duration):
+        # requested_duration == 0 implies INFINITE
+        # requested_duration > 0 implies FINITE
+        # requested_duration < 0 not legal
+        self.service = service
+        self.sid = uuid.uuid1()  # subscription id (SID)
+        self.event_key = 0       # SEQ counter, incremented per notify
+        self.callback_url = callback_url
+        # NOTE(review): the requested duration is ignored here and the
+        # grant is fixed at 1800s; no code in this class ever sets
+        # is_expired by timeout, only cancel() does -- confirm expiry
+        # is enforced elsewhere.
+        self.duration = 1800 # ignore requested_duration
+        self.is_expired = False
+
+    def notify(self, evented_variables):
+        """Notify this subscriber that given evented variables 
+        have been updated. Returns True if a message was dispatched."""
+        if self.is_expired : 
+            return False # should not be neccesary
+        else:
+            self.event_key += 1
+            # Construct list of tuples [(name, value), ...]
+            variables = []
+            for evar in evented_variables:
+                try:
+                    data = upnpmarshal.dumps(evar.get())
+                except upnpmarshal.MarshalError, why:
+                    raise NotifyError, why
+                variables.append((evar.the_name, data))
+
+            # Dispatch Notification
+            edp = self.service.service_manager.get_event_dispatcher()
+            edp.dispatch(self.sid, self.event_key, self.callback_url, variables)
+            return True
+
+    def initial_notify(self):
+        """Notify this subscriber of all evented state 
+        variables and their values (initial event message, SEQ == 0)."""
+        if self.is_expired: 
+            return False
+        # Event Key must be 0
+        if self.event_key != 0: 
+            return False
+        # All Evented Variables
+        evented_variables = self.service.get_evented_variables()
+        variables = []
+        for evar in evented_variables:
+            try:
+                data = upnpmarshal.dumps(evar.get())
+            except upnpmarshal.MarshalError, why:
+                raise NotifyError, why
+            variables.append((evar.the_name, data))
+        
+        # Dispatch Notification
+        edp = self.service.service_manager.get_event_dispatcher()
+        edp.dispatch(self.sid, 0, self.callback_url, variables)
+        return True
+
+    def renew(self, requested_duration):
+        """Renew subscription for this subscriber."""
+        # NOTE(review): here the requested duration *is* honoured,
+        # unlike in __init__ -- confirm this asymmetry is intended.
+        self.duration = requested_duration
+        self.is_expired = False
+        return self.duration
+    
+    def cancel(self):
+        """Cancel subscription for this subscriber."""
+        self.is_expired = True
+        return True
+
+    def close(self):
+        """Close this subscription safely."""
+        pass
+
+##############################################
+# MAIN
+##############################################
+
+if __name__ == '__main__':
+
+    # Self-test: render the XML description of a SwitchPower service
+    # wired to mock infrastructure.
+    class MockEventDispatcher:
+        """Mock Event Dispatcher."""
+        def __init__(self):
+            pass
+        def dispatch(self, sid, event_key, callback_url, variables):
+            """Mock method."""
+            print "Notify", sid, event_key, callback_url, variables
+
+    class MockServiceManager:
+        """Mock Service Manager."""
+        def __init__(self):
+            self._ed = MockEventDispatcher()
+        def get_event_dispatcher(self):
+            """Mock method."""
+            return self._ed
+        def get_base_url(self):
+            """Mock method."""
+            return "http://myhost:44444"
+        def get_logger(self):
+            """Mock method."""
+            return None
+
+    SM = MockServiceManager()
+    from BaseLib.UPnP.services import SwitchPower
+    SERVICE = SwitchPower('SwitchPower')
+    SERVICE.set_service_manager(SM)
+    print SERVICE.get_xml_description()
+    
diff --git a/instrumentation/next-share/BaseLib/Utilities/Instance2Instance.py b/instrumentation/next-share/BaseLib/Utilities/Instance2Instance.py
new file mode 100644 (file)
index 0000000..0c9be18
--- /dev/null
@@ -0,0 +1,231 @@
+# Written by Arno Bakker, Diego Rabaioli
+# see LICENSE.txt for license information
+""" Communication layer between other instance or Web plugin e.g. for starting Downloads. """
+
+# Protocol V1: Tribler 4.5.0:
+# - [4 byte length of cmd][cmd]
+# Protocol V2: SwarmPlugin
+# - [cmd]\r\n
+#
+#
+import sys
+import socket
+from traceback import print_exc
+from threading import Thread, Event
+from BaseLib.Core.BitTornado.RawServer import RawServer
+
+DEBUG = False
+
+class Instance2InstanceServer(Thread):
+    """ Daemon thread accepting loopback-only TCP connections on i2iport;
+    all socket events are forwarded to the supplied connection handler. """
+    
+    def __init__(self,i2iport,connhandler,timeout=300.0):
+        Thread.__init__(self)
+        self.setDaemon(True)
+        self.setName('Instance2Instance'+self.getName())
+        self.i2iport = i2iport
+        self.connhandler = connhandler
+        
+
+        self.i2idoneflag = Event()
+        
+        self.rawserver = RawServer(self.i2idoneflag,
+                                   timeout/5.0, 
+                                   timeout,
+                                   ipv6_enable = False,
+                                   failfunc = self.rawserver_fatalerrorfunc,
+                                   errorfunc = self.rawserver_nonfatalerrorfunc)
+        self.rawserver.add_task(self.rawserver_keepalive,1)
+        # Only accept local connections
+        self.rawserver.bind(self.i2iport,bind=['127.0.0.1'],reuse=True) 
+        
+    def rawserver_keepalive(self):
+        """ Hack to prevent rawserver sleeping in select() for a long time, not
+        processing any tasks on its queue at startup time 
+        
+        Called by Instance2Instance thread """
+        # Reschedules itself every second, forever.
+        self.rawserver.add_task(self.rawserver_keepalive,1)
+        
+        
+    def shutdown(self):
+        """ Ask the handler to shut down and signal the rawserver loop to stop. """
+        self.connhandler.shutdown()
+        self.i2idoneflag.set()
+
+    #
+    # Following methods are called by Instance2Instance thread
+    #
+    def rawserver_fatalerrorfunc(self,e):
+        """ Called by network thread """
+        if DEBUG:
+            print >>sys.stderr,"i2is: RawServer fatal error func called",e
+        print_exc()
+
+    def rawserver_nonfatalerrorfunc(self,e):
+        """ Called by network thread """
+        if DEBUG:
+            print >>sys.stderr,"i2is: RawServer non fatal error func called",e
+            print_exc()
+        # Could log this somewhere, or phase it out
+
+    def run(self):
+        """ Thread main: serve until i2idoneflag is set, then shut the server down. """
+        try:
+            try:
+                if DEBUG:
+                    print >>sys.stderr,"i2is: Ready to receive remote commands on",self.i2iport
+                self.rawserver.listen_forever(self)
+            except:
+                print_exc()    
+        finally:
+            self.rawserver.shutdown()
+
+    def external_connection_made(self,s):
+        # Delegate to the handler; close the socket if the handler fails on it.
+        try:
+            self.connhandler.external_connection_made(s)
+        except:
+            print_exc()
+            s.close()
+
+    def connection_flushed(self,s):
+        self.connhandler.connection_flushed(s)
+    
+    def connection_lost(self,s):
+        if DEBUG:
+            print >>sys.stderr,"i2is: connection_lost ------------------------------------------------"
+        self.connhandler.connection_lost(s)
+        
+    def data_came_in(self, s, data):
+        # Delegate to the handler; close the socket if the handler raises.
+        try:
+            self.connhandler.data_came_in(s,data)
+        except:
+            print_exc()
+            s.close()
+
+    def add_task(self,func,t):
+        self.rawserver.add_task(func,t)
+
+
+
+class InstanceConnectionHandler:
+    def __init__(self,readlinecallback=None):
+        self.readlinecallback = readlinecallback
+        self.singsock2ic = {} # Maps Tribler/Core/BitTornado/SocketHandler.py:SingleSocket to InstanceConnection
+    
+    def set_readlinecallback(self,readlinecallback):
+        self.readlinecallback = readlinecallback
+    
+    def external_connection_made(self,s):
+        # Extra check in case bind() no work
+        if DEBUG:
+            print >>sys.stderr,"i2is: ich: ext_conn_made"
+        peername = s.get_ip()
+        if peername != "127.0.0.1":
+            print >>sys.stderr,"i2is: ich: ext_conn_made: Refusing non-local connection from",peername
+            s.close()
+
+        ic = InstanceConnection(s,self,self.readlinecallback)
+        self.singsock2ic[s] = ic
+
+    def connection_flushed(self,s):
+        pass
+    
+    def connection_lost(self,s):
+        """ Called when peer closes connection and when we close the connection """
+        if DEBUG:
+            print >>sys.stderr,"i2is: ich: connection_lost ------------------------------------------------"
+        
+        # Extra check in case bind() no work
+        peername = s.get_ip()
+        if peername != "127.0.0.1":
+            print >>sys.stderr,"i2is: ich: connection_lost: Refusing non-local connection from",peername
+            return
+
+        del self.singsock2ic[s]
+        
+    def data_came_in(self,s, data):
+        if DEBUG:
+            print >>sys.stderr,"i2is: ich: data_came_in"
+
+        ic = self.singsock2ic[s]
+        try:
+            ic.data_came_in(data)
+        except:
+            print_exc()
+
+    def shutdown(self):
+        for ic in self.singsock2ic.values():
+            ic.shutdown()
+
+
+
+class InstanceConnection:
+    def __init__( self, singsock, connhandler, readlinecallback):
+        self.singsock = singsock
+        self.connhandler = connhandler
+        self.readlinecallback = readlinecallback
+        self.rflag = False
+        self.remain = ''
+        self.proto = 0
+
+    
+    def data_came_in(self,data):
+        """ Read \r\n ended lines from data and call readlinecallback(self,line) """
+        
+        #if DEBUG:
+        print >>sys.stderr,"i2is: ic: data_came_in",`data`,len(data),"proto",self.proto
+        
+        if self.proto == 0:
+            if data[0] == '\x00':
+                # Backwards compatibility: first 4 bytes are length, no \r\n
+                self.proto = 1
+            else:
+                self.proto = 2
+            
+        if self.proto == 1:
+            # hack
+            self.remain += data
+            idx = self.remain.find('START') 
+            if  idx == -1:
+                return
+            else:
+                data = self.remain[idx:]+'\r\n'
+                self.remain = ''
+        
+        for i in range(0,len(data)):
+            if not self.rflag:
+                if data[i]=='\r':
+                    self.rflag = True
+            else:
+                if data[i] == '\n':
+                    cmd = self.remain+data[0:i+1] 
+                    self.remain = data[i+1:]
+                    self.readlinecallback(self,cmd[:-2])# strip off \r\n
+                    if self.remain.endswith('\r\n'):
+                        cmd = self.remain
+                        self.remain = ''
+                        self.readlinecallback(self,cmd[:-2])# strip off \r\n
+                    return
+                    
+                rflag = False
+                
+        self.remain = self.remain + data
+    
+    def write(self,data):
+        if self.singsock is not None:
+            self.singsock.write(data)            
+    
+    def close(self):
+        if self.singsock is not None:
+            self.singsock.close()
+            self.connhandler.connection_lost(self.singsock)
+            self.singsock = None
+        
+
+class Instance2InstanceClient:
+    """ Fire-and-forget client: sends a single 'cmd param' line (protocol
+    V2, \r\n terminated) to the local instance server and disconnects. """
+    
+    def __init__(self,port,cmd,param):
+        # Loopback only; the server refuses non-local peers anyway.
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect(('127.0.0.1',port))
+        msg = cmd+' '+param+'\r\n'
+        s.send(msg)
+        s.close()
+        
+        
diff --git a/instrumentation/next-share/BaseLib/Utilities/LinuxSingleInstanceChecker.py b/instrumentation/next-share/BaseLib/Utilities/LinuxSingleInstanceChecker.py
new file mode 100644 (file)
index 0000000..596ff9a
--- /dev/null
@@ -0,0 +1,24 @@
+# Written by Jelle Roozenburg, Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import commands
+   
+class LinuxSingleInstanceChecker:
+    """ Looks for a process with argument basename.py """
+    
+    def __init__(self,basename):
+        self.basename = basename
+
+    def IsAnotherRunning(self):
+        """ Uses pgrep to find other <self.basename>.py processes """
+        # If no pgrep available, it will always start the app
+        cmd = 'pgrep -fl "%s\.py" | grep -v pgrep' % (self.basename)
+        progressInfo = commands.getoutput(cmd)
+        
+        print >>sys.stderr,"LinuxSingleInstanceChecker returned",progressInfo
+        
+        # pgrep -fl prints one matching process per line; more than one
+        # line means a process besides the current one matched. An empty
+        # output still splits into a single element, so "no matches" is
+        # not miscounted as another instance.
+        numProcesses = len(progressInfo.split('\n'))
+        #if DEBUG:
+        #    print 'main: ProgressInfo: %s, num: %d' % (progressInfo, numProcesses)
+        return numProcesses > 1
diff --git a/instrumentation/next-share/BaseLib/Utilities/TimedTaskQueue.py b/instrumentation/next-share/BaseLib/Utilities/TimedTaskQueue.py
new file mode 100644 (file)
index 0000000..4c06d78
--- /dev/null
@@ -0,0 +1,105 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+#
+# TimedTaskQueue is a server that executes tasks on behalf of e.g. the GUI that
+# are too time consuming to be run by the actual GUI Thread (MainThread). Note 
+# that you still need to delegate the actual updating of the GUI to the 
+# MainThread via the wx.CallAfter mechanism.
+#
+import sys
+
+from threading import Thread,Condition
+from traceback import print_exc,print_stack
+from time import time
+
+DEBUG = False
+
+class TimedTaskQueue:
+    """ Worker thread that executes queued callables at (or after) their
+    scheduled time; tasks scheduled for the same moment run in FIFO order
+    thanks to the monotonically increasing 'count' tiebreaker. """
+    
+    # NOTE(review): looks like a leftover from a singleton pattern; it is
+    # never read or written anywhere in this class.
+    __single = None
+    
+    def __init__(self,nameprefix="TimedTaskQueue",isDaemon=True):
+        # self.cond guards self.queue, a list of (when, count, task, id).
+        self.cond = Condition()
+        self.queue = []
+        self.count = 0.0 # serves to keep task that were scheduled at the same time in FIFO order
+        self.thread = Thread(target = self.run)
+        self.thread.setDaemon(isDaemon)
+        self.thread.setName( nameprefix+self.thread.getName() )
+        self.thread.start()
+        
+    def add_task(self,task,t=0,id=None):
+        """ t parameter is now usable, unlike before. 
+            If id is given, all the existing tasks with the same id will be removed
+            before inserting this task 
+        """
+        
+        # 'id' shadows the builtin of the same name; kept for API compatibility.
+        if task is None:
+            print_stack()
+        
+        self.cond.acquire()
+        when = time()+t
+        if DEBUG:
+            print >>sys.stderr,"ttqueue: ADD EVENT",t,task
+            
+        if id != None:  # remove all redundant tasks
+            self.queue = filter(lambda item:item[2]!=id, self.queue)
+        self.queue.append((when,self.count,task,id))
+        self.count += 1.0
+        self.cond.notify()
+        self.cond.release()
+        
+    def run(self):
+        """ Run by server thread """
+        while True:
+            task = None
+            timeout = None
+            # 'flag' forces one more cond.wait(timeout) pass when the head
+            # task is not yet due, even though the queue is non-empty.
+            flag = False
+            self.cond.acquire()
+            while True:
+                while len(self.queue) == 0 or flag:
+                    flag = False
+                    if timeout is None:
+                        # Wait until something is queued
+                        self.cond.wait()
+                    else:
+                        # Wait till first event is due
+                        self.cond.wait(timeout)
+                # A new event was added or an event is due
+                self.queue.sort()
+                
+                (when,count,task,id) = self.queue[0]
+                if DEBUG:
+                    print >>sys.stderr,"ttqueue: EVENT IN QUEUE",when,task
+                now = time()
+                if now < when:
+                    # Event not due, wait some more
+                    if DEBUG:
+                        print >>sys.stderr,"ttqueue: EVENT NOT TILL",when-now
+                    timeout = when-now
+                    flag = True
+                else:
+                    # Event due, execute
+                    if DEBUG:
+                        print >>sys.stderr,"ttqueue: EVENT DUE"
+                    self.queue.pop(0)
+                    break
+            self.cond.release()
+            
+            # Execute task outside lock
+            try:
+                # 'stop' and 'quit' are only used for unit test
+                if task == 'stop':  
+                    break
+                elif task == 'quit':
+                    # NOTE(review): self.queue is read here without holding
+                    # self.cond — racy if add_task runs concurrently; confirm
+                    # 'quit' is only used single-threaded (unit tests).
+                    if len(self.queue) == 0:
+                        break
+                    else:
+                        (when,count,task,id) = self.queue[-1]
+                        t = when-time()+0.001
+                        self.add_task('quit',t)
+                else:
+                    task()        
+            except:
+                print_exc()
+        
diff --git a/instrumentation/next-share/BaseLib/Utilities/__init__.py b/instrumentation/next-share/BaseLib/Utilities/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Utilities/configreader.py b/instrumentation/next-share/BaseLib/Utilities/configreader.py
new file mode 100644 (file)
index 0000000..e23e048
--- /dev/null
@@ -0,0 +1,464 @@
+# Written by ABC authors
+# see LICENSE.txt for license information
+
+import sys
+import wx
+import os
+
+from cStringIO import StringIO
+
+from ConfigParser import ConfigParser, MissingSectionHeaderError, NoSectionError, ParsingError, DEFAULTSECT
+
+from BaseLib.Core.BitTornado.bencode import bencode, bdecode
+from BaseLib.Core.defaults import dldefaults,DEFAULTPORT
+
+# TODO: remove these defaults, config doesn't work this way with Tribler Core.
+# Legacy BitTornado-style defaults: (key, value, doc) triples built from the
+# Core download defaults; consulted as a last-resort fallback in ConfigReader.Read().
+bt1_defaults = []
+for k,v in dldefaults.iteritems():
+    bt1_defaults.append((k,v,"See triblerAPI"))
+
+DEBUG = False
+
+################################################################
+#
+# Class: ConfigReader
+#
+# Extension of ConfigParser that supports various types of
+# config values.  Values are converted to strings when writing
+# and back into their respective types when reading.
+#
+################################################################
+class ConfigReader(ConfigParser):
+    def __init__(self, filename, section, defaults = None):
+        if defaults is None:
+            defaults = {}
+            
+        ConfigParser.__init__(self)
+        self.defaults = defaults
+
+        self.defaultvalues = { "string"  : "",
+                               "int"     : 0,
+                               "float"   : 0.0,
+                               "boolean" : False,
+                               "color"   : wx.Colour(0, 0, 0),
+                               "bencode-list" : [],
+                               "bencode-string": "",
+                               "bencode-fontinfo": {'name': None,
+                                                    'size': None,
+                                                    'style': None,
+                                                    'weight': None }
+                              }
+
+        self.filename = filename
+        self.section = section
+        
+        # If the directory for this file doesn't exist,
+        # try creating it now
+        dirname = os.path.dirname(self.filename)
+        if not os.access(dirname, os.F_OK):
+            os.makedirs(dirname)
+
+        # Arno: Apparently port 6881 is poisoned because ISPs have blocked it.
+        # A random port does not work well with Buddycast so, pick a random, fixed one
+        if filename.endswith('abc.conf') and not os.access(filename, os.F_OK):
+            defaults['minport'] = str(DEFAULTPORT)
+        
+        try:
+            self.read(self.filename)
+        except MissingSectionHeaderError:
+            # Old file didn't have the section header
+            # (add it in manually)
+            oldfile = open(self.filename, "r")
+            oldconfig = oldfile.readlines()
+            oldfile.close()
+
+            newfile = open(self.filename, "w")
+            newfile.write("[" + self.section + "]\n")
+            newfile.writelines(oldconfig)
+            newfile.close()
+            
+            self.read(self.filename)
+        except ParsingError:
+            # A more severe exception occured
+            # Try to do whatever is possible to repair
+            #
+            # If this fails, then there's trouble
+            self.tryRepair()
+            self.read(self.filename)
+        
+    def testConfig(self, goodconfig, newline, passes = 0):
+        if newline:
+            testconfig = goodconfig + newline + "\r\n"
+            
+            # Write out to a StringIO object
+            newfile = StringIO(testconfig)
+            try:
+                testparser = ConfigParser()
+                testparser.readfp(newfile)
+                
+                # Line looks ok, add it to the config file
+                return testconfig
+            except MissingSectionHeaderError:
+                if passes > 0:
+                    # Something is odd here... just return the version that works
+                    return goodconfig
+                else:
+                    return self.testConfig(goodconfig + "[" + self.section + "]\n", newline, passes = 1)
+            except ParsingError:
+                # Ignore the line, don't add it to the config file
+                return goodconfig
+    
+    # Try to repair a damaged config file
+    # (i.e.: one w/ parsing errors, etc.)
+    def tryRepair(self):
+        oldconfig = ""
+        
+        try:
+            oldfile = open(self.filename, "r")
+            oldconfig = oldfile.readlines()
+            oldfile.close()
+        except:
+            # Can't read the original file at all
+            #
+            # try to write a blank file with just the section header
+            newfile = open(self.filename, "w")
+            newfile.write("[" + self.section + "]\n")
+            newfile.close()
+            return
+            
+        goodconfig = ""
+        
+        for line in oldconfig:
+            # Strip off any leading or trailing spaces
+            newline = line.strip()
+
+            # If the line looks ok, try writing it
+            goodconfig = self.testConfig(goodconfig, newline)
+
+        newfile = open(self.filename, "w")
+        newfile.writelines(goodconfig)
+        newfile.close()
+            
+    def setSection(self, section):
+        self.section = section
+
+    def ValueToString(self, value, typex):
+        if typex == "boolean":
+            if value:
+                text = "1"
+            else:
+                text = "0"
+        elif typex == "color":
+            red = str(value.Red())
+            while len(red) < 3:
+                red = "0" + red
+
+            green = str(value.Green())            
+            while len(green) < 3:
+                green = "0" + green
+                
+            blue = str(value.Blue())            
+            while len(blue) < 3:
+                blue = "0" + blue
+
+            text = str(red) + str(green) + str(blue)
+        elif typex.startswith("bencode"):
+            text = bencode(value)
+        else:
+            if type(value) is unicode:
+                text = value
+            else:
+                text = str(value)
+        
+        return text
+
+    def StringToValue(self, value, type):
+        # Assume that the value is already in the proper form
+        # if it's not a string
+        # (the case for some defaults)
+        if value is not None:
+            if not isinstance(value, unicode) and not isinstance(value, str):
+                return value
+
+        try:
+            if type == "boolean":
+                if value == "1":
+                    value = True
+                else:
+                    value = False
+            elif type == "int":
+                value = int(value)
+            elif type == "float":
+                value = float(value)
+            elif type == "color":
+                red = int(value[0:3])
+                green = int(value[3:6])
+                blue = int(value[6:9])
+                value = wx.Colour(red, green, blue)
+            elif type.startswith("bencode"):
+                value = bdecode(value)
+        except:           
+            value = None
+            
+        if value is None:
+            value = self.defaultvalues[type]
+        
+        return value
+
+    def ReadDefault(self, param, type = "string", section = None):
+        if section is None:
+            section = self.section
+
+        if param is None or param == "":
+            return ""
+
+        param = param.lower()
+        value = self.defaults.get(param, None)
+            
+        value = self.StringToValue(value, type)
+            
+        return value
+        
+    def Read(self, param, type = "string", section = None):
+        if section is None:
+            section = self.section
+            
+        if DEBUG:
+            print >>sys.stderr,"ConfigReader: Read(",param,"type",type,"section",section
+            
+        if param is None or param == "":
+            return ""
+
+#        value = None
+
+        try:
+            value = self.get(section, param)
+            value = value.strip("\"")
+#            value = value.strip("'")
+        except:
+            param = param.lower()
+            value = self.defaults.get(param, None)
+            if DEBUG:
+                sys.stderr.write("ConfigReader: Error while reading parameter: (" + str(param) + ")\n")
+            # Arno, 2007-03-21: The ABCOptions dialog tries to read config values
+            # via this mechanism. However, that doesn't take into account the
+            # values from BitTornado/download_bt1.py defaults. I added that.
+            if value is None:
+                if not DEBUG:
+                    pass
+                    # sys.stderr.write("ConfigReader: Error while reading parameter, no def: (" + str(param) + ")\n")
+                    # print_stack()
+                    
+                for k,v,d in bt1_defaults:
+                    if k == param:
+                        value = v
+                        break
+#            data = StringIO()
+#            print_exc(file = data)
+#            sys.stderr.write(data.getvalue())
+
+        if DEBUG:
+            print >>sys.stderr,"ConfigReader: Read",param,type,section,"got",value
+
+        value = self.StringToValue(value, type)
+           
+        return value
+        
+    def Exists(self, param, section = None):
+        if section is None:
+            section = self.section
+            
+        return self.has_option(section, param)
+        
+    def Items(self, section = None):
+        if section is None:
+            section = self.section
+        
+        try:
+            items = self.items(section)
+            for i in range(len(items)):
+                (key, value) = items[i]
+                value = value.strip("\"")
+#                value = value.strip("'")
+                items[i] = (key, value)
+            return items
+        except:
+            self.add_section(section)
+        return []
+
+    def GetOptions(self, section = None):
+        if section is None:
+            section = self.section
+        try:
+            options = self.options(section)
+        except NoSectionError:
+            options = []
+        return options
+
+    def Write(self, param, value, type = "string", section = None):
+        if section is None:
+            section = self.section
+            
+        if param is None or param == "":            
+            return False
+        
+        param = param.lower()
+
+            
+        if not self.has_section(section):
+            self.add_section(section)
+               
+        text = self.ValueToString(value, type)
+
+        while 1:
+            try:
+                oldtext = self.Read(param)
+                
+                self.set(section, param, text)
+    
+                # Return True if we actually changed something            
+                if oldtext != text:
+                    return True
+                
+                break
+            except NoSectionError:
+                self.add_section(section)
+            except:
+#                sys.stderr.write("Error while writing parameter: (" + str(param) + ") with value: (" + str(text) + ")\n")
+#                data = StringIO()
+#                print_exc(file = data)
+#                sys.stderr.write(data.getvalue())
+                break
+        
+        return False
+    
+    def DeleteEntry(self, param, section = None):
+        if section is None:
+            section = self.section
+               
+        try:
+            return self.remove_option(section, param)
+        except:
+            return False
+        
+    def DeleteGroup(self, section = None):
+        if section is None:
+            section = self.section
+
+        try:
+            return self.remove_section(section)
+        except:
+            return False
+        
+    def Flush(self):        
+        self.write(open(self.filename, "w"))
+
+    def _read(self, fp, fpname):
+        cursect = None                            # None, or a dictionary
+        optname = None
+        lineno = 0
+        e = None                                  # None, or an exception
+        firstline = True            
+        while True:
+            line = fp.readline()
+            if not line:
+                break
+            lineno = lineno + 1
+            if firstline:
+                # Skip BOM
+                if line[:3] == '\xef\xbb\xbf':
+                    line = line[3:]
+                    self.encoding = 'utf_8'
+                else:
+                    self.encoding = sys.getfilesystemencoding()
+                firstline = False
+            # comment or blank line?
+            if line.strip() == '' or line[0] in '#;':
+                continue
+            if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
+                # no leading whitespace
+                continue
+            # continuation line?
+            if line[0].isspace() and cursect is not None and optname:
+                value = line.strip()
+                if value:
+                    cursect[optname] = "%s\n%s" % (cursect[optname], value.decode(self.encoding))
+            # a section header or option header?
+            else:
+                # is it a section header?
+                mo = self.SECTCRE.match(line)
+                if mo:
+                    sectname = mo.group('header')
+                    if sectname in self._sections:
+                        cursect = self._sections[sectname]
+                    elif sectname == DEFAULTSECT:
+                        cursect = self._defaults
+                    else:
+                        cursect = {'__name__': sectname}
+                        self._sections[sectname] = cursect
+                    # So sections can't start with a continuation line
+                    optname = None
+                # no section header in the file?
+                elif cursect is None:
+                    raise MissingSectionHeaderError(fpname, lineno, line)
+                # an option line?
+                else:
+                    mo = self.OPTCRE.match(line)
+                    if mo:
+                        optname, vi, optval = mo.group('option', 'vi', 'value')
+                        if vi in ('=', ':') and ';' in optval:
+                            # ';' is a comment delimiter only if it follows
+                            # a spacing character
+                            pos = optval.find(';')
+                            if pos != -1 and optval[pos-1].isspace():
+                                optval = optval[:pos]
+                        optval = optval.strip()
+                        # allow empty values
+                        if optval == '""':
+                            optval = ''
+                        optname = self.optionxform(optname.rstrip())
+                        try:
+                            _opt = optval.decode(self.encoding)
+                        except UnicodeDecodeError:
+                            self.encoding = sys.getfilesystemencoding()
+                            _opt = optval.decode(self.encoding)
+                        cursect[optname] = _opt
+                    else:
+                        # a non-fatal parsing error occurred.  set up the
+                        # exception but keep going. the exception will be
+                        # raised at the end of the file and will contain a
+                        # list of all bogus lines
+                        if not e:
+                            e = ParsingError(fpname)
+                        e.append(lineno, repr(line))
+        # if any parsing errors occurred, raise an exception
+        if e:
+            raise e
+        
+    def write(self, fp):
+        fp.writelines('\xef\xbb\xbf')
+        if self._defaults:
+            fp.write("[%s]\n" % DEFAULTSECT)
+            for (key, value) in self._defaults.items():
+                if type(value) is not str and type(value) is not unicode:
+                    value = str(value)
+                fp.write((key + " = " + value + "\n").encode('utf_8'))
+            fp.write("\n")
+        for section in self._sections:
+            fp.write("[%s]\n" % section)
+            for (key, value) in self._sections[section].items():
+                if key != "__name__":
+                    if type(value) is not str and type(value) is not unicode:
+                        value = str(value)
+                    try:
+                        fp.write((key + " = " + value + "\n").encode('utf_8'))
+                    # for unicode bencod-list items (already UTF-8 encoded)
+                    except UnicodeDecodeError:
+                        fp.write((key + " = " + value + "\n"))
+            fp.write("\n")
+
+
+
+
+
+
diff --git a/instrumentation/next-share/BaseLib/Video/Buttons.py b/instrumentation/next-share/BaseLib/Video/Buttons.py
new file mode 100644 (file)
index 0000000..9703c1a
--- /dev/null
@@ -0,0 +1,316 @@
+# Written by Jelle Roozenburg, Maarten ten Brinke 
+# see LICENSE.txt for license information
+import wx, os, sys
+from traceback import print_exc
+
+DEBUG = False
+
+class PlayerButton(wx.Panel):
+    """
+    Button that changes the image shown if you move your mouse over it.
+    It redraws the background of the parent Panel, if this is an imagepanel with
+    a variable self.bitmap.
+    """
+
+    def __init__(self, parent, imagedir, imagename):
+        self.imagedir = imagedir
+        self.imagename = imagename
+        self.parent = parent
+        self.init()
+        
+    def init(self):
+        self.initDone = False
+        self.enabled = True
+        self.backgroundColor = wx.WHITE
+        wx.Panel.__init__(self, self.parent) 
+        self.selected = False
+        self.tooltip = None
+        self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction)
+        
+        
+        self.searchBitmaps()
+        self.createBackgroundImage()
+        
+        #<mluc> on mac, the button doesn't get a size
+        #if self.bitmaps[0] and self.GetSize()==(0,0):
+        if self.bitmaps[0]:
+            self.SetSize(self.bitmaps[0].GetSize())
+#        print self.Name
+#        print 'size'
+#        print self.Size
+        
+        
+        self.initDone = True
+        self.Refresh(True)
+        self.Update()
+
+
+    def GetImageName(self):
+        return self.imagename
+        
+        
+    def searchBitmaps(self):
+        self.bitmaps = [None, None ,None]
+        self.parentBitmap = None
+        self.mouseOver = False
+                
+        if not os.path.isdir(self.imagedir):
+            print 'Error: no image directory found in %s' % self.imagedir
+            return
+        
+        # find a file with same name as this panel
+        self.bitmapPath = [os.path.join(self.imagedir, self.imagename+'.png'), 
+                           os.path.join(self.imagedir, self.imagename+'_hover.png'),
+                           os.path.join(self.imagedir, self.imagename+'_dis.png')]
+        
+        i = 0
+        for img in self.bitmapPath:
+            if os.path.isfile(img):
+                self.bitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY)
+                i+=1
+            elif DEBUG:
+                print 'Could not find image: %s' % img
+         
+           
+        
+        
+    def createBackgroundImage(self):
+        if self.bitmaps[0]:
+            wx.EVT_PAINT(self, self.OnPaint)
+            self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
+                
+    
+    def OnErase(self, event):
+        pass
+        #event.Skip()
+        
+    def setSelected(self, sel):
+        self.selected = sel
+        self.Refresh()
+        
+    def isSelected(self):
+        return self.selected
+        
+    def mouseAction(self, event):
+        event.Skip()
+        if event.Entering():
+            #print 'enter' 
+            self.mouseOver = True
+            self.Refresh()
+        elif event.Leaving():
+            self.mouseOver = False
+            #print 'leave'
+            self.Refresh()
+
+
+    def getParentBitmap(self):
+        try:
+            parent = self.GetParent()
+            bitmap = parent.bitmap
+            #print bitmap
+        except:
+            return None
+        
+        if bitmap:
+            location = self.GetPosition()
+            #location[0] -= parent.GetPosition()[0]
+            #location[1] -= parent.GetPosition()[1]
+            #if DEBUG:
+            #    print '(button %s) Mypos: %s, Parentpos: %s' % (self.GetName(), self.GetPosition(), parent.GetPosition())
+            rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]]
+            #if DEBUG:
+            #    print '(button %s) Slicing rect(%d,%d) size(%s) from parent image size(%s)' % (self.GetName(), location[0], location[1], str(self.GetClientSize()), str(bitmap.GetSize()))
+            bitmap = self.getBitmapSlice(bitmap, rect)
+            return bitmap
+        else:
+            return None
+    
+    def joinImage(self, im1,im2,offsetx=0,offsety=0):
+        "Draw im2 on im1"
+        stopx = im2.GetWidth()
+        if stopx > (im1.GetWidth()-offsetx):
+            stopx = im1.GetWidth()-offsetx
+        stopy = im2.GetHeight()
+        if stopy > (im1.GetHeight()-offsety):
+            stopy = im1.GetHeight()-offsety
+        if stopx>0 and stopy>0:
+            for x in range(0,stopx):
+                for y in range(0,stopy):
+                    rgb2 = (im2.GetRed(x,y),im2.GetGreen(x,y),im2.GetBlue(x,y))
+                    if rgb2 !=(255,0,255):
+                        im1.SetRGB(x+offsetx,y+offsety,rgb2[0],rgb2[1],rgb2[2])
+        return im1
+    def getBitmapSlice(self, bitmap, rect):
+        try:
+            #print rect
+            bitmapSize = bitmap.GetSize()
+            rect[0] %= bitmapSize[0]
+            rect[1] %= bitmapSize[1]
+            rects = [rect]
+            if rect[0]+rect[2] > bitmapSize[0]:
+                rect1 = (rect[0], rect[1], bitmapSize[0]-rect[0], rect[3])
+                rect2 = (0, rect[1], rect[0]+rect[2] - bitmapSize[0], rect[3])
+                rects = [rect1, rect2]
+            if rect[1]+ rect[3] > bitmapSize[1]:
+                rects2 = []
+                for r in rects:
+                    r1 = (r[0], r[1], r[2], bitmapSize[1] - r[3])
+                    r2 = (r[0], 0, r[2], r[1]+r[3] - bitmapSize[1])
+                    rects2.append(r1)
+                    rects2.append(r2)
+                rects = rects2
+            images = []
+            if len(rects) > 1:
+                if DEBUG:
+                    print "(button %s) Result: %s" % (self.GetName(), rects)
+                image = wx.EmptyImage(rect[2], rect[3])
+                for r in rects:    
+                    rect = wx.Rect(r[0], r[1], r[2], r[3])
+                    if DEBUG:
+                        print '(button %s) Trying to get rect: %s from bitmap: %s' % (self.GetName(), rect, bitmap.GetSize())
+                    subBitmap = bitmap.GetSubBitmap(rect)
+                    subImage = subBitmap.ConvertToImage()
+                    if len(rects) == 2:
+                        if r == rects[0]:
+                            place = (0,0)
+                        elif r == rects[1]:
+                            place = (rects[0][2], 0)
+                    elif len(rects) == 4:
+                        if r == rects[0]:
+                            place = (0,0)
+                        elif r == rects[1]:
+                            place = (0, rects[0][3])
+                        elif r == rects[2]:
+                            place = (rects[0][2],0)
+                        elif r == rects[3]:
+                            place = (rects[0][2], rects[0][3])
+                    if DEBUG:
+                        print "(button %s) Place subbitmap: %s" % (self.GetName(), str(place))
+                    self.joinImage(image, subImage, place[0], place[1])
+                if DEBUG:
+                    print '(button %s) Result img size: %s' % (self.GetName(), str(image.GetSize()))
+                return image.ConvertToBitmap()
+            else:
+                return bitmap.GetSubBitmap(wx.Rect(rect[0], rect[1], rect[2], rect[3]))
+        except:
+            if DEBUG:
+                print_exc()
+            return None
+                                            
+    def setEnabled(self, e):
+        self.enabled = e
+        if not e:
+            self.SetToolTipString('')
+#        else:
+#            if self.tooltip:
+#                self.SetToolTipString(self.tooltip)
+        self.Refresh()
+        
+    def isEnabled(self):
+        return self.enabled
+    
+    def setBackground(self, wxColor):
+        self.backgroundColor = wxColor
+        self.Refresh()
+        
+    def OnPaint(self, evt):
+        dc = wx.BufferedPaintDC(self)
+        dc.SetBackground(wx.Brush(self.backgroundColor))
+        dc.Clear()
+        
+        if self.parentBitmap:
+            dc.DrawBitmap(self.parentBitmap, 0,0, True)
+        else:
+            self.parentBitmap = self.getParentBitmap()
+            if self.parentBitmap:
+                dc.DrawBitmap(self.parentBitmap, 0,0, True)
+        
+        if not self.enabled:
+            return
+
+
+        if self.selected == 2:
+            dc.DrawBitmap(self.bitmaps[2], 0,0, True)
+            return
+
+
+        if self.bitmaps[0]:
+            dc.DrawBitmap(self.bitmaps[0], 0,0, True)
+        if (self.mouseOver or self.selected) and self.bitmaps[1]:
+            dc.DrawBitmap(self.bitmaps[1], 0,0, True)
+        
+
+class PlayerSwitchButton(PlayerButton):
+        
+    def __init__(self, parent, imagedir, imagename, imagename2):
+        self.imagedir = imagedir
+        self.imagename = imagename
+        self.imagename2 = imagename2
+        self.parent = parent
+        self.init()
+        
+    def searchBitmaps(self):
+        self.toggled = False
+        self.allBitmaps = [None, None, None, None, None, None]
+        self.parentBitmap = None
+        self.mouseOver = False
+                
+                    
+        if not os.path.isdir(self.imagedir):
+            print >>sys.stderr,'PlayerSwitchButton: Error: no image directory found in',self.imagedir
+            return
+        
+        # find a file with same name as this panel
+        self.bitmapPath = [os.path.join(self.imagedir, self.imagename+'.png'), 
+                           os.path.join(self.imagedir, self.imagename+'_hover.png'),
+                           os.path.join(self.imagedir, self.imagename+'_dis.png'),
+                           os.path.join(self.imagedir, self.imagename2+'.png'), 
+                           os.path.join(self.imagedir, self.imagename2+'_hover.png'),
+                           os.path.join(self.imagedir, self.imagename2+'_dis.png')
+                           ]
+        
+        i = 0
+        for img in self.bitmapPath:
+            if os.path.isfile(img):
+                self.allBitmaps[i] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY)
+                i+=1
+            elif DEBUG:
+                print 'Could not find image: %s' % img
+                
+
+        if self.toggled:
+            self.bitmaps = self.allBitmaps[3:]
+        else:
+            self.bitmaps = self.allBitmaps[:3]
+                
+    def setToggled(self, b, tooltip = { "enabled": "", "disabled": ""}):
+        self.toggled = b
+
+        if not self.initDone:
+            return
+
+        if b:
+            self.bitmaps=self.allBitmaps[3:]
+            if self.enabled:
+                self.SetToolTipString(tooltip["enabled"])
+        else:
+            self.bitmaps=self.allBitmaps[:3]
+            if self.enabled:
+                self.SetToolTipString(tooltip["disabled"])
+            
+        #print 'Bitmaps is now: %s' % self.bitmaps
+        #should Refresh?
+        self.Refresh()
+        
+    def isToggled(self):
+        return self.toggled
+
+
+
+##class VolumeButton(PlayerButton):
+
+
+
+
+    
diff --git a/instrumentation/next-share/BaseLib/Video/CachingStream.py b/instrumentation/next-share/BaseLib/Video/CachingStream.py
new file mode 100644 (file)
index 0000000..245140c
--- /dev/null
@@ -0,0 +1,109 @@
+# Written by Arno Bakker\r
+# see LICENSE.txt for license information\r
+#\r
+\r
+\r
+import sys\r
+\r
+DEBUG = False\r
+\r
+class SmartCachingStream:\r
+    """ Class that adds buffering to a seekable stream, such that reads after\r
+    seeks that stay in the bounds of the buffer are handled from the buffer,\r
+    instead of doing seeks and reads on the underlying stream.\r
+    \r
+    Currently specifically tuned to input streams as returned by Core.\r
+    """\r
+    def __init__(self,inputstream,blocksize=1024*1024):\r
+        print >>sys.stderr,"CachingStream: __init__"\r
+        self.instream = inputstream\r
+        self.inblocksize = blocksize \r
+        self.inpos = 0\r
+        \r
+        self.buffer = None\r
+        self.bufstart = None\r
+        self.bufend = None\r
+        self.bufpos = 0\r
+\r
+    def read(self,nwant=None):\r
+        if DEBUG:\r
+            print >>sys.stderr,"read: ",nwant\r
+            print >>sys.stderr,"bufpos",self.bufpos,"inpos",self.inpos,"bufs",self.bufstart,"bufe",self.bufend\r
+        \r
+        if self.buffer is None:\r
+            self.read_new(nwant)\r
+            \r
+            return self.read_buf(nwant)\r
+        else:\r
+            if self.bufstart <= self.bufpos and self.bufpos < self.bufend:\r
+                # Reading from current buffer:\r
+                return self.read_buf(nwant)\r
+            else:\r
+                # Current buffer inadequate\r
+                assert self.bufpos == self.inpos\r
+                self.read_new(nwant)\r
+            \r
+                return self.read_buf(nwant)\r
+        \r
+        \r
+    def seek(self,offset,whence=0):\r
+        if DEBUG:\r
+            print >>sys.stderr,"seek: ",offset\r
+        if self.buffer is not None:\r
+            if self.bufstart <= offset and offset < self.bufend:\r
+                # Seeking within current buffer\r
+                self.bufpos = offset\r
+                return\r
+            else:\r
+                # No, get rid off buffer\r
+                self.buffer = None\r
+                self.bufstart = None\r
+                self.bufend = None\r
+                \r
+        self.instream.seek(offset,whence)\r
+        self.inpos = offset\r
+        self.bufpos = offset\r
+        \r
+        \r
+    def read_new(self,nwant):\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_new: ",nwant\r
+        avail = self.inblocksize\r
+        \r
+        # Core specific: we only return a single piece on each read, so\r
+        # to make the buffer larger we just read 4 times.\r
+        #\r
+        buffer1 = self.instream.read(avail)\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_new: 1got",len(buffer1)\r
+        buffer2 = self.instream.read(avail)\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_new: 2got",len(buffer2)\r
+        buffer3 = self.instream.read(avail)\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_new: 3got",len(buffer3)\r
+        buffer4 = self.instream.read(avail)\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_new: 4got",len(buffer4)\r
+\r
+        self.buffer = buffer1 + buffer2 + buffer3 + buffer4\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_new: got",len(self.buffer)\r
+        self.bufstart =  self.inpos\r
+        self.inpos += len(self.buffer)\r
+        self.bufend = self.inpos\r
+        \r
+\r
+    def read_buf(self,nwant):\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_buf: ",nwant\r
+        ngot = min(nwant,self.bufend-self.bufpos)\r
+        bufoff = self.bufpos-self.bufstart\r
+        ret = self.buffer[bufoff:bufoff+ngot]\r
+        # TODO: opt if buffer == pos+nwant\r
+        self.bufpos += ngot\r
+        if DEBUG:\r
+            print >>sys.stderr,"read_buf: ngot",ngot,"returned",len(ret)\r
+        return ret\r
+            \r
+\r
diff --git a/instrumentation/next-share/BaseLib/Video/Images/4framebackground.png b/instrumentation/next-share/BaseLib/Video/Images/4framebackground.png
new file mode 100644 (file)
index 0000000..50075ad
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/4framebackground.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/4framesliderDot.png b/instrumentation/next-share/BaseLib/Video/Images/4framesliderDot.png
new file mode 100644 (file)
index 0000000..cf77466
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/4framesliderDot.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/4framesliderDot_dis.png b/instrumentation/next-share/BaseLib/Video/Images/4framesliderDot_dis.png
new file mode 100644 (file)
index 0000000..cf77466
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/4framesliderDot_dis.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/4framesliderVolume.png b/instrumentation/next-share/BaseLib/Video/Images/4framesliderVolume.png
new file mode 100644 (file)
index 0000000..3da46e0
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/4framesliderVolume.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/background.png b/instrumentation/next-share/BaseLib/Video/Images/background.png
new file mode 100644 (file)
index 0000000..d83e8d6
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/background.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/bl.png b/instrumentation/next-share/BaseLib/Video/Images/bl.png
new file mode 100644 (file)
index 0000000..41ed260
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/bl.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/br.png b/instrumentation/next-share/BaseLib/Video/Images/br.png
new file mode 100644 (file)
index 0000000..3ddbed6
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/br.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/fullScreen-hover.png b/instrumentation/next-share/BaseLib/Video/Images/fullScreen-hover.png
new file mode 100644 (file)
index 0000000..3487e43
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/fullScreen-hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/fullScreen.png b/instrumentation/next-share/BaseLib/Video/Images/fullScreen.png
new file mode 100644 (file)
index 0000000..4b54eb4
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/fullScreen.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/fullScreen_dis.png b/instrumentation/next-share/BaseLib/Video/Images/fullScreen_dis.png
new file mode 100644 (file)
index 0000000..fbb04d8
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/fullScreen_dis.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/fullScreen_hover.png b/instrumentation/next-share/BaseLib/Video/Images/fullScreen_hover.png
new file mode 100644 (file)
index 0000000..1ba3f1b
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/fullScreen_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/pause.png b/instrumentation/next-share/BaseLib/Video/Images/pause.png
new file mode 100644 (file)
index 0000000..dcf28a7
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/pause.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/pause_dis.png b/instrumentation/next-share/BaseLib/Video/Images/pause_dis.png
new file mode 100644 (file)
index 0000000..09c12a6
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/pause_dis.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/pause_hover.png b/instrumentation/next-share/BaseLib/Video/Images/pause_hover.png
new file mode 100644 (file)
index 0000000..a7b7481
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/pause_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/play.png b/instrumentation/next-share/BaseLib/Video/Images/play.png
new file mode 100644 (file)
index 0000000..e686c08
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/play.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/play_dis.png b/instrumentation/next-share/BaseLib/Video/Images/play_dis.png
new file mode 100644 (file)
index 0000000..953ebea
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/play_dis.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/play_hover.png b/instrumentation/next-share/BaseLib/Video/Images/play_hover.png
new file mode 100644 (file)
index 0000000..e787670
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/play_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/sliderDot.png b/instrumentation/next-share/BaseLib/Video/Images/sliderDot.png
new file mode 100644 (file)
index 0000000..40e0ce4
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/sliderDot.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/sliderDot_dis.png b/instrumentation/next-share/BaseLib/Video/Images/sliderDot_dis.png
new file mode 100644 (file)
index 0000000..639df01
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/sliderDot_dis.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/sliderDot_hover.png b/instrumentation/next-share/BaseLib/Video/Images/sliderDot_hover.png
new file mode 100644 (file)
index 0000000..755a580
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/sliderDot_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/sliderVolume.png b/instrumentation/next-share/BaseLib/Video/Images/sliderVolume.png
new file mode 100644 (file)
index 0000000..fdc39f5
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/sliderVolume.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/tl.png b/instrumentation/next-share/BaseLib/Video/Images/tl.png
new file mode 100644 (file)
index 0000000..1ddc944
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/tl.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/tr.png b/instrumentation/next-share/BaseLib/Video/Images/tr.png
new file mode 100644 (file)
index 0000000..ca2ea83
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/tr.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol0.png b/instrumentation/next-share/BaseLib/Video/Images/vol0.png
new file mode 100644 (file)
index 0000000..d62e026
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol0.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol0Enabled.png b/instrumentation/next-share/BaseLib/Video/Images/vol0Enabled.png
new file mode 100644 (file)
index 0000000..d62e026
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol0Enabled.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol0Enabled_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol0Enabled_clicked.png
new file mode 100644 (file)
index 0000000..d62e026
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol0Enabled_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol0_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol0_clicked.png
new file mode 100644 (file)
index 0000000..d62e026
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol0_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol1.png b/instrumentation/next-share/BaseLib/Video/Images/vol1.png
new file mode 100644 (file)
index 0000000..bca8ab1
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol1.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol1Enabled.png b/instrumentation/next-share/BaseLib/Video/Images/vol1Enabled.png
new file mode 100644 (file)
index 0000000..8f8ffcd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol1Enabled.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol1Enabled_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol1Enabled_clicked.png
new file mode 100644 (file)
index 0000000..bca8ab1
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol1Enabled_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol1_hover.png b/instrumentation/next-share/BaseLib/Video/Images/vol1_hover.png
new file mode 100644 (file)
index 0000000..8f8ffcd
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol1_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol2.png b/instrumentation/next-share/BaseLib/Video/Images/vol2.png
new file mode 100644 (file)
index 0000000..f4a4013
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol2.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol2Enabled.png b/instrumentation/next-share/BaseLib/Video/Images/vol2Enabled.png
new file mode 100644 (file)
index 0000000..904e73a
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol2Enabled.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol2Enabled_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol2Enabled_clicked.png
new file mode 100644 (file)
index 0000000..f4a4013
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol2Enabled_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol2_hover.png b/instrumentation/next-share/BaseLib/Video/Images/vol2_hover.png
new file mode 100644 (file)
index 0000000..904e73a
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol2_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol3.png b/instrumentation/next-share/BaseLib/Video/Images/vol3.png
new file mode 100644 (file)
index 0000000..fbde14b
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol3.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol3Enabled.png b/instrumentation/next-share/BaseLib/Video/Images/vol3Enabled.png
new file mode 100644 (file)
index 0000000..b090740
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol3Enabled.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol3Enabled_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol3Enabled_clicked.png
new file mode 100644 (file)
index 0000000..fbde14b
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol3Enabled_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol3_hover.png b/instrumentation/next-share/BaseLib/Video/Images/vol3_hover.png
new file mode 100644 (file)
index 0000000..b090740
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol3_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol4.png b/instrumentation/next-share/BaseLib/Video/Images/vol4.png
new file mode 100644 (file)
index 0000000..c96223c
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol4.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol4Enabled.png b/instrumentation/next-share/BaseLib/Video/Images/vol4Enabled.png
new file mode 100644 (file)
index 0000000..80d99f8
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol4Enabled.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol4Enabled_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol4Enabled_clicked.png
new file mode 100644 (file)
index 0000000..c96223c
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol4Enabled_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol4_hover.png b/instrumentation/next-share/BaseLib/Video/Images/vol4_hover.png
new file mode 100644 (file)
index 0000000..80d99f8
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol4_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol5.png b/instrumentation/next-share/BaseLib/Video/Images/vol5.png
new file mode 100644 (file)
index 0000000..8d16fab
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol5.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol5Enabled_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol5Enabled_clicked.png
new file mode 100644 (file)
index 0000000..c96223c
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol5Enabled_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol5_hover.png b/instrumentation/next-share/BaseLib/Video/Images/vol5_hover.png
new file mode 100644 (file)
index 0000000..2196abc
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol5_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol6.png b/instrumentation/next-share/BaseLib/Video/Images/vol6.png
new file mode 100644 (file)
index 0000000..3356eb8
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol6.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol6Enabled_clicked.png b/instrumentation/next-share/BaseLib/Video/Images/vol6Enabled_clicked.png
new file mode 100644 (file)
index 0000000..3356eb8
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol6Enabled_clicked.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Images/vol6_hover.png b/instrumentation/next-share/BaseLib/Video/Images/vol6_hover.png
new file mode 100644 (file)
index 0000000..baab583
Binary files /dev/null and b/instrumentation/next-share/BaseLib/Video/Images/vol6_hover.png differ
diff --git a/instrumentation/next-share/BaseLib/Video/Ogg.py b/instrumentation/next-share/BaseLib/Video/Ogg.py
new file mode 100644 (file)
index 0000000..3a0b936
--- /dev/null
@@ -0,0 +1,303 @@
+# Written by Arno Bakker \r
+# see LICENSE.txt for license information\r
+\r
+""" Utility functions for (live) streams in Ogg container format.\r
+    \r
+    See: http://www.ietf.org/rfc/rfc3533.txt\r
+         http://www.theora.org/doc/Theora.pdf  (Aug 5, 2009)\r
+         http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html (Feb 3, 2010)\r
+         http://flac.sourceforge.net/ogg_mapping.html\r
+""" \r
+\r
+import sys\r
+import os\r
+from cStringIO import StringIO\r
+\r
+DEBUG = False\r
+\r
+\r
+def is_ogg(name):\r
+    return name.endswith('.ogg') or name.endswith('.ogv') or name.endswith('.ogm') or name.endswith('.oga') or name.endswith('.ogx')\r
+\r
+\r
+def ogg_grab_page(input,checkcrc=False):\r
+    """ Read a Ogg Version 0 page.\r
+    @param input  An input stream object.\r
+    @param checkcrc Whether to check the page's CRC or not.\r
+    @return (isheader,header,body) tuples.\r
+    isheader is True if the page is a BOS or comment or setup header page.\r
+    """\r
+    # TODO: make resistant against streams that return less than req. bytes\r
+    # TODO: add identifiers for other codecs to recognize their headers\r
+    capture_pattern = input.read(4)\r
+    stream_structure_version = input.read(1)\r
+    header_type_flag = input.read(1)\r
+    granule_position = input.read(8)\r
+    bitstream_serial_number = input.read(4)\r
+    page_sequence_number = input.read(4)\r
+    CRC_checksum = input.read(4)\r
+    number_page_segments = input.read(1)\r
+    segment_table = input.read(ord(number_page_segments))\r
+    \r
+    header_size = ord(number_page_segments)+27\r
+    segment_size = 0\r
+    for i in range(0,ord(number_page_segments)):\r
+        segment_size += ord(segment_table[i])\r
+    page_size = header_size + segment_size\r
+\r
+    if capture_pattern != "OggS":\r
+        raise ValueError("Header does not start with OggS")\r
+    # TODO: calc CRC\r
+    if page_size > 65307:\r
+        raise ValueError("Page too big")\r
+\r
+    if DEBUG:\r
+        print >>sys.stderr,"ogg: type",ord(header_type_flag)\r
+\r
+    header = capture_pattern+stream_structure_version+header_type_flag+granule_position+bitstream_serial_number+page_sequence_number+CRC_checksum+number_page_segments+segment_table\r
+    body = input.read(page_size - header_size)\r
+\r
+    if checkcrc:\r
+        import binascii\r
+        import socket \r
+\r
+        crcheader = capture_pattern+stream_structure_version+header_type_flag+granule_position+bitstream_serial_number+page_sequence_number+'\x00\x00\x00\x00'+number_page_segments+segment_table\r
+        crcpage = crcheader+body\r
+        \r
+        newcrc = ogg_crc(crcpage) \r
+        newcrcnbo = socket.htonl(newcrc) & 0xffffffff\r
+        newcrcstr = "%08x" % newcrcnbo\r
+        \r
+        oldcrcstr = binascii.hexlify(CRC_checksum)\r
+        if DEBUG:\r
+            print >>sys.stderr,"ogg: CRC exp",oldcrcstr,"got",newcrcstr\r
+        if oldcrcstr != newcrcstr:\r
+            raise ValueError("Page fails CRC check")\r
+    \r
+    # BOS or header page\r
+    header_type = body[0]\r
+    isheader = False\r
+    if header_type == '\x01' or header_type == '\x03' or header_type == '\x05':\r
+        isheader = True\r
+        vorbis_grab_header(StringIO(body))\r
+    elif header_type == '\x80' or header_type == '\x81' or header_type == '\x82':\r
+        isheader = True\r
+        theora_grab_header(StringIO(body))\r
+    elif header_type == '\x7F':\r
+        isheader = True\r
+        flac_grab_header(StringIO(body))\r
+        \r
+    return (isheader,header,body)\r
+    \r
+    \r
+def vorbis_grab_header(input):\r
+    if DEBUG:\r
+        header_type = input.read(1)\r
+        if header_type == '\x01':\r
+            codec = input.read(6)\r
+            print >>sys.stderr,"ogg: Got vorbis ident header",codec\r
+        elif header_type == '\x03':\r
+            print >>sys.stderr,"ogg: Got vorbis comment header"\r
+        elif header_type == '\x05':\r
+            print >>sys.stderr,"ogg: Got vorbis setup header"\r
+        \r
+\r
+def theora_grab_header(input):\r
+    if DEBUG:\r
+        header_type = input.read(1)\r
+        if header_type == '\x80':\r
+            codec = input.read(6)\r
+            print >>sys.stderr,"ogg: Got theora ident header",codec\r
+        elif header_type == '\x81':\r
+            print >>sys.stderr,"ogg: Got theora comment header"\r
+        elif header_type == '\x82':\r
+            print >>sys.stderr,"ogg: Got theora setup header"\r
+\r
+\r
+def flac_grab_header(input):\r
+    if DEBUG:\r
+        header_type = input.read(1)\r
+        if header_type == '\x7f':\r
+            codec = input.read(4)\r
+            print >>sys.stderr,"ogg: Got flac ident header",codec\r
+\r
+\r
+"""\r
+Ogg apparently uses a non-standard CRC, see http://www.xiph.org/ogg/doc/framing.html\r
+The following code is from\r
+    http://mimosa-pudica.net/src/oggcut.py\r
+by y.fujii <y-fujii at mimosa-pudica.net>, public domain\r
+"""\r
+\r
+def makeCRCTable( idx ):\r
+    r = idx << 24\r
+    for i in range( 8 ):\r
+        if r & 0x80000000 != 0:\r
+            r = ((r & 0x7fffffff) << 1) ^ 0x04c11db7\r
+        else:\r
+            r = ((r & 0x7fffffff) << 1)\r
+\r
+    return r\r
+\r
+CRCTable = [ makeCRCTable( i ) for i in range( 256 ) ]\r
+\r
+def ogg_crc( src ):\r
+    crc = 0\r
+    for c in src:\r
+        crc = ((crc & 0xffffff) << 8) ^ CRCTable[(crc >> 24) ^ ord(c)]\r
+    return crc\r
+\r
+# End-of-Fujii code.\r
+\r
+\r
+\r
+\r
+\r
+    \r
+OGGMAGIC_TDEF = 0\r
+OGGMAGIC_FIRSTPAGE = 1\r
+OGGMAGIC_REST_OF_INPUT = 2\r
+    \r
+class OggMagicLiveStream:\r
+    \r
+    def __init__(self,tdef,input):\r
+        \r
+        self.tdef = tdef\r
+        self.input = input\r
+        self.firstpagestream = None\r
+\r
+        self.mode = OGGMAGIC_TDEF\r
+        self.find_first_page()\r
+\r
+    def find_first_page(self):\r
+        # Read max Ogg page size bytes + some, must contain page starter\r
+        nwant = 65307 + 4\r
+        firstpagedata = ''\r
+        while len(firstpagedata) < nwant: # Max Ogg page size\r
+            print >>sys.stderr,"OggMagicLiveStream: Reading first page, avail",self.input.available()\r
+            data = self.input.read(nwant)\r
+            firstpagedata += data\r
+            if len(data) == 0 and len(firstpagedata) < nwant:\r
+                raise ValueError("OggMagicLiveStream: Could not get max. page bytes")\r
+            \r
+        self.firstpagestream = StringIO(firstpagedata)\r
+        \r
+        while True:\r
+            char = self.firstpagestream.read(1)\r
+            if len(char) == 0:\r
+                break\r
+            if char == 'O':\r
+                rest = self.firstpagestream.read(3)\r
+                if rest == 'ggS':\r
+                    # Found page boundary\r
+                    print >>sys.stderr,"OggMagicLiveStream: Found page"\r
+                    self.firstpagestream.seek(-4,os.SEEK_CUR)\r
+                    # For real reliability we should parse the page here\r
+                    # and look further if the "OggS" was just video data.\r
+                    # I'm now counting on the Ogg player to do that.\r
+                    # (need better parser than this code to be able to do that)\r
+                    break\r
+                else:\r
+                    self.firstpagestream.seek(-3,os.SEEK_CUR)\r
+                \r
+        if len(char) == 0:\r
+            raise ValueError("OggMagicLiveStream: could not find start-of-page in P2P-stream")\r
+            \r
+    def read(self,numbytes=None):\r
+        """\r
+        When read return:\r
+        1. Ogg header pages from TorrentDef\r
+        2. self.firstpagestream till EOF\r
+        3. self.input till EOF\r
+        """\r
+        #print >>sys.stderr,"OggMagicLiveStream: read",numbytes\r
+        \r
+        if numbytes is None:\r
+            raise ValueError("OggMagicLiveStream: don't support read all")\r
+            \r
+        if self.mode == OGGMAGIC_TDEF:\r
+            data = self.tdef.get_live_ogg_headers()\r
+            if DEBUG:\r
+                print >>sys.stderr,"OggMagicLiveStream: Writing TDEF",len(data)\r
+            if len(data) > numbytes:\r
+                raise ValueError("OggMagicLiveStream: Not implemented, Ogg headers too big, need more code")\r
+            self.mode = OGGMAGIC_FIRSTPAGE\r
+            return data\r
+        elif self.mode == OGGMAGIC_FIRSTPAGE:\r
+            data = self.firstpagestream.read(numbytes)\r
+            if DEBUG:\r
+                print >>sys.stderr,"OggMagicLiveStream: Writing 1st remain",len(data)\r
+            if len(data) == 0:\r
+                self.mode = OGGMAGIC_REST_OF_INPUT\r
+                return self.input.read(numbytes)\r
+            else:\r
+                return data\r
+        elif self.mode == OGGMAGIC_REST_OF_INPUT:\r
+            data = self.input.read(numbytes)\r
+            #print >>sys.stderr,"OggMagicLiveStream: Writing input",len(data)\r
+            return data\r
+            \r
+    def seek(self,offset,whence=None):\r
+        print >>sys.stderr,"OggMagicLiveStream: SEEK CALLED",offset,whence\r
+        if offset == 0:\r
+            if self.mode != OGGMAGIC_TDEF:\r
+                self.mode = OGGMAGIC_TDEF\r
+                self.find_first_page()\r
+        else:\r
+            raise ValueError("OggMagicLiveStream doesn't support seeking other than to beginning")\r
+\r
+    def close(self):\r
+        self.input.close()\r
+    \r
+    def available(self):\r
+        return -1\r
+\r
+\r
+    \r
+    \r
+if __name__ == "__main__":\r
+    \r
+    header_pages = []\r
+    f = open("libre.ogg","rb")\r
+    while True:\r
+        (isheader,header,body) = ogg_grab_page(f)\r
+        if not isheader:\r
+            break\r
+        else:\r
+            header_pages.append((header,body))\r
+    f.close()\r
+    \r
+    \r
+    g = open("stroom.ogg","rb")\r
+    while True:\r
+        char = g.read(1)\r
+        if len(char) == 0:\r
+            break\r
+        if char == 'O':\r
+            rest = g.read(3)\r
+            if rest == 'ggS':\r
+                # Found page boundary\r
+                print >>sys.stderr,"Found page"\r
+                g.seek(-4,os.SEEK_CUR)\r
+                (isheader,pheader,pbody) = ogg_grab_page(g)\r
+                break\r
+            else:\r
+                g.seek(-3,os.SEEK_CUR)\r
+            \r
+    if len(char) > 0:\r
+        # Not EOF\r
+        h = open("new.ogg","wb")\r
+        for header,body in header_pages:\r
+            h.write(header)\r
+            h.write(body)\r
+        h.write(pheader)\r
+        h.write(pbody)\r
+        while True:\r
+            data = g.read(65536)\r
+            if len(data) == 0:\r
+                break\r
+            else:\r
+                h.write(data)\r
+        h.close()\r
+    g.close()\r
+    \r
diff --git a/instrumentation/next-share/BaseLib/Video/Progress.py b/instrumentation/next-share/BaseLib/Video/Progress.py
new file mode 100644 (file)
index 0000000..7136f0e
--- /dev/null
@@ -0,0 +1,558 @@
+# Written by Arno Bakker, Jan David Mol
+# see LICENSE.txt for license information
+
+import wx,time
+import sys,os
+
+
+from BaseLib.__init__ import LIBRARYNAME
+
+class BufferInfo:
+    """ Arno: WARNING: the self.tricolore member is read by the MainThread and 
+        written by the network thread. As it is a fixed array with simple values, this
+        concurrency problem is ignored.
+    """
+    NOFILL = " "
+    SOMEFILL = ".-+="
+    ALLFILL = "#"
+
+    def __init__(self,numbuckets=100,full=False):
+        self.numbuckets = numbuckets
+        self.playable = False
+        self.movieselector = None
+        if full == True:
+            self.tricolore = [2] * self.numbuckets
+    
+    def set_numpieces(self,numpieces):
+        self.numpieces = numpieces
+        self.buckets = [0] * self.numbuckets
+        self.tricolore = [0] * self.numbuckets
+        #self.bucketsize = int(ceil(float(self.numpieces) / self.numbuckets))
+        self.bucketsize = float(self.numpieces) / float(self.numbuckets)
+        self.lastbucketsize = self.numpieces - int(float(self.numbuckets-1) * self.bucketsize)
+
+    def complete( self, piece ):
+        bucket = int(float(piece) / self.bucketsize)
+        
+        #print >>sys.stderr,"BUCKET",bucket,"piece",piece,"bucksize",self.bucketsize
+        # If there is a multi-file torrent that has been partially downloaded before we go
+        # to VOD, it can happen that pieces outside the range of the file selected are
+        # reported as complete here.
+        if bucket < 0 or bucket >= self.numbuckets:
+            return
+        
+        self.buckets[bucket] += 1
+
+        fill = self.buckets[bucket]
+        if bucket == self.numbuckets-1:
+            total = self.lastbucketsize
+        else:
+            total = int(self.bucketsize)
+            
+        if fill == 0:
+            colour = 0
+        elif fill >= total:
+            colour = 2
+        else:
+            colour = 1
+
+        self.tricolore[bucket] = colour
+
+    def str( self ):
+        def chr( fill, total ):
+            if fill == 0:
+                return self.NOFILL
+            if fill >= int(total):
+                return self.ALLFILL
+
+            index = int(float(fill*len(self.SOMEFILL))/total)
+            if index >= len(self.SOMEFILL):
+                index = len(self.SOMEFILL)-1
+            return self.SOMEFILL[index]
+
+        chars = [chr( self.buckets[i], self.bucketsize ) for i in xrange(0,self.numbuckets-1)]
+        chars.append( chr( self.buckets[-1], self.lastbucketsize ) )
+        return "".join(chars)
+
+
+    def set_playable(self):
+        self.playable = True
+        
+    def get_playable(self):
+        return self.playable
+
+    def set_movieselector(self,movieselector):
+        self.movieselector = movieselector
+    
+    def get_bitrate(self):
+        if self.movieselector is not None:
+            return self.movieselector.get_bitrate()
+        else:
+            return 0.0
+
+    def get_blocks(self):
+        return self.tricolore
+
+
+class ProgressInf:
+    def __init__(self):
+        self.bufferinfo = BufferInfo()
+        self.callback = None
+        
+    def get_bufferinfo(self):
+        return self.bufferinfo
+
+    def set_callback(self,callback):
+        self.callback = callback
+        
+    def bufferinfo_updated_callback(self):
+        if self.callback is not None:
+            self.callback()
+        
+
+
+class ProgressBar(wx.Control):
+    #def __init__(self, parent, colours = ["#cfcfcf","#d7ffd7","#00ff00"], *args, **kwargs ):
+    #def __init__(self, parent, colours = ["#cfcfcf","#fde72d","#00ff00"], *args, **kwargs ):
+    #def __init__(self, parent, colours = ["#ffffff","#fde72d","#00ff00"], *args, **kwargs ):
+    def __init__(self, parent, colours = ["#ffffff","#92cddf","#006dc0"], *args, **kwargs ): ## "#ffffff","#CBCBCB","#ff3300"
+        self.colours = colours
+        self.pens    = [wx.Pen(c,0) for c in self.colours]
+        self.brushes = [wx.Brush(c) for c in self.colours]
+        self.reset()
+
+        style = wx.NO_BORDER
+        wx.Control.__init__(self, parent, -1, style=style)
+        self.SetMaxSize((-1,6))
+        self.SetMinSize((1,6))
+        self.SetBackgroundColour(wx.WHITE)
+
+        self.Bind(wx.EVT_PAINT, self.OnPaint)
+        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
+        self.SetSize((60,6))
+
+        self.progressinf = None
+
+    def AcceptsFocus(self):
+        return False
+
+    def OnEraseBackground(self, event):
+        pass # Or None
+    
+    def OnPaint(self, evt):
+        
+        # define condition
+        x,y,maxw,maxh = self.GetClientRect()
+        #dc.DrawRectangle(x,y,)
+        
+        arrowsize = 6
+        arrowspace = 1
+        numrect = float(len(self.blocks))
+
+        # create blocks
+        w = max(1,maxw/numrect)
+        h = maxh
+        
+        width, height = self.GetClientSizeTuple()
+        buffer = wx.EmptyBitmap(width, height)
+        #dc = wx.PaintDC(self)
+        dc = wx.BufferedPaintDC(self, buffer)
+        dc.BeginDrawing()
+        dc.Clear()
+        
+        rectangles = [(x+int(i*w),y,int(w)+1,h) for i in xrange(0,int(numrect))]
+
+        # draw the blocks
+        pens = [self.pens[c] for c in self.blocks]
+        brushes = [self.brushes[c] for c in self.blocks]
+                
+        dc.DrawRectangleList(rectangles,pens,brushes)
+
+        dc.EndDrawing()
+
+    def set_pieces(self, blocks):
+        num = 50 # max number of blocks to show
+        if len(blocks) < num:
+            self.set_blocks([2*int(a) for a in blocks])
+        else:
+            sblocks = [0]*num
+            f = len(blocks)/num
+            for i in xrange(num):
+                part = blocks[int(f*i):int(f*(i+1))]
+                if True in part:
+                    sblocks[i] = 1
+                if not False in part:
+                    sblocks[i] = 2
+            self.set_blocks(sblocks)
+        
+    def set_blocks(self,blocks):
+        """ Called by MainThread """
+        self.blocks = blocks
+        
+    def setNormalPercentage(self, perc):
+        perc = int(perc)
+        self.blocks = ([2]*perc)+([0]* (100-perc))
+
+    def reset(self,colour=0):
+        self.blocks = [colour] * 100
+        
+class ProgressSlider(wx.Panel):
+    
+    def __init__(self, parent, utility, colours = ["#ffffff","#CBCBCB","#ff3300"], imgprefix= '', *args, **kwargs ):
+        self.colours = colours
+        #self.backgroundImage = wx.Image('')
+        self.progress      = 0.0
+        self.videobuffer  = 0.0
+        self.videoPosition = 0
+        self.timeposition = None
+        self.videoLength   = None
+        #wx.Control.__init__(self, parent, -1)
+        wx.Panel.__init__(self, parent, -1)
+        self.SetMaxSize((-1,25))
+        self.SetMinSize((1,25))
+        self.SetBackgroundColour(wx.WHITE)
+        self.utility = utility
+        self.bgImage = wx.Bitmap(os.path.join(self.utility.getPath(), LIBRARYNAME,'Video','Images',imgprefix+'background.png')) ## LIBRARYNAME
+        self.dotImage = wx.Bitmap(os.path.join(self.utility.getPath(), LIBRARYNAME,'Video','Images',imgprefix+'sliderDot.png')) ## LIBRARYNAME
+        self.dotImage_dis = wx.Bitmap(os.path.join(self.utility.getPath(), LIBRARYNAME,'Video','Images',imgprefix+'sliderDot_dis.png')) ## LIBRARYNAME
+        self.sliderPosition = None
+        self.rectHeight = 2
+        self.rectBorderColour = wx.LIGHT_GREY
+        self.textWidth = 70
+        self.margin = 10
+        self.doneColor = "#13bd00" # wx.RED 
+        self.bufferColor = "#0b7100" # wx.GREEN
+        self.sliderWidth = 0
+        self.selected = 2
+        self.range = (0,1)
+        self.dragging = False
+        self.allowDragging = False
+        self.Bind(wx.EVT_PAINT, self.OnPaint)
+        self.Bind(wx.EVT_SIZE, self.OnSize)
+        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
+        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
+        #self.SetSize((-1,self.bgImage.GetSize()[1]))
+        
+    def AcceptsFocus(self):
+        return False
+
+    def OnEraseBackground(self, event):
+        pass # Or None
+    
+    def OnSize(self, event):
+        self.Refresh()
+    
+    def OnMouse(self, event):
+        if not self.allowDragging:
+            return
+        
+        pos = event.GetPosition()
+        if event.ButtonDown():
+            if self.onSliderButton(pos):
+                print >> sys.stderr, 'ProgressSlider: Start drag'
+                self.dragging = True
+            elif self.onSlider(pos): # click somewhere on the slider
+                self.setSliderPosition(pos,True)
+        elif event.ButtonUp():
+            if self.dragging:
+                print >> sys.stderr, 'ProgressSlider: End drag'
+                self.setSliderPosition(pos, True)
+            self.dragging = False
+        elif event.Dragging():
+            if self.dragging:
+                self.setSliderPosition(pos, False)
+        elif event.Leaving():
+            if self.dragging:
+                self.setSliderPosition(pos,True)
+                
+    def onSliderButton(self, pos):
+        if not self.sliderPosition:
+            return False
+        x,y = pos
+        bx, by = self.sliderPosition
+        dotSize = self.dotImage.GetSize()
+        return abs(x-bx) < dotSize[0]/2 and abs(y-by)<dotSize[1]/2
+        
+    def onSlider(self, pos):
+        x,y = pos
+        width, height = self.GetClientSizeTuple()
+        return (x > self.margin and x<= self.margin+self.sliderWidth and \
+                abs(y - height/2) < self.rectHeight/2+4)
+        
+    def setSliderPosition(self, pos, ready):
+        x, y = pos
+        tmp_progress = (x-self.margin)/float(self.sliderWidth)
+        self.progress = min(1.0, max(0.0, tmp_progress))
+        self.videoPosition = self
+        self.Refresh()
+        if ready:
+            #theEvent = wx.ScrollEvent(pos=self.progress)
+            #self.GetEventHandler().ProcessEvent(theEvent)
+            #print >> sys.stderr, 'Posted event'
+            print >> sys.stderr, 'ProgressSlider: Set progress to : %f' % self.progress
+            self.sliderChangedAction()
+            
+    def sliderChangedAction(self):
+        self.GetParent().Seek()
+            
+        
+    def setSelected(self, sel):
+        self.selected = sel
+        self.Refresh()
+
+
+                
+        
+    def setBufferFromPieces(self, pieces_complete):
+        if not pieces_complete:
+            self.videobuffer = 0.0
+            return
+        last_buffered_piece = 0
+        while last_buffered_piece<len(pieces_complete) and pieces_complete[last_buffered_piece]:
+            last_buffered_piece+=1
+        if last_buffered_piece == len(pieces_complete)-1:
+            last_buffered_piece += 1
+        
+        self.videobuffer = last_buffered_piece/float(len(pieces_complete)) 
+        #print >> sys.stderr, 'progress: %d/%d pieces continuous buffer (frac %f)' % \
+        #    (last_buffered_piece, len(pieces_complete), self.videobuffer)
+        
+                    
+            
+    def SetValue(self, b):
+        if self.range[0] == self.range[1]:
+            return
+        
+        if not self.dragging:
+            self.progress = max(0.0, min((b - self.range[0]) / float(self.range[1] - self.range[0]), 1.0))
+            self.Refresh()
+        
+    def GetValue(self):
+        print >>sys.stderr, 'ProgressSlider: %f, Range (%f, %f)' % (self.progress, self.range[0], self.range[1])
+        return self.progress * (self.range[1] - self.range[0])+ self.range[0]
+
+    def SetRange(self, a,b):
+        self.range = (a,b)
+    
+    def setVideoBuffer(self, buf):
+        self.videobuffer = buf
+    
+    def SetTimePosition(self, timepos, duration):
+        self.timeposition = timepos
+        self.videoLength = duration
+
+    def ResetTime(self):
+        self.videoLength = None
+        self.timeposition = None
+        self.Refresh()
+
+        
+    def formatTime(self, s):
+        longformat = time.strftime('%d:%H:%M:%S', time.gmtime(s))
+        if longformat.startswith('01:'):
+            longformat = longformat[3:]
+        while longformat.startswith('00:') and len(longformat) > len('00:00'):
+            longformat = longformat[3:]
+        return longformat
+    
+    def OnPaint(self, evt):
+        width, height = self.GetClientSizeTuple()
+        buffer = wx.EmptyBitmap(width, height)
+        #dc = wx.PaintDC(self)
+        dc = wx.BufferedPaintDC(self, buffer)
+        dc.BeginDrawing()
+        dc.Clear()
+        
+        # Draw background
+        bgSize = self.bgImage.GetSize()
+        for i in xrange(width/bgSize[0]+1):
+            dc.DrawBitmap(self.bgImage, i*bgSize[0],0)
+        
+        
+        self.sliderWidth = width-(3*self.margin+self.textWidth)
+        position = self.sliderWidth * self.progress
+        self.sliderPosition = position+self.margin, height/2
+        self.bufferlength = (self.videobuffer-self.progress) * self.sliderWidth
+        self.bufferlength = min(self.bufferlength, self.sliderWidth-position)
+        
+        # Time strings
+        if self.videoLength is not None:
+            durationString = self.formatTime(self.videoLength)
+        else:
+            durationString = '--:--'
+        if self.timeposition is not None:
+            timePositionString = self.formatTime(self.timeposition)
+        else:
+            timePositionString = '--:--'
+        
+        if width > 3*self.margin+self.textWidth:
+            # Draw slider rect
+            dc.SetPen(wx.Pen(self.rectBorderColour, 2))
+            dc.DrawRectangle(self.margin,height/2-self.rectHeight/2, self.sliderWidth, self.rectHeight)
+            # Draw slider rect inside
+            dc.SetPen(wx.Pen(self.doneColor, 0))
+            dc.SetBrush(wx.Brush(self.doneColor))
+            smallRectHeight = self.rectHeight - 2
+            dc.DrawRectangle(self.margin,height/2-smallRectHeight/2, position, smallRectHeight)
+            dc.SetBrush(wx.Brush(self.bufferColor))
+            dc.SetPen(wx.Pen(self.bufferColor, 0))
+            dc.DrawRectangle(position+self.margin,height/2-smallRectHeight/2, self.bufferlength, smallRectHeight)
+            # draw circle
+            dotSize = self.dotImage.GetSize()
+            if self.selected == 2:
+                dc.DrawBitmap(self.dotImage_dis, position+self.margin-dotSize[0]/2, height/2-dotSize[1]/2, True)
+            else:
+                dc.DrawBitmap(self.dotImage, position+self.margin-dotSize[0]/2, height/2-dotSize[1]/2, True)
+        if width > 2*self.margin+self.textWidth:
+            # Draw times
+            font = self.GetFont()
+            font.SetPointSize(8)
+            dc.SetFont(font)
+            dc.DrawText('%s / %s' % (timePositionString, durationString), width-self.margin-self.textWidth, height/2-dc.GetCharHeight()/2)
+
+        dc.EndDrawing()
+
+    def EnableDragging(self):
+        self.allowDragging = True
+        self.setSelected(1)
+        
+    def DisableDragging(self):
+        self.allowDragging = False
+        self.setSelected(2)
+
+class VolumeSlider(wx.Panel):
+    
+    def __init__(self, parent, utility, imgprefix=''):
+        self.progress      = 0.0
+        self.position = 0
+        
+        #wx.Control.__init__(self, parent, -1)
+        wx.Panel.__init__(self, parent, -1)
+        self.SetMaxSize((150,25))
+        self.SetMinSize((150,25))
+        self.SetBackgroundColour(wx.WHITE)
+        self.utility = utility
+        self.bgImage = wx.Bitmap(os.path.join(self.utility.getPath(), LIBRARYNAME,'Video','Images',imgprefix+'background.png')) ## LIBRARYNAME
+        self.dotImage = wx.Bitmap(os.path.join(self.utility.getPath(), LIBRARYNAME,'Video','Images',imgprefix+'sliderVolume.png')) ## LIBRARYNAME
+        self.sliderPosition = None
+        self.rectHeight = 2
+        self.rectBorderColour = wx.LIGHT_GREY
+        self.margin = 10
+        self.cursorsize = [4,19]
+        self.doneColor = wx.BLACK #wx.RED
+        self.sliderWidth = 0
+        self.range = (0,1)
+        self.dragging = False
+        self.Bind(wx.EVT_PAINT, self.OnPaint)
+        self.Bind(wx.EVT_SIZE, self.OnSize)
+        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
+        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
+        #self.SetSize((-1,self.bgImage.GetSize()[1]))
+        
+    def AcceptsFocus(self):
+        return False
+
+    def OnEraseBackground(self, event):
+        pass # Or None
+    
+    def OnSize(self, event):
+        self.Refresh()
+    
+    def OnMouse(self, event):
+        pos = event.GetPosition()
+        if event.ButtonDown():
+            if self.onSliderButton(pos):
+                print >> sys.stderr, 'VolumeSlider: Start drag'
+                self.dragging = True
+            elif self.onSlider(pos): # click somewhere on the slider
+                self.setSliderPosition(pos,True)
+        elif event.ButtonUp():
+            if self.dragging:
+                print >> sys.stderr, 'VolumeSlider: End drag'
+                self.setSliderPosition(pos, True)
+            self.dragging = False
+        elif event.Dragging():
+            if self.dragging:
+                self.setSliderPosition(pos, False)
+        elif event.Leaving():
+            if self.dragging:
+                self.setSliderPosition(pos,True)
+                
+    def onSliderButton(self, pos):
+        if not self.sliderPosition:
+            return False
+        x,y = pos
+        bx, by = self.sliderPosition
+        extraGrip = 3 # 3px extra grip on sliderButton
+        return abs(x-bx) < self.cursorsize[0]/2+extraGrip and abs(y-by)<self.cursorsize[1]/2
+        
+    def onSlider(self, pos):
+        x,y = pos
+        width, height = self.GetClientSizeTuple()
+        return (x > self.margin and x<= self.margin+self.sliderWidth and \
+                abs(y - height/2) < self.rectHeight/2+4)
+        
+    def setSliderPosition(self, pos, ready):
+        x, y = pos
+        tmp_progress = (x-self.margin)/float(self.sliderWidth)
+        self.progress = min(1.0, max(0.0, tmp_progress))
+        self.videoPosition = self
+        self.Refresh()
+        if ready:
+            #theEvent = wx.ScrollEvent(pos=self.progress)
+            #self.GetEventHandler().ProcessEvent(theEvent)
+            #print >> sys.stderr, 'Posted event'
+            print >> sys.stderr, 'VolumeSlider: Set progress to : %f' % self.progress
+            self.sliderChangedAction()
+            
+    def sliderChangedAction(self):
+        self.GetParent().SetVolume()
+            
+            
+    def SetValue(self, b):
+        if not self.dragging:
+            self.progress = min((b - self.range[0]) / float(self.range[1] - self.range[0]), 1.0)
+            self.Refresh()
+        
+    def GetValue(self):
+        print >>sys.stderr, 'VolumeSlider: %f, Range (%f, %f)' % (self.progress, self.range[0], self.range[1])
+        return self.progress * (self.range[1] - self.range[0])+ self.range[0]
+
+    def SetRange(self, a,b):
+        self.range = (a,b)
+    
+    def OnPaint(self, evt):
+        width, height = self.GetClientSizeTuple()
+        buffer = wx.EmptyBitmap(width, height)
+        #dc = wx.PaintDC(self)
+        dc = wx.BufferedPaintDC(self, buffer)
+        dc.BeginDrawing()
+        dc.Clear()
+        
+        # Draw background
+        bgSize = self.bgImage.GetSize()
+        for i in xrange(width/bgSize[0]+1):
+            dc.DrawBitmap(self.bgImage, i*bgSize[0],0)
+        
+        
+        self.sliderWidth = width-(2*self.margin)
+        position = self.sliderWidth * self.progress
+        self.sliderPosition = position+self.margin, height/2
+        
+        
+        if width > 2*self.margin:
+            # Draw slider rect
+            dc.SetPen(wx.Pen(self.rectBorderColour, 2))
+            dc.DrawRectangle(self.margin,height/2-self.rectHeight/2, self.sliderWidth, self.rectHeight)
+            # Draw slider rect inside
+            dc.SetPen(wx.Pen(self.doneColor, 0))
+            dc.SetBrush(wx.Brush(self.doneColor))
+            smallRectHeight = self.rectHeight - 2
+            dc.DrawRectangle(self.margin,height/2-smallRectHeight/2, position, smallRectHeight)
+            # draw slider button
+            dotSize = self.dotImage.GetSize()
+            dc.DrawBitmap(self.dotImage, position+self.margin-dotSize[0]/2, height/2-dotSize[1]/2, True)
+        dc.EndDrawing()
+
+        
diff --git a/instrumentation/next-share/BaseLib/Video/VLCWrapper.py b/instrumentation/next-share/BaseLib/Video/VLCWrapper.py
new file mode 100644 (file)
index 0000000..e6ea350
--- /dev/null
@@ -0,0 +1,387 @@
+# Written by Fabian van der Werf and Arno Bakker
+# see LICENSE.txt for license information
+
+import sys
+import os
+import random
+from traceback import print_exc,print_stack
+from threading import currentThread
+
+# vlcstatusmap = {vlc.PlayingStatus:'vlc.PlayingStatus',
+#                 vlc.PauseStatus:'vlc.PauseStatus',
+#                 vlc.InitStatus:'vlc.InitStatus',
+#                 vlc.EndStatus:'vlc.EndStatus',
+#                 vlc.UndefinedStatus:'vlc.UndefinedStatus'}
+
+DEBUG = False
+VLC_MAXVOLUME = 200
+
+def check_threading():
+    if currentThread().getName() != "MainThread":
+        print >>sys.stderr,"VLCWrapper: Thread violation!"
+        print_stack()
+
+class VLCWrapper:
+    """ Wrapper around the MediaControl API, to hide some of its quirks,
+    like the Position() objects, and to hide the VideoRawVLCServer from users.
+    
+    At the moment, we create one instance of this class which is reused
+    each time to create a VLCWindow.
+    """
+
+    def __init__(self,installdir):
+        check_threading()
+        self.installdir = installdir
+        self.window = None
+        self.initialized = False
+
+    def _init_vlc(self):
+        """
+        To avoid a bug on Ubuntu Intrepid and Jaunty that causes the
+        GUI to instantly exit, we need to delay importing vlc and
+        setting the window.
+        """
+        import vlc
+        from BaseLib.Video.VideoServer import VideoRawVLCServer
+
+        # avoid another init
+        self.initialized = True
+
+        self.vlc = vlc
+    
+        #
+        # With VLC 0.9.x came changes to the MediaControl API. In particular,
+        # there is no longer a concept of a playlist. The VLCWrapper can now
+        # deal with both versions of the API.
+        #
+        # vlc.Instance only exists in the newer (0.9.x / "0.2") bindings;
+        # AttributeError here means we are on the old "0.1" API.  The bare
+        # except is deliberate best-effort version probing.
+        try:
+            vlc.Instance
+            self.VLC_MEDIACONTROL_API_VERSION = "0.2"
+        except:
+            # print_exc()
+            self.VLC_MEDIACONTROL_API_VERSION = "0.1"
+
+        self.media = self.get_vlc_mediactrl()
+        self.videorawserv = VideoRawVLCServer.getInstance()
+
+        # Replay a set_window() that arrived before initialization.
+        if not self.window is None:
+            self.set_window(self.window)
+
+    def set_window(self,wxwindow):
+        """ Must be called after wxwindow has been realized, such that
+        GetHandle() returns a valid xid. See
+        http://mailman.videolan.org/pipermail/vlc-devel/2006-September/025895.html
+        """
+        # Before _init_vlc() has run we just remember the window; _init_vlc()
+        # will call us back once self.media exists.
+        if not self.initialized:
+            self.window = wxwindow
+            return
+
+        check_threading()
+        xid = wxwindow.GetHandle()
+        # XID 0 means the native window is not created yet; silently bail.
+        if xid == 0:
+            if DEBUG:
+                print >>sys.stderr,"VLCWrapper: set_window: WARNING: window not yet materialized, XID=0"
+            return
+        
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper: set_window, XID=",xid
+            
+        if sys.platform == 'darwin':
+            # pylint: disable-msg=E1101
+            self.media.set_visual_macosx_type(self.vlc.DrawableControlRef)
+            # pylint: enable-msg=E1101
+        self.media.set_visual(xid)
+        
+    
+        
+    
+    def get_vlc_mediactrl(self):
+        """Build and return a vlc.MediaControl configured for our platform
+        and detected MediaControl API version ("0.1" or "0.2")."""
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        # Old API on win32: VLC must be started from its own directory so it
+        # finds its plugins; oldcwd is restored under the same condition below.
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.1":
+            if sys.platform == 'win32':
+                oldcwd = os.getcwd()
+                os.chdir(os.path.join(self.installdir,'vlc'))
+    
+        # Arno: 2007-05-11: Don't ask me why but without the "--verbose=0" vlc will ignore the key redef.
+        params = ["--verbose=0"]
+        
+        """
+        # To enable logging to file:
+        #[loghandle,logfilename] = mkstemp("vlc-log")
+        #os.close(loghandle)
+        currwd = os.getcwd()
+        logfilename = os.path.join(currwd,"vlc.log")
+        params += [""--extraintf=logger""]
+        params += ["--logfile",logfilename]
+        """
+        
+        params += ["--no-drop-late-frames"] # Arno: 2007-11-19: don't seem to work as expected DEBUG
+        params += ["--no-skip-frames"]
+        params += ["--quiet-synchro"]
+        # JD: avoid "fast catchup" after frame loss by letting VLC have a flexible buffer
+        #params += ["--access-filter","timeshift"]
+        #params += ["--timeshift-force"]
+        # Arno: attempt to improve robustness
+        
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.1":
+            params += ["--http-reconnect"]
+            
+        #if sys.platform == 'win32':
+        #    params += ["--plugin-path", "c:\\build\\mbvlc100\\vlc\\plugins" ]
+
+
+
+
+        # Arno, 2009-03-30: On my Vista Test Machine (no Aero) video playback 
+        # doesn't work with our VLC 0.8.6h. The Direct3D vout is chosen and 
+        # that gives a "Failed to create texture" error. Apparent solution is 
+        # to set vout to vout_directx (opengl and wingdi also work, but former 
+        # doesn't work on all tested content and the latter gives poor output 
+        # quality. On a machine with Aero this unfortunately causes it to
+        # switch the color scheme to Windows Vista Basic :-( Need Aero detection.
+        #
+        if sys.platform == "win32":
+            try:
+                # 5 = XP, 6 = Vista
+                # pylint: disable-msg=E1101
+                if sys.getwindowsversion()[0] == 6:
+                    params += ["--vout","vout_directx"]
+                # pylint: enable-msg=E1101
+            except:
+                print_exc()
+
+        # VLC wiki says: "apply[ing] deinterlacing even if the original is not
+        # interlaced, is a really bad idea."
+        #params += ["--vout-filter","deinterlace"]
+        #params += ["--deinterlace-mode","linear"]
+        #params += ["--demux=ts"]
+        #params += ["--codec=mp4"]
+        #
+        params += ["--no-plugins-cache"]
+        # must come last somehow on Win32
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.2":
+            params += ["--key-toggle-fullscreen", "Esc"] 
+        else:
+            params += ["--key-fullscreen", "Esc"]
+        
+        # Arno, 2009-07-22: Not sure whether sys.argv0 gives right dir.
+        if sys.platform == 'darwin':
+            params += ["--plugin-path", "%s/macbinaries/vlc_plugins" % (
+                 # location of plugins: next to tribler.py
+                 os.path.abspath(os.path.dirname(sys.argv[0]))
+                 )]
+
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.2":
+            if sys.platform == 'win32':
+                params += ["--plugin-path", os.path.abspath(os.path.join(self.installdir,'vlc','plugins'))]
+
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.2":
+            params += ["--no-video-title-show"]
+            params += ["--no-osd"]
+
+        #print >>sys.stderr,"VLCWrapper: get_vlc_mediactrl: params",params
+            
+        media = self.vlc.MediaControl(params)
+
+        # Restore the working directory changed above (same condition).
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.1":            
+            if sys.platform == 'win32':
+                os.chdir(oldcwd)
+    
+        return media
+    
+    
+    def load(self,url,streaminfo=None):
+        """Queue an item for playback: either a URL/path (url not None) or a
+        raw Python stream served via VideoRawVLCServer (url is None, in which
+        case streaminfo supplies 'stream' and 'length')."""
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        # NOTE(review): unconditional stderr print -- presumably deliberate
+        # instrumentation; confirm before removing.
+        print >>sys.stderr,"VLCWrapper: load:",url,streaminfo
+        
+        #self.media.exit()
+        #self.media = self.get_vlc_mediactrl()
+        
+        if url is None:
+            """
+            To prevent concurrency between the MainThread closing the 
+            video window and the VLC Dummy-X thread making callbacks,
+            the callbacks go to a stable object, the VideoRawVLCServer that
+            persists during the lifetime of the player process.
+            """
+
+            # Random stream id so concurrent/successive streams don't collide.
+            sid = random.randint(0,sys.maxint)
+            self.videorawserv.set_inputstream(streaminfo,sid)
+               
+            if DEBUG:
+                print >>sys.stderr,"VLCWrapper: load: stream",sid,"size",streaminfo['length']
+            length = streaminfo['length']
+            # VLC uses -1 to mean "unknown length" (e.g. live streams).
+            if length is None:
+                length = -1
+            
+            self.media.set_raw_callbacks(self.videorawserv.ReadDataCallback,self.videorawserv.SeekDataCallback,length,sid)
+        else:
+            if DEBUG:
+                print >>sys.stderr,"VLCWrapper: load: calling playlist_add_item"
+            # New API sets the MRL directly; old API uses the playlist.
+            if self.VLC_MEDIACONTROL_API_VERSION == "0.2":
+                self.media.set_mrl(url)
+            else:
+                self.media.playlist_add_item(url)
+
+        #print >>sys.stderr,"VLCWrapper: load: after list is",self.media.playlist_get_list()
+
+
+
+
+    def start(self,abspos=0):
+        """Start playback of the loaded item at absolute position abspos.
+
+        abspos is passed as a vlc.Position with AbsolutePosition origin and
+        MediaTime key (units per the MediaControl API -- confirm against the
+        installed python-vlc bindings).
+        """
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            if self.VLC_MEDIACONTROL_API_VERSION == "0.2":
+                print >>sys.stderr,"VLCWrapper: start: item is",self.media.get_mrl()
+            else:
+                print >>sys.stderr,"VLCWrapper: start: list is",self.media.playlist_get_list()    
+        pos = self.vlc.Position()
+        pos.origin = self.vlc.AbsolutePosition
+        pos.key = self.vlc.MediaTime
+        pos.value = abspos
+        self.media.start(pos)
+
+
+    def stop(self):
+        """Stop playback of the current item."""
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper: stop"
+        self.media.stop()
+
+    def pause(self):
+        """Pause playback of the current item."""
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper: pause"
+        self.media.pause()
+
+    def resume(self):
+        """Resume playback after a pause()."""
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper: resume"
+        self.media.resume()
+
+
+    def get_stream_information_status(self):
+        """ Returns the state of VLC. """
+        # Lazily initialize, like every other public method.
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading() 
+        return self.media.get_stream_information()["status"]
+
+    def get_stream_information_length(self):
+        """ Returns the length in bytes of current item playing """
+        # NOTE(review): "length" here follows whatever unit the MediaControl
+        # API reports -- confirm bytes vs. time against the bindings in use.
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading() 
+        return self.media.get_stream_information()["length"]
+
+    def get_media_position(self):
+        """ Returns absolute position in bytes of current item playing """
+        # Queries with AbsolutePosition origin and MediaTime key; the .value
+        # field of the returned Position object carries the number.
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading() 
+        return self.media.get_media_position(self.vlc.AbsolutePosition, self.vlc.MediaTime).value
+
+    def set_media_position(self, where):
+        """ Arno: For some files set_media_position() doesn't work. Subsequent 
+        get_media_position()s then do not always return the right value.
+        TODO: seek mode
+        """
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        # Mirror image of get_media_position(): absolute seek keyed by
+        # MediaTime, with the target in pos.value.
+        pos = self.vlc.Position() 
+        pos.origin = self.vlc.AbsolutePosition
+        pos.key = self.vlc.MediaTime
+        pos.value = where
+        
+        self.media.set_media_position(pos)
+        
+
+    def sound_set_volume(self, frac):
+        """ frac is float 0..1 """
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper: sound_set_volume"
+        # NOTE(review): no clamping -- a frac outside [0,1] is scaled and
+        # passed straight to VLC (max meaningful value is VLC_MAXVOLUME).
+        vol = int(frac * VLC_MAXVOLUME)
+        self.media.sound_set_volume(vol)
+
+    def sound_get_volume(self):
+        """ returns a float 0..1 """
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        vol = self.media.sound_get_volume()
+        return float(vol) / VLC_MAXVOLUME
+
+    def set_fullscreen(self,b):
+        """ b is Boolean """
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper set_fullscreen"
+        if b:
+            self.media.set_fullscreen(1)
+        else:
+            self.media.set_fullscreen(0)
+
+    def playlist_get_list(self):
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper: playlist_get_list"
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.2":
+            return [self.media.get_mrl()]
+        else:
+            return self.media.playlist_get_list()
+
+    def playlist_clear(self):
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        if DEBUG:
+            print >>sys.stderr,"VLCWrapper: playlist_clear"
+
+        if self.VLC_MEDIACONTROL_API_VERSION == "0.2":
+            #raise RuntimeError("VLC MediaControlAPI 0.2 doesn't support playlist ops")
+            pass
+        else:
+            self.media.playlist_clear()
+
+    def exit(self):
+        if not self.initialized:
+            self._init_vlc()
+
+        check_threading()
+        """ Use with care, Ivaylo's raw interface seems to have issues with
+        calling this. So we don't
+        """
+        self.media.exit()
+        self.media = None
+        
diff --git a/instrumentation/next-share/BaseLib/Video/VideoFrame.py b/instrumentation/next-share/BaseLib/Video/VideoFrame.py
new file mode 100644 (file)
index 0000000..3a6d109
--- /dev/null
@@ -0,0 +1,56 @@
+# Written by Fabian van der Werf and Arno Bakker\r
+# see LICENSE.txt for license information\r
+\r
+import wx\r
+import sys\r
+import os\r
+\r
+from BaseLib.Video.defs import *\r
+from BaseLib.__init__ import LIBRARYNAME\r
+\r
+DEBUG = False
+
+\r
+class VideoBaseFrame:\r
+    """ Abstract interface for a window hosting the video playback panel.\r
+    All methods are intentional no-ops to be overridden by concrete\r
+    frame implementations. """\r
+\r
+    def show_videoframe(self):\r
+        pass\r
+    \r
+    def hide_videoframe(self):\r
+        pass\r
+\r
+    def get_videopanel(self):\r
+        pass\r
+    \r
+    def delete_videopanel(self):\r
+        pass\r
+\r
+    def OnCloseWindow(self, event = None):\r
+        pass\r
+\r
+    def get_window(self):\r
+        pass\r
+\r
+\r
+# See Tribler/Main/vwxGUI/MainVideoFrame.py for Tribler 5.0\r
+# See Tribler/Player/PlayerVideoFrame.py for the SwarmPlayer / 4.5\r
+\r
+\r
+class DelayTimer(wx.Timer):\r
+    """ vlc.MediaCtrl needs some time to stop after we give it a stop command.\r
+        Wait until it is and then tell it to play the new item\r
+    """\r
+    def __init__(self,embedplay):\r
+        wx.Timer.__init__(self)\r
+        self.embedplay = embedplay\r
+        # Poll every 100 ms until the old item has actually stopped.\r
+        self.Start(100)\r
+        \r
+    def Notify(self):\r
+        # Fires on each timer tick (GUI thread).\r
+        if self.embedplay.GetState() != MEDIASTATE_PLAYING:\r
+            if DEBUG:\r
+                print >>sys.stderr,"embedplay: VLC has stopped playing previous video, starting it on new"\r
+            self.Stop()\r
+            self.embedplay.Play()\r
+        elif DEBUG:\r
+            print >>sys.stderr,"embedplay: VLC is still playing old video"\r
+\r
diff --git a/instrumentation/next-share/BaseLib/Video/VideoPlayer.py b/instrumentation/next-share/BaseLib/Video/VideoPlayer.py
new file mode 100644 (file)
index 0000000..793f3d5
--- /dev/null
@@ -0,0 +1,1008 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+from threading import currentThread
+from traceback import print_exc
+import inspect
+import os
+import re
+import sys
+import urllib
+import urlparse
+import wx
+
+from BaseLib.Video.defs import *
+from BaseLib.Video.VideoServer import VideoHTTPServer,VideoRawVLCServer
+from BaseLib.Video.utils import win32_retrieve_video_play_command,win32_retrieve_playcmd_from_mimetype,quote_program_path,videoextdefaults
+
+from BaseLib.Core.simpledefs import *
+from BaseLib.Core.Utilities.unicode import unicode2str,bin2unicode
+
+from BaseLib.Video.CachingStream import SmartCachingStream
+from BaseLib.Video.Ogg import is_ogg,OggMagicLiveStream
+
+DEBUG = False
+
+# NOTE(review): both branches assign False -- the raw VLC callback interface
+# is deliberately disabled on all platforms for Next-Share; the if/else is
+# kept so per-platform defaults can be reinstated easily.
+if sys.platform == "linux2" or sys.platform == "darwin":
+    USE_VLC_RAW_INTERFACE = False
+else:
+    USE_VLC_RAW_INTERFACE = False # False for Next-Share
+    
+
+class VideoPlayer:
+    
+    __single = None
+    
+    def __init__(self,httpport=6880):
+        """Singleton constructor; httpport is the local port for the internal
+        video HTTP server (created lazily in register())."""
+        if VideoPlayer.__single:
+            raise RuntimeError, "VideoPlayer is singleton"
+        VideoPlayer.__single = self
+        self.videoframe = None
+        self.extprogress = None
+        self.vod_download = None        # Download currently driving VOD, or None
+        self.playbackmode = None
+        self.preferredplaybackmode = None
+        self.other_downloads = None
+        self.closeextplayercallback = None
+
+        self.videohttpservport = httpport
+        self.videohttpserv = None
+        # Must create the instance here, such that it won't get garbage collected
+        self.videorawserv = VideoRawVLCServer.getInstance()
+
+        # Counter used by restart_other_downloads() to debounce resumes.
+        self.resume_by_system = 1
+        self.user_download_choice = None
+        
+    def getInstance(*args, **kw):
+        # Singleton accessor: constructs VideoPlayer on first call.
+        # NOTE(review): not locked -- presumably only called from the GUI
+        # thread; confirm before using from other threads.
+        if VideoPlayer.__single is None:
+            VideoPlayer(*args, **kw)
+        return VideoPlayer.__single
+    getInstance = staticmethod(getInstance)
+        
+    def register(self,utility,preferredplaybackmode=None,closeextplayercallback=None):
+        """Second-phase init: pick internal (embedded VLC) or external
+        playback mode and start the internal HTTP server when needed."""
+        
+        self.utility = utility # TEMPARNO: make sure only used for language strings
+
+        self.preferredplaybackmode = preferredplaybackmode
+        self.determine_playbackmode()
+
+        if self.playbackmode == PLAYBACKMODE_INTERNAL:
+            # The python-vlc bindings. Created only once at the moment,
+            # as using MediaControl.exit() more than once with the raw interface
+            # blows up the GUI.
+            #
+            from BaseLib.Video.VLCWrapper import VLCWrapper
+            self.vlcwrap = VLCWrapper(self.utility.getPath())
+            self.supportedvodevents = [VODEVENT_START,VODEVENT_PAUSE,VODEVENT_RESUME]
+        else:
+            self.vlcwrap = None
+            # Can't pause when external player
+            self.supportedvodevents = [VODEVENT_START]
+            
+        if self.playbackmode != PLAYBACKMODE_INTERNAL or not USE_VLC_RAW_INTERFACE:
+            # Start HTTP server for serving video to external player
+            self.videohttpserv = VideoHTTPServer.getInstance(self.videohttpservport) # create
+            self.videohttpserv.background_serve()
+            self.videohttpserv.register(self.videohttpserver_error_callback,self.videohttpserver_set_status_callback)
+            
+        if closeextplayercallback is not None:
+            self.closeextplayercallback = closeextplayercallback
+
+    def set_other_downloads(self, other_downloads):
+        """A boolean indicating whether there are other downloads running at this time"""
+        self.other_downloads = other_downloads
+
+    def get_vlcwrap(self):
+        """Return the VLCWrapper, or None when an external player is used."""
+        return self.vlcwrap
+    
+    def get_supported_vod_events(self):
+        """Return the VOD events supported by the current playback mode."""
+        return self.supportedvodevents
+
+    def set_videoframe(self,videoframe):
+        """Attach the VideoBaseFrame-derived window used for internal playback."""
+        self.videoframe = videoframe
+
+
+    def play_file(self,dest): 
+        """ Play video file from disk """
+        if DEBUG:
+            print >>sys.stderr,"videoplay: Playing file from disk",dest
+
+        # Pick a player command based on the file extension.
+        (prefix,ext) = os.path.splitext(dest)
+        [mimetype,cmd] = self.get_video_player(ext,dest)
+        
+        if DEBUG:
+            print >>sys.stderr,"videoplay: play_file: cmd is",cmd
+        self.launch_video_player(cmd)
+
+    def play_file_via_httpserv(self,dest):
+        """ Play a file via our internal HTTP server. Needed when the user
+        selected embedded VLC as player and the filename contains Unicode
+        characters.
+        """ 
+        if DEBUG:
+            print >>sys.stderr,"videoplay: Playing file with Unicode filename via HTTP"
+
+        (prefix,ext) = os.path.splitext(dest)
+        videourl = self.create_url(self.videohttpserv,'/'+os.path.basename(prefix+ext))
+        [mimetype,cmd] = self.get_video_player(ext,videourl)
+
+        # NOTE(review): the stream is handed to the HTTP server and not closed
+        # here -- presumably the server takes ownership; confirm it closes it.
+        stream = open(dest,"rb")
+        stats = os.stat(dest)
+        length = stats.st_size
+        streaminfo = {'mimetype':mimetype,'stream':stream,'length':length}
+        self.videohttpserv.set_inputstream(streaminfo)
+        
+        self.launch_video_player(cmd)
+
+
+    def play_url(self,url):
+        """ Play video file from network or disk """
+        if DEBUG:
+            print >>sys.stderr,"videoplay: Playing file from url",url
+        
+        self.determine_playbackmode()
+        
+        t = urlparse.urlsplit(url)
+        dest = t[2]
+        
+        # VLC will play .flv files, but doesn't like the URLs that YouTube uses,
+        # so quote them
+        if self.playbackmode != PLAYBACKMODE_INTERNAL:
+            if sys.platform == 'win32':
+                # Percent-quote the path and query components, then rebuild.
+                x = [t[0],t[1],t[2],t[3],t[4]]
+                n = urllib.quote(x[2])
+                if DEBUG:
+                    print >>sys.stderr,"videoplay: play_url: OLD PATH WAS",x[2],"NEW PATH",n
+                x[2] = n
+                n = urllib.quote(x[3])
+                if DEBUG:
+                    print >>sys.stderr,"videoplay: play_url: OLD QUERY WAS",x[3],"NEW PATH",n
+                x[3] = n
+                url = urlparse.urlunsplit(x)
+            elif url[0] != '"' and url[0] != "'":
+                # to prevent shell escape problems
+                # TODO: handle this case in escape_path() that now just covers spaces
+                url = "'"+url+"'" 
+
+        (prefix,ext) = os.path.splitext(dest)
+        [mimetype,cmd] = self.get_video_player(ext,url)
+        
+        if DEBUG:
+            print >>sys.stderr,"videoplay: play_url: cmd is",cmd
+        
+        self.launch_video_player(cmd)
+
+
+    def play_stream(self,streaminfo):
+        """Play a live Python stream, either via VLC raw callbacks or by
+        serving it from the internal HTTP server."""
+        if DEBUG:
+            print >>sys.stderr,"videoplay: play_stream"
+
+        self.determine_playbackmode()
+
+        if self.playbackmode == PLAYBACKMODE_INTERNAL:
+            if USE_VLC_RAW_INTERFACE:
+                # Play using direct callbacks from the VLC C-code
+                self.launch_video_player(None,streaminfo=streaminfo)
+            else:
+                # Play via internal HTTP server
+                self.videohttpserv.set_inputstream(streaminfo,'/')
+                url = self.create_url(self.videohttpserv,'/')
+
+                self.launch_video_player(url,streaminfo=streaminfo)
+        else:
+            # External player, play stream via internal HTTP server
+            path = '/'
+            self.videohttpserv.set_inputstream(streaminfo,path)
+            url = self.create_url(self.videohttpserv,path)
+
+            [mimetype,cmd] = self.get_video_player(None,url,mimetype=streaminfo['mimetype'])
+            self.launch_video_player(cmd)
+
+
+    def launch_video_player(self,cmd,streaminfo=None):
+        if self.playbackmode == PLAYBACKMODE_INTERNAL:
+
+            if cmd is not None:
+                # Play URL from network or disk
+                self.videoframe.get_videopanel().Load(cmd,streaminfo=streaminfo)
+            else:
+                # Play using direct callbacks from the VLC C-code
+                self.videoframe.get_videopanel().Load(cmd,streaminfo=streaminfo)
+
+            self.videoframe.show_videoframe()
+            self.videoframe.get_videopanel().StartPlay()
+        else:
+            # Launch an external player
+            # Play URL from network or disk
+            self.exec_video_player(cmd)
+
+
+    def stop_playback(self,reset=False):
+        """ Stop playback in current video window """
+        if self.playbackmode == PLAYBACKMODE_INTERNAL and self.videoframe is not None:
+            self.videoframe.get_videopanel().Stop()
+            if reset:
+                self.videoframe.get_videopanel().Reset()
+        # Forget which Download was driving VOD.
+        self.set_vod_download(None)
+
+    def show_loading(self):
+        """Show the loading indicator in the internal video panel, if any."""
+        if self.playbackmode == PLAYBACKMODE_INTERNAL and self.videoframe is not None:
+            self.videoframe.get_videopanel().ShowLoading()
+
+    def close(self):
+        """ Stop playback and close current video window """
+        if self.playbackmode == PLAYBACKMODE_INTERNAL and self.videoframe is not None:
+            self.videoframe.hide_videoframe()
+        self.set_vod_download(None)
+
+    def play(self,ds, selectedinfilename=None):
+        """ Used by Tribler Main """
+        self.determine_playbackmode()
+        
+        d = ds.get_download()
+        tdef = d.get_def()
+        videofiles = d.get_dest_files(exts=videoextdefaults)
+        
+        if len(videofiles) == 0:
+            print >>sys.stderr,"videoplay: play: No video files found! Let user select"
+            # Let user choose any file
+            videofiles = d.get_dest_files(exts=None)
+            
+            
+        selectedoutfilename= None
+        if selectedinfilename is None:
+            # User didn't select file to play, select if there is a single, or ask
+            if len(videofiles) > 1:
+                infilenames = []
+                for infilename,diskfilename in videofiles:
+                    infilenames.append(infilename)
+                selectedinfilename = self.ask_user_to_select_video(infilenames)
+                print >> sys.stderr , "selectedinfilename == None" , selectedinfilename , len(selectedinfilename)
+                if selectedinfilename is None:
+                    print >>sys.stderr,"videoplay: play: User selected no video"
+                    return
+                for infilename,diskfilename in videofiles:
+                    if infilename == selectedinfilename:
+                        selectedoutfilename = diskfilename
+            else:
+                selectedinfilename = videofiles[0][0]
+                selectedoutfilename = videofiles[0][1]
+        else:
+            #print >> sys.stderr , "videoplay: play: selectedinfilename not None" , selectedinfilename , len(selectedinfilename)
+            for infilename,diskfilename in videofiles:
+                if infilename == selectedinfilename:
+                    selectedoutfilename = diskfilename
+        if self.videoframe is not None:
+            self.videoframe.get_videopanel().SetLoadingText(selectedinfilename)
+                
+        # 23/02/10 Boudewijn: This Download does not contain the
+        # selectedinfilename in the available files.  It is likely
+        # that this is a multifile torrent and that another file was
+        # previously selected for download.
+        if selectedoutfilename is None:
+            return self.play_vod(ds, selectedinfilename)
+
+        print >> sys.stderr , "videoplay: play: PROGRESS" , ds.get_progress()
+        complete = ds.get_progress() == 1.0 or ds.get_status() == DLSTATUS_SEEDING
+
+        bitrate = tdef.get_bitrate(selectedinfilename)
+        if bitrate is None and not complete:
+            video_analyser_path = self.utility.config.Read('videoanalyserpath')
+            if not os.access(video_analyser_path,os.F_OK):
+                self.onError(self.utility.lang.get('videoanalysernotfound'),video_analyser_path,self.utility.lang.get('videoanalyserwhereset'))
+                return
+
+        # The VLC MediaControl API's playlist_add_item() doesn't accept unicode filenames.
+        # So if the file to play is unicode we play it via HTTP. The alternative is to make
+        # Tribler save the data in non-unicode filenames.
+        #
+        flag = self.playbackmode == PLAYBACKMODE_INTERNAL and not self.is_ascii_filename(selectedoutfilename)
+        
+        if complete:
+            print >> sys.stderr, 'videoplay: play: complete'
+            if flag:
+                self.play_file_via_httpserv(selectedoutfilename)
+            else:
+                self.play_file(selectedoutfilename)
+            
+            self.manage_others_when_playing_from_file(d)
+            # Fake it, to get DL status reporting for right Download
+            self.set_vod_download(d)
+        else:
+            print >> sys.stderr, 'videoplay: play: not complete'
+            self.play_vod(ds,selectedinfilename)
+
+
+    def play_vod(self,ds,infilename):
+        """ Called by GUI thread when clicking "Play ASAP" button """
+
+        d = ds.get_download()
+        tdef = d.get_def()
+        # For multi-file torrent: when the user selects a different file, play that
+        oldselectedfile = None
+        if not tdef.get_live() and ds.is_vod() and tdef.is_multifile_torrent():
+            oldselectedfiles = d.get_selected_files()
+            oldselectedfile = oldselectedfiles[0] # Should be just one
+        
+        # 1. (Re)Start torrent in VOD mode
+        switchfile = (oldselectedfile is not None and oldselectedfile != infilename) 
+
+        # NOTE(review): unconditional debug print -- instrumentation build.
+        print >> sys.stderr, ds.is_vod() , switchfile , tdef.get_live()
+        if not ds.is_vod() or switchfile or tdef.get_live():
+
+            
+            if switchfile:
+                if self.playbackmode == PLAYBACKMODE_INTERNAL:
+                    self.videoframe.get_videopanel().Reset()
+            
+            #[proceed,othertorrentspolicy] = self.warn_user(ds,infilename)
+            # User warning dialog is disabled: always proceed with the
+            # stop-and-restart policy for other torrents.
+            proceed = True
+            othertorrentspolicy = OTHERTORRENTS_STOP_RESTART
+            
+            if not proceed:
+                # User bailing out
+                return
+
+            if DEBUG:
+                print >>sys.stderr,"videoplay: play_vod: Enabling VOD on torrent",`d.get_def().get_name()`
+
+            self.manage_other_downloads(othertorrentspolicy,targetd = d)
+
+            # Restart download
+            d.set_video_event_callback(self.sesscb_vod_event_callback)
+            d.set_video_events(self.get_supported_vod_events())
+            if d.get_def().is_multifile_torrent():
+                d.set_selected_files([infilename])
+            print >>sys.stderr,"videoplay: play_vod: Restarting existing Download",`ds.get_download().get_def().get_infohash()`
+            self.set_vod_download(d)
+            d.restart()
+
+    def restart_other_downloads(self, download_state_list):
+        def get_vod_download_status(default):
+            for download_state in download_state_list:
+                if self.vod_download == download_state.get_download():
+                    return download_state.get_status()
+            return default
+
+        if self.resume_by_system:
+            # resume when there is no VOD download
+            if self.vod_download is None:
+                self.resume_by_system += 1
+                if DEBUG: print >> sys.stderr, "VideoPlayer: restart_other_downloads: Resume because vod_download is None", "(%d)" % self.resume_by_system
+
+            # resume when the VOD download is not part of download_state_list
+            elif not self.vod_download in [download_state.get_download() for download_state in download_state_list]:
+                self.resume_by_system += 1
+                if DEBUG:
+                    print >> sys.stderr, "VideoPlayer: restart_other_downloads: Resume because", `self.vod_download.get_def().get_name()`, "not in list", "(%d)" % self.resume_by_system
+                    print >> sys.stderr, "VideoPlayer: list:", `[download_state.get_download().get_def().get_name() for download_state in download_state_list]`
+
+            # resume when the VOD download has finished downloading
+            elif not get_vod_download_status(DLSTATUS_ALLOCATING_DISKSPACE) in (DLSTATUS_ALLOCATING_DISKSPACE, DLSTATUS_WAITING4HASHCHECK, DLSTATUS_HASHCHECKING, DLSTATUS_DOWNLOADING):
+                self.resume_by_system += 1
+                if DEBUG:
+                    print >> sys.stderr, "VideoPlayer: restart_other_downloads: Resume because vod_download_status is inactive", "(%d)" % self.resume_by_system
+                    print >> sys.stderr, "VideoPlayer: status:", dlstatus_strings[get_vod_download_status(DLSTATUS_ALLOCATING_DISKSPACE)]
+
+            # otherwise we do not resume
+            else:
+                self.resume_by_system = 1
+
+            # because of threading issues it is possible that we have
+            # false positives. therefore we will only resume torrents
+            # after we checked 2 times (once every second)
+            if self.resume_by_system > 2:
+                self.resume_by_system = 0
+
+                # sometimes the self.vod_download stays set to a
+                # download class that is no longer downloading
+                self.set_vod_download(None)
+
+                for download_state in download_state_list:
+                    download = download_state.get_download()
+                    torrent_def = download.get_def()
+                    infohash = torrent_def.get_infohash()
+                    
+                    from BaseLib.Main.vwxGUI.UserDownloadChoice import UserDownloadChoice
+                    self.user_download_choice = UserDownloadChoice.get_singleton()
+                    user_state = self.user_download_choice.get_download_state(infohash)
+
+                    # resume a download unless the user explicitly
+                    # stopped the download
+                    if not user_state == "stop":
+                        if DEBUG: print >> sys.stderr, "VideoPlayer: restart_other_downloads: Restarting", `download.get_def().get_name()`
+                        download.set_mode(DLMODE_NORMAL)
+                        download.restart()
+
def manage_other_downloads(self,othertorrentspolicy, targetd = None):
    """ Apply the user's policy for what to do with the other (non-target)
    Downloads before VOD/live playback starts.

    @param othertorrentspolicy One of the OTHERTORRENTS_* constants.
    @param targetd The Download about to be played, or None.
    """
    # Counter read by restart_other_downloads(); starting playback resets
    # the resume bookkeeping.
    self.resume_by_system = 1
    if DEBUG: print >> sys.stderr, "VideoPlayer: manage_other_downloads"

    # Both STOP and STOP_RESTART stop the other torrents now; they differ
    # only in whether restart_other_downloads() resumes them later.
    policy_stop = othertorrentspolicy == OTHERTORRENTS_STOP or \
                  othertorrentspolicy == OTHERTORRENTS_STOP_RESTART

    for download in self.utility.session.get_downloads():
        if download.get_def().get_live():
            # Filter out live torrents, they are always
            # removed. They stay in myPreferenceDB so can be
            # restarted.
            if DEBUG: print >>sys.stderr,"VideoPlayer: manage_other_downloads: Remove live", `download.get_def().get_name()`
            self.utility.session.remove_download(download)

        elif download == targetd:
            # NOTE(review): the log message says "Leave" but the target is
            # stopped; presumably the caller restarts it in VOD mode
            # afterwards -- confirm against the playback entry points.
            if DEBUG: print >>sys.stderr,"VideoPlayer: manage_other_downloads: Leave", `download.get_def().get_name()`
            download.stop()

        elif policy_stop:
            if DEBUG: print >>sys.stderr,"VideoPlayer: manage_other_downloads: Stop", `download.get_def().get_name()`
            download.stop()

        else:
            # Any other policy: leave the download running untouched.
            if DEBUG: print >>sys.stderr,"VideoPlayer: manage_other_downloads: Ignore", `download.get_def().get_name()`
+
def manage_others_when_playing_from_file(self,targetd):
    """ Ensure no other Download remains in VOD mode while we play a
    local file, so none of them can interrupt the playback.

    Live VOD downloads are removed from the Session entirely; regular
    VOD downloads are bounced back into normal (background) mode.
    """
    for download in self.utility.session.get_downloads():
        if download.get_mode() != DLMODE_VOD:
            continue
        if download.get_def().get_live():
            # Live streams cannot be demoted to a normal download.
            self.utility.session.remove_download(download)
        else:
            # Restart as a plain, non-streaming download.
            download.stop()
            download.set_mode(DLMODE_NORMAL)
            download.restart()
+
+
+
+
def start_and_play(self,tdef,dscfg, selectedinfilename = None):
    """ Called by GUI thread when Tribler started with live or video torrent on cmdline.

    @param tdef TorrentDef of the content to play.
    @param dscfg DownloadStartupConfig, modified in place (VOD callbacks
        and, for multi-file torrents, the selected file are set on it).
    @param selectedinfilename File name inside a multi-file torrent, or
        None to auto-select / ask the user.
    @return The started Download, or None if no playable file was chosen.
    """

    # ARNO50: > Preview1: TODO: make sure this works better when Download already existed.

    if selectedinfilename == None:
        if not tdef.get_live():
            # Auto-pick when the torrent has exactly one video file,
            # otherwise ask the user via a chooser dialog.
            videofiles = tdef.get_files(exts=videoextdefaults)
            if len(videofiles) == 1:
                selectedinfilename = videofiles[0]
            elif len(videofiles) > 1:
                selectedinfilename = self.ask_user_to_select_video(videofiles)

    if selectedinfilename or tdef.get_live():
        if tdef.is_multifile_torrent():
            dscfg.set_selected_files([selectedinfilename])

        # Stop (and later restart) the other torrents so they do not
        # compete with the stream for bandwidth.
        othertorrentspolicy = OTHERTORRENTS_STOP_RESTART
        self.manage_other_downloads(othertorrentspolicy,targetd = None)

        # Restart download
        dscfg.set_video_event_callback(self.sesscb_vod_event_callback)
        dscfg.set_video_events(self.get_supported_vod_events())
        print >>sys.stderr,"videoplay: Starting new VOD/live Download",`tdef.get_name()`

        download = self.utility.session.start_download(tdef,dscfg)

        if self.videoframe is not None:
            self.videoframe.get_videopanel().SetLoadingText(selectedinfilename)

        self.set_vod_download(download)
        return download
    else:
        # User cancelled the chooser or the torrent has no video file.
        return None
+        
+    
def sesscb_vod_event_callback(self,d,event,params):
    """ Called by the Session when the content of the Download is ready.

    Runs on the Session thread; the real work is marshalled to the wx
    GUI thread via wx.CallAfter. """

    print >>sys.stderr,"videoplay: sesscb_vod_event_callback called",currentThread().getName(),"###########################################################"
    wx.CallAfter(self.gui_vod_event_callback,d,event,params)
+
def gui_vod_event_callback(self,d,event,params):
    """ Handle a VOD event on the wx GUI thread. Also called by SwarmPlayer.

    @param d The Download generating the event.
    @param event VODEVENT_START / VODEVENT_PAUSE / VODEVENT_RESUME.
    @param params For VODEVENT_START: dict with "filename" (local path or
        None), "mimetype", "stream" and "length".
    """

    print >>sys.stderr,"videoplay: gui_vod_event:",event
    if event == VODEVENT_START:
        filename = params["filename"]
        mimetype = params["mimetype"]
        stream   = params["stream"]
        length   = params["length"]

        if filename:
            # Content complete on disk: play directly from the file.
            self.play_file(filename)
        else:
            # Play from the engine's stream object.
            if d.get_def().get_live():
                cachestream = stream
                blocksize = d.get_def().get_piece_length()
            else:
                piecelen = d.get_def().get_piece_length()
                if piecelen > 2 ** 17:
                    # Arno, 2010-01-21:
                    # Workaround for streams with really large piece
                    # sizes. For some content/containers, VLC can do
                    # GET X-, GET X+10K-, GET X+20K HTTP requests
                    # and we would answer these by putting megabytes
                    # into the stream buffer, of which only 10K would be
                    # used. This kills performance. Hence I add a caching
                    # stream that tries to resolve answers from its internal
                    # buffer, before reading the engine's stream.
                    # This works, but only if the HTTP server doesn't
                    # read too aggressively, i.e., uses small blocksize.
                    #
                    cachestream = SmartCachingStream(stream)
                    blocksize = max(32768,piecelen/8)
                else:
                    cachestream = stream
                    blocksize = piecelen

            if d.get_def().get_live() and is_ogg(d.get_def().get_name_as_unicode()):
                # Live Ogg stream. To support this we need to do
                # two things:
                # 1. Write Ogg headers (stored in .tstream)
                # 2. Find first Ogg page in stream.
                cachestream = OggMagicLiveStream(d.get_def(),stream)

            # Estimate duration. Video player (e.g. VLC) often can't tell
            # when streaming.
            estduration = None
            if d.get_def().get_live():
                # Set correct Ogg MIME type
                if is_ogg(d.get_def().get_name_as_unicode()):
                    params['mimetype'] = 'application/ogg'
            else:
                file = None
                if d.get_def().is_multifile_torrent():
                    file = d.get_selected_files()[0]
                bitrate = d.get_def().get_bitrate(file)
                if bitrate is not None:
                    estduration = float(length) / float(bitrate)

                # Set correct Ogg MIME type
                if file is None:
                    if is_ogg(d.get_def().get_name_as_unicode()):
                        params['mimetype'] = 'application/ogg'
                else:
                    if is_ogg(file):
                        params['mimetype'] = 'application/ogg'

            # BUGFIX(review): streaminfo/play_stream were indented one
            # level too deep (inside the non-live branch above), so live
            # streams prepared cachestream/blocksize but never reached
            # play_stream. Dedented so both live and non-live streams
            # are actually played.
            streaminfo = {'mimetype':mimetype,'stream':cachestream,'length':length,'blocksize':blocksize,'estduration':estduration}
            self.play_stream(streaminfo)

    elif event == VODEVENT_PAUSE:
        if self.videoframe is not None:
            self.videoframe.get_videopanel().PlayPause()
        self.set_player_status("Buffering...")
    elif event == VODEVENT_RESUME:
        if self.videoframe is not None:
            self.videoframe.get_videopanel().PlayPause()
        self.set_player_status("")
+
def ask_user_to_select_video(self,videofiles):
    """ Show a modal chooser dialog and return the name of the video
    file the user picked, or None if the dialog was cancelled. """
    chooser = VideoChooser(self.videoframe.get_window(),self.utility,videofiles,title='Tribler',expl='Select which file to play')
    chosen = None
    if chooser.ShowModal() == wx.ID_OK:
        chosen = videofiles[chooser.getChosenIndex()]
    chooser.Destroy()
    return chosen
+
def is_ascii_filename(self,filename):
    """ Return True iff filename is a plain byte string or can be
    encoded as strict ASCII; False (with a logged traceback) otherwise.

    @param filename A str or unicode file name.
    @return Boolean. """
    if isinstance(filename,str):
        # Plain byte strings are passed to players as-is.
        return True
    try:
        filename.encode('ascii','strict')
        return True
    except Exception:
        # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed. Encoding failures (and unexpected
        # argument types) still count as non-ASCII, best-effort.
        print_exc()
        return False
+
def warn_user(self,ds,infilename):
    """ Show the VOD/live warning dialog before playback starts.

    @return [user_confirmed, othertorrentspolicy], or None when the
        warning is skipped (sole download and live). """
    is_live = ds.get_download().get_def().get_live()
    if is_live and not self.other_downloads:
        # A single live download needs no "what about the others" warning.
        return

    dialog = VODWarningDialog(self.videoframe.get_window(),self.utility,ds,infilename,self.other_downloads,is_live)
    confirmed = dialog.ShowModal() == wx.ID_OK
    policy = dialog.get_othertorrents_policy()
    dialog.Destroy()
    return [confirmed,policy]
+
def create_url(self,videoserver,upath):
    """ Build the localhost HTTP URL at which the internal video server
    serves the given unicode path. """
    host = 'http://127.0.0.1:'+str(videoserver.get_port())
    quoted_path = urllib.quote(unicode2str(upath))
    return host+quoted_path
+
+
+
def get_video_player(self,ext,videourl,mimetype=None):
    """ Determine the MIME type and the player command for a video URL.

    @param ext File extension including the dot (e.g. '.avi'); used to
        guess the MIME type when none is given.
    @param videourl URL (usually of the internal HTTP server) to play.
    @param mimetype Known MIME type, or None to derive it.
    @return [mimetype,cmd]: for the internal player cmd is the URL
        itself, otherwise a shell command line; [None,None] when no
        usable external player is configured.
    """

    video_player_path = self.utility.config.Read('videoplayerpath')
    if DEBUG:
        print >>sys.stderr,"videoplay: Default player is",video_player_path

    if mimetype is None:
        if sys.platform == 'win32':
            # TODO: Use Python's mailcap facility on Linux to find player
            # NOTE(review): playcmd is only assigned on win32 paths; the
            # EXTERNAL_MIME branch below also requires win32, so it is
            # never read unassigned -- but the coupling is fragile.
            [mimetype,playcmd] = win32_retrieve_video_play_command(ext,videourl)
            if DEBUG:
                print >>sys.stderr,"videoplay: Win32 reg said playcmd is",playcmd

        if mimetype is None:
            # Fall back to extension-based guesses.
            if ext == '.avi':
                # Arno, 2010-01-08: Hmmm... video/avi is not official registered at IANA
                mimetype = 'video/avi'
            elif ext == '.mpegts' or ext == '.ts':
                mimetype = 'video/mp2t'
            else:
                mimetype = 'video/mpeg'
    else:
        if sys.platform == 'win32':
            [mimetype,playcmd] = win32_retrieve_playcmd_from_mimetype(mimetype,videourl)

    if self.playbackmode == PLAYBACKMODE_INTERNAL:
        if DEBUG:
            print >>sys.stderr,"videoplay: using internal player"
        return [mimetype,videourl]
    elif self.playbackmode == PLAYBACKMODE_EXTERNAL_MIME and sys.platform == 'win32':
        if playcmd is not None:
            cmd = 'start /B "TriblerVideo" '+playcmd
            return [mimetype,cmd]

    # Fall through: use the user-configured external player.
    if DEBUG:
        print >>sys.stderr,"videoplay: Defaulting to default player",video_player_path
    qprogpath = quote_program_path(video_player_path)
    #print >>sys.stderr,"videoplay: Defaulting to quoted prog",qprogpath
    if qprogpath is None:
        return [None,None]
    qvideourl = self.escape_path(videourl)
    playcmd = qprogpath+' '+qvideourl
    if sys.platform == 'win32':
        cmd = 'start /B "TriblerVideo" '+playcmd
    elif sys.platform == 'darwin':
        cmd = 'open -a '+playcmd
    else:
        cmd = playcmd
    if DEBUG:
        print >>sys.stderr,"videoplay: using external user-defined player by executing ",cmd
    return [mimetype,cmd]
+
+
+
def exec_video_player(self,cmd):
    """ Launch an external player by running cmd in the background,
    keeping its pipes in self.player_out/self.player_in.

    NOTE(review): os.popen2 is deprecated since Python 2.6;
    subprocess.Popen is the modern replacement. """
    if DEBUG:
        print >>sys.stderr,"videoplay: Command is @"+cmd+"@"
    # I get a weird problem on Linux. When doing a
    # os.popen2("vlc /tmp/file.wmv") I get the following error:
    #[00000259] main interface error: no suitable interface module
    #[00000001] main private error: interface "(null)" initialization failed
    #
    # The only thing that appears to work is
    # os.system("vlc /tmp/file.wmv")
    # but that halts Tribler, as it waits for the created shell to
    # finish. Hmmmm....
    #
    try:
        if sys.platform == 'win32':
            #os.system(cmd)
            (self.player_out,self.player_in) = os.popen2( cmd, 'b' )
        else:
            # NOTE(review): identical to the win32 branch; kept separate,
            # presumably for future platform-specific handling.
            (self.player_out,self.player_in) = os.popen2( cmd, 'b' )
    except Exception, e:
        print_exc()
        self.onError(self.utility.lang.get('videoplayerstartfailure'),cmd,str(e.__class__)+':'+str(e))
+
+
+
def escape_path(self,path):
    """ Quote a path for use on a shell command line when it contains
    spaces and is not already quoted: double quotes on Windows, single
    quotes elsewhere. Returns the path unchanged otherwise. """
    already_quoted = path[0] == '"' or path[0] == "'"
    if not already_quoted and ' ' in path:
        quote = '"' if sys.platform == 'win32' else "'"
        path = quote + path + quote
    return path
+
+
def onError(self,action,value,errmsg=u''):
    """ Show an error message dialog (delegates to onMessage). """
    self.onMessage(wx.ICON_ERROR,action,value,errmsg)
+
def onWarning(self,action,value,errmsg=u''):
    """ Show a warning dialog; despite the name it uses the
    information icon (delegates to onMessage). """
    self.onMessage(wx.ICON_INFORMATION,action,value,errmsg)
+
def onMessage(self,icon,action,value,errmsg=u''):
    """ Show a modal message dialog with action, value and errmsg on
    separate lines.

    NOTE(review): the original comment warned that self.utility may not
    be valid here, yet the dialog title still reads it -- confirm. """
    # Don't use language independence stuff, self.utility may not be
    # valid.
    msg = '\n'.join([action,value,errmsg])+'\n'
    dlg = wx.MessageDialog(None, msg, self.utility.lang.get('videoplayererrortitle'), wx.OK|icon)
    dlg.ShowModal()
    dlg.Destroy()
+
def set_vod_download(self,d):
    """ Remember which Download currently drives VOD (None = no VOD). """
    self.vod_download = d
+        
def get_vod_download(self):
    """ Return the Download currently driving VOD, or None. """
    return self.vod_download
+
+    #
+    # Set information about video playback progress that is displayed
+    # to the user.
+    #
def set_content_name(self,name):
    """ Show the content name in the internal video panel (no-op when
    there is no internal video frame). """
    if self.videoframe is not None:
        self.videoframe.get_videopanel().SetContentName(name)
+
def set_content_image(self,wximg):
    """ Show a content thumbnail in the internal video panel (no-op when
    there is no internal video frame). """
    if self.videoframe is not None:
        self.videoframe.get_videopanel().SetContentImage(wximg)
+
def set_player_status(self,msg):
    """ Show a status message in the internal video panel (no-op when
    there is no internal video frame). """
    if self.videoframe is not None:
        self.videoframe.get_videopanel().SetPlayerStatus(msg)
+
def set_player_status_and_progress(self,msg,pieces_complete):
    """ Update the internal video panel with a status message and the
    piece-completion progress (no-op without an internal frame). """
    if self.videoframe is not None:
        self.videoframe.get_videopanel().UpdateStatus(msg,pieces_complete)
+        
def set_save_button(self,enable,savebutteneventhandler):
    """ Enable/disable the Save button on the internal player panel and
    attach its event handler. Internal playback mode only. """
    if self.playbackmode == PLAYBACKMODE_INTERNAL and self.videoframe is not None:
        self.videoframe.get_videopanel().EnableSaveButton(enable,savebutteneventhandler)
+
def get_state(self):
    """ Return the media state of the internal player panel; external
    players cannot be queried, so report MEDIASTATE_PLAYING for them. """
    if self.playbackmode == PLAYBACKMODE_INTERNAL and self.videoframe is not None:
        return self.videoframe.get_videopanel().GetState()
    return MEDIASTATE_PLAYING
+
def determine_playbackmode(self):
    """ Select the playback mode: the preferred mode if it is feasible
    on this system, otherwise the first feasible one. """
    feasible_modes = return_feasible_playback_modes(self.utility.getPath())
    chosen = self.preferredplaybackmode
    if chosen not in feasible_modes:
        chosen = feasible_modes[0]
    self.playbackmode = chosen
+
def get_playbackmode(self):
    """ Return the active PLAYBACKMODE_* constant. """
    return self.playbackmode
+
+    #def set_preferredplaybackmode(self,mode):
+    #    This is a bit complex: If there is no int. player avail we change
+    #    the VideoFrame to contain some minimal info. Would have to dynamically
+    #    change that back if we allow dynamic switching of video player.
+    #    self.preferredplaybackmode = mode
+
+    #
+    # Internal methods
+    #
def videohttpserver_error_callback(self,e,url):
    """ Called by HTTP serving thread; marshals the error to the GUI
    thread via wx.CallAfter. """
    wx.CallAfter(self.videohttpserver_error_guicallback,e,url)
+        
def videohttpserver_error_guicallback(self,e,url):
    """ GUI-thread half of HTTP-server error handling: close the
    external player when a close callback is registered.

    @param e Exception reported by the serving thread.
    @param url URL that was being served. """
    print >>sys.stderr,"videoplay: Video HTTP server reported error",str(e)
    # if e[0] == ECONNRESET and self.closeextplayercallback is not None:
    # NOTE(review): the ECONNRESET filter above is disabled, so ANY
    # server error now closes the external player.
    if self.closeextplayercallback is not None:
        self.closeextplayercallback()
+
def videohttpserver_set_status_callback(self,status):
    """ Called by HTTP serving thread; marshals the status update to the
    GUI thread via wx.CallAfter. """
    wx.CallAfter(self.videohttpserver_set_status_guicallback,status)
+
def videohttpserver_set_status_guicallback(self,status):
    """ GUI-thread half of HTTP-server status updates.

    NOTE(review): unlike set_player_status() this does not guard against
    self.videoframe being None -- confirm callers guarantee a frame. """
    self.videoframe.get_videopanel().SetPlayerStatus(status)
+
+
+
class VideoChooser(wx.Dialog):
    """ Modal dialog that lets the user pick one video file from a
    torrent's file list. Use ShowModal(); on wx.ID_OK the chosen index
    is available via getChosenIndex(). """

    def __init__(self,parent,utility,filelist,title=None,expl=None):
        """
        @param parent Parent wx window.
        @param utility Utility instance (used for translated strings).
        @param filelist List of (possibly binary-string) file names.
        @param title Dialog title; None = default from language file.
        @param expl Explanation text above the chooser; None = default
            from language file.
        """
        self.utility = utility
        self.filelist = []

        # Convert to Unicode for display
        for file in filelist:
            u = bin2unicode(file)
            self.filelist.append(u)

        if DEBUG:
            print >>sys.stderr,"VideoChooser: filelist",self.filelist

        style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
        if title is None:
            title = self.utility.lang.get('selectvideofiletitle')
        wx.Dialog.__init__(self,parent,-1,title,style=style)

        sizer = wx.BoxSizer(wx.VERTICAL)
        filebox = wx.BoxSizer(wx.VERTICAL)
        self.file_chooser=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(300, -1), self.filelist)
        self.file_chooser.SetSelection(0)

        if expl is None:
            # BUGFIX(review): the looked-up default string was discarded
            # (bare expression statement), leaving expl as None and
            # breaking the wx.StaticText below; assign it instead.
            expl = self.utility.lang.get('selectvideofile')
        filebox.Add(wx.StaticText(self, -1, expl), 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        filebox.Add(self.file_chooser)
        sizer.Add(filebox, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)

        buttonbox = wx.BoxSizer(wx.HORIZONTAL)
        okbtn = wx.Button(self, wx.ID_OK, label=self.utility.lang.get('ok'), style = wx.BU_EXACTFIT)
        buttonbox.Add(okbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        cancelbtn = wx.Button(self, wx.ID_CANCEL, label=self.utility.lang.get('cancel'), style = wx.BU_EXACTFIT)
        buttonbox.Add(cancelbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        sizer.Add(buttonbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)

        self.SetSizerAndFit(sizer)

    def getChosenIndex(self):
        """ Return the index (into the constructor's filelist) of the
        currently selected file. """
        return self.file_chooser.GetSelection()
+
+
+
class VODWarningDialog(wx.Dialog):
    """ Modal warning shown before VOD/live playback, asking the user to
    confirm and -- when other downloads are active -- to choose what to
    do with them (restart/stop/leave). Read the choice afterwards via
    get_othertorrents_policy(). """

    def __init__(self, parent, utility, ds, infilename, other_downloads, islive):
        """
        @param parent Parent wx window.
        @param utility Utility instance (translated strings).
        @param ds DownloadState of the download about to be played.
        @param infilename Name of the file to play (currently unused in
            the body; kept for interface compatibility).
        @param other_downloads Whether other downloads are active; the
            policy chooser is only shown if True.
        @param islive Selects the live vs. VOD title/message texts.
        """
        self.parent = parent
        self.utility = utility

        style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
        if islive:
            title = self.utility.lang.get('livewarntitle')
        else:
            title = self.utility.lang.get('vodwarntitle')

        wx.Dialog.__init__(self,parent,-1,title,style=style)

        if islive:
            msg = self.utility.lang.get('livewarngeneral')
        else:
            msg = self.utility.lang.get('vodwarngeneral')

        # Disabled bitrate-feasibility warning (kept for reference).
        """
        if bitrate is None:
            msg += self.utility.lang.get('vodwarnbitrateunknown')
            msg += self.is_mov_file(videoinfo)
            msg += self.utility.lang.get('vodwarnconclusionno')
        elif bitrate > maxuploadrate and maxuploadrate != 0:
            s = self.utility.lang.get('vodwarnbitrateinsufficient') % (str(bitrate/1024),str(maxuploadrate)+" KB/s")
            msg += s
            msg += self.is_mov_file(videoinfo)
            msg += self.utility.lang.get('vodwarnconclusionno')
        elif bitrate > maxmeasureduploadrate and maxuploadrate == 0:
            s = self.utility.lang.get('vodwarnbitrateinsufficientmeasured') % (str(bitrate/1024),str(maxuploadrate)+" KB/s")
            msg += s
            msg += self.is_mov_file(videoinfo)
            msg += self.utility.lang.get('vodwarnconclusionno')
            
        else:
            if maxuploadrate == 0:
                rate = self.utility.lang.get('unlimited')
            else:
                rate = str(maxuploadrate)+" KB/s"
            s = self.utility.lang.get('vodwarnbitratesufficient') % (str(bitrate/1024),rate)
            msg += s
            extra = self.is_mov_file(videoinfo)
            if extra  == '':
                msg += self.utility.lang.get('vodwarnconclusionyes')
            else:
                msg += extra
                msg += self.utility.lang.get('vodwarnconclusionno')
        
        """
        sizer = wx.BoxSizer(wx.VERTICAL)
        text = wx.StaticText(self, -1, msg)
        text.Wrap(500)
        sizer.Add(text, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL|wx.EXPAND, 5)

        # 22/08/08 boudewijn: only show the selectbox when there are
        # torrents that are actively downloading
        if other_downloads:
            # Choice indices line up with the OTHERTORRENTS_* constants.
            otherslist = [self.utility.lang.get('vodrestartothertorrents'),
                          self.utility.lang.get('vodstopothertorrents'),
                          self.utility.lang.get('vodleaveothertorrents')]

            othersbox = wx.BoxSizer(wx.VERTICAL)
            self.others_chooser=wx.Choice(self, -1, wx.Point(-1, -1), wx.Size(-1, -1), otherslist)
            self.others_chooser.SetSelection(OTHERTORRENTS_STOP_RESTART)

            othersbox.Add(wx.StaticText(self, -1, self.utility.lang.get('vodwhataboutothertorrentspolicy')), 1, wx.ALIGN_CENTER_VERTICAL)
            othersbox.Add(self.others_chooser)
            sizer.Add(othersbox, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        else:
            self.others_chooser = None

        sizer.Add(wx.StaticText(self, -1, self.utility.lang.get('vodwarnprompt')), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)

        buttonbox = wx.BoxSizer(wx.HORIZONTAL)
        okbtn = wx.Button(self, wx.ID_OK, label=self.utility.lang.get('yes'), style = wx.BU_EXACTFIT)
        buttonbox.Add(okbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        cancelbtn = wx.Button(self, wx.ID_CANCEL, label=self.utility.lang.get('no'), style = wx.BU_EXACTFIT)
        buttonbox.Add(cancelbtn, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        sizer.Add(buttonbox, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)

        self.SetSizerAndFit(sizer)

    def get_othertorrents_policy(self):
        """ Return the chosen OTHERTORRENTS_* policy index; defaults to
        OTHERTORRENTS_STOP_RESTART when no chooser was shown. """
        if self.others_chooser:
            idx = self.others_chooser.GetSelection()
        else:
            idx = OTHERTORRENTS_STOP_RESTART
        if DEBUG:
            print >>sys.stderr,"videoplay: Other-torrents-policy is",idx
        return idx

    def is_mov_file(self,videoinfo):
        """ Return the .mov warning text when videoinfo['inpath'] has a
        .mov extension, else ''. (Only referenced by the disabled
        bitrate check above.) """
        orig = videoinfo['inpath']
        (prefix,ext) = os.path.splitext(orig)
        low = ext.lower()
        if low == '.mov':
            return self.utility.lang.get('vodwarnmov')
        else:
            return ''
+            
+
def parse_playtime_to_secs(hhmmss):
    """ Convert a "[[HH:]MM:]SS" play-time string to seconds.
    Returns None when no (or more than three) numeric fields are found. """
    if DEBUG:
        print >>sys.stderr,"videoplay: Playtime is",hhmmss
    fields = re.findall("([0-9]+):*",hhmmss)
    weights = {1: (1,), 2: (60, 1), 3: (3600, 60, 1)}
    if len(fields) in weights:
        total = 0
        for value, weight in zip(fields, weights[len(fields)]):
            total += int(value) * weight
        return total
    return None
+
def return_feasible_playback_modes(syspath):
    """ Return the list of PLAYBACKMODE_* constants usable on this
    system, internal VLC-based playback first when available.

    @param syspath Installation path, used on win32 to locate the
        bundled "vlc" directory. """
    l = []
    try:
        # The import doubles as the feasibility test for the internal
        # player; failure falls through to external-only modes.
        import vlc

        if USE_VLC_RAW_INTERFACE:
            # check if the special raw interface is available
            # pylint: disable-msg=E1101
            if not inspect.ismethoddescriptor(vlc.MediaControl.set_raw_callbacks):
                raise Exception("Incorrect vlc plugin. This does not provide the set_raw_callbacks method")
            # pylint: enable-msg=E1101
        vlcpath = os.path.join(syspath,"vlc")
        if sys.platform == 'win32':
            # On Windows the bundled vlc directory must also exist.
            if os.path.isdir(vlcpath):
                l.append(PLAYBACKMODE_INTERNAL)
        else:
            l.append(PLAYBACKMODE_INTERNAL)
    except Exception:
        print_exc()

    # External playback is always assumed possible.
    if sys.platform == 'win32':
        l.append(PLAYBACKMODE_EXTERNAL_MIME)
        l.append(PLAYBACKMODE_EXTERNAL_DEFAULT)
    else:
        l.append(PLAYBACKMODE_EXTERNAL_DEFAULT)
    return l
+
diff --git a/instrumentation/next-share/BaseLib/Video/VideoServer.py b/instrumentation/next-share/BaseLib/Video/VideoServer.py
new file mode 100644 (file)
index 0000000..cba9a8c
--- /dev/null
@@ -0,0 +1,611 @@
+# Written by Jan David Mol, Arno Bakker
+# see LICENSE.txt for license information
+#
+
+import sys
+import time
+import socket
+import BaseHTTPServer
+from SocketServer import ThreadingMixIn
+from threading import RLock,Thread,currentThread
+from traceback import print_exc,print_stack
+import string
+from cStringIO import StringIO
+
+import os
+import BaseLib.Core.osutils
+
+# NOTE: DEBUG is set dynamically depending from DEBUGWEBUI and DEBUGCONTENT
+DEBUG = True
+DEBUGCONTENT = True    # verbose logging for content (stream) requests
+DEBUGWEBUI = False     # verbose logging for /webUI requests
+DEBUGLOCK = False      # trace every lock acquire/release in VideoHTTPServer
+DEBUGBASESERV = False  # log errors otherwise swallowed by handle_error()
+        
+def bytestr2int(b):
+    """ Convert an HTTP Range byte-position string to an int; '' maps to
+    None (an absent bound in a "begin-end" range spec). """
+    if b == "":
+        return None
+    else:
+        return int(b)
+
+
+class AbstractPathMapper:
+    """ Base class for URL-path -> streaminfo mappers registered via
+    VideoHTTPServer.add_path_mapper(). Subclasses override get(). """
+    
+    def __init__(self):
+        pass
+    
+    def get(self,path):
+        # Default: a plain-text "unknown path" body. NOTE(review): this dict
+        # carries no 'statuscode' key — see the lookup in
+        # VideoHTTPServer.acquire_inputstream, which indexes it; confirm.
+        msg = 'AbstractPathMapper: Unknown path '+path
+        stream = StringIO(msg)
+        streaminfo = {'mimetype':'text/plain','stream':stream,'length':len(msg)}
+        return streaminfo
+
+
+class VideoHTTPServer(ThreadingMixIn,BaseHTTPServer.HTTPServer):
+#class VideoHTTPServer(BaseHTTPServer.HTTPServer):
+    """
+    Arno: not using ThreadingMixIn makes it a single-threaded server.
+    
+    2009-09-08: Previously single or multi didn't matter because there would
+    always just be one request for one HTTP path. Now we started supporting HTTP
+    range queries and that results in parallel requests on the same path
+    (and thus our stream object). The reason there are parallel requests
+    is due to the funky way VLC uses HTTP range queries: It does not request 
+    begin1-end1, begin2-end2, begin3-end3, but begin1- & begin2- &
+    begin3-. That is, it requests almost the whole file everytime, and in
+    parallel too, aborting the earlier connections as it proceeds. 
+    
+    2009-12-05: I now made it Multi-threaded to also handle the NSSA search
+    API requests. The concurrency issue on the p2p streams is handled by
+    adding a lock per stream.
+    """
+    __single = None
+    
+    def __init__(self,port):
+        if VideoHTTPServer.__single:
+            raise RuntimeError, "HTTPServer is Singleton"
+        VideoHTTPServer.__single = self 
+
+        self.port = port
+        # Bind to loopback only: this server is for the local player.
+        BaseHTTPServer.HTTPServer.__init__( self, ("127.0.0.1",self.port), SimpleServer )
+        self.daemon_threads = True
+        self.allow_reuse_address = True
+        #self.request_queue_size = 10
+
+        # Protects urlpath2streaminfo; each streaminfo additionally gets its
+        # own per-stream 'lock' in set_inputstream().
+        self.lock = RLock()        
+        
+        self.urlpath2streaminfo = {} # Maps URL to streaminfo
+        self.mappers = [] # List of PathMappers
+        
+        self.errorcallback = None
+        self.statuscallback = None
+        
+    def getInstance(*args, **kw):
+        # NOTE(review): not thread-safe; assumes the first call happens
+        # before concurrent use — confirm with callers.
+        if VideoHTTPServer.__single is None:
+            VideoHTTPServer(*args, **kw)
+        return VideoHTTPServer.__single
+    getInstance = staticmethod(getInstance)
+    
+    def background_serve( self ):
+        """ Start serve_forever() on a daemon thread and return. """
+        name = "VideoHTTPServerThread-1"
+        self.thread2 = Thread(target=self.serve_forever,name=name)
+        self.thread2.setDaemon(True)
+        self.thread2.start()
+        #thread.start_new_thread( self.serve_forever, () )
+
+    def register(self,errorcallback,statuscallback):
+        """ Register callbacks invoked by SimpleServer on error / completion. """
+        self.errorcallback = errorcallback
+        self.statuscallback = statuscallback
+
+    def set_inputstream(self,streaminfo,urlpath):
+        """ Publish streaminfo under urlpath. Adds a per-stream 'lock' RLock
+        used to serialize concurrent readers of the same stream. """
+        if DEBUGLOCK:
+            print >>sys.stderr,"vs: set_input: lock",urlpath,currentThread().getName()
+        self.lock.acquire()
+        streaminfo['lock'] = RLock()
+        self.urlpath2streaminfo[urlpath] = streaminfo
+        if DEBUGLOCK:
+            print >>sys.stderr,"vs: set_input: unlock",urlpath,currentThread().getName()
+        self.lock.release()
+        
+    def acquire_inputstream(self,urlpath):
+        """ Look up streaminfo for urlpath (mappers first, then registered
+        streams). For registered streams the per-stream lock is returned
+        held; caller must release via release_inputstream(). """
+
+        streaminfo = None
+        # First check mappers, without locking, assuming video stream URL paths won't match mappers
+        for mapper in self.mappers:
+            streaminfo = mapper.get(urlpath)
+            # print >>sys.stderr,"videoserv: get_inputstream: Got streaminfo",`streaminfo`,"from",`mapper`
+            # NOTE(review): raises KeyError if a mapper returns a dict without
+            # 'statuscode' (AbstractPathMapper.get's default does) — confirm.
+            if streaminfo is not None and (streaminfo['statuscode'] == 200 or streaminfo['statuscode'] == 301):
+                return streaminfo
+
+        if DEBUGLOCK:
+            print >>sys.stderr,"vs: acq_input: lock",urlpath,currentThread().getName()
+        self.lock.acquire()
+        try:
+            streaminfo = self.urlpath2streaminfo.get(urlpath,None)
+        finally:
+            if DEBUGLOCK:
+                print >>sys.stderr,"vs: acq_input: unlock",urlpath,currentThread().getName()
+            self.lock.release()
+
+        # Grab lock of video stream, such that other threads cannot read from it. Do outside self.lock
+        if streaminfo is not None and 'lock' in streaminfo:
+            if DEBUGLOCK:
+                print >>sys.stderr,"vs: acq_input: stream: lock",urlpath,currentThread().getName()
+            streaminfo['lock'].acquire()
+        return streaminfo
+
+
+    def release_inputstream(self,urlpath):
+        """ Release the per-stream lock taken by acquire_inputstream(). """
+        if DEBUGLOCK:
+            print >>sys.stderr,"vs: rel_input: lock",urlpath,currentThread().getName()
+        self.lock.acquire()
+        try:
+            streaminfo = self.urlpath2streaminfo.get(urlpath,None)
+        finally:
+            if DEBUGLOCK:
+                print >>sys.stderr,"vs: rel_input: unlock",urlpath,currentThread().getName()
+            self.lock.release()
+
+        if streaminfo is not None and 'lock' in streaminfo:
+            if DEBUGLOCK:
+                print >>sys.stderr,"vs: rel_input: stream: unlock",urlpath,currentThread().getName()
+            streaminfo['lock'].release()
+
+
+    def del_inputstream(self,urlpath):
+        """ Unregister urlpath. Acquires the stream first so no reader is
+        mid-transfer when the mapping is removed.
+        NOTE(review): 'del' raises KeyError if urlpath was never registered
+        — confirm callers guarantee registration. """
+        if DEBUGLOCK:
+            print >>sys.stderr,"vs: del_input: enter",urlpath
+        streaminfo = self.acquire_inputstream(urlpath)
+        
+        if DEBUGLOCK:
+            print >>sys.stderr,"vs: del_input: lock",urlpath,currentThread().getName()
+        self.lock.acquire()
+        try:
+            del self.urlpath2streaminfo[urlpath]
+        finally:
+            if DEBUGLOCK:
+                print >>sys.stderr,"vs: del_input: unlock",urlpath,currentThread().getName()
+            self.lock.release()
+
+        if streaminfo is not None and 'lock' in streaminfo:
+            if DEBUGLOCK:
+                print >>sys.stderr,"vs: del_input: stream: unlock",urlpath,currentThread().getName()
+            streaminfo['lock'].release()
+
+
+    def get_port(self):
+        """ Return the TCP port this server is bound to. """
+        return self.port
+
+    def add_path_mapper(self,mapper):
+        """ WARNING: mappers cannot be added dynamically, must be registered before background_serve()
+        """
+        self.mappers.append(mapper)
+
+    def shutdown(self):
+        if DEBUG:
+            print >>sys.stderr,"videoserv: Shutting down HTTP"
+        # Stop by closing listening socket of HTTP server
+        self.socket.close()
+
+    
+    def handle_error(self, request, client_address):
+        """ Error inside the BaseHTTPServer that reports errors like:
+          File "c:\Python265\lib\socket.py", line 406, in readline
+            data = self._sock.recv(self._rbufsize)
+          error: [Errno 10053] An established connection was aborted by the software in your host machine
+          
+          As this fill the log when VLC uses HTTP range requests in its brutal
+          way (send offset, close conn, send offset+10K-, close conn), 
+          only print when really wanted.
+        """
+        if DEBUGBASESERV:
+            print >>sys.stderr,"VideoHTTPServer: handle_error",request,client_address
+            print_exc()
+            
+
+
+class SimpleServer(BaseHTTPServer.BaseHTTPRequestHandler):
+    """ Request handler for VideoHTTPServer / MultiHTTPServer: serves the
+    stream registered for the requested URL path, supporting HTTP/1.1
+    persistent connections, chunked transfer, and single-range GETs. """
+
+    # Set to False to ignore Range: headers and always send the full entity.
+    RANGE_REQUESTS_ENABLED=True
+    """
+    def __init__(self,request, client_address, server):
+        self.count = 0
+        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self,request,client_address,server)
+    """
+
+    def log_message(self, format, *args):
+        # Silence BaseHTTPRequestHandler's default per-request stderr logging.
+        pass
+
+    def do_GET(self):
+        """ 
+        Handle HTTP GET request. See remark about VLC's use of HTTP GET RANGE
+        requests above.
+        
+        Called by a separate thread for each request.
+        """
+        global DEBUG
+        try:
+            # Select debug verbosity per URL space (webUI vs content).
+            if self.path.startswith("/webUI"):
+                DEBUG = DEBUGWEBUI
+            else:
+                DEBUG = DEBUGCONTENT
+            
+            if DEBUG:
+                print >>sys.stderr,"videoserv: do_GET: Got request",self.path,self.headers.getheader('range'),currentThread().getName()
+                #print >>sys.stderr,"videoserv: do_GET: Range",self.headers.getrawheader('Range'),currentThread().getName()
+                
+            # 1. Get streaminfo for the data we should return in response
+            nbytes2send = None
+            nbyteswritten= 0
+            try:
+                streaminfo = self.server.acquire_inputstream(self.path)
+            except:
+                streaminfo = None
+            #print >>sys.stderr,"videoserv: do_GET: Got streaminfo",`streaminfo`
+            
+            # Ric: modified to create a persistent connection in case it's requested (HTML5)
+            if self.request_version == 'HTTP/1.1':
+                self.protocol_version = 'HTTP/1.1'
+                
+            try:
+                if streaminfo is None or ('statuscode' in streaminfo and streaminfo['statuscode'] != 200):
+                    # 2. Send error response
+                    if streaminfo is None:
+                        streaminfo = {'statuscode':500,'statusmsg':"Internal Server Error, couldn't find resource"}
+                    if DEBUG:
+                        print >>sys.stderr,"videoserv: do_GET: Cannot serve request",streaminfo['statuscode'],currentThread().getName()
+                        
+                    self.send_response(streaminfo['statuscode'])
+                    if streaminfo['statuscode'] == 301:
+                        # Redirect: 'statusmsg' carries the target URL here.
+                        self.send_header("Location", streaminfo['statusmsg'])
+                        self.end_headers()
+                    else:
+                        self.send_header("Content-Type","text/plain")
+                        self.send_header("Content-Length", len(streaminfo['statusmsg']))
+                        self.end_headers()
+                        self.wfile.write(streaminfo['statusmsg'])
+                    return
+                else:
+                    # 2. Prepare to send stream
+                    mimetype = streaminfo['mimetype']
+                    stream = streaminfo['stream']
+                    length = streaminfo['length']
+                    if 'blocksize' in streaminfo:
+                        blocksize = streaminfo['blocksize']
+                    else:
+                        blocksize = 65536
+                    if 'svc' in streaminfo:
+                        # When in SVC mode we return all data that we have 
+                        # currently. Subsequent requests will
+                        # return the next batch of data.
+                        svc = streaminfo['svc']
+                    else:
+                        svc = False
+
+        
+                #mimetype = 'application/x-mms-framed'
+                #mimetype = 'video/H264'
+                if DEBUG:
+                    print >>sys.stderr,"videoserv: do_GET: MIME type is",mimetype,"length",length,"blocksize",blocksize,currentThread().getName()
+    
+                # 3. Support for HTTP range queries: 
+                # http://tools.ietf.org/html/rfc2616#section-14.35
+                firstbyte = 0
+                if length is not None:
+                    lastbyte = length-1
+                else:
+                    lastbyte = None # to avoid print error below
+    
+                # NOTE: 'range' shadows the builtin range() within this method.
+                range = self.headers.getheader('range')
+                if self.RANGE_REQUESTS_ENABLED and length and range:
+                    # Handle RANGE query
+                    bad = False
+                    type, seek = string.split(range,'=')
+                    if seek.find(",") != -1:
+                        # - Range header contains set, not supported at the moment
+                        bad = True
+                    else:
+                        firstbytestr, lastbytestr = string.split(seek,'-')
+                        firstbyte = bytestr2int(firstbytestr)
+                        lastbyte = bytestr2int(lastbytestr)
+                
+                        if length is None:
+                            # - No length (live) 
+                            bad = True
+                        elif firstbyte is None and lastbyte is None:
+                            # - Invalid input
+                            bad = True
+                        elif firstbyte >= length:
+                            bad = True
+                        elif lastbyte >= length:
+                            if firstbyte is None:
+                                """ If the entity is shorter than the specified 
+                                suffix-length, the entire entity-body is used.
+                                """
+                                lastbyte = length-1
+                            else:
+                                bad = True
+                        
+                    if bad:
+                        # Send 416 - Requested Range not satisfiable and exit
+                        self.send_response(416)
+                        if length is None:
+                            crheader = "bytes */*"
+                        else:
+                            crheader = "bytes */"+str(length)
+                        self.send_header("Content-Range",crheader)
+                        self.end_headers()
+                        
+                        return
+                    
+                    if firstbyte is not None and lastbyte is None:
+                        # "100-" : byte 100 and further
+                        nbytes2send = length - firstbyte
+                        lastbyte = length - 1
+                    elif firstbyte is None and lastbyte is not None:
+                        # "-100" = last 100 bytes
+                        nbytes2send = lastbyte
+                        firstbyte = length - lastbyte
+                        lastbyte = length - 1
+                        
+                    else:
+                        nbytes2send = lastbyte+1 - firstbyte
+            
+                    # Arno, 2010-01-08: Fixed bug, now return /length
+                    crheader = "bytes "+str(firstbyte)+"-"+str(lastbyte)+"/"+str(length)
+            
+                    self.send_response(206)
+                    self.send_header("Content-Range",crheader)
+                else:
+                    # Normal GET request
+                    nbytes2send = length
+                    self.send_response(200)
+            
+            
+                if DEBUG:
+                    print >>sys.stderr,"videoserv: do_GET: final range",firstbyte,lastbyte,nbytes2send,currentThread().getName()
+            
+            
+                # 4. Seek in stream to desired offset, unless svc
+                if not svc:
+                    try:
+                        stream.seek(firstbyte)
+                    except:
+                        # Arno, 2010-10-17: Live will throw harmless exception, 
+                        # Ogg live needs it to reset to "send header" first state.
+                        # Better solution is to have OggMagicStream with
+                        # ControlledStream in BackgroundProcess.py
+                        print_exc()
+        
+                # For persistent connections keep the socket alive!
+                if self.request_version == 'HTTP/1.1':
+                    self.send_header("Connection", "Keep-Alive")
+                    # test.. to be adjusted depending on the request
+                    self.send_header("Keep-Alive", "timeout=15, max=100")
+                    
+                # 5. Send headers
+                self.send_header("Content-Type", mimetype)
+                self.send_header("Accept-Ranges", "bytes")
+
+
+                # Ric: bitrate needs to be detected even if the file is already completed
+                if streaminfo.has_key('bitrate') and length is not None:
+                    bitrate = streaminfo['bitrate']
+                    # Estimated duration in seconds = bytes / (bytes per second).
+                    estduration = float(length) / float(bitrate)
+                    self.send_header("X-Content-Duration", estduration)
+
+                if length is not None:
+                    self.send_header("Content-Length", nbytes2send)
+                else:
+                    # Unknown length (live): stream with chunked encoding.
+                    self.send_header("Transfer-Encoding", "chunked")
+                self.end_headers()
+    
+    
+                if svc:
+                    # 6. Send body: For SVC we send all we currently have, not blocking.
+                    data = stream.read()
+                    
+                    if len(data) > 0: 
+                        self.wfile.write(data)
+                    elif len(data) == 0:
+                        if DEBUG:
+                            print >>sys.stderr,"videoserv: svc: stream.read() no data" 
+                else:
+                    # 6. Send body (completely, a Range: or an infinite stream in chunked encoding
+                    done = False
+                    while True:
+                        data = stream.read(blocksize)
+                        if len(data) == 0:
+                            done = True
+                        
+                        #print >>sys.stderr,"videoserv: HTTP: read",len(data),"bytes",currentThread().getName()
+                        
+                        if length is None:
+                            # If length unknown, use chunked encoding
+                            # http://www.ietf.org/rfc/rfc2616.txt, $3.6.1 
+                            self.wfile.write("%x\r\n" % (len(data)))
+                        if len(data) > 0:
+                            # Limit output to what was asked on range queries:
+                            if length is not None and nbyteswritten+len(data) > nbytes2send:
+                                endlen = nbytes2send-nbyteswritten
+                                if endlen != 0:
+                                    self.wfile.write(data[:endlen])
+                                done = True
+                                nbyteswritten += endlen
+                            else:
+                                self.wfile.write(data)
+                                nbyteswritten += len(data)
+                            
+                            #print >>sys.stderr,"videoserv: HTTP: wrote total",nbyteswritten
+                            
+                        if length is None:
+                            # If length unknown, use chunked encoding
+                            self.wfile.write("\r\n")
+        
+                        if done:
+                            if DEBUG:
+                                print >>sys.stderr,"videoserv: do_GET: stream reached EOF or range query's send limit",currentThread().getName() 
+                            break
+                        
+                    if nbyteswritten != nbytes2send:
+                        print >>sys.stderr,"videoserv: do_GET: Sent wrong amount, wanted",nbytes2send,"got",nbyteswritten,currentThread().getName()
+
+                    # Arno, 2010-01-08: No close on Range queries
+                    if not range:
+                        stream.close()
+                        if self.server.statuscallback is not None:
+                            self.server.statuscallback("Done")
+                    
+            finally:
+                # Always release the per-stream lock taken by acquire_inputstream.
+                self.server.release_inputstream(self.path)
+            
+        except socket.error,e2:
+            # Client aborted the connection (normal with VLC range behaviour).
+            #if DEBUG:
+            #    print >>sys.stderr,"videoserv: SocketError occured while serving",currentThread().getName()
+            pass
+        except Exception,e:
+            if DEBUG:
+                print >>sys.stderr,"videoserv: Error occured while serving",currentThread().getName()
+            print_exc()
+            self.error(e,self.path)
+
+        
+
+
+    def error(self,e,url):
+        """ Report exception e (raised while serving url) to the registered
+        error/status callbacks, or print the traceback if none is set. """
+        if self.server.errorcallback is not None:
+            self.server.errorcallback(e,url)
+        else:
+            print_exc()
+        if self.server.statuscallback is not None:
+            self.server.statuscallback("Error playing video:"+str(e))
+
+
+class VideoRawVLCServer:
+    """ Singleton registry of streaminfo records keyed by stream ID (sid),
+    feeding VLC's raw callback interface (read/seek) instead of HTTP. """
+    __single = None
+    
+    def __init__(self):
+        if VideoRawVLCServer.__single:
+            raise RuntimeError, "VideoRawVLCServer is Singleton"
+        VideoRawVLCServer.__single = self 
+
+        # Protects sid2streaminfo.
+        self.lock = RLock()
+        # sid of the stream served by the previous ReadDataCallback call.
+        self.oldsid = None
+        self.sid2streaminfo = {}
+        
+        
+        #self.lastsid = None # workaround bug? in raw inf
+        
+    def getInstance(*args, **kw):
+        # NOTE(review): not thread-safe on first call, same as
+        # VideoHTTPServer.getInstance — confirm single-threaded init.
+        if VideoRawVLCServer.__single is None:
+            VideoRawVLCServer(*args, **kw)
+        return VideoRawVLCServer.__single
+    getInstance = staticmethod(getInstance)
+    
+    def set_inputstream(self,streaminfo,sid):
+        """ Store a record for stream ID "sid" which may be
+        retrieved by VLC anytime after this call
+        """
+        self.lock.acquire()
+        try:
+            print >>sys.stderr,"VLCRawServer: setting sid",sid
+            self.sid2streaminfo[sid] = streaminfo
+            
+            # workaround
+            # self.lastsid = sid
+        finally:
+            self.lock.release()
+        
+    def get_inputstream(self,sid):
+        """ Get the record for the given stream """
+        # TODO: locking?
+        self.lock.acquire()
+        try:
+            return self.sid2streaminfo[sid]
+        finally:
+            self.lock.release()
+
+    def shutdown(self):
+        # Nothing to tear down: this class owns no sockets or threads.
+        pass
+
+    def ReadDataCallback(self, bufc, buflen, sid):
+        """ VLC raw-interface read callback: copy up to buflen bytes of
+        stream sid into bufc. Returns the number of bytes copied, 0 at EOF,
+        -1 on error. When sid differs from the previous call's sid, the old
+        stream is closed and dropped (VLC switched streams). """
+        try:
+            #print >>sys.stderr,"VideoRawVLCServer:ReadDataCallback: stream",sid,"wants", buflen,"thread",currentThread().getName()
+            # workaround
+            #sid = self.lastsid
+            #print >>sys.stderr,"VideoRawVLCServer:ReadDataCallback: stream override sid",sid
+            
+            if self.oldsid is not None and self.oldsid != sid:
+                # Switched streams, garbage collect old
+                oldstream = self.sid2streaminfo[self.oldsid]['stream']
+                del self.sid2streaminfo[self.oldsid]
+                try:
+                    oldstream.close()
+                except:
+                    print_exc()
+            self.oldsid = sid
+            
+            streaminfo = self.get_inputstream(sid)
+            #print >>sys.stderr,"rawread: sid",sid,"n",buflen
+            data = streaminfo['stream'].read(buflen)
+            size = len(data)
+            #print >>sys.stderr,"rawread: sid",sid,"GOT",size
+            if size == 0:
+                return 0
+            else:
+                bufc[0:size]=data
+            #print >>sys.stderr,"VideoRawVLCServer:ReadDataCallback: bufc size ", len(bufc)
+            
+            return size
+        except:
+            print_exc()
+            return -1
+        
+    def SeekDataCallback(self, pos, sid):
+        """ VLC raw-interface seek callback: absolute seek of stream sid to
+        byte offset pos. Returns 0 on success, -1 on error. """
+        try:
+            # WARNING: CURRENT 0.8.6h binaries have bug in vlcglue.c: pos is just a long int , not a long long int.
+            
+            #print >>sys.stderr,"VideoRawVLCServer: SeekDataCallback: stream",sid,"seeking to", pos,"oldsid",self.oldsid
+            # Arno: TODO: add support for seeking
+            if True:
+                streaminfo = self.get_inputstream(sid)
+                streaminfo['stream'].seek(pos,os.SEEK_SET)
+                return 0
+            
+            return -1
+        
+        except:
+            print_exc()
+            return -1
+
+
+
+class MultiHTTPServer(ThreadingMixIn,VideoHTTPServer):
+    """ MultiThreaded HTTP Server.
+    NOTE(review): VideoHTTPServer already mixes in ThreadingMixIn, so the
+    extra base here is redundant (but harmless). """
+
+    __single = None
+    
+    def __init__(self,port):
+        # NOTE(review): bypasses VideoHTTPServer.__init__ (and its singleton
+        # check) by calling HTTPServer.__init__ directly — presumably so this
+        # singleton can coexist with a VideoHTTPServer instance; confirm.
+        if MultiHTTPServer.__single:
+            raise RuntimeError, "MultiHTTPServer is Singleton"
+        MultiHTTPServer.__single = self 
+
+        self.port = port
+        BaseHTTPServer.HTTPServer.__init__( self, ("127.0.0.1",self.port), SimpleServer )
+        self.daemon_threads = True
+        self.allow_reuse_address = True
+        #self.request_queue_size = 10
+
+        self.lock = RLock()        
+        
+        self.urlpath2streaminfo = {} # Maps URL to streaminfo
+        self.mappers = [] # List of PathMappers
+        
+        self.errorcallback = None
+        self.statuscallback = None
+
+    def background_serve( self ):
+        """ Start serve_forever() on a daemon thread and return. """
+        name = "MultiHTTPServerThread-1"
+        self.thread2 = Thread(target=self.serve_forever,name=name)
+        self.thread2.setDaemon(True)
+        self.thread2.start()
diff --git a/instrumentation/next-share/BaseLib/Video/__init__.py b/instrumentation/next-share/BaseLib/Video/__init__.py
new file mode 100644 (file)
index 0000000..395f8fb
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/Video/defs.py b/instrumentation/next-share/BaseLib/Video/defs.py
new file mode 100644 (file)
index 0000000..08d4195
--- /dev/null
@@ -0,0 +1,16 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+# How a selected video is played (see return_feasible_playback_modes()).
+PLAYBACKMODE_INTERNAL = 0          # internal (vlc binding) player
+PLAYBACKMODE_EXTERNAL_DEFAULT = 1  # system default external player
+PLAYBACKMODE_EXTERNAL_MIME = 2     # external player chosen via MIME type
+
+# Policy for other running torrents when playback starts.
+OTHERTORRENTS_STOP_RESTART = 0
+OTHERTORRENTS_STOP = 1
+OTHERTORRENTS_CONTINUE = 2
+
+# Arno: These modes are not what vlc returns, but Fabian's summary of that
+MEDIASTATE_PLAYING = 1
+MEDIASTATE_PAUSED  = 2
+MEDIASTATE_STOPPED = 3
+
diff --git a/instrumentation/next-share/BaseLib/Video/utils.py b/instrumentation/next-share/BaseLib/Video/utils.py
new file mode 100644 (file)
index 0000000..afeb675
--- /dev/null
@@ -0,0 +1,123 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+import os
+import sys
+
+from BaseLib.Core.Utilities.unicode import unicode2str
+if (sys.platform == 'win32'):
+    from BaseLib.Core.Utilities.win32regchecker import Win32RegChecker,HKLM
+
+# File extensions (lowercase, without dot) recognised as playable media.
+# NOTE(review): 'mkv' appears twice in this list — harmless for membership
+# tests, but could be deduplicated.
+videoextdefaults = ['aac','asf','avi','dv','divx','flac','flc','flv','mkv','mpeg','mpeg4','mpegts','mpg4','mp3','mp4','mpg','mkv','mov','m4v','ogg','ogm', 'ogv', 'oga', 'ogx','qt', 'rm','swf','ts','vob','wmv','wav', 'webm']
+# Ric: added svc ext. for enhancement layers
+svcextdefaults = ['dat']
+
+DEBUG = False
+
+def win32_retrieve_video_play_command(ext,videourl):
+    """ Use the specified file extension to find the associated player in the
+    Windows registry and build a command line that plays videourl (or file).
+    Returns [contenttype, command] or [None,None] when no player is found. """
+    registry = Win32RegChecker()
+    
+    if DEBUG:
+        print >>sys.stderr,"videoplay: Looking for player for",unicode2str(videourl)
+    if ext == '':
+        return [None,None]
+    
+    contenttype = None
+    winfiletype = registry.readRootKey(ext)
+    if DEBUG:
+        print >>sys.stderr,"videoplay: winfiletype is",winfiletype,type(winfiletype)
+    if winfiletype is None or winfiletype == '':
+        # Darn.... Try this: (VLC seems to be the one messing the registry up in the
+        # first place)
+        winfiletype = registry.readRootKey(ext,value_name="VLC.Backup")
+        if winfiletype is None or winfiletype == '':
+            return [None,None]
+        # Get MIME type
+    if DEBUG:
+        print >>sys.stderr,"videoplay: Looking for player for ext",ext,"which is type",winfiletype
+
+    contenttype = registry.readRootKey(ext,value_name="Content Type")
+    
+    # NOTE: '\s', '\p', '\o' are not Python escape sequences, so these
+    # literals keep their backslashes; raw strings would be clearer.
+    playkey = winfiletype+"\shell\play\command"
+    urlopen = registry.readRootKey(playkey)
+    if urlopen is None:
+        openkey = winfiletype+"\shell\open\command"
+        urlopen = registry.readRootKey(openkey)
+        if urlopen is None:
+            return [None,None]
+
+    # Default is e.g. "C:\Program Files\Windows Media Player\wmplayer.exe" /prefetch:7 /Play "%L"
+    # Replace %L
+    suo = urlopen.strip() # spaces
+    idx = suo.find('%L')
+    if idx == -1:
+        # Hrrrr: Quicktime uses %1 instead of %L and doesn't seem to quote the program path
+        idx = suo.find('%1')
+        if idx == -1:
+            return [None,None]
+        else:
+            replace = '%1'
+            idx2 = suo.find('%2',idx)
+            if idx2 != -1:
+                # Hmmm, a trailer, let's get rid of it
+                if suo[idx-1] == '"':
+                    suo = suo[:idx+3] # quoted
+                else:
+                    suo = suo[:idx+1]
+    else:
+        replace = '%L'
+        
+    # St*pid quicktime doesn't properly quote the program path, e.g.
+    # C:\Program Files\Quicktime\bla.exe "%1" instead of
+    # "C:\Program Files\Quicktime\bla.exe" "%1"
+    if suo[0] != '"':    
+        if idx > 0 and (len(suo)-1) >= idx+2 and suo[idx-1] == '"' and suo[idx+2]=='"':
+            # %x is quoted
+            end = max(0,idx-2)
+        else:
+            end = max(0,idx-1)
+        # I assume everthing till end is the program path
+        progpath = suo[0:end]
+        qprogpath = quote_program_path(progpath)
+        if qprogpath is None:
+            return [None,None]
+        suo = qprogpath+suo[end:]
+        if DEBUG:
+            print >>sys.stderr,"videoplay: new urlopen is",suo
+    return [contenttype,suo.replace(replace,videourl)]
+
+
+def win32_retrieve_playcmd_from_mimetype(mimetype,videourl):
+    """ Use the specified MIME type to find the player in the Windows registry to play the url (or file).
+    Maps the MIME type to a file extension via the HKLM MIME database, then
+    delegates to win32_retrieve_video_play_command(). Returns [contenttype,
+    command] or [None,None]. """
+    registry = Win32RegChecker()
+    
+    if DEBUG:
+        print >>sys.stderr,"videoplay: Looking for player for",unicode2str(videourl)
+    if mimetype == '' or mimetype is None:
+        return [None,None]
+    
+    keyname = '\\SOFTWARE\\Classes\\MIME\\Database\\Content Type\\'+mimetype
+    valuename = 'Extension'
+    ext = registry.readKeyRecursively(HKLM,keyname,value_name=valuename)
+    if DEBUG:
+        print >>sys.stderr,"videoplay: ext winfiletype is",ext
+    if ext is None or ext == '':
+        return [None,None]
+    if DEBUG:
+        print >>sys.stderr,"videoplay: Looking for player for mime",mimetype,"which is ext",ext
+
+    return win32_retrieve_video_play_command(ext,videourl)
+
+
+def quote_program_path(progpath):
+    """ Quote progpath in double quotes if it contains spaces and names a
+    readable file; return None when it contains spaces but is not readable
+    (i.e. probably not really a program path). Space-free paths pass through
+    unchanged. """
+    idx = progpath.find(' ')
+    if idx != -1:
+        # Contains spaces, should quote if it's really path
+        if not os.access(progpath,os.R_OK):
+            if DEBUG:
+                print >>sys.stderr,"videoplay: Could not find assumed progpath",progpath
+            return None
+        return '"'+progpath+'"'
+    else:
+        return progpath
diff --git a/instrumentation/next-share/BaseLib/WebUI/WebUI.py b/instrumentation/next-share/BaseLib/WebUI/WebUI.py
new file mode 100644 (file)
index 0000000..d5be45f
--- /dev/null
@@ -0,0 +1,443 @@
+# Written by Riccardo Petrocco
+# see LICENSE.txt for license information
+
+# TODO
+# 1- romove only torrent, not the content
+# 2- confirm dialog on removing!
+
+import sys, os
+import time
+import random
+import urllib
+import urlparse
+import cgi
+import binascii
+import copy
+from cStringIO import StringIO
+from traceback import print_exc,print_stack
+from threading import RLock,Condition
+from base64 import encodestring
+
+# json is Python 2.6, P2P-Next std is 2.5.
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+
+from BaseLib.Core.API import *
+from BaseLib.Core.BitTornado.bencode import *
+from BaseLib.Video.VideoServer import AbstractPathMapper
+
+try:
+    import wxversion
+    wxversion.select('2.8')
+except:
+    pass
+import wx
+
+from BaseLib.Plugin.defs import *
+from BaseLib.__init__ import LIBRARYNAME
+
+DEBUG = False
+PATH = 'webUI'
+
+
+def streaminfo404():
+    return {'statuscode':404, 'statusmsg':'404 Not Found'}
+
+
+class WebIFPathMapper(AbstractPathMapper):
+    
+    binaryExtensions = ['.gif', '.png', '.jpg', '.js', '.css']
+    contentTypes = {
+        '.css': 'text/css',
+        '.gif': 'image/gif',
+        '.jpg': 'image/jpg',
+        '.png': 'image/png',
+        '.js' : 'text/javascript',
+        '.html': 'text/html',
+    }
+
+    def __init__(self,bgApp, session):
+        self.bgApp = bgApp
+        self.session = session
+        # Dict of dict in the for of:
+        # {
+        #   infohash_download1 : 
+        #       {
+        #        id : infohash
+        #        name: ..
+        #        status: ..
+        #        ...
+        #       }
+        #   infohash_download2 :
+        #   ...
+        # }
+        self.downspeed = 0
+        self.upspeed = 0
+        self.lastreqtime = time.time()
+        
+        ext = sys.argv[0].lower()
+
+        if ext.endswith('.exe'):
+            self.webUIPath = os.path.abspath(os.path.dirname(sys.argv[0]))
+        else:
+            self.webUIPath = os.getcwd()
+
+        # Arno,2010-07-16: Speeds are just calculated from periodically
+        # retrieved values, instead of pseudo-synchronously.
+        #
+        self.session.set_download_states_callback(self.speed_callback)
+
+    def get(self,urlpath):
+        try:
+            return self.doget(urlpath)
+        except:
+            print_exc()
+
+        
+    def doget(self,urlpath):
+     
+        """
+        Possible paths:
+        /search<application/x-www-form-urlencoded query>
+        """
+        if not urlpath.startswith(URLPATH_WEBIF_PREFIX):
+            return streaminfo404()
+
+        self.lastreqtime = time.time()
+
+        fakeurl = 'http://127.0.0.1'+urlpath
+        o = urlparse.urlparse(fakeurl)
+
+        if DEBUG:
+            print >>sys.stderr,"webUI: path", urlpath        
+            
+        path = urlpath[7:]
+
+            
+        if len(path) == 0:    
+            # Get the default status page!
+            #if urlpath == '' or urlpath == 'index.html'
+            page = self.statusPage()
+            pageStream = StringIO(page)
+                
+#            print >>sys.stderr, "-------------page-----------------", fakeurl, "\n" , o
+#            print >>sys.stderr, "-------------page-----------------", page, "\n" 
+#            print >>sys.stderr, "-------------page-----------------", pageStream, "\n"
+            #try:    
+            return {'statuscode':200,'mimetype': 'text/html', 'stream': pageStream, 'length': len(page)}
+            
+        elif len(path) > 0:
+            if path == "permid.js":
+                try:
+                    permid = encodestring(self.bgApp.s.get_permid()).replace("\n", "")
+                    txt = "var permid = '%s';"%permid
+                    dataStream = StringIO(txt)
+                except Exception,e:
+                    print >> sys.stderr, "permid.js failure:", e
+                    return {'statuscode': 500, 'statusmsg':'Bad permid'}
+
+                return {'statuscode':200, 'mimetype':'text/javascript', 'stream':dataStream, 'length': len(txt)}
+
+            # retrieve and send the right resource
+            extension = os.path.splitext(path)[1]
+            
+            if extension in self.binaryExtensions: 
+                mode = 'rb'
+            else: 
+                mode = 'r'
+            
+            # TODO
+            try:
+                absPath =  os.path.join(self.webUIPath, LIBRARYNAME, "WebUI", path) 
+            except:
+                pass
+                
+
+            # retrieve resourse such as pages or images
+            if urlpath[6] == '/' and os.path.isfile(absPath):
+
+                
+                fp = open(absPath, mode)
+                data = fp.read()
+                fp.close()
+                dataStream = StringIO(data)
+
+
+#                print >>sys.stderr, "-------------page-----------------", self.getContentType(extension), "\n" 
+#                print >>sys.stderr, "-------------page-----------------", dataStream, "\n"
+
+                
+                # Send response
+                return {'statuscode':200,'mimetype': self.getContentType(extension), 'stream': dataStream, 'length': len(data)}
+                
+            elif urlpath[6] == '?':
+            
+                if DEBUG:
+                    print >>sys.stderr,"webUI: received a GET request"
+
+                # It's a GET request (webUI/?..), check for json format
+
+                
+                # Important!! For hashes we don't unquote the request, we just
+                # replace the encoded quotes. Json will not parse the hashes
+                # if decoded!! This is caused by the fact that Json does not accept
+                # special chars like '/'.
+
+                try:
+                    req = urllib.unquote(urlpath[6:])
+                    o = req.split('&')[1]
+                    jreq = json.loads(o)
+                except:
+                    req = urlpath[6:].replace('%22', '"')
+                    o = req.split('&')[1]
+                    jreq = json.loads(o)
+                    
+                try:
+                    method = jreq['method']
+                except:
+                    return {'statuscode':504, 'statusmsg':'Json request in wrong format! At least a method has to be specified!'}
+
+                try:                    
+                    args = jreq['arguments']
+                    if DEBUG:
+                        print >> sys.stderr, "webUI: Got JSON request: " , jreq, "; method: ", method, "; arguments: ", args
+                except:
+                    args = None
+                    if DEBUG:
+                        print >> sys.stderr, "webUI: Got JSON request: " , jreq, "; method: ", method
+
+
+                if args is None:
+                # TODO check params
+                    data = self.process_json_request(method)
+                    if DEBUG:
+                        print >>sys.stderr, "WebUI: response to JSON ", method, " request: ", data
+                else:
+                    data = self.process_json_request(method, args)
+                    if DEBUG:
+                        print >>sys.stderr, "WebUI: response to JSON ", method, " request: ", data, " arguments: ", args                    
+
+                if data == "Args missing":
+                    return {'statuscode':504, 'statusmsg':'Json request in wrong format! Arguments have to be specified!'}
+                    
+                dataStream = StringIO(data)
+                return {'statuscode':200,'mimetype': 'application/json', 'stream': dataStream, 'length': len(data)}
+
+            else:
+                # resource not found or in wrong format            
+                return streaminfo404()
+
+
+    def process_json_request(self, method, args=None):
+        try:
+            return self.doprocess_json_request(method, args=args)
+        except:
+            print_exc()
+            return json.JSONEncoder().encode({"success" : "false"})
+
+    def doprocess_json_request(self, method, args=None):
+    
+        # Decode the infohash if present
+        if args is not None:
+            infohash = urllib.unquote( str(args['id']) )
+            
+        
+        if DEBUG:
+            print >>sys.stderr, "WebUI: received JSON request for method: ", method
+        
+        if method == "get_all_downloads":
+
+            condition = Condition()
+            dlist = []
+            states_func = lambda dslist:self.states_callback(dslist,condition,dlist)
+            self.session.set_download_states_callback(states_func)
+            
+            # asyncronous callbacks... wait for all the stats to be retrieved,
+            # Arno: in this case it is important that the value is accurate to 
+            # prevent just deleted items to reappear.
+            condition.acquire()
+            condition.wait(5.0)
+            condition.release()
+                
+            return json.JSONEncoder().encode({"downloads" : dlist})
+            
+            
+        elif method == "pause_all":
+
+            try:            
+                #downloads = self.session.get_downloads()
+                wx.CallAfter(self.bgApp.gui_webui_stop_all_downloads, self.session.get_downloads())
+                #for dl in downloads:
+                #    dl.stop()
+                    
+                return json.JSONEncoder().encode({"success" : "true"})
+
+                    
+            except:
+                return json.JSONEncoder().encode({"success" : "false"})
+            
+            
+        elif method == "resume_all":
+
+            try:            
+                #downloads = self.session.get_downloads()
+                wx.CallAfter(self.bgApp.gui_webui_restart_all_downloads, self.session.get_downloads())
+                
+                #for dl in downloads:
+                #    dl.restart()
+                    
+                return json.JSONEncoder().encode({"success" : "true"})
+
+                    
+            except:
+                return json.JSONEncoder().encode({"success" : "false"})
+            
+            
+        elif method == "remove_all":
+
+            try:
+                #downloads = self.session.get_downloads()
+                wx.CallAfter(self.bgApp.gui_webui_remove_all_downloads, self.session.get_downloads())
+                #for dl in downloads:
+                #    self.session.remove_download(dl, True)
+                    
+                return json.JSONEncoder().encode({"success" : "true"})
+                
+            except:
+                return json.JSONEncoder().encode({"success" : "false"})
+                
+
+        elif method == "get_speed_info":
+        
+            # Arno, 2010-07-16: Return latest values periodically retrieved.
+            return json.JSONEncoder().encode({"success" : "true", "downspeed": self.downspeed, "upspeed" : self.upspeed})
+                
+        # Methods that need arguments!!
+        elif args is None:
+            return "Args missing"            
+
+
+        elif method == "pause_dl":
+            
+            try:
+                downloads = self.session.get_downloads()
+                for dl in downloads:
+                    if dl.get_def().get_infohash() == infohash:
+                        wx.CallAfter(self.bgApp.gui_webui_stop_download, dl)
+                
+                return json.JSONEncoder().encode({"success" : "true"})    
+                
+            except:
+                return json.JSONEncoder().encode({"success" : "false"})
+
+
+        elif method == "resume_dl":
+            
+            try:
+                downloads = self.session.get_downloads()
+                for dl in downloads:
+                    if dl.get_def().get_infohash() == infohash:
+                        wx.CallAfter(self.bgApp.gui_webui_restart_download, dl)
+                
+                return json.JSONEncoder().encode({"success" : "true"})    
+                
+            except:
+                return json.JSONEncoder().encode({"success" : "false"})
+
+            
+        elif method == "remove_dl":
+            
+            try:
+                downloads = self.session.get_downloads()
+                
+                for dl in downloads:
+                    if dl.get_def().get_infohash() == infohash:
+                        wx.CallAfter(self.bgApp.gui_webui_remove_download, dl)
+                    
+                return json.JSONEncoder().encode({"success" : "true"})
+            except:
+                return json.JSONEncoder().encode({"success" : "false"})
+            
+
+    def states_callback(self,dslist,condition,dlist):
+        """ Called by Session thread """
+
+        # Display some stats
+        for ds in dslist:
+            d = ds.get_download()
+            
+            infohash = urllib.quote(d.get_def().get_infohash())
+#            infohash = (d.get_def().get_infohash()).toJSON()
+
+            dl = {'id' : infohash, 'name': d.get_def().get_name(), 'status': dlstatus_strings[ds.get_status()], 'progress': ds.get_progress(), 'upload': ds.get_current_speed(UPLOAD), 'download': ds.get_current_speed(DOWNLOAD)}
+
+            dlist.append(dl)
+            
+        condition.acquire()
+        condition.notify()
+        condition.release()
+        return (0.0, False)        
+        
+        
+    def speed_callback(self,dslist):
+        """ Called by Session thread """
+
+        upspeed = 0
+        downspeed = 0
+
+        # Display some stats
+        for ds in dslist:
+            d = ds.get_download()
+            
+            upspeed += ds.get_current_speed(UPLOAD)
+            downspeed += ds.get_current_speed(DOWNLOAD)
+
+        self.downspeed = downspeed
+        self.upspeed = upspeed
+        
+        # Arno,2010-07-16: Continuous
+        return (1.0, False)           
+
+
+    def statusPage(self):
+
+        page = '<!DOCTYPE html>'
+        page += '<html>\n'
+        
+        # get the headers
+        header =  os.path.join(self.webUIPath, LIBRARYNAME, "WebUI", "index", "head.html") 
+        if os.path.isfile(header):
+            f = open(header)
+
+            head = f.read()
+            f.close
+            page += head
+
+        
+        # get body
+        body =  os.path.join(self.webUIPath, LIBRARYNAME, "WebUI", "index", "body.html") 
+        if os.path.isfile(body):
+            f = open(body)
+            tmp = f.read()
+            f.close
+            page += tmp
+        
+        page += '</html>'
+
+        return page
+    
+    
+    def getContentType(self, ext):
+        """ Function to figure out content types """
+        content_type = 'text/plain'
+        
+        if ext in self.contentTypes:
+            content_type = self.contentTypes[ext]            
+        return content_type
+
+
+
diff --git a/instrumentation/next-share/BaseLib/WebUI/__init__.py b/instrumentation/next-share/BaseLib/WebUI/__init__.py
new file mode 100644 (file)
index 0000000..0efd683
--- /dev/null
@@ -0,0 +1,2 @@
+# Written by Riccardo Petrocco 
+# see LICENSE.txt for license information
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/arrows.png b/instrumentation/next-share/BaseLib/WebUI/images/arrows.png
new file mode 100644 (file)
index 0000000..4ef5a21
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/arrows.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/favicon.png b/instrumentation/next-share/BaseLib/WebUI/images/favicon.png
new file mode 100644 (file)
index 0000000..1ce178b
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/favicon.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/headBG.png b/instrumentation/next-share/BaseLib/WebUI/images/headBG.png
new file mode 100644 (file)
index 0000000..5302404
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/headBG.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/p2p-next.png b/instrumentation/next-share/BaseLib/WebUI/images/p2p-next.png
new file mode 100644 (file)
index 0000000..945c607
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/p2p-next.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/pause.png b/instrumentation/next-share/BaseLib/WebUI/images/pause.png
new file mode 100644 (file)
index 0000000..6954088
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/pause.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/pause_red.png b/instrumentation/next-share/BaseLib/WebUI/images/pause_red.png
new file mode 100644 (file)
index 0000000..17a6774
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/pause_red.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/progress_blue.png b/instrumentation/next-share/BaseLib/WebUI/images/progress_blue.png
new file mode 100644 (file)
index 0000000..01e9cd7
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/progress_blue.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/progress_green.png b/instrumentation/next-share/BaseLib/WebUI/images/progress_green.png
new file mode 100644 (file)
index 0000000..9079c42
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/progress_green.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/progress_red.png b/instrumentation/next-share/BaseLib/WebUI/images/progress_red.png
new file mode 100644 (file)
index 0000000..d925a21
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/progress_red.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/remove.png b/instrumentation/next-share/BaseLib/WebUI/images/remove.png
new file mode 100644 (file)
index 0000000..98198de
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/remove.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/remove_big.png b/instrumentation/next-share/BaseLib/WebUI/images/remove_big.png
new file mode 100644 (file)
index 0000000..e85ed85
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/remove_big.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/remove_red.png b/instrumentation/next-share/BaseLib/WebUI/images/remove_red.png
new file mode 100644 (file)
index 0000000..bd7d7ed
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/remove_red.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/resume.png b/instrumentation/next-share/BaseLib/WebUI/images/resume.png
new file mode 100644 (file)
index 0000000..c2670fa
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/resume.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/resume_green.png b/instrumentation/next-share/BaseLib/WebUI/images/resume_green.png
new file mode 100644 (file)
index 0000000..6dd6104
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/resume_green.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/splugin.png b/instrumentation/next-share/BaseLib/WebUI/images/splugin.png
new file mode 100644 (file)
index 0000000..da566d2
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/splugin.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/users_arrow.png b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow.png
new file mode 100644 (file)
index 0000000..62d2192
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_green.png b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_green.png
new file mode 100644 (file)
index 0000000..4f351af
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_green.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red.png b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red.png
new file mode 100644 (file)
index 0000000..0f3f16d
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red_light.png b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red_light.png
new file mode 100644 (file)
index 0000000..80fbe23
Binary files /dev/null and b/instrumentation/next-share/BaseLib/WebUI/images/users_arrow_red_light.png differ
diff --git a/instrumentation/next-share/BaseLib/WebUI/index/body.html b/instrumentation/next-share/BaseLib/WebUI/index/body.html
new file mode 100644 (file)
index 0000000..69cf404
--- /dev/null
@@ -0,0 +1,34 @@
+<body id="tribe_body">
+    <div class="wrapper">
+               <div class="head_menu">
+                   <div id="head_buttons">
+                        <ul>    
+                                <li id="pause_all"><div id="pause_resume_all"><div class="head_button"></div>Stop Sharing</div></li>
+                                <li style="display: none" id="resume_all"><div id="pause_resume_all"><div class="head_button"></div>Start Sharing</div></li>
+                                <li class="divider">&nbsp;</li>
+                                <li id="remove_all"><div id="remove_all_link"><div class="head_button"></div>Remove All</div></li>
+                        </ul>
+                        </div>
+               </div>
+               
+               
+               <div id="downloads_container">
+                       <ul class="downloads_list" id="downloads_list">
+                       <li style="display: none;"></li>
+               </ul>
+               </div>
+               
+       </div>
+               
+       <div class="footer">
+           <div id="logo">
+               <a href="http://p2p-next.org" id="logo_link">
+               </a> 
+        </div>
+               <div id="num_downloads">In total you are supporting <b id="num_dls">0</b> downloads</div>
+               <div id="total_upload">0 B/s</div>
+               <div id="total_download">0 B/s</div>
+
+       </div>
+               
+</body>
diff --git a/instrumentation/next-share/BaseLib/WebUI/index/head.html b/instrumentation/next-share/BaseLib/WebUI/index/head.html
new file mode 100644 (file)
index 0000000..f6e77b7
--- /dev/null
@@ -0,0 +1,23 @@
+<head>
+       <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+       <meta http-equiv="cache-control" content="Private" />
+       <meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0; user-scalable=0;"/>
+       <link href="./webUI/images/splugin.png" rel="shortcut icon" />
+       <script type="text/javascript" src="http://code.jquery.com/jquery-latest.min.js"></script>
+    <link type="text/css" rel="stylesheet" href="./webUI/index/stylesheet.css" />
+       <script type="text/javascript" src="./webUI/javascript/json.min.js"></script>
+       <script type="text/javascript" src="./webUI/javascript/download.js"></script>
+       <script type="text/javascript" src="./webUI/javascript/tribe.interface.js"></script>
+       <script type="text/javascript" src="./webUI/javascript/tribe.js"></script>
+       <script type="text/javascript">
+           
+        $(document).ready( function() {
+       
+               // Initialise the main Tribe controller
+               tribe = new Tribe();
+
+        });
+    </script>
+
+       <title>SwarmEngine Web Interface</title>
+</head>
diff --git a/instrumentation/next-share/BaseLib/WebUI/index/stylesheet.css b/instrumentation/next-share/BaseLib/WebUI/index/stylesheet.css
new file mode 100644 (file)
index 0000000..e66a509
--- /dev/null
@@ -0,0 +1,400 @@
+
+html {
+       margin-left: auto;
+       margin-right: auto;
+       padding: 0;
+       height: 100%;
+       max-width: 1500px;
+       width: 80%;
+}
+
+body {
+       font: 62.5% "lucida grande", Tahoma, Verdana, Arial, Helvetica, sans-serif; /* Resets 1em to 10px */
+       color: #222 !important;
+       /*background: #606d80;*/
+       background: white;
+       text-align: center;
+       margin: 0 0 0;
+       overflow: auto;
+       z-index: 1;
+       height: 100%;
+}
+
+img {
+       border: none;
+}
+
+a {
+       outline: 0;
+       -moz-outline: none;
+}
+
+div.wrapper {
+
+    min-height: 100%;
+    height: auto !important;
+    height: 100%;
+    margin: 0 auto -2em;
+}
+
+
+/*--------------------------------------
+ *
+ * T O P   M E N U
+ *
+ *--------------------------------------*/
+
+div.head_menu {
+       width: 100%;
+       height: 90px;
+       margin: 0;
+       background: transparent url('../images/headBG.png') left top repeat-x;
+
+       -moz-border-radius-bottomright: 10px;
+       -moz-border-radius-bottomleft: 10px;
+
+    -webkit-border-bottom-right-radius: 10px;
+    -webkit-border-bottom-left-radius: 10px;
+    
+    border: 2px solid white;
+    
+}
+
+div.head_menu h1 {
+       height: inherit;
+       width: 205px;
+       text-indent: -9000px !important;
+       padding: 0;
+       margin: 0 0 0 5px;
+}
+
+
+div#p2p-next {
+       height: 45px;
+       width: 115px;
+       background-image: url('../images/p2p-next.png');
+       vertical-align: left;
+}
+
+div#head_buttons {
+       height: 64px;
+       margin: 0 auto;
+       padding: 0 7px;
+       text-align: center;
+       margin-left: auto;
+       margin-right: auto;
+       width: 260px;
+       vertical-align: middle;    
+}
+
+div.head_menu ul {
+       height: 64px;
+       margin: 0 auto;
+       padding: 0 7px;
+       text-align: center;
+       margin-left: auto;
+       margin-right: auto;
+       width: 260px;
+       vertical-align: middle; 
+}
+
+
+div.head_menu ul li {
+       list-style-type: none;
+       list-style-image: none;
+       float: left;
+       padding: 0;
+       margin: 0;
+       vertical-align: middle;
+       height: 64px;
+}
+
+
+div.head_menu ul li > div {
+       color: #000;
+       font-size: 1.1em;
+       text-decoration: none;
+       padding: 2px 8px;
+       display: block;
+       margin: 0;
+       text-shadow: 0 1px 0 #ccc;
+       min-width: 64px;
+       cursor: pointer;
+}
+
+li#pause_all div div.head_button {
+       width: 120px;
+       height: 64px;
+       margin: 0 auto 5px;
+    background-image: url('../images/users_arrow_red_light.png');
+}
+
+li#resume_all div div.head_button {
+       width: 120px;
+       height: 64px;
+       margin: 0 auto 5px;
+    background-image: url('../images/users_arrow.png');
+}
+
+li#remove_all div div.head_button {
+       width: 64px;
+       height: 64px;
+       margin: 0 auto 5px;
+    background-image: url('../images/remove_big.png');
+}
+
+
+div.head_menu ul li.divider {
+       width: 0;
+       border-right: 1px dotted black;
+       opacity: 0.2;
+       margin: 14px 4px 0;
+       height: 49px;
+}
+
+
+/*--------------------------------------
+ *
+ * D O W N L O A D S   C O N T A I N E R
+ *
+ *--------------------------------------*/
+
+div#downloads_container {
+
+    -moz-border-radius: 10px;
+    -webkit-border-radius: 5px;
+    border: 2px solid #CCC;
+    padding: 10px;
+    margin-top: 10px;
+    margin-bottom: 30px;
+
+}
+
+ul.downloads_list {
+    -moz-border-radius: 10px;
+    -webkit-border-radius: 10px;
+    border: 2px solid #CCC;
+       width: 100%;
+       margin: 0;
+       padding: 0;
+       text-align: left;
+       cursor: default;
+    background-color: #CCC;
+}
+
+ul.downloads_list li {
+
+    height: 80px;
+       list-style-type: none;
+       list-style-image: none;
+       clear: both;
+       display: block;
+       vertical-align: middle;
+}
+
+ul.downloads_list li.dl {
+
+       border-bottom: 1px solid #ccc;
+       padding: 4px 120px 5px 10px; /* Make space for buttons on the right */
+       margin: 0 !important;
+       color: #666;
+}
+
+ul.downloads_list li.dl a img {
+       position: relative;
+       right: -10px;
+}
+
+
+ul.downloads_list li.dl div.dl_name {
+       font-size: 1.6em;
+       font-weight: bold;
+       word-wrap: break-word;
+       overflow: hidden;
+       color: #222;
+       margin-bottom: 2px;
+       margin-top: 2px;
+}
+
+ul.downloads_list li.dl.selected div.dl_name {
+       color: #fff;
+}
+
+ul.downloads_list li.dl div.dl_speeds {
+       font-size: 1.2em;
+}
+
+ul.downloads_list li.dl div.dl_progress_bar_container {
+    /*border-width: 1px;
+    border-style:solid;
+    border-color: black;*/
+
+    -moz-border-radius: 10px;
+    -webkit-border-radius: 10px;
+    border: 2px solid white;
+
+       height: 15px;
+       margin: 3px 0;
+       float: left;
+       line-height: 1px;
+       font-size: 1px;
+       width: 100%;
+       
+}
+
+ul.downloads_list li.dl div.dl_progress_bar {
+/*     height: 10px;
+       margin: 3px 0;
+       float: left;
+       line-height: 1px;
+       background-color: transparent;
+       font-size: 1px; 
+       width: 100%;
+
+       background-repeat: repeat-x;
+       background: transparent url('../images/headBG.png') left top repeat-x;
+       background-color: blue;*/
+    -moz-border-radius-topleft: 10px;
+       -moz-border-radius-bottomleft: 10px;
+
+    -webkit-border-top-left-radius: 10px;
+    -webkit-border-bottom-left-radius: 10px;
+       background-image: url('../images/progress_blue.png');
+       width: 40%;
+       height: 15px;
+    opacity: 0.6;
+}
+
+ul.downloads_list li.dl div.control_container {
+       float: right;
+       position: relative;
+       right: -120px;
+       top: -42px;
+       height: 32px;
+       width: 108px;
+}
+
+
+li.dl a div.remove_dl {
+    cursor: pointer;
+       float: left; 
+       height: 32px;
+       width: 36px;
+    background: url('../images/remove.png') no-repeat;
+}
+
+li.dl a div.pause_dl {
+    cursor: pointer;
+       float: left; 
+       height: 32px;
+       width: 55px;
+    background: url('../images/pause.png') no-repeat;
+}
+
+li.dl a div.resume_dl {
+    cursor: pointer;
+       float: left; 
+       height: 32px;
+       width: 64px;
+    background: url('../images/resume.png') no-repeat;
+}
+
+/*
+li.dl a div.dl_pause {
+       background-position: left top;
+}
+
+li.dl a:hover div.dl_pause {
+       background-position: left center;
+}
+
+li.dl a:active div.dl_pause {
+       background-position: left bottom;
+}
+
+li.dl a div.dl_resume {
+       background-position: center top;
+}
+
+li.dl a:hover div.dl_resume {
+       background-position: center center;
+}
+
+li.dl a:active div.dl_resume {
+       background-position: center bottom;
+}*/
+
+/*--------------------------------------
+ *
+ * FOOTER
+ *
+ *--------------------------------------*/
+
+div.footer {
+       -moz-border-radius-topright: 10px;
+       -moz-border-radius-topleft: 10px;
+
+    /*-webkit-border-radius: 10px;*/
+    -webkit-border-top-right-radius: 10px;
+    -webkit-border-top-left-radius: 10px;
+    
+    border: 2px solid white;
+    
+       width: 100%;
+       height: 2em;
+       margin: 0;
+       background: #CCCCCC;
+       font-size: 1.1em;
+       font-weight: normal;
+       border-bottom: 1px solid #888;
+       color: #000;
+       text-shadow: 0 1px 0 #f4f4f4;
+}
+
+div.footer div {
+       padding: 0 10px 1px 10px;
+       margin: 4px 0 0 0;
+}
+
+div.footer div#logo {
+       float: left;
+       width: 51px;
+       height: 25px;
+       padding: 0;
+       margin: 2px 8px;
+}
+
+#logo_link {
+       width: 51px;
+       height: 20px;
+       cursor: pointer;
+       display: block;
+       background-image: url('../images/p2p-next.png')
+}
+
+div.footer div#num_downloads {
+       float: left;
+}
+
+div.footer div#total_upload {
+       float: right;
+       padding-left: 13px;
+}
+
+div.footer div#total_download {
+       float: right;
+       padding-left: 13px;
+}
+
+div.footer div#total_upload {
+       background: url('../images/arrows.png') left -12px no-repeat;
+}
+
+div.footer div#total_download {
+       background: url('../images/arrows.png') left 2px no-repeat;
+}
+
+
+
+
+
diff --git a/instrumentation/next-share/BaseLib/WebUI/javascript/download.js b/instrumentation/next-share/BaseLib/WebUI/javascript/download.js
new file mode 100644 (file)
index 0000000..12429d6
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ *     Written by Riccardo Petrocco
+ *     see LICENSE.txt for license information
+ * 
+ */
+
+function Download( fragment, controller, data) {
+       this.initialize( fragment, controller, data);
+}
+
+
+Download.prototype =
+{
+
+       initialize: function( fragment, controller, data) 
+       {
+               this._id        = data.id;
+        this._name      = data.name;
+        this._status    = data.status;
+        this._progress  = data.progress;
+        this._UPspeed   = data.upload.toFixed(2);
+        this._DOWNspeed = data.download.toFixed(2);
+
+
+               // Create a new <li> element
+               var main = document.createElement( 'li' );
+               main.className = 'dl';
+               main.id = this._id;
+               main._dl = this;
+               var element = $(main);
+               element._dl = this;
+               this._element = element;
+               this._controller = controller;
+               
+               // TODO check, we are updating the dllist multiple times
+               controller._dllist.push( element );
+               
+               // Create the 'name' <div>
+               var e = document.createElement( 'div' );
+               e.className = 'dl_name';
+               main.appendChild( e );
+               element._name_dl = e;
+               
+               // Create the 'speeds' <div>
+               var e = document.createElement( 'div' );
+               e.className = 'dl_speeds';
+               main.appendChild( e );
+               element._speeds_dl = e;
+               
+               // Create the 'progress bar container' <div>
+               e = document.createElement( 'div' );
+               e.className = 'dl_progress_bar_container';
+
+        // Crate the 'progress bar' <div>
+               i = document.createElement( 'div' );
+               i.className = 'dl_progress_bar';
+               progressID = 'progress_' + this._id;
+               i.id = progressID;
+               element._progress_dl = i;
+               
+               e.appendChild(i);
+               main.appendChild( e );
+
+        // Create image container
+        var container = document.createElement( 'div' );
+        container.className = 'control_container';
+
+        // Remove button
+        var remove = document.createElement( 'div' );
+               remove.className = 'remove_dl';
+               e = document.createElement( 'a' );
+               e.appendChild( remove );
+               
+               container.appendChild( e );
+               element._remove_button = remove;
+               $(e).bind('click', function(e) { element._dl.clickRemoveButton(e); });
+               $(remove).bind('mouseover', function() { $(remove).css("background-image", "url(webUI/images/remove_red.png)"); });
+               $(remove).bind('mouseout', function() { $(remove).css("background-image", "url(webUI/images/remove.png)"); });                          
+
+               // Pause button
+               var pause = document.createElement( 'div' );
+               pause.className = 'pause_dl';
+               e = document.createElement( 'a' );
+               e.appendChild( pause );
+               container.appendChild( e );
+               element._pause_button = pause;
+               $(e).bind('click', function(e) { element._dl.clickPauseButton(e); });
+               $(pause).bind('mouseover', function() { $(pause).css("background-image", "url(webUI/images/pause_red.png)"); });
+               $(pause).bind('mouseout', function() { $(pause).css("background-image", "url(webUI/images/pause.png)"); });
+
+
+//             $(e).bind('mouseover', function() { $("div.pause_dl").css("background-image", "url(webUI/images///pause_blue.png)"); });
+//             $(e).bind('mouseout', function() { $("div.pause_dl").css("background-image", "url(webUI/images/pause.png)"); });                                
+
+               
+               // Resume button
+               var resume = document.createElement( 'div' );
+               resume.className = 'resume_dl';
+               e = document.createElement( 'a' );
+               e.appendChild( resume );
+               
+               container.appendChild( e );
+               element._resume_button = resume;
+               $(e).bind('click', function(e) { element._dl.clickResumeButton(e); });
+               $(resume).bind('mouseover', function() { $(resume).css("background-image", "url(webUI/images/resume_green.png)"); });
+               $(resume).bind('mouseout', function() { $(resume).css("background-image", "url(webUI/images/resume.png)"); });                          
+               
+               main.appendChild( container );
+
+               // Update progress bar
+               percentual = Math.floor(100 * this._progress);
+        i.style.width = percentual + '%';
+
+               // Update all the labels etc
+               this._element._name_dl.innerHTML = this._name + '  ' + percentual + '%';
+               this._element._speeds_dl.innerHTML = "Download speed: " + this._DOWNspeed + " KB/s  |  Upload speed: " + this._UPspeed + " KB/s";
+               
+
+
+               if (this._status == "DLSTATUS_STOPPED") 
+               {
+                   i.style.backgroundImage = "url(webUI/images/progress_red.png)";
+                   pause.style.display = "none";
+                   main.style.opacity = 0.3;
+               }
+               
+               if (this._status == "DLSTATUS_DOWNLOADING") 
+               {
+                   i.style.backgroundImage = "url(webUI/images/progress_blue.png)";            
+                   resume.style.display = "none";
+               }
+               
+               if (this._status == "DLSTATUS_SEEDING") 
+               {
+                   i.style.backgroundImage = "url(webUI/images/progress_green.png)";                           
+                   resume.style.display = "none";
+               }
+               // insert the element
+               fragment.appendChild(main);
+       },
+       
+       
+       clickRemoveButton: function( event )
+       {
+           this._controller.removeDownload( this._id );
+       },
+       
+       clickPauseButton: function(event)
+       {
+           this._controller.pauseDownload( this._id );
+       },
+       
+       clickResumeButton: function(event)
+       {
+           this._controller.resumeDownload( this._id );
+       },      
+       
+    id: function() { return this._id; },
+}
+
+
diff --git a/instrumentation/next-share/BaseLib/WebUI/javascript/json.min.js b/instrumentation/next-share/BaseLib/WebUI/javascript/json.min.js
new file mode 100644 (file)
index 0000000..67dae43
--- /dev/null
@@ -0,0 +1,13 @@
+
+(function($){var m={'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','"':'\\"','\\':'\\\\'},s={'array':function(x){var a=['['],b,f,i,l=x.length,v;for(i=0;i<l;i+=1){v=x[i];f=s[typeof v];if(f){v=f(v);if(typeof v=='string'){if(b){a[a.length]=',';}
+a[a.length]=v;b=true;}}}
+a[a.length]=']';return a.join('');},'boolean':function(x){return String(x);},'null':function(x){return"null";},'number':function(x){return isFinite(x)?String(x):'null';},'object':function(x){if(x){if(x instanceof Array){return s.array(x);}
+var a=['{'],b,f,i,v;for(i in x){v=x[i];f=s[typeof v];if(f){v=f(v);if(typeof v=='string'){if(b){a[a.length]=',';}
+a.push(s.string(i),':',v);b=true;}}}
+a[a.length]='}';return a.join('');}
+return'null';},'string':function(x){if(/["\\\x00-\x1f]/.test(x)){x=x.replace(/([\x00-\x1f\\"])/g,function(a,b){var c=m[b];if(c){return c;}
+c=b.charCodeAt();return'\\u00'+
+Math.floor(c/16).toString(16)+
+(c%16).toString(16);});}
+return'"'+x+'"';}};$.toJSON=function(v){var f=isNaN(v)?s[typeof v]:s['number'];if(f)return f(v);};$.parseJSON=function(v,safe){if(safe===undefined)safe=$.parseJSON.safe;if(safe&&!/^("(\\.|[^"\\\n\r])*?"|[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t])+?$/.test(v))
+return undefined;return eval('('+v+')');};$.parseJSON.safe=false;})(jQuery);
\ No newline at end of file
diff --git a/instrumentation/next-share/BaseLib/WebUI/javascript/tribe.interface.js b/instrumentation/next-share/BaseLib/WebUI/javascript/tribe.interface.js
new file mode 100644 (file)
index 0000000..b4fb571
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ *     Written by Riccardo Petrocco
+ *     see LICENSE.txt for license information
+ * 
+ */
+
+
+function TribeInterface( controller )
+{
+       this.initialize( controller );
+       return this;
+}
+
+TribeInterface.prototype =
+{
+       /*
+        * Constructor
+        */
+       initialize: function(controller) {
+               this._controller = controller;
+               this._error = '';
+               this._token = '';
+               this._report  = true;
+       },
+
+       /*
+        * Error handle
+        */
+       ajaxError: function(request, error_string, exception, ajaxObject) {
+               var token;
+               remote = this;
+
+               remote._error = request.responseText
+                                       ? request.responseText.trim().replace(/(<([^>]+)>)/ig,"")
+                                       : "";
+               if( !remote._error.length )
+                       remote._error = 'Server not responding';
+               
+        if (remote._report) {
+               alert(remote._error);
+               
+                   this._controller._BPClosed = true;
+                   remote._report = false
+           }
+                   
+       },
+
+       sendRequest: function( data, success, async ) {
+
+               remote = this;
+               if( typeof async != 'boolean' )
+                 async = true;
+
+               var ajaxSettings = {
+                       url: 'webUI',
+                       type: 'GET',
+                       contentType: 'json',
+                       dataType: 'json',
+                       cache: false,
+                       data: $.toJSON(data),
+                       error: function(request, error_string, exception){ remote.ajaxError(request, error_string, exception, ajaxSettings); },
+                       success: success,
+                       async: async
+               };
+
+               $.ajax( ajaxSettings );
+       },
+
+    // TODO not used now
+       loadStats: function( callback, async ) {
+               var tr = this._controller;
+               var o = { method: 'stats' };
+               this.sendRequest( o, callback, async );
+       },
+
+       getInitialDataFor: function(dl_ids, callback) {
+               var o = {
+                       method: "get_all_downloads"
+               };
+
+               if(dl_ids)
+                       o.arguments.ids = dl_ids;
+
+        //this.sendRequest( o, function(data){ alert( data.downloads )} );
+               this.sendRequest( o, function(data){ callback(data.downloads)} );
+       },
+       
+       pauseAll: function() {
+
+        var tribeif = this;
+        
+           var o = {
+               method: "pause_all"
+           };
+           
+           // Send the request and report a message if some problems occurred
+           this.sendRequest( o, function(data){ if ( !data.success ) alert("Errors occurred while trying to pause the downloads"); } );
+           
+    },
+
+    pauseDownload: function( id ) {
+
+        var tribeif = this;
+        
+           var o = {
+               method: "pause_dl",
+               arguments: {"id" : id}
+           };
+           
+           // Send the request and report a message if some problems occurred
+           this.sendRequest( o, function(data){ if ( !data.success ) alert("Errors occurred while trying to pause the download"); } );
+           
+    },
+
+
+       resumeAll: function() {
+
+        var tribeif = this;
+        
+           var o = {
+               method: "resume_all"
+           };
+           
+           // Send the request and report a message if some problems occurred
+           this.sendRequest( o, function(data){ if ( !data.success ) alert("Errors occurred while trying to resume the downloads"); } );
+           
+    },
+
+
+    resumeDownload: function( id ) {
+
+        var tribeif = this;
+        
+           var o = {
+               method: "resume_dl",
+               arguments: {"id" : id}
+           };
+           
+           // Send the request and report a message if some problems occurred
+           this.sendRequest( o, function(data){ if ( !data.success ) alert("Errors occurred while trying to resume the download"); } );
+           
+    },
+
+
+    removeAll: function() {
+
+        var tribeif = this;
+        
+           var o = {
+               method: "remove_all"
+           };
+           
+           // Send the request and report a message if some problems occurred
+           this.sendRequest( o, function(data){ if ( !data.success ) alert("Errors occurred while trying to remove the downloads"); } );
+           
+    },
+
+    removeDownload: function( id ) {
+
+        var tribeif = this;
+        
+           var o = {
+               method: "remove_dl",
+               arguments: {"id" : id}
+           };
+           
+           // Send the request and report a message if some problems occurred
+           this.sendRequest( o, function(data){ if ( !data.success ) alert("Errors occurred while trying to remove the download"); } );
+           
+    }
+
+
+    
+};
+
diff --git a/instrumentation/next-share/BaseLib/WebUI/javascript/tribe.js b/instrumentation/next-share/BaseLib/WebUI/javascript/tribe.js
new file mode 100644 (file)
index 0000000..dc0daf7
--- /dev/null
@@ -0,0 +1,432 @@
+/*
+ *     Written by Riccardo Petrocco
+ *     see LICENSE.txt for license information
+ * 
+ */
+
+function Tribe(){
+       this.initialize();
+}
+
+Tribe.prototype =
+{
+       /*--------------------------------------------
+        *
+        *  C O N S T R U C T O R
+        *
+        *--------------------------------------------*/
+
+       initialize: function()
+       {
+               // Initialize the helper classes
+               this.tribeif = new TribeInterface(this);
+               
+
+               // Initialize the implementation fields
+               this._downloads               = { };
+               this._dllist                  = [ ];
+               this._activeDLs               = 0;
+               this._refreshSec              = 3;
+               this._BPClosed                = false;
+           this._justPaused              = false;
+
+
+        // Set up user events
+               var controller = this;
+               var tribe = this;
+
+               $('#pause_all').bind('click', function(e){ controller.pauseAll(e); });
+               $('#resume_all').bind('click', function(e){ controller.resumeAll(e); });
+               $('#remove_all').bind('click', function(e){ controller.removeAll(e); });
+               
+               $('#pause_all').mouseover( function(){ $(".head_button:first").css("background-image", "url(webUI/images/users_arrow_red.png)"); } );
+               $('#pause_all').mouseout( function(){ $(".head_button:first").css("background-image", "url(webUI/images/users_arrow_red_light.png)"); } );
+               
+               $('#resume_all').mouseover( function(){ $(".head_button:odd").css("background-image", "url(webUI/images/users_arrow_green.png)"); } );
+               $('#resume_all').mouseout( function(){ $(".head_button:odd").css("background-image", "url(webUI/images/users_arrow.png)"); } );
+
+
+//             $('#pause_all').mouseover( function(){ $(".head_button:first").backgroundImage = "url(webUI/images/pause_big.png)"; } );
+//        $('#pause_all').mouseover( $(this).children.attr("id", "ooo")) );
+               this._downloads_list = $('#downloads_list')[0];
+               
+
+               // TODO
+               this.initializeAllTorrents();
+
+        var timeout = this._refreshSec * 1000
+        setTimeout("tribe.reload(true)", timeout);
+        
+       },
+
+
+       /*--------------------------------------------
+        *
+        *  U T I L I T I E S
+        *
+        *--------------------------------------------*/
+    
+       initializeAllTorrents: function(){
+       
+           if (this._downloads.length > 0) {
+               this._downloads = {};
+           }
+               var tr = this;
+               this.tribeif.getInitialDataFor( null ,function(downloads) { tr.addDownloads(downloads); } );
+       },
+
+       addDownloads: function( new_downloads )
+       {
+       
+           // new_downloads is a list of downloads
+               var fragment = document.createDocumentFragment( );
+
+               for( var i=0, row; row=new_downloads[i]; ++i ) {
+                       var newDL = new Download( fragment, this, row );
+                       this._downloads[newDL.id()] = newDL;
+               }
+               
+               this.updateDLList();
+
+        // torrent container in HTML
+        $('#downloads_list').append( fragment );
+        
+
+        // TODO update!
+               this.refreshStats( );
+       },
+       
+       pauseAll: function( event )
+       {
+           var controller = this;
+           controller.tribeif.pauseAll();
+           
+           $('li.dl').fadeTo("slow", 0.3);
+           this._justPaused = true;
+           //this.setHeadButtons("resume");
+           //$('#pause_all_link').parent().attr("id", "resume_all");
+           
+       },
+       
+       pauseDownload: function( id )
+       {
+           var controller = this;
+           controller.tribeif.pauseDownload( id );
+
+        // JQuery does not accept special char like the '%'
+        // we have as id :-(.. so replace it
+        var encID = this.replaceSpecialChar(id, '%', "\\%");
+
+        $('#' + encID).fadeTo("slow", 0.3);
+        this._justPaused = true;
+//        $('#' + encID).fadeIn("slow");
+           
+           //controller.reload(false);
+       },
+       
+       resumeAll: function( event )
+       {
+           var controller = this;
+           controller.tribeif.resumeAll();
+           $('li.dl').fadeTo("slow", 1);
+       },
+       
+       resumeDownload: function( id )
+       {
+           var controller = this;
+           controller.tribeif.resumeDownload( id );
+
+        var encID = this.replaceSpecialChar(id, '%', "\\%");
+
+        $('#' + encID).fadeTo("slow", 1);
+           
+       },
+
+
+       removeAll: function( event )
+       {
+           var controller = this;
+           controller.tribeif.removeAll();
+           
+           $('li.dl').hide("slow");
+           
+           this._downloads = {};
+        this.updateDLList();
+           
+       },
+       
+       
+       removeDownload: function( id )
+       {
+           var controller = this;
+           controller.tribeif.removeDownload( id );
+
+        // JQuery does not accept special char like the '%'
+        // we have as id :-(.. so replace it
+        var encID = this.replaceSpecialChar(id, '%', "\\%");
+        
+        // remove from list, causing problems when reloading
+        //alert(this._dllist);
+//        this._dllist.splice(this._dllist.indexOf(id), 1);
+//        alert(this._dllist);        
+        delete this._downloads[id];
+        this.updateDLList();
+        
+        $('#' + encID).hide("slow");
+
+       },
+       
+       
+       updateDLList: function()
+       {
+           var dllist = [];
+           var dls = this._downloads;
+         
+           for (var key in dls){
+               dllist.push( key );
+           }
+           
+           this._dllist = dllist;
+       },
+       
+       reload: function( schedule )
+       {
+//        alert("reload");
+               var tr = this;
+               this.tribeif.getInitialDataFor( null ,function(downloads) 
+                   {
+                if (downloads) {
+                           // don't use effects adding torrents if we are just 
+                           // updating the current list
+                           // TODO now checking only the changes in amount of downloads.
+                           // There might be problems when removing and adding a download 
+                           // at the same time => ignore since it's not possible trough the interface!
+                           if ( downloads.length == tr._dllist.length ) {
+                               tr.updateDownloads(downloads);
+                           }
+                           
+                           else {
+                           
+                               // if we started a new video
+                               if ( downloads.length > tr._dllist.length ) {
+                                   // add the new downloads
+                                   tr.fadeInDL(downloads);
+                               }
+                               
+                               else if ( downloads.length < tr._dllist.length ) {
+
+                                   // it might be that the downloads have been removed
+                                   // by the engine since it has not been completly downloaded
+                                   var n = 0;
+                                   while ( n<tr._dllist.length) {                        
+                                   
+                                var missing = true;                                
+                                   for( var i=0, dl; dl=downloads[i]; ++i ) {   
+                                       if (tr._dllist[n] == dl.id) { 
+                                        missing = false;
+                                    }
+                                }
+                                
+                                if (missing) {
+                                    var encID = tr.replaceSpecialChar(tr._dllist[n], '%', "\\%");
+                                    delete tr._downloads[ tr._dllist[n] ];
+                                    tr.updateDLList();
+                                    $('#' + encID).hide("slow");                                
+                                }
+                                
+                                n++;
+                            }
+                            tr.refreshStats();
+
+                               }
+                                                               
+                               else {
+                                   alert ("TODO, different size");
+                           }
+                           
+                           }
+                }                                              
+               } );
+               
+           if ( schedule && !this._BPClosed) {
+               var timeout = this._refreshSec * 1000
+               setTimeout("tribe.reload(true)", timeout);
+           }
+
+       },
+       
+       updateDownloads: function( dls )
+       {
+
+        this._activeDLs = 0;
+        // Check if we are considering the same torrents.
+               for( var i=0, dl; dl=dls[i]; ++i ) {
+
+            var n=0;
+            var confirm = false;
+            while ( n<this._dllist.length ) {
+                
+                if (this._dllist[n] == dl.id) {
+//alert(this._downloads[ this._dllist[n] ]._status );
+                    // we update the statistics only if all the 
+                    // downloads are currently active!
+                    if (dl.status != "DLSTATUS_STOPPED") {
+                    //if (this._downloads[ this._dllist[n] ]._status != "DLSTATUS_STOPPED") {
+                        this._activeDLs++;
+                    }
+                    
+                    
+                    confirm = true;
+                }
+                
+                n++;
+            }
+            
+            // it might be that we changed page while a video was
+            // playing. => the video seems replaced
+            if (confirm == false) {
+                //TODO check, this is a hack to have an item reload
+                this._justPaused = true;
+                //alert("TODO, different hash"); 
+                //break;
+            }
+
+               }
+//        alert(activeDLs);
+//        alert(this._dllist.length);
+        // Update the active downloads
+        
+        // Replace all the list if all the downloads are being 
+        // updated
+        if (this._activeDLs || this._justPaused) {             
+            this._justPaused = false;
+                   // Remove the list of downloads and recreate it
+            $('#downloads_list').children(".dl").remove();
+
+            this.addDownloads( dls );
+        }
+
+        this.setHeadButtons();
+        //else { alert(activeDLs); }
+
+       },
+       
+       
+       // Now it just appends the new dowloads at the end of the list
+       // TODO put them in order immediatly
+       fadeInDL: function( downloads ) {
+       
+           this._activeDLs = 0;
+           var fragment = document.createDocumentFragment( );
+        var newDLs = [];
+        
+               for( var i=0, row; row=downloads[i]; ++i ) {
+                   
+                   var n=0;
+            var missing = true;
+
+
+            while ( n<this._dllist.length && missing) {
+
+                if (this._dllist[n] == row.id) {
+                    missing = false;
+                }
+
+                ++n;
+            }
+            
+            if (missing) {
+                newDLs.push(row);
+            }
+            
+            if (row._status != "DLSTATUS_STOPPED") {
+                this._activeDLs++;
+            }
+               
+               }
+               
+           if (newDLs.length > 0) {
+
+               for( var i=0, row; row=newDLs[i]; ++i ) {
+                           // create the new download elements
+                           var newDL = new Download( fragment, this, row );
+                           this._downloads[newDL.id()] = newDL;
+                   }
+                               
+               }
+               
+               this.updateDLList();
+
+        this.setHeadButtons();
+        // torrent container in HTML
+        $('#downloads_list').append( fragment );
+       },
+       
+       refreshStats: function()
+       {
+           var num_dl = this._dllist.length;
+
+        $('#num_dls')[0].innerHTML =  num_dl;
+        
+        var totUP = 0;
+        var totDOWN = 0;
+        
+        
+        for (var id in this._downloads)
+        {
+            totUP += parseFloat( this._downloads[id]._UPspeed );
+            totDOWN += parseFloat( this._downloads[id]._DOWNspeed );
+          
+        }
+        
+        $('#total_upload')[0].innerHTML =  totUP.toFixed(2) + " KB/s";
+        $('#total_download')[0].innerHTML =  totDOWN.toFixed(2) + " KB/s";
+
+       },
+       
+       replaceSpecialChar: function(stringIn, oldChar, newString)
+       {
+           var tmp = stringIn.split( oldChar );
+
+           var res = "";
+           
+           for (i in tmp)
+           {
+               if (tmp[i] != '')
+               {
+                res += newString + tmp[i];   
+               }
+           }
+           
+           return res;
+
+       },
+       
+       setHeadButtons: function ()
+       {
+           
+           var button = $('#pause_resume_all').children();
+           
+           // check which should be the status
+           if (this._activeDLs || !this._dllist.length ) {
+               //
+            $('#resume_all').hide();
+            $('#pause_all').show();
+       //        alert(this._activeDLs);
+    //     $('#pause_resume_all').parent().attr("id", "pause_all");
+    //        button.css("background-image", "url(webUI/images/pause_big.png)");
+    //        alert(button.text());
+//             $('#pause_all').mouseover( function(){ $(".head_button:first").css("background-image", "url(webUI/images/pause_big_blue.png)"); } );
+           }
+           
+           // show the resume button
+           
+           else {
+            $('#pause_all').hide();
+            $('#resume_all').show();
+//         $('#pause_resume_all').parent().attr("id", "resume_all");
+//            button.css("background-image", "url(webUI/images/resume_big.png)");
+           }
+       }
+       
+};
diff --git a/instrumentation/next-share/BaseLib/__init__.py b/instrumentation/next-share/BaseLib/__init__.py
new file mode 100644 (file)
index 0000000..32b0f65
--- /dev/null
@@ -0,0 +1,18 @@
+# Written by Arno Bakker
+# see LICENSE.txt for license information
+
+LIBRARYNAME = 'BaseLib'
+
+"""
+Path: Tribler
+URL: https://svn.tribler.org/abc/branches/release-m32/Tribler
+Repository Root: https://svn.tribler.org
+Repository UUID: 001aeff7-3401-0410-a489-f7902fc005dd
+Revision: 17078
+Node Kind: directory
+Schedule: normal
+Last Changed Author: arno
+Last Changed Rev: 17078
+Last Changed Date: 2010-08-27 14:53:15 +0200 (Fri, 27 Aug 2010)
+
+"""
diff --git a/instrumentation/next-share/BaseLib/binary-LICENSE-postfix.txt b/instrumentation/next-share/BaseLib/binary-LICENSE-postfix.txt
new file mode 100644 (file)
index 0000000..936128f
--- /dev/null
@@ -0,0 +1,865 @@
+\r
+\r
+------------------------------Python------------------------------------------\r
+A. HISTORY OF THE SOFTWARE\r
+==========================\r
+\r
+Python was created in the early 1990s by Guido van Rossum at Stichting\r
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\r
+as a successor of a language called ABC.  Guido remains Python's\r
+principal author, although it includes many contributions from others.\r
+\r
+In 1995, Guido continued his work on Python at the Corporation for\r
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\r
+in Reston, Virginia where he released several versions of the\r
+software.\r
+\r
+In May 2000, Guido and the Python core development team moved to\r
+BeOpen.com to form the BeOpen PythonLabs team.  In October of the same\r
+year, the PythonLabs team moved to Digital Creations (now Zope\r
+Corporation, see http://www.zope.com).  In 2001, the Python Software\r
+Foundation (PSF, see http://www.python.org/psf/) was formed, a\r
+non-profit organization created specifically to own Python-related\r
+Intellectual Property.  Zope Corporation is a sponsoring member of\r
+the PSF.\r
+\r
+All Python releases are Open Source (see http://www.opensource.org for\r
+the Open Source Definition).  Historically, most, but not all, Python\r
+releases have also been GPL-compatible; the table below summarizes\r
+the various releases.\r
+\r
+    Release         Derived     Year        Owner       GPL-\r
+                    from                                compatible? (1)\r
+\r
+    0.9.0 thru 1.2              1991-1995   CWI         yes\r
+    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes\r
+    1.6             1.5.2       2000        CNRI        no\r
+    2.0             1.6         2000        BeOpen.com  no\r
+    1.6.1           1.6         2001        CNRI        yes (2)\r
+    2.1             2.0+1.6.1   2001        PSF         no\r
+    2.0.1           2.0+1.6.1   2001        PSF         yes\r
+    2.1.1           2.1+2.0.1   2001        PSF         yes\r
+    2.2             2.1.1       2001        PSF         yes\r
+    2.1.2           2.1.1       2002        PSF         yes\r
+    2.1.3           2.1.2       2002        PSF         yes\r
+    2.2.1           2.2         2002        PSF         yes\r
+    2.2.2           2.2.1       2002        PSF         yes\r
+    2.2.3           2.2.2       2003        PSF         yes\r
+    2.3             2.2.2       2002-2003   PSF         yes\r
+    2.3.1           2.3         2002-2003   PSF         yes\r
+    2.3.2           2.3.1       2002-2003   PSF         yes\r
+    2.3.3           2.3.2       2002-2003   PSF         yes\r
+    2.3.4           2.3.3       2004        PSF         yes\r
+    2.3.5           2.3.4       2005        PSF         yes\r
+    2.4             2.3         2004        PSF         yes\r
+    2.4.1           2.4         2005        PSF         yes\r
+    2.4.2           2.4.1       2005        PSF         yes\r
+    2.4.3           2.4.2       2006        PSF         yes\r
+    2.4.4           2.4.3       2006        PSF         yes\r
+    2.5             2.4         2006        PSF         yes\r
+    2.5.1           2.5         2007        PSF         yes\r
+    2.5.2           2.5.2       2008        PSF         yes\r
+\r
+Footnotes:\r
+\r
+(1) GPL-compatible doesn't mean that we're distributing Python under\r
+    the GPL.  All Python licenses, unlike the GPL, let you distribute\r
+    a modified version without making your changes open source.  The\r
+    GPL-compatible licenses make it possible to combine Python with\r
+    other software that is released under the GPL; the others don't.\r
+\r
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\r
+    because its license has a choice of law clause.  According to\r
+    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\r
+    is "not incompatible" with the GPL.\r
+\r
+Thanks to the many outside volunteers who have worked under Guido's\r
+direction to make these releases possible.\r
+\r
+\r
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\r
+===============================================================\r
+\r
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\r
+--------------------------------------------\r
+\r
+1. This LICENSE AGREEMENT is between the Python Software Foundation\r
+("PSF"), and the Individual or Organization ("Licensee") accessing and\r
+otherwise using this software ("Python") in source or binary form and\r
+its associated documentation.\r
+\r
+2. Subject to the terms and conditions of this License Agreement, PSF\r
+hereby grants Licensee a nonexclusive, royalty-free, world-wide\r
+license to reproduce, analyze, test, perform and/or display publicly,\r
+prepare derivative works, distribute, and otherwise use Python\r
+alone or in any derivative version, provided, however, that PSF's\r
+License Agreement and PSF's notice of copyright, i.e., "Copyright (c)\r
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Python Software Foundation; \r
+All Rights Reserved" are retained in Python alone or in any derivative \r
+version prepared by Licensee.\r
+\r
+3. In the event Licensee prepares a derivative work that is based on\r
+or incorporates Python or any part thereof, and wants to make\r
+the derivative work available to others as provided herein, then\r
+Licensee hereby agrees to include in any such work a brief summary of\r
+the changes made to Python.\r
+\r
+4. PSF is making Python available to Licensee on an "AS IS"\r
+basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\r
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\r
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\r
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\r
+INFRINGE ANY THIRD PARTY RIGHTS.\r
+\r
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\r
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\r
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\r
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\r
+\r
+6. This License Agreement will automatically terminate upon a material\r
+breach of its terms and conditions.\r
+\r
+7. Nothing in this License Agreement shall be deemed to create any\r
+relationship of agency, partnership, or joint venture between PSF and\r
+Licensee.  This License Agreement does not grant permission to use PSF\r
+trademarks or trade name in a trademark sense to endorse or promote\r
+products or services of Licensee, or any third party.\r
+\r
+8. By copying, installing or otherwise using Python, Licensee\r
+agrees to be bound by the terms and conditions of this License\r
+Agreement.\r
+\r
+\r
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\r
+-------------------------------------------\r
+\r
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\r
+\r
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an\r
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\r
+Individual or Organization ("Licensee") accessing and otherwise using\r
+this software in source or binary form and its associated\r
+documentation ("the Software").\r
+\r
+2. Subject to the terms and conditions of this BeOpen Python License\r
+Agreement, BeOpen hereby grants Licensee a non-exclusive,\r
+royalty-free, world-wide license to reproduce, analyze, test, perform\r
+and/or display publicly, prepare derivative works, distribute, and\r
+otherwise use the Software alone or in any derivative version,\r
+provided, however, that the BeOpen Python License is retained in the\r
+Software, alone or in any derivative version prepared by Licensee.\r
+\r
+3. BeOpen is making the Software available to Licensee on an "AS IS"\r
+basis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\r
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\r
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\r
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\r
+INFRINGE ANY THIRD PARTY RIGHTS.\r
+\r
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\r
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\r
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\r
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\r
+\r
+5. This License Agreement will automatically terminate upon a material\r
+breach of its terms and conditions.\r
+\r
+6. This License Agreement shall be governed by and interpreted in all\r
+respects by the law of the State of California, excluding conflict of\r
+law provisions.  Nothing in this License Agreement shall be deemed to\r
+create any relationship of agency, partnership, or joint venture\r
+between BeOpen and Licensee.  This License Agreement does not grant\r
+permission to use BeOpen trademarks or trade names in a trademark\r
+sense to endorse or promote products or services of Licensee, or any\r
+third party.  As an exception, the "BeOpen Python" logos available at\r
+http://www.pythonlabs.com/logos.html may be used according to the\r
+permissions granted on that web page.\r
+\r
+7. By copying, installing or otherwise using the software, Licensee\r
+agrees to be bound by the terms and conditions of this License\r
+Agreement.\r
+\r
+\r
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\r
+---------------------------------------\r
+\r
+1. This LICENSE AGREEMENT is between the Corporation for National\r
+Research Initiatives, having an office at 1895 Preston White Drive,\r
+Reston, VA 20191 ("CNRI"), and the Individual or Organization\r
+("Licensee") accessing and otherwise using Python 1.6.1 software in\r
+source or binary form and its associated documentation.\r
+\r
+2. Subject to the terms and conditions of this License Agreement, CNRI\r
+hereby grants Licensee a nonexclusive, royalty-free, world-wide\r
+license to reproduce, analyze, test, perform and/or display publicly,\r
+prepare derivative works, distribute, and otherwise use Python 1.6.1\r
+alone or in any derivative version, provided, however, that CNRI's\r
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)\r
+1995-2001 Corporation for National Research Initiatives; All Rights\r
+Reserved" are retained in Python 1.6.1 alone or in any derivative\r
+version prepared by Licensee.  Alternately, in lieu of CNRI's License\r
+Agreement, Licensee may substitute the following text (omitting the\r
+quotes): "Python 1.6.1 is made available subject to the terms and\r
+conditions in CNRI's License Agreement.  This Agreement together with\r
+Python 1.6.1 may be located on the Internet using the following\r
+unique, persistent identifier (known as a handle): 1895.22/1013.  This\r
+Agreement may also be obtained from a proxy server on the Internet\r
+using the following URL: http://hdl.handle.net/1895.22/1013".\r
+\r
+3. In the event Licensee prepares a derivative work that is based on\r
+or incorporates Python 1.6.1 or any part thereof, and wants to make\r
+the derivative work available to others as provided herein, then\r
+Licensee hereby agrees to include in any such work a brief summary of\r
+the changes made to Python 1.6.1.\r
+\r
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"\r
+basis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\r
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\r
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\r
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\r
+INFRINGE ANY THIRD PARTY RIGHTS.\r
+\r
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\r
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\r
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\r
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\r
+\r
+6. This License Agreement will automatically terminate upon a material\r
+breach of its terms and conditions.\r
+\r
+7. This License Agreement shall be governed by the federal\r
+intellectual property law of the United States, including without\r
+limitation the federal copyright law, and, to the extent such\r
+U.S. federal law does not apply, by the law of the Commonwealth of\r
+Virginia, excluding Virginia's conflict of law provisions.\r
+Notwithstanding the foregoing, with regard to derivative works based\r
+on Python 1.6.1 that incorporate non-separable material that was\r
+previously distributed under the GNU General Public License (GPL), the\r
+law of the Commonwealth of Virginia shall govern this License\r
+Agreement only as to issues arising under or with respect to\r
+Paragraphs 4, 5, and 7 of this License Agreement.  Nothing in this\r
+License Agreement shall be deemed to create any relationship of\r
+agency, partnership, or joint venture between CNRI and Licensee.  This\r
+License Agreement does not grant permission to use CNRI trademarks or\r
+trade name in a trademark sense to endorse or promote products or\r
+services of Licensee, or any third party.\r
+\r
+8. By clicking on the "ACCEPT" button where indicated, or by copying,\r
+installing or otherwise using Python 1.6.1, Licensee agrees to be\r
+bound by the terms and conditions of this License Agreement.\r
+\r
+        ACCEPT\r
+\r
+\r
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\r
+--------------------------------------------------\r
+\r
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\r
+The Netherlands.  All rights reserved.\r
+\r
+Permission to use, copy, modify, and distribute this software and its\r
+documentation for any purpose and without fee is hereby granted,\r
+provided that the above copyright notice appear in all copies and that\r
+both that copyright notice and this permission notice appear in\r
+supporting documentation, and that the name of Stichting Mathematisch\r
+Centrum or CWI not be used in advertising or publicity pertaining to\r
+distribution of the software without specific, written prior\r
+permission.\r
+\r
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\r
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\r
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\r
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\r
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\r
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\r
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\r
+\r
+\r
+\r
+------------------------------OpenSSL-----------------------------------------\r
+\r
+\r
+  LICENSE ISSUES\r
+  ==============\r
+\r
+  The OpenSSL toolkit stays under a dual license, i.e. both the conditions of\r
+  the OpenSSL License and the original SSLeay license apply to the toolkit.\r
+  See below for the actual license texts. Actually both licenses are BSD-style\r
+  Open Source licenses. In case of any license issues related to OpenSSL\r
+  please contact openssl-core@openssl.org.\r
+\r
+  OpenSSL License\r
+  ---------------\r
+\r
+/* ====================================================================\r
+ * Copyright (c) 1998-2008 The OpenSSL Project.  All rights reserved.\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * 1. Redistributions of source code must retain the above copyright\r
+ *    notice, this list of conditions and the following disclaimer. \r
+ *\r
+ * 2. Redistributions in binary form must reproduce the above copyright\r
+ *    notice, this list of conditions and the following disclaimer in\r
+ *    the documentation and/or other materials provided with the\r
+ *    distribution.\r
+ *\r
+ * 3. All advertising materials mentioning features or use of this\r
+ *    software must display the following acknowledgment:\r
+ *    "This product includes software developed by the OpenSSL Project\r
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"\r
+ *\r
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to\r
+ *    endorse or promote products derived from this software without\r
+ *    prior written permission. For written permission, please contact\r
+ *    openssl-core@openssl.org.\r
+ *\r
+ * 5. Products derived from this software may not be called "OpenSSL"\r
+ *    nor may "OpenSSL" appear in their names without prior written\r
+ *    permission of the OpenSSL Project.\r
+ *\r
+ * 6. Redistributions of any form whatsoever must retain the following\r
+ *    acknowledgment:\r
+ *    "This product includes software developed by the OpenSSL Project\r
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY\r
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\r
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR\r
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\r
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\r
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\r
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\r
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\r
+ * OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ * ====================================================================\r
+ *\r
+ * This product includes cryptographic software written by Eric Young\r
+ * (eay@cryptsoft.com).  This product includes software written by Tim\r
+ * Hudson (tjh@cryptsoft.com).\r
+ *\r
+ */\r
+\r
+ Original SSLeay License\r
+ -----------------------\r
+\r
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)\r
+ * All rights reserved.\r
+ *\r
+ * This package is an SSL implementation written\r
+ * by Eric Young (eay@cryptsoft.com).\r
+ * The implementation was written so as to conform with Netscapes SSL.\r
+ * \r
+ * This library is free for commercial and non-commercial use as long as\r
+ * the following conditions are aheared to.  The following conditions\r
+ * apply to all code found in this distribution, be it the RC4, RSA,\r
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation\r
+ * included with this distribution is covered by the same copyright terms\r
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).\r
+ * \r
+ * Copyright remains Eric Young's, and as such any Copyright notices in\r
+ * the code are not to be removed.\r
+ * If this package is used in a product, Eric Young should be given attribution\r
+ * as the author of the parts of the library used.\r
+ * This can be in the form of a textual message at program startup or\r
+ * in documentation (online or textual) provided with the package.\r
+ * \r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ * 1. Redistributions of source code must retain the copyright\r
+ *    notice, this list of conditions and the following disclaimer.\r
+ * 2. Redistributions in binary form must reproduce the above copyright\r
+ *    notice, this list of conditions and the following disclaimer in the\r
+ *    documentation and/or other materials provided with the distribution.\r
+ * 3. All advertising materials mentioning features or use of this software\r
+ *    must display the following acknowledgement:\r
+ *    "This product includes cryptographic software written by\r
+ *     Eric Young (eay@cryptsoft.com)"\r
+ *    The word 'cryptographic' can be left out if the rouines from the library\r
+ *    being used are not cryptographic related :-).\r
+ * 4. If you include any Windows specific code (or a derivative thereof) from \r
+ *    the apps directory (application code) you must include an acknowledgement:\r
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"\r
+ * \r
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND\r
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\r
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\r
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\r
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\r
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\r
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\r
+ * SUCH DAMAGE.\r
+ * \r
+ * The licence and distribution terms for any publically available version or\r
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be\r
+ * copied and put under another distribution licence\r
+ * [including the GNU Public Licence.]\r
+ */\r
+\r
+\r
+\r
+------------------------------wxPython----------------------------------------\r
+\r
+                wxWindows Library Licence, Version 3.1\r
+                ======================================\r
+\r
+  Copyright (c) 1998-2005 Julian Smart, Robert Roebling et al\r
+\r
+  Everyone is permitted to copy and distribute verbatim copies\r
+  of this licence document, but changing it is not allowed.\r
+\r
+                       WXWINDOWS LIBRARY LICENCE\r
+     TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r
+  \r
+  This library is free software; you can redistribute it and/or modify it\r
+  under the terms of the GNU Library General Public Licence as published by\r
+  the Free Software Foundation; either version 2 of the Licence, or (at\r
+  your option) any later version.\r
+  \r
+  This library is distributed in the hope that it will be useful, but\r
+  WITHOUT ANY WARRANTY; without even the implied warranty of\r
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library\r
+  General Public Licence for more details.\r
+\r
+  You should have received a copy of the GNU Library General Public Licence\r
+  along with this software, usually in a file named COPYING.LIB.  If not,\r
+  write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,\r
+  Boston, MA 02111-1307 USA.\r
+\r
+  EXCEPTION NOTICE\r
+\r
+  1. As a special exception, the copyright holders of this library give\r
+  permission for additional uses of the text contained in this release of\r
+  the library as licenced under the wxWindows Library Licence, applying\r
+  either version 3.1 of the Licence, or (at your option) any later version of\r
+  the Licence as published by the copyright holders of version\r
+  3.1 of the Licence document.\r
+\r
+  2. The exception is that you may use, copy, link, modify and distribute\r
+  under your own terms, binary object code versions of works based\r
+  on the Library.\r
+\r
+  3. If you copy code from files distributed under the terms of the GNU\r
+  General Public Licence or the GNU Library General Public Licence into a\r
+  copy of this library, as this licence permits, the exception does not\r
+  apply to the code that you add in this way.  To avoid misleading anyone as\r
+  to the status of such modified files, you must delete this exception\r
+  notice from such code and/or adjust the licensing conditions notice\r
+  accordingly.\r
+\r
+  4. If you write modifications of your own for this library, it is your\r
+  choice whether to permit this exception to apply to your modifications. \r
+  If you do not wish that, you must delete the exception notice from such\r
+  code and/or adjust the licensing conditions notice accordingly.\r
+\r
+\r
+\r
+\r
+\r
+------------------------------VideoLAN Client---------------------------------\r
+\r
+                   GNU GENERAL PUBLIC LICENSE\r
+                      Version 2, June 1991\r
+\r
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.\r
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\r
+ Everyone is permitted to copy and distribute verbatim copies\r
+ of this license document, but changing it is not allowed.\r
+\r
+                           Preamble\r
+\r
+  The licenses for most software are designed to take away your\r
+freedom to share and change it.  By contrast, the GNU General Public\r
+License is intended to guarantee your freedom to share and change free\r
+software--to make sure the software is free for all its users.  This\r
+General Public License applies to most of the Free Software\r
+Foundation's software and to any other program whose authors commit to\r
+using it.  (Some other Free Software Foundation software is covered by\r
+the GNU Library General Public License instead.)  You can apply it to\r
+your programs, too.\r
+\r
+  When we speak of free software, we are referring to freedom, not\r
+price.  Our General Public Licenses are designed to make sure that you\r
+have the freedom to distribute copies of free software (and charge for\r
+this service if you wish), that you receive source code or can get it\r
+if you want it, that you can change the software or use pieces of it\r
+in new free programs; and that you know you can do these things.\r
+\r
+  To protect your rights, we need to make restrictions that forbid\r
+anyone to deny you these rights or to ask you to surrender the rights.\r
+These restrictions translate to certain responsibilities for you if you\r
+distribute copies of the software, or if you modify it.\r
+\r
+  For example, if you distribute copies of such a program, whether\r
+gratis or for a fee, you must give the recipients all the rights that\r
+you have.  You must make sure that they, too, receive or can get the\r
+source code.  And you must show them these terms so they know their\r
+rights.\r
+\r
+  We protect your rights with two steps: (1) copyright the software, and\r
+(2) offer you this license which gives you legal permission to copy,\r
+distribute and/or modify the software.\r
+\r
+  Also, for each author's protection and ours, we want to make certain\r
+that everyone understands that there is no warranty for this free\r
+software.  If the software is modified by someone else and passed on, we\r
+want its recipients to know that what they have is not the original, so\r
+that any problems introduced by others will not reflect on the original\r
+authors' reputations.\r
+\r
+  Finally, any free program is threatened constantly by software\r
+patents.  We wish to avoid the danger that redistributors of a free\r
+program will individually obtain patent licenses, in effect making the\r
+program proprietary.  To prevent this, we have made it clear that any\r
+patent must be licensed for everyone's free use or not licensed at all.\r
+\r
+  The precise terms and conditions for copying, distribution and\r
+modification follow.\r
+\f\r
+                   GNU GENERAL PUBLIC LICENSE\r
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r
+\r
+  0. This License applies to any program or other work which contains\r
+a notice placed by the copyright holder saying it may be distributed\r
+under the terms of this General Public License.  The "Program", below,\r
+refers to any such program or work, and a "work based on the Program"\r
+means either the Program or any derivative work under copyright law:\r
+that is to say, a work containing the Program or a portion of it,\r
+either verbatim or with modifications and/or translated into another\r
+language.  (Hereinafter, translation is included without limitation in\r
+the term "modification".)  Each licensee is addressed as "you".\r
+\r
+Activities other than copying, distribution and modification are not\r
+covered by this License; they are outside its scope.  The act of\r
+running the Program is not restricted, and the output from the Program\r
+is covered only if its contents constitute a work based on the\r
+Program (independent of having been made by running the Program).\r
+Whether that is true depends on what the Program does.\r
+\r
+  1. You may copy and distribute verbatim copies of the Program's\r
+source code as you receive it, in any medium, provided that you\r
+conspicuously and appropriately publish on each copy an appropriate\r
+copyright notice and disclaimer of warranty; keep intact all the\r
+notices that refer to this License and to the absence of any warranty;\r
+and give any other recipients of the Program a copy of this License\r
+along with the Program.\r
+\r
+You may charge a fee for the physical act of transferring a copy, and\r
+you may at your option offer warranty protection in exchange for a fee.\r
+\r
+  2. You may modify your copy or copies of the Program or any portion\r
+of it, thus forming a work based on the Program, and copy and\r
+distribute such modifications or work under the terms of Section 1\r
+above, provided that you also meet all of these conditions:\r
+\r
+    a) You must cause the modified files to carry prominent notices\r
+    stating that you changed the files and the date of any change.\r
+\r
+    b) You must cause any work that you distribute or publish, that in\r
+    whole or in part contains or is derived from the Program or any\r
+    part thereof, to be licensed as a whole at no charge to all third\r
+    parties under the terms of this License.\r
+\r
+    c) If the modified program normally reads commands interactively\r
+    when run, you must cause it, when started running for such\r
+    interactive use in the most ordinary way, to print or display an\r
+    announcement including an appropriate copyright notice and a\r
+    notice that there is no warranty (or else, saying that you provide\r
+    a warranty) and that users may redistribute the program under\r
+    these conditions, and telling the user how to view a copy of this\r
+    License.  (Exception: if the Program itself is interactive but\r
+    does not normally print such an announcement, your work based on\r
+    the Program is not required to print an announcement.)\r
+\f\r
+These requirements apply to the modified work as a whole.  If\r
+identifiable sections of that work are not derived from the Program,\r
+and can be reasonably considered independent and separate works in\r
+themselves, then this License, and its terms, do not apply to those\r
+sections when you distribute them as separate works.  But when you\r
+distribute the same sections as part of a whole which is a work based\r
+on the Program, the distribution of the whole must be on the terms of\r
+this License, whose permissions for other licensees extend to the\r
+entire whole, and thus to each and every part regardless of who wrote it.\r
+\r
+Thus, it is not the intent of this section to claim rights or contest\r
+your rights to work written entirely by you; rather, the intent is to\r
+exercise the right to control the distribution of derivative or\r
+collective works based on the Program.\r
+\r
+In addition, mere aggregation of another work not based on the Program\r
+with the Program (or with a work based on the Program) on a volume of\r
+a storage or distribution medium does not bring the other work under\r
+the scope of this License.\r
+\r
+  3. You may copy and distribute the Program (or a work based on it,\r
+under Section 2) in object code or executable form under the terms of\r
+Sections 1 and 2 above provided that you also do one of the following:\r
+\r
+    a) Accompany it with the complete corresponding machine-readable\r
+    source code, which must be distributed under the terms of Sections\r
+    1 and 2 above on a medium customarily used for software interchange; or,\r
+\r
+    b) Accompany it with a written offer, valid for at least three\r
+    years, to give any third party, for a charge no more than your\r
+    cost of physically performing source distribution, a complete\r
+    machine-readable copy of the corresponding source code, to be\r
+    distributed under the terms of Sections 1 and 2 above on a medium\r
+    customarily used for software interchange; or,\r
+\r
+    c) Accompany it with the information you received as to the offer\r
+    to distribute corresponding source code.  (This alternative is\r
+    allowed only for noncommercial distribution and only if you\r
+    received the program in object code or executable form with such\r
+    an offer, in accord with Subsection b above.)\r
+\r
+The source code for a work means the preferred form of the work for\r
+making modifications to it.  For an executable work, complete source\r
+code means all the source code for all modules it contains, plus any\r
+associated interface definition files, plus the scripts used to\r
+control compilation and installation of the executable.  However, as a\r
+special exception, the source code distributed need not include\r
+anything that is normally distributed (in either source or binary\r
+form) with the major components (compiler, kernel, and so on) of the\r
+operating system on which the executable runs, unless that component\r
+itself accompanies the executable.\r
+\r
+If distribution of executable or object code is made by offering\r
+access to copy from a designated place, then offering equivalent\r
+access to copy the source code from the same place counts as\r
+distribution of the source code, even though third parties are not\r
+compelled to copy the source along with the object code.\r
+\f\r
+  4. You may not copy, modify, sublicense, or distribute the Program\r
+except as expressly provided under this License.  Any attempt\r
+otherwise to copy, modify, sublicense or distribute the Program is\r
+void, and will automatically terminate your rights under this License.\r
+However, parties who have received copies, or rights, from you under\r
+this License will not have their licenses terminated so long as such\r
+parties remain in full compliance.\r
+\r
+  5. You are not required to accept this License, since you have not\r
+signed it.  However, nothing else grants you permission to modify or\r
+distribute the Program or its derivative works.  These actions are\r
+prohibited by law if you do not accept this License.  Therefore, by\r
+modifying or distributing the Program (or any work based on the\r
+Program), you indicate your acceptance of this License to do so, and\r
+all its terms and conditions for copying, distributing or modifying\r
+the Program or works based on it.\r
+\r
+  6. Each time you redistribute the Program (or any work based on the\r
+Program), the recipient automatically receives a license from the\r
+original licensor to copy, distribute or modify the Program subject to\r
+these terms and conditions.  You may not impose any further\r
+restrictions on the recipients' exercise of the rights granted herein.\r
+You are not responsible for enforcing compliance by third parties to\r
+this License.\r
+\r
+  7. If, as a consequence of a court judgment or allegation of patent\r
+infringement or for any other reason (not limited to patent issues),\r
+conditions are imposed on you (whether by court order, agreement or\r
+otherwise) that contradict the conditions of this License, they do not\r
+excuse you from the conditions of this License.  If you cannot\r
+distribute so as to satisfy simultaneously your obligations under this\r
+License and any other pertinent obligations, then as a consequence you\r
+may not distribute the Program at all.  For example, if a patent\r
+license would not permit royalty-free redistribution of the Program by\r
+all those who receive copies directly or indirectly through you, then\r
+the only way you could satisfy both it and this License would be to\r
+refrain entirely from distribution of the Program.\r
+\r
+If any portion of this section is held invalid or unenforceable under\r
+any particular circumstance, the balance of the section is intended to\r
+apply and the section as a whole is intended to apply in other\r
+circumstances.\r
+\r
+It is not the purpose of this section to induce you to infringe any\r
+patents or other property right claims or to contest validity of any\r
+such claims; this section has the sole purpose of protecting the\r
+integrity of the free software distribution system, which is\r
+implemented by public license practices.  Many people have made\r
+generous contributions to the wide range of software distributed\r
+through that system in reliance on consistent application of that\r
+system; it is up to the author/donor to decide if he or she is willing\r
+to distribute software through any other system and a licensee cannot\r
+impose that choice.\r
+\r
+This section is intended to make thoroughly clear what is believed to\r
+be a consequence of the rest of this License.\r
+\f\r
+  8. If the distribution and/or use of the Program is restricted in\r
+certain countries either by patents or by copyrighted interfaces, the\r
+original copyright holder who places the Program under this License\r
+may add an explicit geographical distribution limitation excluding\r
+those countries, so that distribution is permitted only in or among\r
+countries not thus excluded.  In such case, this License incorporates\r
+the limitation as if written in the body of this License.\r
+\r
+  9. The Free Software Foundation may publish revised and/or new versions\r
+of the General Public License from time to time.  Such new versions will\r
+be similar in spirit to the present version, but may differ in detail to\r
+address new problems or concerns.\r
+\r
+Each version is given a distinguishing version number.  If the Program\r
+specifies a version number of this License which applies to it and "any\r
+later version", you have the option of following the terms and conditions\r
+either of that version or of any later version published by the Free\r
+Software Foundation.  If the Program does not specify a version number of\r
+this License, you may choose any version ever published by the Free Software\r
+Foundation.\r
+\r
+  10. If you wish to incorporate parts of the Program into other free\r
+programs whose distribution conditions are different, write to the author\r
+to ask for permission.  For software which is copyrighted by the Free\r
+Software Foundation, write to the Free Software Foundation; we sometimes\r
+make exceptions for this.  Our decision will be guided by the two goals\r
+of preserving the free status of all derivatives of our free software and\r
+of promoting the sharing and reuse of software generally.\r
+\r
+                           NO WARRANTY\r
+\r
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\r
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN\r
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\r
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\r
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS\r
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE\r
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\r
+REPAIR OR CORRECTION.\r
+\r
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\r
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\r
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\r
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\r
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\r
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\r
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\r
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\r
+POSSIBILITY OF SUCH DAMAGES.\r
+\r
+                    END OF TERMS AND CONDITIONS\r
+\f\r
+           How to Apply These Terms to Your New Programs\r
+\r
+  If you develop a new program, and you want it to be of the greatest\r
+possible use to the public, the best way to achieve this is to make it\r
+free software which everyone can redistribute and change under these terms.\r
+\r
+  To do so, attach the following notices to the program.  It is safest\r
+to attach them to the start of each source file to most effectively\r
+convey the exclusion of warranty; and each file should have at least\r
+the "copyright" line and a pointer to where the full notice is found.\r
+\r
+    <one line to give the program's name and a brief idea of what it does.>\r
+    Copyright (C) <year>  <name of author>\r
+\r
+    This program is free software; you can redistribute it and/or modify\r
+    it under the terms of the GNU General Public License as published by\r
+    the Free Software Foundation; either version 2 of the License, or\r
+    (at your option) any later version.\r
+\r
+    This program is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
+    GNU General Public License for more details.\r
+\r
+    You should have received a copy of the GNU General Public License\r
+    along with this program; if not, write to the Free Software\r
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\r
+\r
+\r
+Also add information on how to contact you by electronic and paper mail.\r
+\r
+If the program is interactive, make it output a short notice like this\r
+when it starts in an interactive mode:\r
+\r
+    Gnomovision version 69, Copyright (C) year  name of author\r
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\r
+    This is free software, and you are welcome to redistribute it\r
+    under certain conditions; type `show c' for details.\r
+\r
+The hypothetical commands `show w' and `show c' should show the appropriate\r
+parts of the General Public License.  Of course, the commands you use may\r
+be called something other than `show w' and `show c'; they could even be\r
+mouse-clicks or menu items--whatever suits your program.\r
+\r
+You should also get your employer (if you work as a programmer) or your\r
+school, if any, to sign a "copyright disclaimer" for the program, if\r
+necessary.  Here is a sample; alter the names:\r
+\r
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program\r
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.\r
+\r
+  <signature of Ty Coon>, 1 April 1989\r
+  Ty Coon, President of Vice\r
+\r
+This General Public License does not permit incorporating your program into\r
+proprietary programs.  If your program is a subroutine library, you may\r
+consider it more useful to permit linking proprietary applications with the\r
+library.  If this is what you want to do, use the GNU Library General\r
+Public License instead of this License.\r
+\r
+\r
+-------------------------------FFMPEG-----------------------------------------\r
+\r
+FFmpeg:\r
+-------\r
+\r
+Most files in FFmpeg are under the GNU Lesser General Public License version 2.1\r
+or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other\r
+files have MIT/X11/BSD-style licenses. In combination the LGPL v2.1+ applies to\r
+FFmpeg.\r
+\r
+Some optional parts of FFmpeg are licensed under the GNU General Public License\r
+version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of\r
+these parts are used by default, you have to explicitly pass --enable-gpl to\r
+configure to activate them. In this case, FFmpeg's license changes to GPL v2+.\r
+\r
+Specifically, the GPL parts of FFmpeg are\r
+\r
+- libpostproc\r
+- some x86 optimizations in libswscale\r
+- optional x86 optimizations in the files\r
+  libavcodec/x86/h264_deblock_sse2.asm\r
+  libavcodec/x86/h264_idct_sse2.asm\r
+  libavcodec/x86/idct_mmx.c\r
+- the X11 grabber in libavdevice/x11grab.c\r
+\r
+There are a handful of files under other licensing terms, namely:\r
+\r
+* The files libavcodec/jfdctfst.c, libavcodec/jfdctint.c, libavcodec/jrevdct.c\r
+  are taken from libjpeg, see the top of the files for licensing details.\r
+\r
+Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then\r
+the configure parameter --enable-version3 will activate this licensing option\r
+for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,\r
+COPYING.GPLv3 to learn the exact legal terms that apply in this case.\r
+\r
+\r
+external libraries:\r
+-------------------\r
+\r
+Some external libraries, e.g. libx264, are under GPL and can be used in\r
+conjunction with FFmpeg. They require --enable-gpl to be passed to configure\r
+as well.\r
+\r
+The OpenCORE external libraries are under the Apache License 2.0. That license\r
+is incompatible with the LGPL v2.1 and the GPL v2, but not with version 3 of\r
+those licenses. So to combine the OpenCORE libraries with FFmpeg, the license\r
+version needs to be upgraded by passing --enable-version3 to configure.\r
+\r
+The nonfree external libraries libamrnb, libamrwb and libfaac can be hooked up\r
+in FFmpeg. You need to pass --enable-nonfree to configure to enable them. Employ\r
+this option with care as FFmpeg then becomes nonfree and unredistributable.\r
+Note that libfaac claims to be LGPL, but is not.\r
+\r
+-------------------------------------------------------------------------------\r
diff --git a/instrumentation/next-share/BaseLib/ns-LICENSE.txt b/instrumentation/next-share/BaseLib/ns-LICENSE.txt
new file mode 100644 (file)
index 0000000..ef95946
--- /dev/null
@@ -0,0 +1,970 @@
+------------------------------------------------------------------------------\r
+\r
+    Next-Share content-delivery library.\r
+\r
+    The research leading to this library has received funding from the European\r
+    Community's Seventh Framework Programme in the P2P-Next project under grant\r
+    agreement no 216217.\r
+\r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    The following library modules are Copyright (c) 2008-2012, VTT Technical Research Centre of Finland; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Norut AS; All rights reserved:\r
+      BaseLib/Core/Multicast/*\r
+      BaseLib/Core/Statistics/Status/*\r
+      BaseLib/Core/ClosedSwarm/*\r
+      BaseLib/Player/swarmplayer-njaal.py\r
+      BaseLib/Plugin/BackgroundProcess-njaal.py\r
+      BaseLib/Test/test_closedswarm.py\r
+      BaseLib/Test/test_status.py\r
+      BaseLib/Tools/createlivestream-njaal.py\r
+      BaseLib/Tools/createpoa.py\r
+      BaseLib/Tools/trial_poa_server.py\r
+      BaseLib/UPnP/*\r
+      BaseLib/Test/test_upnp.py\r
+\r
+    The following library modules are Copyright (c) 2008-2012, DACC Systems AB; All rights reserved:\r
+      DACC/transfer.php\r
\r
+    The following library modules are Copyright (c) 2008-2012, Lancaster University; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Jožef Stefan Institute; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, First Oversi Ltd.; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, TECHNISCHE UNIVERSITEIT DELFT; All rights reserved:\r
+      BaseLib/Core/NATFirewall/NatCheck.py\r
+      BaseLib/Core/NATFirewall/TimeoutCheck.py\r
+      BaseLib/Core/NATFirewall/NatCheckMsgHandler.py\r
+      BaseLib/Policies/SeedingManager.py\r
+      BaseLib/Core/Statistics/SeedingStatsCrawler.py\r
+      BaseLib/Core/CacheDB/SqliteSeedingStatsCacheDB.py\r
+      BaseLib/Core/BuddyCast/moderationcast.py\r
+      BaseLib/Core/BuddyCast/moderationcast_util.py\r
+      BaseLib/Core/BuddyCast/votecast.py\r
+      BaseLib/Core/CacheDB/maxflow.py\r
+      BaseLib/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py\r
+      BaseLib/Core/NATFirewall/ConnectionCheck.py\r
+      BaseLib/Core/NATFirewall/NatTraversal.py\r
+      BaseLib/Core/Search/Reranking.py\r
+      BaseLib/Core/Statistics/tribler_videoplayback_stats.sql\r
+      BaseLib/Core/Statistics/VideoPlaybackCrawler.py\r
+      BaseLib/Core/Utilities/Crypto.py\r
+      BaseLib/Images/\r
+      BaseLib/Player/BaseApp.py\r
+      BaseLib/Player/EmbeddedPlayer4Frame.py\r
+      BaseLib/Player/PlayerVideoFrame.py\r
+      BaseLib/Plugin\r
+      BaseLib/Test/test_multicast.py\r
+      BaseLib/Test/test_na_extend_hs.py\r
+      BaseLib/Test/test_na_extend_hs.sh\r
+      BaseLib/Test/test_sqlitecachedbhandler.sh\r
+      BaseLib/Tools/dirtrackerseeder.py\r
+      BaseLib/Tools/pipe-babscam-h264-nosound-mencoder.sh\r
+      BaseLib/Tools/superpeer.py\r
+      BaseLib/Utilities/LinuxSingleInstanceChecker.py\r
+      BaseLib/Video/Images\r
+      BaseLib/Video/VideoFrame.py\r
+      reset.bat\r
+      reset-keepid.bat\r
+      BaseLib/Core/Video/PiecePickerSVC.py\r
+      BaseLib/Core/Video/SVCTransporter.py\r
+      BaseLib/Core/Video/SVCVideoStatus.py\r
+      BaseLib/schema_sdb_v5.sql\r
+      BaseLib/Core/APIImplementation/makeurl.py\r
+      BaseLib/Core/BuddyCast/channelcast.py\r
+      BaseLib/Core/DecentralizedTracking/repex.py\r
+      BaseLib/Core/NATFirewall/TimeoutFinder.py\r
+      BaseLib/Core/NATFirewall/UDPPuncture.py\r
+      BaseLib/Core/Statistics/RepexCrawler.py\r
+      BaseLib/Debug/*\r
+      BaseLib/Tools/createtorrent.py\r
+      BaseLib/Tools/pingbackserver.py\r
+      BaseLib/Tools/seeking.py\r
+      BaseLib/Tools/stunserver.py\r
+      lucid-xpicreate.sh\r
+      patentfreevlc.bat\r
+      BaseLib/Core/BitTornado/BT1/GetRightHTTPDownloader.py\r
+      BaseLib/Core/BitTornado/BT1/HoffmanHTTPDownloader.py\r
+      BaseLib/Core/CacheDB/MetadataDBHandler.py\r
+      BaseLib/Core/DecentralizedTracking/MagnetLink/*\r
+      BaseLib/Core/Subtitles/*\r
+      BaseLib/Images/SwarmServerIcon.ico\r
+      BaseLib/Main/Build/Ubuntu/tribler.gconf-defaults\r
+      BaseLib/Main/Utility/logging_util.py\r
+      BaseLib/Main/vwxGUI/ChannelsPanel.py\r
+      BaseLib/Main/vwxGUI/images/iconSaved_state4.png\r
+      BaseLib/schema_sdb_v5.sql\r
+      BaseLib/Test/Core/*\r
+      BaseLib/Test/extend_hs_dir/proxyservice.test.torrent\r
+      BaseLib/Test/subtitles_test_res\r
+      BaseLib/Test/test_channelcast_plus_subtitles.py\r
+      BaseLib/Test/test_magnetlink.py\r
+      BaseLib/Test/test_miscutils.py\r
+      BaseLib/Test/test_subtitles.bat\r
+      BaseLib/Test/test_subtitles_isolation.py\r
+      BaseLib/Test/test_subtitles_msgs.py\r
+      BaseLib/Test/test_subtitles.sh\r
+      BaseLib/Test/test_threadpool.py\r
+      BaseLib/Tools/dirtracker.py\r
+      BaseLib/Tools/duration2torrent.py\r
+      BaseLib/Tools/httpseeder.py\r
+      BaseLib/Transport/*\r
+      BaseLib/Video/Ogg.py\r
+      BaseLib/WebUI/*\r
+      xpitransmakedeb.sh\r
+      xpitransmakedist.bat\r
+      xpitransmakedist.sh\r
+      xpitransmakedistmac.sh\r
+      xie8transmakedist.bat\r
+      TUD/swift-spbackend-r1598/*\r
+      vlc-1.0.5-swarmplugin-switch-kcc-src-aug2010-r16968.patch (except bindings/python)\r
+\r
+    The following library modules are Copyright (c) 2008-2012, STMicroelectronics S.r.l.; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Kungliga Tekniska Högskolan (The Royal Institute of Technology); All rights reserved:\r
+      BaseLib/Core/DecentralizedTracking/kadtracker/*\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Markenfilm GmbH & Co. KG; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Radiotelevizija Slovenija Javni Zavvod Ljubljana; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Kendra Foundation; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Universitaet Klagenfurt; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, AG Projects; All rights reserved:\r
+      AGP/sipsimple-20100621.tgz\r
+      \r
+    The following library modules are Copyright (c) 2008-2012, The British Broadcasting Corporation; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Pioneer Digital Design Centre Limited; All rights reserved:\r
+  \r
+    The following library modules are Copyright (c) 2008-2012, INSTITUT FUER RUNDFUNKTECHNIK GMBH; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Fabchannel BV; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, University Politehnica Bucharest; All rights reserved:\r
+      BaseLib/Core/ProxyService/*\r
+      BaseLib/Tools/proxy-cmdline.py\r
+      BaseLib/Test/test_proxyservice_as_coord.bat\r
+      BaseLib/Test/test_proxyservice_as_coord.py\r
+      BaseLib/Test/test_proxyservice_as_coord.sh\r
+      BaseLib/Test/test_proxyservice.bat\r
+      BaseLib/Test/test_proxyservice.py\r
+      BaseLib/Test/test_proxyservice.sh\r
+      BaseLib/Test/extend_hs_dir/proxyservice.test.torrent\r
+      \r
+\r
+    The following library modules are Copyright (c) 2008-2012, EBU-UER; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Università di Roma Sapienza; All rights reserved:\r
+\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    VTT Technical Research Centre of Finland, \r
+    Tekniikankatu 1, \r
+    FIN-33710 Tampere, \r
+    Finland\r
+\r
+    Norut AS,\r
+    Postboks 6434 \r
+    Forskningsparken, \r
+    9294 Tromsø,\r
+    Norway\r
+\r
+    DACC Systems AB\r
+    Glimmervägen 4, \r
+    SE18734, Täby,\r
+    Sweden\r
+\r
+    Lancaster University, \r
+    University House, \r
+    Bailrigg, Lancaster, LA1 4YW\r
+    United Kingdom\r
+\r
+    Jožef Stefan Institute, \r
+    Jamova cesta 39, \r
+    1000 Ljubljana, \r
+    Slovenia\r
+\r
+    First Oversi Ltd.,\r
+    Rishon Lezion 1,\r
+    Petah Tikva 49723, \r
+    Israel\r
+\r
+    TECHNISCHE UNIVERSITEIT DELFT, \r
+    Faculty of Electrical Engineering, Mathematics and Computer Science, \r
+    Mekelweg 4, \r
+    2628 CD Delft, \r
+    The Netherlands\r
+\r
+    STMicroelectronics S.r.l., \r
+    via C.Olivetti 2, \r
+    I-20041 Agrate Brianza,\r
+    Italy\r
+\r
+    Kungliga Tekniska Högskolan (The Royal Institute of Technology), \r
+    KTH/ICT/ECS/TSLab\r
+    Electrum 229\r
+    164 40 Kista\r
+    Sweden\r
+\r
+    Markenfilm GmbH & Co. KG, \r
+    Schulauer Moorweg 25, \r
+    22880 Wedel, \r
+    Germany\r
+\r
+    Radiotelevizija Slovenija Javni Zavvod Ljubljana, \r
+    Kolodvorska 2, \r
+    SI-1000 Ljubljana,\r
+    Slovenia\r
+\r
+\r
+    Kendra Foundation, \r
+    Meadow Barn, Holne, \r
+    Newton Abbot, Devon, TQ13 7SP,\r
+    United Kingdom\r
+\r
+\r
+    Universitaet Klagenfurt, \r
+    Universitaetstrasse 65-67, \r
+    9020 Klagenfurt, \r
+    Austria\r
+\r
+    AG Projects, \r
+    Dr. Leijdsstraat 92,\r
+    2021RK Haarlem, \r
+    The Netherlands\r
+\r
+    The British Broadcasting Corporation,\r
+    Broadcasting House, Portland Place, \r
+    London, W1A 1AA \r
+    United Kingdom\r
+\r
+    Pioneer Digital Design Centre Limited, \r
+    Pioneer House, Hollybush Hill, Stoke Poges, \r
+    Slough, SL2 4QP\r
+    United Kingdom\r
+\r
+    INSTITUT FUER RUNDFUNKTECHNIK GMBH\r
+    Floriansmuehlstrasse 60,\r
+    80939 München, \r
+    Germany\r
+\r
+    Fabchannel BV, \r
+    Kleine-Gartmanplantsoen 21, \r
+    1017 RP Amsterdam, \r
+    The Netherlands\r
+\r
+    University Politehnica Bucharest, \r
+    313 Splaiul Independentei, \r
+    District 6, cod 060042, Bucharest,\r
+    Romania\r
+\r
+    EBU-UER, \r
+    L'Ancienne Route 17A, 1218\r
+    Grand Saconnex - Geneva, \r
+    Switzerland\r
+\r
+    Università di Roma Sapienza\r
+    Dipartimento di Informatica e Sistemistica (DIS),\r
+    Via Ariosto 25, \r
+    00185 Rome, \r
+    Italy\r
+\r
+\r
+------------------------------------------------------------------------------\r
+\r
+    BaseLib content-delivery library.\r
+\r
+    Development of the BaseLib library was supported by various research \r
+    grants:\r
+\r
+     - BSIK Freeband Communication I-Share project (Dutch Ministry of Economic \r
+       Affairs)\r
+     - Netherlands Organisation for Scientific Research (NWO) grant 612.060.215\r
+     - Dutch Technology Foundation STW: Veni project DTC.7299\r
+     - European Community's Sixth Framework Programme in the P2P-FUSION project\r
+       under contract no 035249.\r
\r
+    The following library modules are Copyright (c) 2005-2010,\r
+    Delft University of Technology and Vrije Universiteit Amsterdam; \r
+    All rights reserved.\r
+\r
+      BaseLib/*\r
+\r
+    This library is free software; you can redistribute it and/or\r
+    modify it under the terms of the GNU Lesser General Public\r
+    License as published by the Free Software Foundation; either\r
+    version 2.1 of the License, or (at your option) any later version.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    Delft University of Technology\r
+    Postbus 5\r
+    2600 AA Delft\r
+    The Netherlands\r
+    \r
+    Vrije Universiteit\r
+    De Boelelaan 1105\r
+    1081 HV Amsterdam\r
+    The Netherlands\r
+\r
+\r
\r
+-------------------------------------------------------------------------------\r
+\r
+    BuddyCast4 content-recommendation library.\r
+\r
+    The research leading to this library has received funding from the\r
+    European Community's Seventh Framework Programme [FP7/2007-2011] \r
+    in the Petamedia project under grant agreement no. 216444\r
+\r
+    The following library modules are Copyright (c) 2008-2010,\r
+    Delft University of Technology and Technische Universität Berlin; \r
+    All rights reserved.\r
+\r
+      BaseLib/Core/BuddyCast/buddycast.py\r
+\r
+    The following library modules are Copyright (c) 2008-2010,\r
+    Technische Universität Berlin; \r
+    All rights reserved.\r
+\r
+      BaseLib/Core/Search/Reranking.py\r
+      BaseLib/Test/test_buddycast4.py\r
+      BaseLib/Test/test_buddycast4_stresstest.py\r
+      \r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    Delft University of Technology\r
+    Postbus 5\r
+    2600 AA Delft\r
+    The Netherlands\r
+    \r
+    Technische Universität Berlin\r
+    Strasse des 17. Juni 135\r
+    10623 Berlin\r
+    Germany\r
+\r
+\r
+------------------------------------------------------------------------------\r
+\r
+    SwarmTransport/SwarmPlayer Firefox library.\r
+\r
+    The research leading to this library has received funding from the European\r
+    Community's Seventh Framework Programme in the P2P-Next project under grant\r
+    agreement no 216217.\r
+\r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+\r
+    The following library modules are Copyright (c) 2008-2012, TECHNISCHE UNIVERSITEIT DELFT; \r
+    and Jan Gerber; All rights reserved:\r
+      BaseLib/Transport/tribeIChannel.idl\r
+      BaseLib/Transport/tribeISwarmTransport.idl\r
+      BaseLib/Transport/components/TribeChannel.js\r
+      BaseLib/Transport/components/TribeProtocolHandler.js\r
+      BaseLib/Transport/components/SwarmTransport.js\r
+      BaseLib/Transport/install.rdf\r
+      BaseLib/Transport/chrome.manifest\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    TECHNISCHE UNIVERSITEIT DELFT, \r
+    Faculty of Electrical Engineering, Mathematics and Computer Science, \r
+    Mekelweg 4, \r
+    2628 CD Delft, \r
+    The Netherlands\r
+    \r
+    Jan Gerber\r
+    j@thing.net\r
+\r
+-------------------------------------------------------------------------------\r
+\r
+Unless otherwise noted, all files written by Bram Cohen, John Hoffman, Petru \r
+Paler, Uoti Urpala, Ross Cohen, Tim Tucker, Choopan RATTANAPOKA, Yejun Yang,\r
+Myers Carpenter, Bill Bumgarner, Henry 'Pi' James, Loring Holden, \r
+Dustin Pate ("noirsoldats@codemeu.com"), kratoak5, Roee Shlomo, Greg Fleming, \r
+N. Goldmann ("Pir4nhaX,www.clanyakuza.com"), and Michel Hartmann is released\r
+under the MIT license, exceptions contain licensing information in them.\r
+\r
+Copyright (C) 2001-2002 Bram Cohen\r
+\r
+Permission is hereby granted, free of charge, to any person\r
+obtaining a copy of this software and associated documentation files\r
+(the "Software"), to deal in the Software without restriction,\r
+including without limitation the rights to use, copy, modify, merge,\r
+publish, distribute, sublicense, and/or sell copies of the Software,\r
+and to permit persons to whom the Software is furnished to do so,\r
+subject to the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be\r
+included in all copies or substantial portions of the Software.\r
+\r
+The Software is provided "AS IS", without warranty of any kind,\r
+express or implied, including but not limited to the warranties of\r
+merchantability,  fitness for a particular purpose and\r
+noninfringement. In no event shall the  authors or copyright holders\r
+be liable for any claim, damages or other liability, whether in an\r
+action of contract, tort or otherwise, arising from, out of or in\r
+connection with the Software or the use or other dealings in the\r
+Software.\r
+\r
+-------------------------------------------------------------------------------\r
+\r
+                  GNU LESSER GENERAL PUBLIC LICENSE\r
+                       Version 2.1, February 1999\r
+\r
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.\r
+     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+ Everyone is permitted to copy and distribute verbatim copies\r
+ of this license document, but changing it is not allowed.\r
+\r
+[This is the first released version of the Lesser GPL.  It also counts\r
+ as the successor of the GNU Library Public License, version 2, hence\r
+ the version number 2.1.]\r
+\r
+                            Preamble\r
+\r
+  The licenses for most software are designed to take away your\r
+freedom to share and change it.  By contrast, the GNU General Public\r
+Licenses are intended to guarantee your freedom to share and change\r
+free software--to make sure the software is free for all its users.\r
+\r
+  This license, the Lesser General Public License, applies to some\r
+specially designated software packages--typically libraries--of the\r
+Free Software Foundation and other authors who decide to use it.  You\r
+can use it too, but we suggest you first think carefully about whether\r
+this license or the ordinary General Public License is the better\r
+strategy to use in any particular case, based on the explanations below.\r
+\r
+  When we speak of free software, we are referring to freedom of use,\r
+not price.  Our General Public Licenses are designed to make sure that\r
+you have the freedom to distribute copies of free software (and charge\r
+for this service if you wish); that you receive source code or can get\r
+it if you want it; that you can change the software and use pieces of\r
+it in new free programs; and that you are informed that you can do\r
+these things.\r
+\r
+  To protect your rights, we need to make restrictions that forbid\r
+distributors to deny you these rights or to ask you to surrender these\r
+rights.  These restrictions translate to certain responsibilities for\r
+you if you distribute copies of the library or if you modify it.\r
+\r
+  For example, if you distribute copies of the library, whether gratis\r
+or for a fee, you must give the recipients all the rights that we gave\r
+you.  You must make sure that they, too, receive or can get the source\r
+code.  If you link other code with the library, you must provide\r
+complete object files to the recipients, so that they can relink them\r
+with the library after making changes to the library and recompiling\r
+it.  And you must show them these terms so they know their rights.\r
+\r
+  We protect your rights with a two-step method: (1) we copyright the\r
+library, and (2) we offer you this license, which gives you legal\r
+permission to copy, distribute and/or modify the library.\r
+\r
+  To protect each distributor, we want to make it very clear that\r
+there is no warranty for the free library.  Also, if the library is\r
+modified by someone else and passed on, the recipients should know\r
+that what they have is not the original version, so that the original\r
+author's reputation will not be affected by problems that might be\r
+introduced by others.\r
+\r
+\r
+  Finally, software patents pose a constant threat to the existence of\r
+any free program.  We wish to make sure that a company cannot\r
+effectively restrict the users of a free program by obtaining a\r
+restrictive license from a patent holder.  Therefore, we insist that\r
+any patent license obtained for a version of the library must be\r
+consistent with the full freedom of use specified in this license.\r
+\r
+  Most GNU software, including some libraries, is covered by the\r
+ordinary GNU General Public License.  This license, the GNU Lesser\r
+General Public License, applies to certain designated libraries, and\r
+is quite different from the ordinary General Public License.  We use\r
+this license for certain libraries in order to permit linking those\r
+libraries into non-free programs.\r
+\r
+  When a program is linked with a library, whether statically or using\r
+a shared library, the combination of the two is legally speaking a\r
+combined work, a derivative of the original library.  The ordinary\r
+General Public License therefore permits such linking only if the\r
+entire combination fits its criteria of freedom.  The Lesser General\r
+Public License permits more lax criteria for linking other code with\r
+the library.\r
+\r
+  We call this license the "Lesser" General Public License because it\r
+does Less to protect the user's freedom than the ordinary General\r
+Public License.  It also provides other free software developers Less\r
+of an advantage over competing non-free programs.  These disadvantages\r
+are the reason we use the ordinary General Public License for many\r
+libraries.  However, the Lesser license provides advantages in certain\r
+special circumstances.\r
+\r
+  For example, on rare occasions, there may be a special need to\r
+encourage the widest possible use of a certain library, so that it becomes\r
+a de-facto standard.  To achieve this, non-free programs must be\r
+allowed to use the library.  A more frequent case is that a free\r
+library does the same job as widely used non-free libraries.  In this\r
+case, there is little to gain by limiting the free library to free\r
+software only, so we use the Lesser General Public License.\r
+\r
+  In other cases, permission to use a particular library in non-free\r
+programs enables a greater number of people to use a large body of\r
+free software.  For example, permission to use the GNU C Library in\r
+non-free programs enables many more people to use the whole GNU\r
+operating system, as well as its variant, the GNU/Linux operating\r
+system.\r
+\r
+  Although the Lesser General Public License is Less protective of the\r
+users' freedom, it does ensure that the user of a program that is\r
+linked with the Library has the freedom and the wherewithal to run\r
+that program using a modified version of the Library.\r
+\r
+  The precise terms and conditions for copying, distribution and\r
+modification follow.  Pay close attention to the difference between a\r
+"work based on the library" and a "work that uses the library".  The\r
+former contains code derived from the library, whereas the latter must\r
+be combined with the library in order to run.\r
+\r
+\r
+                  GNU LESSER GENERAL PUBLIC LICENSE\r
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r
+\r
+  0. This License Agreement applies to any software library or other\r
+program which contains a notice placed by the copyright holder or\r
+other authorized party saying it may be distributed under the terms of\r
+this Lesser General Public License (also called "this License").\r
+Each licensee is addressed as "you".\r
+\r
+  A "library" means a collection of software functions and/or data\r
+prepared so as to be conveniently linked with application programs\r
+(which use some of those functions and data) to form executables.\r
+\r
+  The "Library", below, refers to any such software library or work\r
+which has been distributed under these terms.  A "work based on the\r
+Library" means either the Library or any derivative work under\r
+copyright law: that is to say, a work containing the Library or a\r
+portion of it, either verbatim or with modifications and/or translated\r
+straightforwardly into another language.  (Hereinafter, translation is\r
+included without limitation in the term "modification".)\r
+\r
+  "Source code" for a work means the preferred form of the work for\r
+making modifications to it.  For a library, complete source code means\r
+all the source code for all modules it contains, plus any associated\r
+interface definition files, plus the scripts used to control compilation\r
+and installation of the library.\r
+\r
+  Activities other than copying, distribution and modification are not\r
+covered by this License; they are outside its scope.  The act of\r
+running a program using the Library is not restricted, and output from\r
+such a program is covered only if its contents constitute a work based\r
+on the Library (independent of the use of the Library in a tool for\r
+writing it).  Whether that is true depends on what the Library does\r
+and what the program that uses the Library does.\r
+  \r
+  1. You may copy and distribute verbatim copies of the Library's\r
+complete source code as you receive it, in any medium, provided that\r
+you conspicuously and appropriately publish on each copy an\r
+appropriate copyright notice and disclaimer of warranty; keep intact\r
+all the notices that refer to this License and to the absence of any\r
+warranty; and distribute a copy of this License along with the\r
+Library.\r
+\r
+  You may charge a fee for the physical act of transferring a copy,\r
+and you may at your option offer warranty protection in exchange for a\r
+fee.\r
+\r
+\r
+  2. You may modify your copy or copies of the Library or any portion\r
+of it, thus forming a work based on the Library, and copy and\r
+distribute such modifications or work under the terms of Section 1\r
+above, provided that you also meet all of these conditions:\r
+\r
+    a) The modified work must itself be a software library.\r
+\r
+    b) You must cause the files modified to carry prominent notices\r
+    stating that you changed the files and the date of any change.\r
+\r
+    c) You must cause the whole of the work to be licensed at no\r
+    charge to all third parties under the terms of this License.\r
+\r
+    d) If a facility in the modified Library refers to a function or a\r
+    table of data to be supplied by an application program that uses\r
+    the facility, other than as an argument passed when the facility\r
+    is invoked, then you must make a good faith effort to ensure that,\r
+    in the event an application does not supply such function or\r
+    table, the facility still operates, and performs whatever part of\r
+    its purpose remains meaningful.\r
+\r
+    (For example, a function in a library to compute square roots has\r
+    a purpose that is entirely well-defined independent of the\r
+    application.  Therefore, Subsection 2d requires that any\r
+    application-supplied function or table used by this function must\r
+    be optional: if the application does not supply it, the square\r
+    root function must still compute square roots.)\r
+\r
+These requirements apply to the modified work as a whole.  If\r
+identifiable sections of that work are not derived from the Library,\r
+and can be reasonably considered independent and separate works in\r
+themselves, then this License, and its terms, do not apply to those\r
+sections when you distribute them as separate works.  But when you\r
+distribute the same sections as part of a whole which is a work based\r
+on the Library, the distribution of the whole must be on the terms of\r
+this License, whose permissions for other licensees extend to the\r
+entire whole, and thus to each and every part regardless of who wrote\r
+it.\r
+\r
+Thus, it is not the intent of this section to claim rights or contest\r
+your rights to work written entirely by you; rather, the intent is to\r
+exercise the right to control the distribution of derivative or\r
+collective works based on the Library.\r
+\r
+In addition, mere aggregation of another work not based on the Library\r
+with the Library (or with a work based on the Library) on a volume of\r
+a storage or distribution medium does not bring the other work under\r
+the scope of this License.\r
+\r
+  3. You may opt to apply the terms of the ordinary GNU General Public\r
+License instead of this License to a given copy of the Library.  To do\r
+this, you must alter all the notices that refer to this License, so\r
+that they refer to the ordinary GNU General Public License, version 2,\r
+instead of to this License.  (If a newer version than version 2 of the\r
+ordinary GNU General Public License has appeared, then you can specify\r
+that version instead if you wish.)  Do not make any other change in\r
+these notices.\r
+\r
+\r
+  Once this change is made in a given copy, it is irreversible for\r
+that copy, so the ordinary GNU General Public License applies to all\r
+subsequent copies and derivative works made from that copy.\r
+\r
+  This option is useful when you wish to copy part of the code of\r
+the Library into a program that is not a library.\r
+\r
+  4. You may copy and distribute the Library (or a portion or\r
+derivative of it, under Section 2) in object code or executable form\r
+under the terms of Sections 1 and 2 above provided that you accompany\r
+it with the complete corresponding machine-readable source code, which\r
+must be distributed under the terms of Sections 1 and 2 above on a\r
+medium customarily used for software interchange.\r
+\r
+  If distribution of object code is made by offering access to copy\r
+from a designated place, then offering equivalent access to copy the\r
+source code from the same place satisfies the requirement to\r
+distribute the source code, even though third parties are not\r
+compelled to copy the source along with the object code.\r
+\r
+  5. A program that contains no derivative of any portion of the\r
+Library, but is designed to work with the Library by being compiled or\r
+linked with it, is called a "work that uses the Library".  Such a\r
+work, in isolation, is not a derivative work of the Library, and\r
+therefore falls outside the scope of this License.\r
+\r
+  However, linking a "work that uses the Library" with the Library\r
+creates an executable that is a derivative of the Library (because it\r
+contains portions of the Library), rather than a "work that uses the\r
+library".  The executable is therefore covered by this License.\r
+Section 6 states terms for distribution of such executables.\r
+\r
+  When a "work that uses the Library" uses material from a header file\r
+that is part of the Library, the object code for the work may be a\r
+derivative work of the Library even though the source code is not.\r
+Whether this is true is especially significant if the work can be\r
+linked without the Library, or if the work is itself a library.  The\r
+threshold for this to be true is not precisely defined by law.\r
+\r
+  If such an object file uses only numerical parameters, data\r
+structure layouts and accessors, and small macros and small inline\r
+functions (ten lines or less in length), then the use of the object\r
+file is unrestricted, regardless of whether it is legally a derivative\r
+work.  (Executables containing this object code plus portions of the\r
+Library will still fall under Section 6.)\r
+\r
+  Otherwise, if the work is a derivative of the Library, you may\r
+distribute the object code for the work under the terms of Section 6.\r
+Any executables containing that work also fall under Section 6,\r
+whether or not they are linked directly with the Library itself.\r
+\r
+\r
+  6. As an exception to the Sections above, you may also combine or\r
+link a "work that uses the Library" with the Library to produce a\r
+work containing portions of the Library, and distribute that work\r
+under terms of your choice, provided that the terms permit\r
+modification of the work for the customer's own use and reverse\r
+engineering for debugging such modifications.\r
+\r
+  You must give prominent notice with each copy of the work that the\r
+Library is used in it and that the Library and its use are covered by\r
+this License.  You must supply a copy of this License.  If the work\r
+during execution displays copyright notices, you must include the\r
+copyright notice for the Library among them, as well as a reference\r
+directing the user to the copy of this License.  Also, you must do one\r
+of these things:\r
+\r
+    a) Accompany the work with the complete corresponding\r
+    machine-readable source code for the Library including whatever\r
+    changes were used in the work (which must be distributed under\r
+    Sections 1 and 2 above); and, if the work is an executable linked\r
+    with the Library, with the complete machine-readable "work that\r
+    uses the Library", as object code and/or source code, so that the\r
+    user can modify the Library and then relink to produce a modified\r
+    executable containing the modified Library.  (It is understood\r
+    that the user who changes the contents of definitions files in the\r
+    Library will not necessarily be able to recompile the application\r
+    to use the modified definitions.)\r
+\r
+    b) Use a suitable shared library mechanism for linking with the\r
+    Library.  A suitable mechanism is one that (1) uses at run time a\r
+    copy of the library already present on the user's computer system,\r
+    rather than copying library functions into the executable, and (2)\r
+    will operate properly with a modified version of the library, if\r
+    the user installs one, as long as the modified version is\r
+    interface-compatible with the version that the work was made with.\r
+\r
+    c) Accompany the work with a written offer, valid for at\r
+    least three years, to give the same user the materials\r
+    specified in Subsection 6a, above, for a charge no more\r
+    than the cost of performing this distribution.\r
+\r
+    d) If distribution of the work is made by offering access to copy\r
+    from a designated place, offer equivalent access to copy the above\r
+    specified materials from the same place.\r
+\r
+    e) Verify that the user has already received a copy of these\r
+    materials or that you have already sent this user a copy.\r
+\r
+  For an executable, the required form of the "work that uses the\r
+Library" must include any data and utility programs needed for\r
+reproducing the executable from it.  However, as a special exception,\r
+the materials to be distributed need not include anything that is\r
+normally distributed (in either source or binary form) with the major\r
+components (compiler, kernel, and so on) of the operating system on\r
+which the executable runs, unless that component itself accompanies\r
+the executable.\r
+\r
+  It may happen that this requirement contradicts the license\r
+restrictions of other proprietary libraries that do not normally\r
+accompany the operating system.  Such a contradiction means you cannot\r
+use both them and the Library together in an executable that you\r
+distribute.\r
+\r
+\r
+  7. You may place library facilities that are a work based on the\r
+Library side-by-side in a single library together with other library\r
+facilities not covered by this License, and distribute such a combined\r
+library, provided that the separate distribution of the work based on\r
+the Library and of the other library facilities is otherwise\r
+permitted, and provided that you do these two things:\r
+\r
+    a) Accompany the combined library with a copy of the same work\r
+    based on the Library, uncombined with any other library\r
+    facilities.  This must be distributed under the terms of the\r
+    Sections above.\r
+\r
+    b) Give prominent notice with the combined library of the fact\r
+    that part of it is a work based on the Library, and explaining\r
+    where to find the accompanying uncombined form of the same work.\r
+\r
+  8. You may not copy, modify, sublicense, link with, or distribute\r
+the Library except as expressly provided under this License.  Any\r
+attempt otherwise to copy, modify, sublicense, link with, or\r
+distribute the Library is void, and will automatically terminate your\r
+rights under this License.  However, parties who have received copies,\r
+or rights, from you under this License will not have their licenses\r
+terminated so long as such parties remain in full compliance.\r
+\r
+  9. You are not required to accept this License, since you have not\r
+signed it.  However, nothing else grants you permission to modify or\r
+distribute the Library or its derivative works.  These actions are\r
+prohibited by law if you do not accept this License.  Therefore, by\r
+modifying or distributing the Library (or any work based on the\r
+Library), you indicate your acceptance of this License to do so, and\r
+all its terms and conditions for copying, distributing or modifying\r
+the Library or works based on it.\r
+\r
+  10. Each time you redistribute the Library (or any work based on the\r
+Library), the recipient automatically receives a license from the\r
+original licensor to copy, distribute, link with or modify the Library\r
+subject to these terms and conditions.  You may not impose any further\r
+restrictions on the recipients' exercise of the rights granted herein.\r
+You are not responsible for enforcing compliance by third parties with\r
+this License.\r
+\r
+\r
+  11. If, as a consequence of a court judgment or allegation of patent\r
+infringement or for any other reason (not limited to patent issues),\r
+conditions are imposed on you (whether by court order, agreement or\r
+otherwise) that contradict the conditions of this License, they do not\r
+excuse you from the conditions of this License.  If you cannot\r
+distribute so as to satisfy simultaneously your obligations under this\r
+License and any other pertinent obligations, then as a consequence you\r
+may not distribute the Library at all.  For example, if a patent\r
+license would not permit royalty-free redistribution of the Library by\r
+all those who receive copies directly or indirectly through you, then\r
+the only way you could satisfy both it and this License would be to\r
+refrain entirely from distribution of the Library.\r
+\r
+If any portion of this section is held invalid or unenforceable under any\r
+particular circumstance, the balance of the section is intended to apply,\r
+and the section as a whole is intended to apply in other circumstances.\r
+\r
+It is not the purpose of this section to induce you to infringe any\r
+patents or other property right claims or to contest validity of any\r
+such claims; this section has the sole purpose of protecting the\r
+integrity of the free software distribution system which is\r
+implemented by public license practices.  Many people have made\r
+generous contributions to the wide range of software distributed\r
+through that system in reliance on consistent application of that\r
+system; it is up to the author/donor to decide if he or she is willing\r
+to distribute software through any other system and a licensee cannot\r
+impose that choice.\r
+\r
+This section is intended to make thoroughly clear what is believed to\r
+be a consequence of the rest of this License.\r
+\r
+  12. If the distribution and/or use of the Library is restricted in\r
+certain countries either by patents or by copyrighted interfaces, the\r
+original copyright holder who places the Library under this License may add\r
+an explicit geographical distribution limitation excluding those countries,\r
+so that distribution is permitted only in or among countries not thus\r
+excluded.  In such case, this License incorporates the limitation as if\r
+written in the body of this License.\r
+\r
+  13. The Free Software Foundation may publish revised and/or new\r
+versions of the Lesser General Public License from time to time.\r
+Such new versions will be similar in spirit to the present version,\r
+but may differ in detail to address new problems or concerns.\r
+\r
+Each version is given a distinguishing version number.  If the Library\r
+specifies a version number of this License which applies to it and\r
+"any later version", you have the option of following the terms and\r
+conditions either of that version or of any later version published by\r
+the Free Software Foundation.  If the Library does not specify a\r
+license version number, you may choose any version ever published by\r
+the Free Software Foundation.\r
+\r
+\r
+  14. If you wish to incorporate parts of the Library into other free\r
+programs whose distribution conditions are incompatible with these,\r
+write to the author to ask for permission.  For software which is\r
+copyrighted by the Free Software Foundation, write to the Free\r
+Software Foundation; we sometimes make exceptions for this.  Our\r
+decision will be guided by the two goals of preserving the free status\r
+of all derivatives of our free software and of promoting the sharing\r
+and reuse of software generally.\r
+\r
+                            NO WARRANTY\r
+\r
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO\r
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.\r
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR\r
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY\r
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\r
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE\r
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME\r
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\r
+\r
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN\r
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY\r
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU\r
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR\r
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE\r
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING\r
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A\r
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF\r
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH\r
+DAMAGES.\r
+\r
+                     END OF TERMS AND CONDITIONS\r
+\r
+\r
+           How to Apply These Terms to Your New Libraries\r
+\r
+  If you develop a new library, and you want it to be of the greatest\r
+possible use to the public, we recommend making it free software that\r
+everyone can redistribute and change.  You can do so by permitting\r
+redistribution under these terms (or, alternatively, under the terms of the\r
+ordinary General Public License).\r
+\r
+  To apply these terms, attach the following notices to the library.  It is\r
+safest to attach them to the start of each source file to most effectively\r
+convey the exclusion of warranty; and each file should have at least the\r
+"copyright" line and a pointer to where the full notice is found.\r
+\r
+    <one line to give the library's name and a brief idea of what it does.>\r
+    Copyright (C) <year>  <name of author>\r
+\r
+    This library is free software; you can redistribute it and/or\r
+    modify it under the terms of the GNU Lesser General Public\r
+    License as published by the Free Software Foundation; either\r
+    version 2.1 of the License, or (at your option) any later version.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+Also add information on how to contact you by electronic and paper mail.\r
+\r
+You should also get your employer (if you work as a programmer) or your\r
+school, if any, to sign a "copyright disclaimer" for the library, if\r
+necessary.  Here is a sample; alter the names:\r
+\r
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the\r
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.\r
+\r
+  <signature of Ty Coon>, 1 April 1990\r
+  Ty Coon, President of Vice\r
+\r
+That's all there is to it!\r
+\r
+-------------------------------------------------------------------------------\r
+\r
diff --git a/instrumentation/next-share/BaseLib/schema_sdb_v5.sql b/instrumentation/next-share/BaseLib/schema_sdb_v5.sql
new file mode 100644 (file)
index 0000000..1b971ec
--- /dev/null
@@ -0,0 +1,450 @@
+-- Tribler SQLite Database
+-- Version: 5
+--
+-- History:
+--   v1: Published as part of Tribler 4.5
+--   v2: Published as part of Tribler 5.0
+--   v3: Published as part of Next-Share M16
+--   v4: Published as part of Tribler 5.2
+--   v5: Published as part of Next-Share M30 for subtitles integration
+
+-- 
+-- See Tribler/Core/CacheDB/sqlitecachedb.py updateDB() for exact version diffs.
+--
+-- v4: ChannelCast is an extension of the concept of ModerationCast, with 
+--     an additional integrity measure. 'torrenthash' field is used to protect 
+--     the integrity of the torrent file created by the publisher, from fake-
+--     tracker attack, by including sha1 hash of the dictionary corresponding 
+--     to the entire torrent.
+--
+--     'InvertedIndex' table is used for more precise keyword matching than 
+--     the substring search that was used previously.
+
+BEGIN TRANSACTION create_table;
+
+----------------------------------------
+
+CREATE TABLE BarterCast (
+  peer_id_from  integer,
+  peer_id_to    integer,
+  downloaded    numeric,
+  uploaded      numeric,
+  last_seen     numeric,
+  value         numeric
+);
+
+CREATE UNIQUE INDEX bartercast_idx
+  ON BarterCast
+  (peer_id_from, peer_id_to);
+
+----------------------------------------
+
+CREATE TABLE Category (
+  category_id    integer PRIMARY KEY NOT NULL,
+  name           text NOT NULL,
+  description    text
+);
+
+----------------------------------------
+
+CREATE TABLE MyInfo (
+  entry  PRIMARY KEY,
+  value  text
+);
+
+----------------------------------------
+
+CREATE TABLE MyPreference (
+  torrent_id     integer PRIMARY KEY NOT NULL,
+  destination_path text NOT NULL,
+  progress       numeric,
+  creation_time  integer NOT NULL,
+  -- V2: Patch for BuddyCast 4
+  click_position INTEGER DEFAULT -1,
+  reranking_strategy INTEGER DEFAULT -1
+);
+
+----------------------------------------
+
+CREATE TABLE Peer (
+  peer_id              integer PRIMARY KEY AUTOINCREMENT NOT NULL,
+  permid               text NOT NULL,
+  name                 text,
+  ip                   text,
+  port                 integer,
+  thumbnail            text,
+  oversion             integer,
+  similarity           numeric DEFAULT 0,
+  friend               integer DEFAULT 0,
+  superpeer            integer DEFAULT 0,
+  last_seen            numeric DEFAULT 0,
+  last_connected       numeric,
+  last_buddycast       numeric,
+  connected_times      integer DEFAULT 0,
+  buddycast_times      integer DEFAULT 0,
+  num_peers            integer,
+  num_torrents         integer,
+  num_prefs            integer,
+  num_queries          integer,
+  -- V3: Addition for local peer discovery
+  is_local            integer DEFAULT 0
+);
+
+CREATE UNIQUE INDEX permid_idx
+  ON Peer
+  (permid);
+
+CREATE INDEX Peer_name_idx
+  ON Peer
+  (name);
+
+CREATE INDEX Peer_ip_idx
+  ON Peer
+  (ip);
+
+CREATE INDEX Peer_similarity_idx
+  ON Peer
+  (similarity);
+
+CREATE INDEX Peer_last_seen_idx
+  ON Peer
+  (last_seen);
+
+CREATE INDEX Peer_last_connected_idx
+  ON Peer
+  (last_connected);
+
+CREATE INDEX Peer_num_peers_idx
+  ON Peer
+  (num_peers);
+
+CREATE INDEX Peer_num_torrents_idx
+  ON Peer
+  (num_torrents);
+
+----------------------------------------
+
+CREATE TABLE Preference (
+  peer_id     integer NOT NULL,
+  torrent_id  integer NOT NULL,
+  -- V2: Patch for BuddyCast 4
+  click_position INTEGER DEFAULT -1,
+  reranking_strategy INTEGER DEFAULT -1
+);
+
+CREATE INDEX Preference_peer_id_idx
+  ON Preference
+  (peer_id);
+
+CREATE INDEX Preference_torrent_id_idx
+  ON Preference
+  (torrent_id);
+
+CREATE UNIQUE INDEX pref_idx
+  ON Preference
+  (peer_id, torrent_id);
+
+----------------------------------------
+
+CREATE TABLE Torrent (
+  torrent_id       integer PRIMARY KEY AUTOINCREMENT NOT NULL,
+  infohash                text NOT NULL,
+  name             text,
+  torrent_file_name text,
+  length           integer,
+  creation_date    integer,
+  num_files        integer,
+  thumbnail        integer,
+  insert_time      numeric,
+  secret           integer,
+  relevance        numeric DEFAULT 0,
+  source_id        integer,
+  category_id      integer,
+  status_id        integer,
+  num_seeders      integer,
+  num_leechers     integer,
+  comment          text
+);
+
+CREATE UNIQUE INDEX infohash_idx
+  ON Torrent
+  (infohash);
+
+CREATE INDEX Torrent_length_idx
+  ON Torrent
+  (length);
+
+CREATE INDEX Torrent_creation_date_idx
+  ON Torrent
+  (creation_date);
+
+CREATE INDEX Torrent_relevance_idx
+  ON Torrent
+  (relevance);
+
+CREATE INDEX Torrent_num_seeders_idx
+  ON Torrent
+  (num_seeders);
+
+CREATE INDEX Torrent_num_leechers_idx
+  ON Torrent
+  (num_leechers);
+
+CREATE INDEX Torrent_name_idx 
+  ON Torrent
+  (name);
+
+----------------------------------------
+
+CREATE TABLE TorrentSource (
+  source_id    integer PRIMARY KEY NOT NULL,
+  name         text NOT NULL,
+  description  text
+);
+
+CREATE UNIQUE INDEX torrent_source_idx
+  ON TorrentSource
+  (name);
+
+----------------------------------------
+
+CREATE TABLE TorrentStatus (
+  status_id    integer PRIMARY KEY NOT NULL,
+  name         text NOT NULL,
+  description  text
+);
+
+----------------------------------------
+
+CREATE TABLE TorrentTracker (
+  torrent_id   integer NOT NULL,
+  tracker      text NOT NULL,
+  announce_tier    integer,
+  ignored_times    integer,
+  retried_times    integer,
+  last_check       numeric
+);
+
+CREATE UNIQUE INDEX torrent_tracker_idx
+  ON TorrentTracker
+  (torrent_id, tracker);
+  
+----------------------------------------
+
+CREATE VIEW SuperPeer AS SELECT * FROM Peer WHERE superpeer=1;
+
+CREATE VIEW Friend AS SELECT * FROM Peer WHERE friend=1;
+
+CREATE VIEW CollectedTorrent AS SELECT * FROM Torrent WHERE torrent_file_name IS NOT NULL;
+
+
+-- V2: Patch for VoteCast
+            
+CREATE TABLE VoteCast (
+mod_id text,
+voter_id text,
+vote integer,
+time_stamp integer
+);
+
+CREATE INDEX mod_id_idx
+on VoteCast 
+(mod_id);
+
+CREATE INDEX voter_id_idx
+on VoteCast 
+(voter_id);
+
+CREATE UNIQUE INDEX votecast_idx
+ON VoteCast
+(mod_id, voter_id);
+
+
+-- V2: Patch for BuddyCast 4
+
+CREATE TABLE ClicklogSearch (
+                     peer_id INTEGER DEFAULT 0,
+                     torrent_id INTEGER DEFAULT 0,
+                     term_id INTEGER DEFAULT 0,
+                     term_order INTEGER DEFAULT 0
+                     );
+CREATE INDEX idx_search_term ON ClicklogSearch (term_id);
+CREATE INDEX idx_search_torrent ON ClicklogSearch (torrent_id);
+
+CREATE TABLE ClicklogTerm (
+                    term_id INTEGER PRIMARY KEY AUTOINCREMENT DEFAULT 0,
+                    term VARCHAR(255) NOT NULL,
+                    times_seen INTEGER DEFAULT 0 NOT NULL
+                    );
+CREATE INDEX idx_terms_term ON ClicklogTerm(term);  
+
+
+
+
+
+--v4: Patch for BuddyCast 5. Adding Popularity table
+
+CREATE TABLE Popularity (
+                         torrent_id INTEGER,
+                         peer_id INTEGER,
+                         msg_receive_time NUMERIC,
+                         size_calc_age NUMERIC,
+                         num_seeders INTEGER DEFAULT 0,
+                         num_leechers INTEGER DEFAULT 0,
+                         num_of_sources INTEGER DEFAULT 0
+                     );
+
+CREATE INDEX Message_receive_time_idx 
+  ON Popularity 
+   (msg_receive_time);
+
+CREATE INDEX Size_calc_age_idx 
+  ON Popularity 
+   (size_calc_age);
+
+CREATE INDEX Number_of_seeders_idx 
+  ON Popularity 
+   (num_seeders);
+
+CREATE INDEX Number_of_leechers_idx 
+  ON Popularity 
+   (num_leechers);
+
+CREATE UNIQUE INDEX Popularity_idx
+  ON Popularity
+   (torrent_id, peer_id, msg_receive_time);
+
+
+
+-- v4: Patch for ChannelCast, Search
+
+CREATE TABLE ChannelCast (
+publisher_id text,
+publisher_name text,
+infohash text,
+torrenthash text,
+torrentname text,
+time_stamp integer,
+signature text
+);
+
+CREATE INDEX pub_id_idx
+on ChannelCast
+(publisher_id);
+
+CREATE INDEX pub_name_idx
+on ChannelCast
+(publisher_name);
+
+CREATE INDEX infohash_ch_idx
+on ChannelCast
+(infohash);
+
+----------------------------------------
+
+CREATE TABLE InvertedIndex (
+word               text NOT NULL,
+torrent_id         integer
+);
+
+CREATE INDEX word_idx
+on InvertedIndex
+(word);
+
+CREATE UNIQUE INDEX invertedindex_idx
+on InvertedIndex
+(word,torrent_id);
+--------------------------------------
+
+-- v5 Subtitles DB
+CREATE TABLE Metadata (
+  metadata_id integer PRIMARY KEY ASC AUTOINCREMENT NOT NULL,
+  publisher_id text NOT NULL,
+  infohash text NOT NULL,
+  description text,
+  timestamp integer NOT NULL,
+  signature text NOT NULL,
+  UNIQUE (publisher_id, infohash),
+  FOREIGN KEY (publisher_id, infohash) 
+    REFERENCES ChannelCast(publisher_id, infohash) 
+    ON DELETE CASCADE -- the fk constraint is not enforced by sqlite
+);
+
+CREATE INDEX infohash_md_idx
+on Metadata(infohash);
+
+CREATE INDEX pub_md_idx
+on Metadata(publisher_id);
+
+
+CREATE TABLE Subtitles (
+  metadata_id_fk integer,
+  subtitle_lang text NOT NULL,
+  subtitle_location text,
+  checksum text NOT NULL,
+  UNIQUE (metadata_id_fk,subtitle_lang),
+  FOREIGN KEY (metadata_id_fk) 
+    REFERENCES Metadata(metadata_id) 
+    ON DELETE CASCADE, -- the fk constraint is not enforced by sqlite
+  
+  -- ISO639-2 uses 3 characters for lang codes
+  CONSTRAINT lang_code_length 
+    CHECK ( length(subtitle_lang) == 3 ) 
+);
+
+
+CREATE INDEX metadata_sub_idx
+on Subtitles(metadata_id_fk);
+
+-- Stores the subtitles that peers have as an integer bitmask
+ CREATE TABLE SubtitlesHave (
+    metadata_id_fk integer,
+    peer_id text NOT NULL,
+    have_mask integer NOT NULL,
+    received_ts integer NOT NULL, --timestamp indicating when the mask was received
+    UNIQUE (metadata_id_fk, peer_id),
+    FOREIGN KEY (metadata_id_fk)
+      REFERENCES Metadata(metadata_id)
+      ON DELETE CASCADE, -- the fk constraint is not enforced by sqlite
+
+    -- 32 bit unsigned integer
+    CONSTRAINT have_mask_length
+      CHECK (have_mask >= 0 AND have_mask < 4294967296)
+);
+
+CREATE INDEX subtitles_have_idx
+on SubtitlesHave(metadata_id_fk);
+
+-- this index can boost queries
+-- ordered by timestamp on the SubtitlesHave DB
+CREATE INDEX subtitles_have_ts
+on SubtitlesHave(received_ts);
+
+-------------------------------------
+
+COMMIT TRANSACTION create_table;
+
+----------------------------------------
+
+BEGIN TRANSACTION init_values;
+
+INSERT INTO Category VALUES (1, 'Video', 'Video Files');
+INSERT INTO Category VALUES (2, 'VideoClips', 'Video Clips');
+INSERT INTO Category VALUES (3, 'Audio', 'Audio');
+INSERT INTO Category VALUES (4, 'Compressed', 'Compressed');
+INSERT INTO Category VALUES (5, 'Document', 'Documents');
+INSERT INTO Category VALUES (6, 'Picture', 'Pictures');
+INSERT INTO Category VALUES (7, 'xxx', 'XXX');
+INSERT INTO Category VALUES (8, 'other', 'Other');
+
+INSERT INTO TorrentStatus VALUES (0, 'unknown', NULL);
+INSERT INTO TorrentStatus VALUES (1, 'good', NULL);
+INSERT INTO TorrentStatus VALUES (2, 'dead', NULL);
+
+INSERT INTO TorrentSource VALUES (0, '', 'Unknown');
+INSERT INTO TorrentSource VALUES (1, 'BC', 'Received from other user');
+
+INSERT INTO MyInfo VALUES ('version', 5);
+
+COMMIT TRANSACTION init_values;
+
diff --git a/instrumentation/next-share/LICENSE.txt b/instrumentation/next-share/LICENSE.txt
new file mode 100644 (file)
index 0000000..ef95946
--- /dev/null
@@ -0,0 +1,970 @@
+------------------------------------------------------------------------------\r
+\r
+    Next-Share content-delivery library.\r
+\r
+    The research leading to this library has received funding from the European\r
+    Community's Seventh Framework Programme in the P2P-Next project under grant\r
+    agreement no 216217.\r
+\r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    The following library modules are Copyright (c) 2008-2012, VTT Technical Research Centre of Finland; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Norut AS; All rights reserved:\r
+      BaseLib/Core/Multicast/*\r
+      BaseLib/Core/Statistics/Status/*\r
+      BaseLib/Core/ClosedSwarm/*\r
+      BaseLib/Player/swarmplayer-njaal.py\r
+      BaseLib/Plugin/BackgroundProcess-njaal.py\r
+      BaseLib/Test/test_closedswarm.py\r
+      BaseLib/Test/test_status.py\r
+      BaseLib/Tools/createlivestream-njaal.py\r
+      BaseLib/Tools/createpoa.py\r
+      BaseLib/Tools/trial_poa_server.py\r
+      BaseLib/UPnP/*\r
+      BaseLib/Test/test_upnp.py\r
+\r
+    The following library modules are Copyright (c) 2008-2012, DACC Systems AB; All rights reserved:\r
+      DACC/transfer.php\r
\r
+    The following library modules are Copyright (c) 2008-2012, Lancaster University; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Jožef Stefan Institute; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, First Oversi Ltd.; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, TECHNISCHE UNIVERSITEIT DELFT; All rights reserved:\r
+      BaseLib/Core/NATFirewall/NatCheck.py\r
+      BaseLib/Core/NATFirewall/TimeoutCheck.py\r
+      BaseLib/Core/NATFirewall/NatCheckMsgHandler.py\r
+      BaseLib/Policies/SeedingManager.py\r
+      BaseLib/Core/Statistics/SeedingStatsCrawler.py\r
+      BaseLib/Core/CacheDB/SqliteSeedingStatsCacheDB.py\r
+      BaseLib/Core/BuddyCast/moderationcast.py\r
+      BaseLib/Core/BuddyCast/moderationcast_util.py\r
+      BaseLib/Core/BuddyCast/votecast.py\r
+      BaseLib/Core/CacheDB/maxflow.py\r
+      BaseLib/Core/CacheDB/SqliteVideoPlaybackStatsCacheDB.py\r
+      BaseLib/Core/NATFirewall/ConnectionCheck.py\r
+      BaseLib/Core/NATFirewall/NatTraversal.py\r
+      BaseLib/Core/Search/Reranking.py\r
+      BaseLib/Core/Statistics/tribler_videoplayback_stats.sql\r
+      BaseLib/Core/Statistics/VideoPlaybackCrawler.py\r
+      BaseLib/Core/Utilities/Crypto.py\r
+      BaseLib/Images/\r
+      BaseLib/Player/BaseApp.py\r
+      BaseLib/Player/EmbeddedPlayer4Frame.py\r
+      BaseLib/Player/PlayerVideoFrame.py\r
+      BaseLib/Plugin\r
+      BaseLib/Test/test_multicast.py\r
+      BaseLib/Test/test_na_extend_hs.py\r
+      BaseLib/Test/test_na_extend_hs.sh\r
+      BaseLib/Test/test_sqlitecachedbhandler.sh\r
+      BaseLib/Tools/dirtrackerseeder.py\r
+      BaseLib/Tools/pipe-babscam-h264-nosound-mencoder.sh\r
+      BaseLib/Tools/superpeer.py\r
+      BaseLib/Utilities/LinuxSingleInstanceChecker.py\r
+      BaseLib/Video/Images\r
+      BaseLib/Video/VideoFrame.py\r
+      reset.bat\r
+      reset-keepid.bat\r
+      BaseLib/Core/Video/PiecePickerSVC.py\r
+      BaseLib/Core/Video/SVCTransporter.py\r
+      BaseLib/Core/Video/SVCVideoStatus.py\r
+      BaseLib/schema_sdb_v5.sql\r
+      BaseLib/Core/APIImplementation/makeurl.py\r
+      BaseLib/Core/BuddyCast/channelcast.py\r
+      BaseLib/Core/DecentralizedTracking/repex.py\r
+      BaseLib/Core/NATFirewall/TimeoutFinder.py\r
+      BaseLib/Core/NATFirewall/UDPPuncture.py\r
+      BaseLib/Core/Statistics/RepexCrawler.py\r
+      BaseLib/Debug/*\r
+      BaseLib/Tools/createtorrent.py\r
+      BaseLib/Tools/pingbackserver.py\r
+      BaseLib/Tools/seeking.py\r
+      BaseLib/Tools/stunserver.py\r
+      lucid-xpicreate.sh\r
+      patentfreevlc.bat\r
+      BaseLib/Core/BitTornado/BT1/GetRightHTTPDownloader.py\r
+      BaseLib/Core/BitTornado/BT1/HoffmanHTTPDownloader.py\r
+      BaseLib/Core/CacheDB/MetadataDBHandler.py\r
+      BaseLib/Core/DecentralizedTracking/MagnetLink/*\r
+      BaseLib/Core/Subtitles/*\r
+      BaseLib/Images/SwarmServerIcon.ico\r
+      BaseLib/Main/Build/Ubuntu/tribler.gconf-defaults\r
+      BaseLib/Main/Utility/logging_util.py\r
+      BaseLib/Main/vwxGUI/ChannelsPanel.py\r
+      BaseLib/Main/vwxGUI/images/iconSaved_state4.png\r
+      BaseLib/schema_sdb_v5.sql\r
+      BaseLib/Test/Core/*\r
+      BaseLib/Test/extend_hs_dir/proxyservice.test.torrent\r
+      BaseLib/Test/subtitles_test_res\r
+      BaseLib/Test/test_channelcast_plus_subtitles.py\r
+      BaseLib/Test/test_magnetlink.py\r
+      BaseLib/Test/test_miscutils.py\r
+      BaseLib/Test/test_subtitles.bat\r
+      BaseLib/Test/test_subtitles_isolation.py\r
+      BaseLib/Test/test_subtitles_msgs.py\r
+      BaseLib/Test/test_subtitles.sh\r
+      BaseLib/Test/test_threadpool.py\r
+      BaseLib/Tools/dirtracker.py\r
+      BaseLib/Tools/duration2torrent.py\r
+      BaseLib/Tools/httpseeder.py\r
+      BaseLib/Transport/*\r
+      BaseLib/Video/Ogg.py\r
+      BaseLib/WebUI/*\r
+      xpitransmakedeb.sh\r
+      xpitransmakedist.bat\r
+      xpitransmakedist.sh\r
+      xpitransmakedistmac.sh\r
+      xie8transmakedist.bat\r
+      TUD/swift-spbackend-r1598/*\r
+      vlc-1.0.5-swarmplugin-switch-kcc-src-aug2010-r16968.patch (except bindings/python)\r
+\r
+    The following library modules are Copyright (c) 2008-2012, STMicroelectronics S.r.l.; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Kungliga Tekniska Högskolan (The Royal Institute of Technology); All rights reserved:\r
+      BaseLib/Core/DecentralizedTracking/kadtracker/*\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Markenfilm GmbH & Co. KG; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Radiotelevizija Slovenija Javni Zavvod Ljubljana; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Kendra Foundation; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Universitaet Klagenfurt; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, AG Projects; All rights reserved:\r
+      AGP/sipsimple-20100621.tgz\r
+      \r
+    The following library modules are Copyright (c) 2008-2012, The British Broadcasting Corporation; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Pioneer Digital Design Centre Limited; All rights reserved:\r
+  \r
+    The following library modules are Copyright (c) 2008-2012, INSTITUT FUER RUNDFUNKTECHNIK GMBH; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Fabchannel BV; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, University Politehnica Bucharest; All rights reserved:\r
+      BaseLib/Core/ProxyService/*\r
+      BaseLib/Tools/proxy-cmdline.py\r
+      BaseLib/Test/test_proxyservice_as_coord.bat\r
+      BaseLib/Test/test_proxyservice_as_coord.py\r
+      BaseLib/Test/test_proxyservice_as_coord.sh\r
+      BaseLib/Test/test_proxyservice.bat\r
+      BaseLib/Test/test_proxyservice.py\r
+      BaseLib/Test/test_proxyservice.sh\r
+      BaseLib/Test/extend_hs_dir/proxyservice.test.torrent\r
+      \r
+\r
+    The following library modules are Copyright (c) 2008-2012, EBU-UER; All rights reserved:\r
+\r
+    The following library modules are Copyright (c) 2008-2012, Università di Roma Sapienza; All rights reserved:\r
+\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    VTT Technical Research Centre of Finland, \r
+    Tekniikankatu 1, \r
+    FIN-33710 Tampere, \r
+    Finland\r
+\r
+    Norut AS,\r
+    Postboks 6434 \r
+    Forskningsparken, \r
+    9294 Tromsø,\r
+    Norway\r
+\r
+    DACC Systems AB\r
+    Glimmervägen 4, \r
+    SE18734, Täby,\r
+    Sweden\r
+\r
+    Lancaster University, \r
+    University House, \r
+    Bailrigg, Lancaster, LA1 4YW\r
+    United Kingdom\r
+\r
+    Jožef Stefan Institute, \r
+    Jamova cesta 39, \r
+    1000 Ljubljana, \r
+    Slovenia\r
+\r
+    First Oversi Ltd.,\r
+    Rishon Lezion 1,\r
+    Petah Tikva 49723, \r
+    Israel\r
+\r
+    TECHNISCHE UNIVERSITEIT DELFT, \r
+    Faculty of Electrical Engineering, Mathematics and Computer Science, \r
+    Mekelweg 4, \r
+    2628 CD Delft, \r
+    The Netherlands\r
+\r
+    STMicroelectronics S.r.l., \r
+    via C.Olivetti 2, \r
+    I-20041 Agrate Brianza,\r
+    Italy\r
+\r
+    Kungliga Tekniska Högskolan (The Royal Institute of Technology), \r
+    KTH/ICT/ECS/TSLab\r
+    Electrum 229\r
+    164 40 Kista\r
+    Sweden\r
+\r
+    Markenfilm GmbH & Co. KG, \r
+    Schulauer Moorweg 25, \r
+    22880 Wedel, \r
+    Germany\r
+\r
+    Radiotelevizija Slovenija Javni Zavvod Ljubljana, \r
+    Kolodvorska 2, \r
+    SI-1000 Ljubljana,\r
+    Slovenia\r
+\r
+\r
+    Kendra Foundation, \r
+    Meadow Barn, Holne, \r
+    Newton Abbot, Devon, TQ13 7SP,\r
+    United Kingdom\r
+\r
+\r
+    Universitaet Klagenfurt, \r
+    Universitaetstrasse 65-67, \r
+    9020 Klagenfurt, \r
+    Austria\r
+\r
+    AG Projects, \r
+    Dr. Leijdsstraat 92,\r
+    2021RK Haarlem, \r
+    The Netherlands\r
+\r
+    The British Broadcasting Corporation,\r
+    Broadcasting House, Portland Place, \r
+    London, W1A 1AA \r
+    United Kingdom\r
+\r
+    Pioneer Digital Design Centre Limited, \r
+    Pioneer House, Hollybush Hill, Stoke Poges, \r
+    Slough, SL2 4QP\r
+    United Kingdom\r
+\r
+    INSTITUT FUER RUNDFUNKTECHNIK GMBH\r
+    Floriansmuehlstrasse 60,\r
+    80939 München, \r
+    Germany\r
+\r
+    Fabchannel BV, \r
+    Kleine-Gartmanplantsoen 21, \r
+    1017 RP Amsterdam, \r
+    The Netherlands\r
+\r
+    University Politehnica Bucharest, \r
+    313 Splaiul Independentei, \r
+    District 6, cod 060042, Bucharest,\r
+    Romania\r
+\r
+    EBU-UER, \r
+    L'Ancienne Route 17A, 1218\r
+    Grand Saconnex - Geneva, \r
+    Switzerland\r
+\r
+    Università di Roma Sapienza\r
+    Dipartimento di Informatica e Sistemistica (DIS),\r
+    Via Ariosto 25, \r
+    00185 Rome, \r
+    Italy\r
+\r
+\r
+------------------------------------------------------------------------------\r
+\r
+    BaseLib content-delivery library.\r
+\r
+    Development of the BaseLib library was supported by various research \r
+    grants:\r
+\r
+     - BSIK Freeband Communication I-Share project (Dutch Ministry of Economic \r
+       Affairs)\r
+     - Netherlands Organisation for Scientific Research (NWO) grant 612.060.215\r
+     - Dutch Technology Foundation STW: Veni project DTC.7299\r
+     - European Community's Sixth Framework Programme in the P2P-FUSION project\r
+       under contract no 035249.\r
\r
+    The following library modules are Copyright (c) 2005-2010,\r
+    Delft University of Technology and Vrije Universiteit Amsterdam; \r
+    All rights reserved.\r
+\r
+      BaseLib/*\r
+\r
+    This library is free software; you can redistribute it and/or\r
+    modify it under the terms of the GNU Lesser General Public\r
+    License as published by the Free Software Foundation; either\r
+    version 2.1 of the License, or (at your option) any later version.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    Delft University of Technology\r
+    Postbus 5\r
+    2600 AA Delft\r
+    The Netherlands\r
+    \r
+    Vrije Universiteit\r
+    De Boelelaan 1105\r
+    1081 HV Amsterdam\r
+    The Netherlands\r
+\r
+\r
\r
+-------------------------------------------------------------------------------\r
+\r
+    BuddyCast4 content-recommendation library.\r
+\r
+    The research leading to this library has received funding from the\r
+    European Community's Seventh Framework Programme [FP7/2007-2011] \r
+    in the Petamedia project under grant agreement no. 216444\r
+\r
+    The following library modules are Copyright (c) 2008-2010,\r
+    Delft University of Technology and Technische Universität Berlin; \r
+    All rights reserved.\r
+\r
+      BaseLib/Core/BuddyCast/buddycast.py\r
+\r
+    The following library modules are Copyright (c) 2008-2010,\r
+    Technische Universität Berlin; \r
+    All rights reserved.\r
+\r
+      BaseLib/Core/Search/Reranking.py\r
+      BaseLib/Test/test_buddycast4.py\r
+      BaseLib/Test/test_buddycast4_stresstest.py\r
+      \r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    Delft University of Technology\r
+    Postbus 5\r
+    2600 AA Delft\r
+    The Netherlands\r
+    \r
+    Technische Universität Berlin\r
+    Strasse des 17. Juni 135\r
+    10623 Berlin\r
+    Germany\r
+\r
+\r
+------------------------------------------------------------------------------\r
+\r
+    SwarmTransport/SwarmPlayer Firefox library.\r
+\r
+    The research leading to this library has received funding from the European\r
+    Community's Seventh Framework Programme in the P2P-Next project under grant\r
+    agreement no 216217.\r
+\r
+    All library modules are free software, unless stated otherwise; you can \r
+    redistribute them and/or modify them under the terms of the GNU Lesser \r
+    General Public License as published by the Free Software Foundation; in \r
+    particular, version 2.1 of the License.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+\r
+    The following library modules are Copyright (c) 2008-2012, TECHNISCHE UNIVERSITEIT DELFT; \r
+    and Jan Gerber; All rights reserved:\r
+      BaseLib/Transport/tribeIChannel.idl\r
+      BaseLib/Transport/tribeISwarmTransport.idl\r
+      BaseLib/Transport/components/TribeChannel.js\r
+      BaseLib/Transport/components/TribeProtocolHandler.js\r
+      BaseLib/Transport/components/SwarmTransport.js\r
+      BaseLib/Transport/install.rdf\r
+      BaseLib/Transport/chrome.manifest\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+    TECHNISCHE UNIVERSITEIT DELFT, \r
+    Faculty of Electrical Engineering, Mathematics and Computer Science, \r
+    Mekelweg 4, \r
+    2628 CD Delft, \r
+    The Netherlands\r
+    \r
+    Jan Gerber\r
+    j@thing.net\r
+\r
+-------------------------------------------------------------------------------\r
+\r
+Unless otherwise noted, all files written by Bram Cohen, John Hoffman, Petru \r
+Paler, Uoti Urpala, Ross Cohen, Tim Tucker, Choopan RATTANAPOKA, Yejun Yang,\r
+Myers Carpenter, Bill Bumgarner, Henry 'Pi' James, Loring Holden, \r
+Dustin Pate ("noirsoldats@codemeu.com"), kratoak5, Roee Shlomo, Greg Fleming, \r
+N. Goldmann ("Pir4nhaX,www.clanyakuza.com"), and Michel Hartmann is released\r
+under the MIT license, exceptions contain licensing information in them.\r
+\r
+Copyright (C) 2001-2002 Bram Cohen\r
+\r
+Permission is hereby granted, free of charge, to any person\r
+obtaining a copy of this software and associated documentation files\r
+(the "Software"), to deal in the Software without restriction,\r
+including without limitation the rights to use, copy, modify, merge,\r
+publish, distribute, sublicense, and/or sell copies of the Software,\r
+and to permit persons to whom the Software is furnished to do so,\r
+subject to the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be\r
+included in all copies or substantial portions of the Software.\r
+\r
+The Software is provided "AS IS", without warranty of any kind,\r
+express or implied, including but not limited to the warranties of\r
+merchantability,  fitness for a particular purpose and\r
+noninfringement. In no event shall the  authors or copyright holders\r
+be liable for any claim, damages or other liability, whether in an\r
+action of contract, tort or otherwise, arising from, out of or in\r
+connection with the Software or the use or other dealings in the\r
+Software.\r
+\r
+-------------------------------------------------------------------------------\r
+\r
+                  GNU LESSER GENERAL PUBLIC LICENSE\r
+                       Version 2.1, February 1999\r
+\r
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.\r
+     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+ Everyone is permitted to copy and distribute verbatim copies\r
+ of this license document, but changing it is not allowed.\r
+\r
+[This is the first released version of the Lesser GPL.  It also counts\r
+ as the successor of the GNU Library Public License, version 2, hence\r
+ the version number 2.1.]\r
+\r
+                            Preamble\r
+\r
+  The licenses for most software are designed to take away your\r
+freedom to share and change it.  By contrast, the GNU General Public\r
+Licenses are intended to guarantee your freedom to share and change\r
+free software--to make sure the software is free for all its users.\r
+\r
+  This license, the Lesser General Public License, applies to some\r
+specially designated software packages--typically libraries--of the\r
+Free Software Foundation and other authors who decide to use it.  You\r
+can use it too, but we suggest you first think carefully about whether\r
+this license or the ordinary General Public License is the better\r
+strategy to use in any particular case, based on the explanations below.\r
+\r
+  When we speak of free software, we are referring to freedom of use,\r
+not price.  Our General Public Licenses are designed to make sure that\r
+you have the freedom to distribute copies of free software (and charge\r
+for this service if you wish); that you receive source code or can get\r
+it if you want it; that you can change the software and use pieces of\r
+it in new free programs; and that you are informed that you can do\r
+these things.\r
+\r
+  To protect your rights, we need to make restrictions that forbid\r
+distributors to deny you these rights or to ask you to surrender these\r
+rights.  These restrictions translate to certain responsibilities for\r
+you if you distribute copies of the library or if you modify it.\r
+\r
+  For example, if you distribute copies of the library, whether gratis\r
+or for a fee, you must give the recipients all the rights that we gave\r
+you.  You must make sure that they, too, receive or can get the source\r
+code.  If you link other code with the library, you must provide\r
+complete object files to the recipients, so that they can relink them\r
+with the library after making changes to the library and recompiling\r
+it.  And you must show them these terms so they know their rights.\r
+\r
+  We protect your rights with a two-step method: (1) we copyright the\r
+library, and (2) we offer you this license, which gives you legal\r
+permission to copy, distribute and/or modify the library.\r
+\r
+  To protect each distributor, we want to make it very clear that\r
+there is no warranty for the free library.  Also, if the library is\r
+modified by someone else and passed on, the recipients should know\r
+that what they have is not the original version, so that the original\r
+author's reputation will not be affected by problems that might be\r
+introduced by others.\r
+\r
+\r
+  Finally, software patents pose a constant threat to the existence of\r
+any free program.  We wish to make sure that a company cannot\r
+effectively restrict the users of a free program by obtaining a\r
+restrictive license from a patent holder.  Therefore, we insist that\r
+any patent license obtained for a version of the library must be\r
+consistent with the full freedom of use specified in this license.\r
+\r
+  Most GNU software, including some libraries, is covered by the\r
+ordinary GNU General Public License.  This license, the GNU Lesser\r
+General Public License, applies to certain designated libraries, and\r
+is quite different from the ordinary General Public License.  We use\r
+this license for certain libraries in order to permit linking those\r
+libraries into non-free programs.\r
+\r
+  When a program is linked with a library, whether statically or using\r
+a shared library, the combination of the two is legally speaking a\r
+combined work, a derivative of the original library.  The ordinary\r
+General Public License therefore permits such linking only if the\r
+entire combination fits its criteria of freedom.  The Lesser General\r
+Public License permits more lax criteria for linking other code with\r
+the library.\r
+\r
+  We call this license the "Lesser" General Public License because it\r
+does Less to protect the user's freedom than the ordinary General\r
+Public License.  It also provides other free software developers Less\r
+of an advantage over competing non-free programs.  These disadvantages\r
+are the reason we use the ordinary General Public License for many\r
+libraries.  However, the Lesser license provides advantages in certain\r
+special circumstances.\r
+\r
+  For example, on rare occasions, there may be a special need to\r
+encourage the widest possible use of a certain library, so that it becomes\r
+a de-facto standard.  To achieve this, non-free programs must be\r
+allowed to use the library.  A more frequent case is that a free\r
+library does the same job as widely used non-free libraries.  In this\r
+case, there is little to gain by limiting the free library to free\r
+software only, so we use the Lesser General Public License.\r
+\r
+  In other cases, permission to use a particular library in non-free\r
+programs enables a greater number of people to use a large body of\r
+free software.  For example, permission to use the GNU C Library in\r
+non-free programs enables many more people to use the whole GNU\r
+operating system, as well as its variant, the GNU/Linux operating\r
+system.\r
+\r
+  Although the Lesser General Public License is Less protective of the\r
+users' freedom, it does ensure that the user of a program that is\r
+linked with the Library has the freedom and the wherewithal to run\r
+that program using a modified version of the Library.\r
+\r
+  The precise terms and conditions for copying, distribution and\r
+modification follow.  Pay close attention to the difference between a\r
+"work based on the library" and a "work that uses the library".  The\r
+former contains code derived from the library, whereas the latter must\r
+be combined with the library in order to run.\r
+\r
+\r
+                  GNU LESSER GENERAL PUBLIC LICENSE\r
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r
+\r
+  0. This License Agreement applies to any software library or other\r
+program which contains a notice placed by the copyright holder or\r
+other authorized party saying it may be distributed under the terms of\r
+this Lesser General Public License (also called "this License").\r
+Each licensee is addressed as "you".\r
+\r
+  A "library" means a collection of software functions and/or data\r
+prepared so as to be conveniently linked with application programs\r
+(which use some of those functions and data) to form executables.\r
+\r
+  The "Library", below, refers to any such software library or work\r
+which has been distributed under these terms.  A "work based on the\r
+Library" means either the Library or any derivative work under\r
+copyright law: that is to say, a work containing the Library or a\r
+portion of it, either verbatim or with modifications and/or translated\r
+straightforwardly into another language.  (Hereinafter, translation is\r
+included without limitation in the term "modification".)\r
+\r
+  "Source code" for a work means the preferred form of the work for\r
+making modifications to it.  For a library, complete source code means\r
+all the source code for all modules it contains, plus any associated\r
+interface definition files, plus the scripts used to control compilation\r
+and installation of the library.\r
+\r
+  Activities other than copying, distribution and modification are not\r
+covered by this License; they are outside its scope.  The act of\r
+running a program using the Library is not restricted, and output from\r
+such a program is covered only if its contents constitute a work based\r
+on the Library (independent of the use of the Library in a tool for\r
+writing it).  Whether that is true depends on what the Library does\r
+and what the program that uses the Library does.\r
+  \r
+  1. You may copy and distribute verbatim copies of the Library's\r
+complete source code as you receive it, in any medium, provided that\r
+you conspicuously and appropriately publish on each copy an\r
+appropriate copyright notice and disclaimer of warranty; keep intact\r
+all the notices that refer to this License and to the absence of any\r
+warranty; and distribute a copy of this License along with the\r
+Library.\r
+\r
+  You may charge a fee for the physical act of transferring a copy,\r
+and you may at your option offer warranty protection in exchange for a\r
+fee.\r
+\r
+\r
+  2. You may modify your copy or copies of the Library or any portion\r
+of it, thus forming a work based on the Library, and copy and\r
+distribute such modifications or work under the terms of Section 1\r
+above, provided that you also meet all of these conditions:\r
+\r
+    a) The modified work must itself be a software library.\r
+\r
+    b) You must cause the files modified to carry prominent notices\r
+    stating that you changed the files and the date of any change.\r
+\r
+    c) You must cause the whole of the work to be licensed at no\r
+    charge to all third parties under the terms of this License.\r
+\r
+    d) If a facility in the modified Library refers to a function or a\r
+    table of data to be supplied by an application program that uses\r
+    the facility, other than as an argument passed when the facility\r
+    is invoked, then you must make a good faith effort to ensure that,\r
+    in the event an application does not supply such function or\r
+    table, the facility still operates, and performs whatever part of\r
+    its purpose remains meaningful.\r
+\r
+    (For example, a function in a library to compute square roots has\r
+    a purpose that is entirely well-defined independent of the\r
+    application.  Therefore, Subsection 2d requires that any\r
+    application-supplied function or table used by this function must\r
+    be optional: if the application does not supply it, the square\r
+    root function must still compute square roots.)\r
+\r
+These requirements apply to the modified work as a whole.  If\r
+identifiable sections of that work are not derived from the Library,\r
+and can be reasonably considered independent and separate works in\r
+themselves, then this License, and its terms, do not apply to those\r
+sections when you distribute them as separate works.  But when you\r
+distribute the same sections as part of a whole which is a work based\r
+on the Library, the distribution of the whole must be on the terms of\r
+this License, whose permissions for other licensees extend to the\r
+entire whole, and thus to each and every part regardless of who wrote\r
+it.\r
+\r
+Thus, it is not the intent of this section to claim rights or contest\r
+your rights to work written entirely by you; rather, the intent is to\r
+exercise the right to control the distribution of derivative or\r
+collective works based on the Library.\r
+\r
+In addition, mere aggregation of another work not based on the Library\r
+with the Library (or with a work based on the Library) on a volume of\r
+a storage or distribution medium does not bring the other work under\r
+the scope of this License.\r
+\r
+  3. You may opt to apply the terms of the ordinary GNU General Public\r
+License instead of this License to a given copy of the Library.  To do\r
+this, you must alter all the notices that refer to this License, so\r
+that they refer to the ordinary GNU General Public License, version 2,\r
+instead of to this License.  (If a newer version than version 2 of the\r
+ordinary GNU General Public License has appeared, then you can specify\r
+that version instead if you wish.)  Do not make any other change in\r
+these notices.\r
+\r
+\r
+  Once this change is made in a given copy, it is irreversible for\r
+that copy, so the ordinary GNU General Public License applies to all\r
+subsequent copies and derivative works made from that copy.\r
+\r
+  This option is useful when you wish to copy part of the code of\r
+the Library into a program that is not a library.\r
+\r
+  4. You may copy and distribute the Library (or a portion or\r
+derivative of it, under Section 2) in object code or executable form\r
+under the terms of Sections 1 and 2 above provided that you accompany\r
+it with the complete corresponding machine-readable source code, which\r
+must be distributed under the terms of Sections 1 and 2 above on a\r
+medium customarily used for software interchange.\r
+\r
+  If distribution of object code is made by offering access to copy\r
+from a designated place, then offering equivalent access to copy the\r
+source code from the same place satisfies the requirement to\r
+distribute the source code, even though third parties are not\r
+compelled to copy the source along with the object code.\r
+\r
+  5. A program that contains no derivative of any portion of the\r
+Library, but is designed to work with the Library by being compiled or\r
+linked with it, is called a "work that uses the Library".  Such a\r
+work, in isolation, is not a derivative work of the Library, and\r
+therefore falls outside the scope of this License.\r
+\r
+  However, linking a "work that uses the Library" with the Library\r
+creates an executable that is a derivative of the Library (because it\r
+contains portions of the Library), rather than a "work that uses the\r
+library".  The executable is therefore covered by this License.\r
+Section 6 states terms for distribution of such executables.\r
+\r
+  When a "work that uses the Library" uses material from a header file\r
+that is part of the Library, the object code for the work may be a\r
+derivative work of the Library even though the source code is not.\r
+Whether this is true is especially significant if the work can be\r
+linked without the Library, or if the work is itself a library.  The\r
+threshold for this to be true is not precisely defined by law.\r
+\r
+  If such an object file uses only numerical parameters, data\r
+structure layouts and accessors, and small macros and small inline\r
+functions (ten lines or less in length), then the use of the object\r
+file is unrestricted, regardless of whether it is legally a derivative\r
+work.  (Executables containing this object code plus portions of the\r
+Library will still fall under Section 6.)\r
+\r
+  Otherwise, if the work is a derivative of the Library, you may\r
+distribute the object code for the work under the terms of Section 6.\r
+Any executables containing that work also fall under Section 6,\r
+whether or not they are linked directly with the Library itself.\r
+\r
+\r
+  6. As an exception to the Sections above, you may also combine or\r
+link a "work that uses the Library" with the Library to produce a\r
+work containing portions of the Library, and distribute that work\r
+under terms of your choice, provided that the terms permit\r
+modification of the work for the customer's own use and reverse\r
+engineering for debugging such modifications.\r
+\r
+  You must give prominent notice with each copy of the work that the\r
+Library is used in it and that the Library and its use are covered by\r
+this License.  You must supply a copy of this License.  If the work\r
+during execution displays copyright notices, you must include the\r
+copyright notice for the Library among them, as well as a reference\r
+directing the user to the copy of this License.  Also, you must do one\r
+of these things:\r
+\r
+    a) Accompany the work with the complete corresponding\r
+    machine-readable source code for the Library including whatever\r
+    changes were used in the work (which must be distributed under\r
+    Sections 1 and 2 above); and, if the work is an executable linked\r
+    with the Library, with the complete machine-readable "work that\r
+    uses the Library", as object code and/or source code, so that the\r
+    user can modify the Library and then relink to produce a modified\r
+    executable containing the modified Library.  (It is understood\r
+    that the user who changes the contents of definitions files in the\r
+    Library will not necessarily be able to recompile the application\r
+    to use the modified definitions.)\r
+\r
+    b) Use a suitable shared library mechanism for linking with the\r
+    Library.  A suitable mechanism is one that (1) uses at run time a\r
+    copy of the library already present on the user's computer system,\r
+    rather than copying library functions into the executable, and (2)\r
+    will operate properly with a modified version of the library, if\r
+    the user installs one, as long as the modified version is\r
+    interface-compatible with the version that the work was made with.\r
+\r
+    c) Accompany the work with a written offer, valid for at\r
+    least three years, to give the same user the materials\r
+    specified in Subsection 6a, above, for a charge no more\r
+    than the cost of performing this distribution.\r
+\r
+    d) If distribution of the work is made by offering access to copy\r
+    from a designated place, offer equivalent access to copy the above\r
+    specified materials from the same place.\r
+\r
+    e) Verify that the user has already received a copy of these\r
+    materials or that you have already sent this user a copy.\r
+\r
+  For an executable, the required form of the "work that uses the\r
+Library" must include any data and utility programs needed for\r
+reproducing the executable from it.  However, as a special exception,\r
+the materials to be distributed need not include anything that is\r
+normally distributed (in either source or binary form) with the major\r
+components (compiler, kernel, and so on) of the operating system on\r
+which the executable runs, unless that component itself accompanies\r
+the executable.\r
+\r
+  It may happen that this requirement contradicts the license\r
+restrictions of other proprietary libraries that do not normally\r
+accompany the operating system.  Such a contradiction means you cannot\r
+use both them and the Library together in an executable that you\r
+distribute.\r
+\r
+\r
+  7. You may place library facilities that are a work based on the\r
+Library side-by-side in a single library together with other library\r
+facilities not covered by this License, and distribute such a combined\r
+library, provided that the separate distribution of the work based on\r
+the Library and of the other library facilities is otherwise\r
+permitted, and provided that you do these two things:\r
+\r
+    a) Accompany the combined library with a copy of the same work\r
+    based on the Library, uncombined with any other library\r
+    facilities.  This must be distributed under the terms of the\r
+    Sections above.\r
+\r
+    b) Give prominent notice with the combined library of the fact\r
+    that part of it is a work based on the Library, and explaining\r
+    where to find the accompanying uncombined form of the same work.\r
+\r
+  8. You may not copy, modify, sublicense, link with, or distribute\r
+the Library except as expressly provided under this License.  Any\r
+attempt otherwise to copy, modify, sublicense, link with, or\r
+distribute the Library is void, and will automatically terminate your\r
+rights under this License.  However, parties who have received copies,\r
+or rights, from you under this License will not have their licenses\r
+terminated so long as such parties remain in full compliance.\r
+\r
+  9. You are not required to accept this License, since you have not\r
+signed it.  However, nothing else grants you permission to modify or\r
+distribute the Library or its derivative works.  These actions are\r
+prohibited by law if you do not accept this License.  Therefore, by\r
+modifying or distributing the Library (or any work based on the\r
+Library), you indicate your acceptance of this License to do so, and\r
+all its terms and conditions for copying, distributing or modifying\r
+the Library or works based on it.\r
+\r
+  10. Each time you redistribute the Library (or any work based on the\r
+Library), the recipient automatically receives a license from the\r
+original licensor to copy, distribute, link with or modify the Library\r
+subject to these terms and conditions.  You may not impose any further\r
+restrictions on the recipients' exercise of the rights granted herein.\r
+You are not responsible for enforcing compliance by third parties with\r
+this License.\r
+\r
+\r
+  11. If, as a consequence of a court judgment or allegation of patent\r
+infringement or for any other reason (not limited to patent issues),\r
+conditions are imposed on you (whether by court order, agreement or\r
+otherwise) that contradict the conditions of this License, they do not\r
+excuse you from the conditions of this License.  If you cannot\r
+distribute so as to satisfy simultaneously your obligations under this\r
+License and any other pertinent obligations, then as a consequence you\r
+may not distribute the Library at all.  For example, if a patent\r
+license would not permit royalty-free redistribution of the Library by\r
+all those who receive copies directly or indirectly through you, then\r
+the only way you could satisfy both it and this License would be to\r
+refrain entirely from distribution of the Library.\r
+\r
+If any portion of this section is held invalid or unenforceable under any\r
+particular circumstance, the balance of the section is intended to apply,\r
+and the section as a whole is intended to apply in other circumstances.\r
+\r
+It is not the purpose of this section to induce you to infringe any\r
+patents or other property right claims or to contest validity of any\r
+such claims; this section has the sole purpose of protecting the\r
+integrity of the free software distribution system which is\r
+implemented by public license practices.  Many people have made\r
+generous contributions to the wide range of software distributed\r
+through that system in reliance on consistent application of that\r
+system; it is up to the author/donor to decide if he or she is willing\r
+to distribute software through any other system and a licensee cannot\r
+impose that choice.\r
+\r
+This section is intended to make thoroughly clear what is believed to\r
+be a consequence of the rest of this License.\r
+\r
+  12. If the distribution and/or use of the Library is restricted in\r
+certain countries either by patents or by copyrighted interfaces, the\r
+original copyright holder who places the Library under this License may add\r
+an explicit geographical distribution limitation excluding those countries,\r
+so that distribution is permitted only in or among countries not thus\r
+excluded.  In such case, this License incorporates the limitation as if\r
+written in the body of this License.\r
+\r
+  13. The Free Software Foundation may publish revised and/or new\r
+versions of the Lesser General Public License from time to time.\r
+Such new versions will be similar in spirit to the present version,\r
+but may differ in detail to address new problems or concerns.\r
+\r
+Each version is given a distinguishing version number.  If the Library\r
+specifies a version number of this License which applies to it and\r
+"any later version", you have the option of following the terms and\r
+conditions either of that version or of any later version published by\r
+the Free Software Foundation.  If the Library does not specify a\r
+license version number, you may choose any version ever published by\r
+the Free Software Foundation.\r
+\r
+\r
+  14. If you wish to incorporate parts of the Library into other free\r
+programs whose distribution conditions are incompatible with these,\r
+write to the author to ask for permission.  For software which is\r
+copyrighted by the Free Software Foundation, write to the Free\r
+Software Foundation; we sometimes make exceptions for this.  Our\r
+decision will be guided by the two goals of preserving the free status\r
+of all derivatives of our free software and of promoting the sharing\r
+and reuse of software generally.\r
+\r
+                            NO WARRANTY\r
+\r
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO\r
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.\r
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR\r
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY\r
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE\r
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\r
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE\r
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME\r
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\r
+\r
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN\r
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY\r
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU\r
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR\r
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE\r
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING\r
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A\r
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF\r
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH\r
+DAMAGES.\r
+\r
+                     END OF TERMS AND CONDITIONS\r
+\r
+\r
+           How to Apply These Terms to Your New Libraries\r
+\r
+  If you develop a new library, and you want it to be of the greatest\r
+possible use to the public, we recommend making it free software that\r
+everyone can redistribute and change.  You can do so by permitting\r
+redistribution under these terms (or, alternatively, under the terms of the\r
+ordinary General Public License).\r
+\r
+  To apply these terms, attach the following notices to the library.  It is\r
+safest to attach them to the start of each source file to most effectively\r
+convey the exclusion of warranty; and each file should have at least the\r
+"copyright" line and a pointer to where the full notice is found.\r
+\r
+    <one line to give the library's name and a brief idea of what it does.>\r
+    Copyright (C) <year>  <name of author>\r
+\r
+    This library is free software; you can redistribute it and/or\r
+    modify it under the terms of the GNU Lesser General Public\r
+    License as published by the Free Software Foundation; either\r
+    version 2.1 of the License, or (at your option) any later version.\r
+\r
+    This library is distributed in the hope that it will be useful,\r
+    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\r
+    Lesser General Public License for more details.\r
+\r
+    You should have received a copy of the GNU Lesser General Public\r
+    License along with this library; if not, write to the Free Software\r
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\r
+\r
+Also add information on how to contact you by electronic and paper mail.\r
+\r
+You should also get your employer (if you work as a programmer) or your\r
+school, if any, to sign a "copyright disclaimer" for the library, if\r
+necessary.  Here is a sample; alter the names:\r
+\r
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the\r
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.\r
+\r
+  <signature of Ty Coon>, 1 April 1990\r
+  Ty Coon, President of Vice\r
+\r
+That's all there is to it!\r
+\r
+-------------------------------------------------------------------------------\r
+\r
diff --git a/instrumentation/next-share/README.txt b/instrumentation/next-share/README.txt
new file mode 100644 (file)
index 0000000..0d9b39c
--- /dev/null
@@ -0,0 +1,71 @@
+\r
+==============================================================================\r
+                             Next-Share: \r
+      The next generation Peer-to-Peer content delivery platform\r
+    \r
+                       http://www.p2p-next.org/\r
+==============================================================================\r
+\r
+LICENSE\r
+-------\r
+See LICENSE.txt and binary-LICENSE.txt.\r
+\r
+\r
+PREREQUISITES\r
+-------------\r
+\r
+To run the Next-Share platform from source you will need to install the\r
+following software packages. See www.p2p-next.org for binary distributions.\r
+\r
+   Python >= 2.5\r
+   M2Crypto >= 0.16\r
+   wxPython >= 2.8 UNICODE (i.e., use --enable-unicode to build)\r
+   APSW aka. python-apsw >= 3.6.x (Python wrappers for SQLite database)\r
+   pywin32 >= Build 208 (Windows only, for e.g. UPnP support)\r
+   vlc:\r
+        For SwarmPlayer V1: VLC >= 1.0.5 with its Python bindings\r
+       For SwarmPlugin: VLC >= 1.0.5 with P2P-Next extension\r
+   simplejson >= 2.1.1 (if Python < 2.6)\r
+   xulrunner-sdk >= 1.9.1.5 < 1.9.2 (optional, to run SwarmPlayer V2/SwarmTransport)\r
+   7-Zip >= 4.6.5 (optional, to build SwarmPlayer V2/SwarmTransport)\r
+\r
+Next-Share runs on Windows (XP,Vista), Mac OS X and Linux. On Linux, it is \r
+easiest to try to install these packages via a package manager such as\r
+Synaptic (on Ubuntu). To run from the source on Windows it is easiest to use\r
+a binary distribution of all packages. On Mac, we advise using MacPorts.\r
+\r
+INSTALLING ON LINUX\r
+-------------------\r
+\r
+1. Unpack the main source code.\r
+\r
+2. Change to the Next-Share directory.\r
+\r
+3. The peer-to-peer video player SwarmPlayer that is part of Next-Share can now\r
+   be started by running\r
+\r
+     PYTHONPATH="$PYTHONPATH":Next-Share:.\r
+     export PYTHONPATH\r
+     python2.5 BaseLib/Player/swarmplayer.py\r
+  \r
+\r
+INSTALLING ON WINDOWS\r
+---------------------\r
+\r
+1. Unpack the main source code.\r
+\r
+2. Open a CMD Prompt, change to the Next-Share directory.\r
+   \r
+3. The peer-to-peer video player SwarmPlayer that is part of Next-Share can now\r
+   be started by running\r
+\r
+     set PYTHONPATH=%PYTHONPATH%:Next-Share:.\r
+     C:\Python25\python2.5.exe BaseLib\Player\swarmplayer.py\r
+   \r
+To build the SwarmPlugin, i.e., the browser plugin for P2P-based video\r
+delivery and playback, or the SwarmPlayer V2 aka SwarmTransport, i.e., the\r
+browser extension that adds P2P-based delivery as a new tribe:// transport\r
+protocol, see the instructions in the D6.5.4 deliverable in the Publications\r
+section of www.p2p-next.org.\r
+\r
+Arno Bakker, 2010-08-16\r
diff --git a/instrumentation/next-share/clean.bat b/instrumentation/next-share/clean.bat
new file mode 100644 (file)
index 0000000..406032b
--- /dev/null
@@ -0,0 +1,3 @@
+rmdir /S /Q build\r
+rmdir /S /Q dist\r
+del /S /Q *.pyc\r
diff --git a/instrumentation/next-share/lucid-xpicreate.sh b/instrumentation/next-share/lucid-xpicreate.sh
new file mode 100644 (file)
index 0000000..e505ad0
--- /dev/null
@@ -0,0 +1,66 @@
+#!/bin/sh -x
+#
+# Script to build SwarmTransport on Ubuntu Linux
+#
+
+export LIBRARYNAME=BaseLib
+export XULRUNNER_IDL=$HOME/pkgs/xulrunner-1.9.1.7/share/idl/xulrunner-1.9.1.7/stable
+export XULRUNNER_XPIDL=$HOME/pkgs/xulrunner-1.9.1.7/lib/xulrunner-1.9.1.7/xpidl
+
+# ----- Clean up
+
+/bin/rm -rf dist
+
+# ----- Build
+
+# Diego: building the deepest dir we get all of them.
+mkdir -p dist/installdir/bgprocess/$LIBRARYNAME/Images
+
+cp -r $LIBRARYNAME dist/installdir/bgprocess
+
+rm dist/installdir/bgprocess/$LIBRARYNAME/Category/porncat.txt
+rm dist/installdir/bgprocess/$LIBRARYNAME/Category/filter_terms.filter
+rm dist/installdir/bgprocess/$LIBRARYNAME/*.txt
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Main
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Subscriptions
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Test/
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Web2/
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Images/*
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Video/Images
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Tools
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Plugin/*.html
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/*/Build
+rm -rf `find dist/installdir/bgprocess/$LIBRARYNAME -name .svn`
+rm -rf `find dist/installdir/bgprocess/$LIBRARYNAME -name \*.pyc`
+
+cp $LIBRARYNAME/Images/SwarmPlayerIcon.ico dist/installdir/bgprocess/$LIBRARYNAME/Images
+cp $LIBRARYNAME/ns-LICENSE.txt dist/installdir
+cp $LIBRARYNAME/ns-LICENSE.txt dist/installdir/LICENSE.txt
+
+# ----- Build XPI of SwarmTransport
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/icon.png dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/install.rdf dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/chrome.manifest dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/components dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/skin dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/chrome dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/bgprocess/* dist/installdir/bgprocess
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Transport/bgprocess
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.html
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.tstream
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.sh
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.idl
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.txt
+
+
+
+# ----- Turn .idl into .xpt
+$XULRUNNER_XPIDL -m typelib -w -v -I $XULRUNNER_IDL -e dist/installdir/components/tribeIChannel.xpt $LIBRARYNAME/Transport/tribeIChannel.idl
+$XULRUNNER_XPIDL -m typelib -w -v -I $XULRUNNER_IDL -e dist/installdir/components/tribeISwarmTransport.xpt $LIBRARYNAME/Transport/tribeISwarmTransport.idl
+
+cd dist/installdir
+# ----- Turn installdir into .xpi
+zip -9 -r SwarmPlayer.xpi * 
+mv SwarmPlayer.xpi ..
+cd ../..
diff --git a/instrumentation/next-share/playmakedist.bat b/instrumentation/next-share/playmakedist.bat
new file mode 100644 (file)
index 0000000..51da70a
--- /dev/null
@@ -0,0 +1,101 @@
+REM @echo off\r
+set LIBRARYNAME=BaseLib\r
+\r
+set PYTHONHOME=\Python254\r
+REM Arno: Add . to find our core (py 2.5)\r
+set PYTHONPATH=.;%PYTHONHOME%\r
+echo PYTHONPATH SET TO %PYTHONPATH%\r
+\r
+set NSIS="\Program Files\NSIS\makensis.exe"\r
+\r
+REM ----- Check for Python and essential site-packages\r
+\r
+IF NOT EXIST %PYTHONHOME%\python.exe (\r
+  echo .\r
+  echo Could not locate Python in %PYTHONHOME%.\r
+  echo Please modify this script or install python [www.python.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\wx-*-unicode (\r
+  echo .\r
+  echo Could not locate wxPython in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.wxpython.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\py2exe (\r
+  echo .\r
+  echo Could not locate py2exe in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.py2exe.org]\r
+  exit /b\r
+)\r
+\r
+REM ----- Check for NSIS installer\r
+\r
+IF NOT EXIST %NSIS% (\r
+  echo .\r
+  echo Could not locate the NSIS installer at %NSIS%.\r
+  echo Please modify this script or install NSIS [nsis.sf.net]\r
+  exit /b\r
+)\r
+\r
+REM ----- Clean up\r
+\r
+call clean.bat\r
+\r
+REM ----- Build\r
+\r
+REM Arno: When adding files here, make sure tribler.nsi actually\r
+REM packs them in the installer .EXE\r
+\r
+mkdir dist\installdir\r
+\r
+%PYTHONHOME%\python.exe -O %LIBRARYNAME%\Player\Build\Win32\setuptriblerplay.py py2exe\r
+\r
+REM Arno: Move py2exe results to installdir\r
+move dist\*.* dist\installdir\r
+\r
+copy %LIBRARYNAME%\Player\Build\Win32\triblerplay.nsi dist\installdir\r
+copy %LIBRARYNAME%\Player\Build\Win32\swarmplayer.exe.manifest dist\installdir\r
+REM copy %PYTHONHOME%\msvcr71.dll dist\installdir\r
+REM For Vista. This works only when building on XP\r
+REM as Vista doesn't have this DLL by default.\r
+REM JD: My XP SP2 doesn't have it. It /is/ shipped with wxPython though\r
+copy %PYTHONHOME%\Lib\site-packages\wx-2.8-msw-unicode\wx\msvcp71.dll dist\installdir\r
+copy %SystemRoot%\msvcp71.dll dist\installdir\r
+copy %PYTHONHOME%\msvcp60.dll dist\installdir\r
+REM py2exe does this: copy SSLEAY32.dll dist\installdir\r
+REM copy LIBEAY32.dll dist\installdir\r
+\r
+type %LIBRARYNAME%\LICENSE.txt %LIBRARYNAME%\binary-LICENSE-postfix.txt > %LIBRARYNAME%\binary-LICENSE.txt\r
+copy %LIBRARYNAME%\binary-LICENSE.txt dist\installdir\r
+mkdir dist\installdir\%LIBRARYNAME%\r
+mkdir dist\installdir\%LIBRARYNAME%\Core\r
+copy %LIBRARYNAME%\Core\superpeer.txt dist\installdir\%LIBRARYNAME%\Core\r
+mkdir dist\installdir\%LIBRARYNAME%\Core\Statistics\r
+copy %LIBRARYNAME%\Core\Statistics\*.txt dist\installdir\%LIBRARYNAME%\Core\Statistics\r
+copy %LIBRARYNAME%\Core\Statistics\*.sql dist\installdir\%LIBRARYNAME%\Core\Statistics\r
+mkdir dist\installdir\%LIBRARYNAME%\Images\r
+copy %LIBRARYNAME%\Images\*.* dist\installdir\%LIBRARYNAME%\Images\r
+mkdir dist\installdir\%LIBRARYNAME%\Video\r
+mkdir dist\installdir\%LIBRARYNAME%\Video\Images\r
+copy %LIBRARYNAME%\Video\Images\*.* dist\installdir\%LIBRARYNAME%\Video\Images\r
+copy %LIBRARYNAME%\Player\Build\Win32\heading.bmp dist\installdir\r
+mkdir dist\installdir\%LIBRARYNAME%\Lang\r
+copy %LIBRARYNAME%\Lang\*.lang dist\installdir\%LIBRARYNAME%\Lang\r
+\r
+copy ffmpeg.exe dist\installdir\r
+xcopy vlc dist\installdir\vlc /E /I\r
+\r
+copy reset*.bat dist\installdir\r
+\r
+cd dist\installdir\r
+\r
+:makeinstaller\r
+%NSIS% triblerplay.nsi\r
+move swarmplayer_*.exe ..\r
+cd ..\r
+REM Arno : sign SwarmPlayer_*.exe\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlayer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlayer_*.exe"\r
+cd ..\r
diff --git a/instrumentation/next-share/pluginmakedist.bat b/instrumentation/next-share/pluginmakedist.bat
new file mode 100644 (file)
index 0000000..52ce3d8
--- /dev/null
@@ -0,0 +1,106 @@
+REM @echo off\r
+set LIBRARYNAME=BaseLib\r
+\r
+set PYTHONHOME=\Python254\r
+REM Arno: Add . to find our core (py 2.5)\r
+set PYTHONPATH=.;%PYTHONHOME%\r
+echo PYTHONPATH SET TO %PYTHONPATH%\r
+\r
+set NSIS="\Program Files\NSIS\makensis.exe"\r
+\r
+REM ----- Check for Python and essential site-packages\r
+\r
+IF NOT EXIST %PYTHONHOME%\python.exe (\r
+  echo .\r
+  echo Could not locate Python in %PYTHONHOME%.\r
+  echo Please modify this script or install python [www.python.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\wx-*-unicode (\r
+  echo .\r
+  echo Could not locate wxPython in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.wxpython.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\py2exe (\r
+  echo .\r
+  echo Could not locate py2exe in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.py2exe.org]\r
+  exit /b\r
+)\r
+\r
+REM ----- Check for NSIS installer\r
+\r
+IF NOT EXIST %NSIS% (\r
+  echo .\r
+  echo Could not locate the NSIS installer at %NSIS%.\r
+  echo Please modify this script or install NSIS [nsis.sf.net]\r
+  exit /b\r
+)\r
+\r
+REM ----- Clean up\r
+\r
+call clean.bat\r
+\r
+REM ----- Build\r
+\r
+REM Arno: When adding files here, make sure tribler.nsi actually\r
+REM packs them in the installer .EXE\r
+\r
+REM Diego: building the deepest dir we get all of them.\r
+mkdir dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+mkdir dist\installdir\bgprocess\%LIBRARYNAME%\Core\r
+\r
+%PYTHONHOME%\python.exe -O %LIBRARYNAME%\Plugin\Build\Win32\setupBGexe.py py2exe\r
+\r
+REM Arno: Move py2exe results to installdir\r
+move dist\*.* dist\installdir\bgprocess\r
+copy %LIBRARYNAME%\Images\SwarmPluginIcon.ico dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+xcopy vlc4plugin\* dist\installdir /E /I\r
+\r
+REM Riccardo:  move the files needed for the WebUI\r
+xcopy %LIBRARYNAME%\WebUI dist\installdir\bgprocess\%LIBRARYNAME%\WebUI /S /I\r
+del dist\installdir\bgprocess\%LIBRARYNAME%\WebUI\*.py\r
+\r
+REM Diego: replace vlc *.txt with P2P-Next License.txt\r
+del dist\installdir\*.txt\r
+type %LIBRARYNAME%\ns-LICENSE.txt %LIBRARYNAME%\binary-LICENSE-postfix.txt > %LIBRARYNAME%\binary-LICENSE.txt\r
+copy %LIBRARYNAME%\binary-LICENSE.txt dist\installdir\r
+REM NSSA Search API requires overlay\r
+copy %LIBRARYNAME%\schema_sdb_v*.sql dist\installdir\bgprocess\%LIBRARYNAME%\r
+copy %LIBRARYNAME%\Core\superpeer.txt dist\installdir\bgprocess\%LIBRARYNAME%\Core\r
+\r
+REM Diego: sign axvlc.dll\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "dist\installdir\activex\axvlc.dll"\r
+\r
+copy %LIBRARYNAME%\Plugin\Build\Win32\heading.bmp dist\installdir\r
+REM TODO Diego: manifest?\r
+copy %LIBRARYNAME%\Plugin\Build\Win32\swarmplugin.nsi dist\installdir\r
+REM copy %LIBRARYNAME%\Plugin\Build\Win32\swarmplugin.exe.manifest dist\installdir\r
+\r
+copy %PYTHONHOME%\Lib\site-packages\wx-2.8-msw-unicode\wx\msvcp71.dll dist\installdir\bgprocess\r
+\r
+copy reset*.bat dist\installdir\r
+\r
+\r
+cd dist\installdir\r
+\r
+REM Arno: Win7 gives popup if SwarmEngine is not signed\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer and Firefox" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" bgprocess\SwarmEngine.exe\r
+\r
+\r
+:makeinstaller\r
+%NSIS% swarmplugin.nsi\r
+\r
+move SwarmPlugin_*.exe ..\r
+cd ..\r
+REM Diego : sign SwarmPlugin*.exe\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer and Firefox" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlugin_*.exe"\r
+REM Arno: build .cab file. \r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\CabArc.Exe" -s 6144 n SwarmPlugin.cab ..\%LIBRARYNAME%\Plugin\Build\Win32\SwarmPlugin.inf\r
+REM Arno : sign SwarmPlugin*.cab\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer and Firefox" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlugin*.cab"\r
+cd ..\r
diff --git a/instrumentation/next-share/pluginmakedist_FX_only.bat b/instrumentation/next-share/pluginmakedist_FX_only.bat
new file mode 100644 (file)
index 0000000..61dec0e
--- /dev/null
@@ -0,0 +1,96 @@
+REM @echo off\r
+set LIBRARYNAME=BaseLib\r
+\r
+set PYTHONHOME=\Python254\r
+REM Arno: Add . to find our core (py 2.5)\r
+set PYTHONPATH=.;%PYTHONHOME%\r
+echo PYTHONPATH SET TO %PYTHONPATH%\r
+\r
+set NSIS="\Program Files\NSIS\makensis.exe"\r
+\r
+REM ----- Check for Python and essential site-packages\r
+\r
+IF NOT EXIST %PYTHONHOME%\python.exe (\r
+  echo .\r
+  echo Could not locate Python in %PYTHONHOME%.\r
+  echo Please modify this script or install python [www.python.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\wx-*-unicode (\r
+  echo .\r
+  echo Could not locate wxPython in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.wxpython.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\py2exe (\r
+  echo .\r
+  echo Could not locate py2exe in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.py2exe.org]\r
+  exit /b\r
+)\r
+\r
+REM ----- Check for NSIS installer\r
+\r
+IF NOT EXIST %NSIS% (\r
+  echo .\r
+  echo Could not locate the NSIS installer at %NSIS%.\r
+  echo Please modify this script or install NSIS [nsis.sf.net]\r
+  exit /b\r
+)\r
+\r
+REM ----- Clean up\r
+\r
+call clean.bat\r
+\r
+REM ----- Build\r
+\r
+REM Arno: When adding files here, make sure tribler.nsi actually\r
+REM packs them in the installer .EXE\r
+\r
+REM Diego: building the deepest dir we get all of them.\r
+mkdir dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+%PYTHONHOME%\python.exe -O %LIBRARYNAME%\Plugin\Build\Win32\setupBGexe.py py2exe\r
+\r
+REM Arno: Move py2exe results to installdir\r
+move dist\*.* dist\installdir\bgprocess\r
+copy %LIBRARYNAME%\Images\SwarmPluginIcon.ico dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+xcopy vlc4plugin\* dist\installdir /E /I\r
+\r
+REM Riccardo:  move the files needed for the WebUI\r
+xcopy %LIBRARYNAME%\WebUI dist\installdir\bgprocess\%LIBRARYNAME%\WebUI /S /I\r
+del dist\installdir\bgprocess\%LIBRARYNAME%\WebUI\*.py\r
+\r
+REM Diego: replace vlc *.txt with P2P-Next License.txt\r
+del dist\installdir\*.txt\r
+type %LIBRARYNAME%\ns-LICENSE.txt %LIBRARYNAME%\binary-LICENSE-postfix.txt > %LIBRARYNAME%\binary-LICENSE.txt\r
+copy %LIBRARYNAME%\binary-LICENSE.txt dist\installdir\r
+\r
+copy %LIBRARYNAME%\Plugin\Build\Win32\heading.bmp dist\installdir\r
+REM TODO Diego: manifest?\r
+copy %LIBRARYNAME%\Plugin\Build\Win32\swarmplugin_FX_only.nsi dist\installdir\r
+REM copy %LIBRARYNAME%\Plugin\Build\Win32\swarmplugin.exe.manifest dist\installdir\r
+\r
+copy %PYTHONHOME%\Lib\site-packages\wx-2.8-msw-unicode\wx\msvcp71.dll dist\installdir\bgprocess\r
+\r
+copy reset*.bat dist\installdir\r
+\r
+\r
+cd dist\installdir\r
+\r
+REM Arno: Win7 gives popup if SwarmEngine is not signed\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer and Firefox" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" bgprocess\SwarmEngine.exe\r
+\r
+\r
+:makeinstaller\r
+%NSIS% swarmplugin_FX_only.nsi\r
+\r
+rename SwarmPlugin_*.exe SwarmPlugin_FX_*.exe\r
+move SwarmPlugin_FX_*.exe ..\r
+cd ..\r
+REM Diego : sign SwarmPlugin_*.exe\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Firefox" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlugin_FX*.exe"\r
+cd ..\r
diff --git a/instrumentation/next-share/pluginmakedist_IE_only.bat b/instrumentation/next-share/pluginmakedist_IE_only.bat
new file mode 100644 (file)
index 0000000..3d097f0
--- /dev/null
@@ -0,0 +1,103 @@
+REM @echo off\r
+set LIBRARYNAME=BaseLib\r
+\r
+set PYTHONHOME=\Python254\r
+REM Arno: Add . to find our core (py 2.5)\r
+set PYTHONPATH=.;%PYTHONHOME%\r
+echo PYTHONPATH SET TO %PYTHONPATH%\r
+\r
+set NSIS="\Program Files\NSIS\makensis.exe"\r
+\r
+REM ----- Check for Python and essential site-packages\r
+\r
+IF NOT EXIST %PYTHONHOME%\python.exe (\r
+  echo .\r
+  echo Could not locate Python in %PYTHONHOME%.\r
+  echo Please modify this script or install python [www.python.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\wx-*-unicode (\r
+  echo .\r
+  echo Could not locate wxPython in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.wxpython.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\py2exe (\r
+  echo .\r
+  echo Could not locate py2exe in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.py2exe.org]\r
+  exit /b\r
+)\r
+\r
+REM ----- Check for NSIS installer\r
+\r
+IF NOT EXIST %NSIS% (\r
+  echo .\r
+  echo Could not locate the NSIS installer at %NSIS%.\r
+  echo Please modify this script or install NSIS [nsis.sf.net]\r
+  exit /b\r
+)\r
+\r
+REM ----- Clean up\r
+\r
+call clean.bat\r
+\r
+REM ----- Build\r
+\r
+REM Arno: When adding files here, make sure tribler.nsi actually\r
+REM packs them in the installer .EXE\r
+\r
+REM Diego: building the deepest dir we get all of them.\r
+mkdir dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+%PYTHONHOME%\python.exe -O %LIBRARYNAME%\Plugin\Build\Win32\setupBGexe.py py2exe\r
+\r
+REM Arno: Move py2exe results to installdir\r
+move dist\*.* dist\installdir\bgprocess\r
+copy %LIBRARYNAME%\Images\SwarmPluginIcon.ico dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+xcopy vlc4plugin\* dist\installdir /E /I\r
+\r
+REM Riccardo:  move the files needed for the WebUI\r
+xcopy %LIBRARYNAME%\WebUI dist\installdir\bgprocess\%LIBRARYNAME%\WebUI /S /I\r
+del dist\installdir\bgprocess\%LIBRARYNAME%\WebUI\*.py\r
+\r
+REM Diego: replace vlc *.txt with P2P-Next License.txt\r
+del dist\installdir\*.txt\r
+type %LIBRARYNAME%\ns-LICENSE.txt %LIBRARYNAME%\binary-LICENSE-postfix.txt > %LIBRARYNAME%\binary-LICENSE.txt\r
+copy %LIBRARYNAME%\binary-LICENSE.txt dist\installdir\r
+\r
+REM Diego: sign axvlc.dll\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "dist\installdir\activex\axvlc.dll"\r
+\r
+copy %LIBRARYNAME%\Plugin\Build\Win32\heading.bmp dist\installdir\r
+REM TODO Diego: manifest?\r
+copy %LIBRARYNAME%\Plugin\Build\Win32\swarmplugin_IE_only.nsi dist\installdir\r
+REM copy %LIBRARYNAME%\Plugin\Build\Win32\swarmplugin.exe.manifest dist\installdir\r
+\r
+copy %PYTHONHOME%\Lib\site-packages\wx-2.8-msw-unicode\wx\msvcp71.dll dist\installdir\bgprocess\r
+\r
+copy reset*.bat dist\installdir\r
+\r
+cd dist\installdir\r
+\r
+REM Arno: Win7 gives popup if SwarmEngine is not signed\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer and Firefox" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" bgprocess\SwarmEngine.exe\r
+\r
+\r
+:makeinstaller\r
+%NSIS% swarmplugin_IE_only.nsi\r
+\r
+rename SwarmPlugin_*.exe SwarmPlugin_IE_*.exe\r
+move SwarmPlugin_IE*.exe ..\r
+cd ..\r
+REM Diego : sign SwarmPlugin_*.exe\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlugin_IE*.exe"\r
+REM Arno: build .cab file. \r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\CabArc.Exe" -s 6144 n SwarmPlugin_IE.cab ..\%LIBRARYNAME%\Plugin\Build\Win32\SwarmPlugin_IE.inf\r
+REM Arno : sign SwarmPlugin*.cab\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlugin for Internet Explorer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlugin_IE*.cab"\r
+\r
+cd ..\r
diff --git a/instrumentation/next-share/reset-keepid.bat b/instrumentation/next-share/reset-keepid.bat
new file mode 100644 (file)
index 0000000..3ce9430
--- /dev/null
@@ -0,0 +1,82 @@
+\r
+ver | find "Version 6." > nul\r
+if %ERRORLEVEL% == 0 goto IFDEFVISTA\r
+REM No quotes around this, otherwise we have double in the *DIR vars\r
+set APPDIR=%APPDIR%\r
+goto general\r
+\r
+:IFDEFVISTA\r
+set APPDIR=AppData\Roaming\r
+goto general\r
+\r
+:general\r
+\r
+\r
+set TDIR="%USERPROFILE%\%APPDIR%\.Tribler"\r
+set DDIR="%USERPROFILE%\Desktop\TriblerDownloads"\r
+del %TDIR%\torrent*.* /S /F /Q\r
+REM rmdiriver us from Windows, *.* apparently does not include the following:\r
+rmdir %TDIR%\torrent /S /Q\r
+rmdir %TDIR%\torrent2 /S /Q\r
+rmdir %TDIR%\torrentcache /S /Q\r
+rmdir %TDIR%\torrentinfo /S /Q\r
+rmdir %TDIR%\datacache /S /Q\r
+rmdir %TDIR%\piececache /S /Q\r
+rmdir %TDIR%\bsddb /S /Q\r
+rmdir %TDIR%\sqlite /S /Q\r
+rmdir %TDIR%\subscriptions /S /Q\r
+rmdir %TDIR%\icons /S /Q\r
+rmdir %TDIR%\itracker /S /Q\r
+rmdir %TDIR%\dlcheckpoints /S /Q\r
+rmdir %TDIR%\downloads /S /Q\r
+rmdir %TDIR%\collected_torrent_files /S /Q\r
+\r
+del %TDIR%\routing*.*\r
+del %TDIR%\abc.conf\r
+del %TDIR%\*.pickle\r
+\rREM Remove downloads\r
+rmdir %DDIR% /S /Q\r
+\r
+REM SwarmPlayer\r
+set TDIR="%USERPROFILE%\%APPDIR%\.SwarmPlayer"\r
+del %TDIR%\torrent*.* /S /F /Q\r
+REM rmdiriver us from Windows, *.* apparently does not include the following:\r
+rmdir %TDIR%\torrent /S /Q\r
+rmdir %TDIR%\torrent2 /S /Q\r
+rmdir %TDIR%\torrentcache /S /Q\r
+rmdir %TDIR%\torrentinfo /S /Q\r
+rmdir %TDIR%\datacache /S /Q\r
+rmdir %TDIR%\piececache /S /Q\r
+rmdir %TDIR%\bsddb /S /Q\r
+rmdir %TDIR%\subscriptions /S /Q\r
+rmdir %TDIR%\icons /S /Q\r
+rmdir %TDIR%\itracker /S /Q\r
+rmdir %TDIR%\dlcheckpoints /S /Q\r
+rmdir %TDIR%\downloads /S /Q\r
+\r
+del %TDIR%\routing*.*\r
+del %TDIR%\abc.conf\r
+del %TDIR%\*.pickle\r
+\r\r
+\r
+REM SwarmPlugin\r
+set TDIR="%USERPROFILE%\%APPDIR%\.SwarmPlugin"\r
+del %TDIR%\torrent*.* /S /F /Q\r
+REM rmdiriver us from Windows, *.* apparently does not include the following:\r
+rmdir %TDIR%\torrent /S /Q\r
+rmdir %TDIR%\torrent2 /S /Q\r
+rmdir %TDIR%\torrentcache /S /Q\r
+rmdir %TDIR%\torrentinfo /S /Q\r
+rmdir %TDIR%\datacache /S /Q\r
+rmdir %TDIR%\piececache /S /Q\r
+rmdir %TDIR%\bsddb /S /Q\r
+rmdir %TDIR%\subscriptions /S /Q\r
+rmdir %TDIR%\icons /S /Q\r
+rmdir %TDIR%\itracker /S /Q\r
+rmdir %TDIR%\dlcheckpoints /S /Q\r
+rmdir %TDIR%\downloads /S /Q\r
+\r
+del %TDIR%\routing*.*\r
+del %TDIR%\abc.conf\r
+del %TDIR%\*.pickle\r
+\r
diff --git a/instrumentation/next-share/reset.bat b/instrumentation/next-share/reset.bat
new file mode 100644 (file)
index 0000000..1cdd080
--- /dev/null
@@ -0,0 +1,24 @@
+\r
+ver | find "Version 6." > nul\r
+if %ERRORLEVEL% == 0 goto IFDEFVISTA\r
+REM No quotes around this, otherwise we have double in the *DIR vars\r
+set APPDIR=Application Data\r
+goto general\r
+\r
+:IFDEFVISTA\r
+set APPDIR=AppData\Roaming\r
+goto general\r
+\r
+:general\r
+set TDIR="%USERPROFILE%\%APPDIR%\.Tribler"\r
+set DDIR="%USERPROFILE%\Desktop\TriblerDownloads"\r
+rmdir %TDIR% /S /Q\r
+rmdir %DDIR% /S /Q\r
+\r
+REM SwarmPlayer\r
+set TDIR="%USERPROFILE%\%APPDIR%\.SwarmPlayer"\r
+rmdir %TDIR% /S /Q\r
+\r
+REM SwarmPlugin\r
+set TDIR="%USERPROFILE%\%APPDIR%\.SwarmPlugin"\r
+rmdir %TDIR% /S /Q\r
diff --git a/instrumentation/next-share/vlc-1.0.5-swarmplugin-switch-kcc-src-aug2010-renum110-r16968.patch b/instrumentation/next-share/vlc-1.0.5-swarmplugin-switch-kcc-src-aug2010-renum110-r16968.patch
new file mode 100644 (file)
index 0000000..4a93672
--- /dev/null
@@ -0,0 +1,7177 @@
+diff -rbNU 3 vlc-1.0.5/bindings/python/arno-compile-mingw32.sh d10-02-02-tstreamplaylist-p10/bindings/python/arno-compile-mingw32.sh
+--- vlc-1.0.5/bindings/python/arno-compile-mingw32.sh  1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/arno-compile-mingw32.sh      2009-08-26 16:02:21.000000000 +0200
+@@ -0,0 +1,20 @@
++#
++# VLC docs (http://wiki.videolan.org/Python_bindings) say that 
++#    python setup.py build --compiler=mingw32
++#
++# will compile the bindings. This currently doesn't work for me. 
++# Instead:
++#  1. Obtain Python 2.5.4 source, extract in one dir above vlc source
++#     dir. 
++#  2. Remove test for cross-compilation from test %zd printf() format
++#     support from Python's configure script.
++#  3. Configure Python for cross-compile:
++#  $ ./configure --host=mingw32 --with-gcc=i586-mingw32msvc-gcc --with-cxx=i586-mingw32msvc-g++ --enable-shared --with-threads
++# 4. Obtain python25.dll for Python 2.5.4 from \windows\system32 after
++#    Python 2.5.4 binary installer was run on a Windows box, put in 
++#    one dir above vlc source dir.
++# 5. Run below commands:
++#
++
++i586-mingw32msvc-gcc -mthreads -fno-strict-aliasing -g -O3 -Wall -Wstrict-prototypes  -I../.. -I./../../include -I/usr/win32/include -I../../../Python-2.5.4 -I../../../Python-2.5.4/Include -c ./vlc_module.c -o ../../bindings/python/./vlc_module.o -D_FILE_OFFSET_BITS=64 -D__USE_UNIX98 -D_LARGEFILE64_SOURCE -D_REENTRANT -D_THREAD_SAFE -D_GNU_SOURCE  -O3 -ffast-math -funroll-loops -mtune=pentium2 -g
++i586-mingw32msvc-g++  -Wsign-compare -Wall -mms-bitfields -pipe -shared ../../bindings/python/./vlc_module.o ../../src/.libs/libvlc.dll.a ../../../python25.dll -o ../../bindings/python/vlc.pyd -g -lkernel32 -L/usr/win32/lib -liconv /usr/win32/lib/libintl.a /usr/win32/lib/libiconv.a -lws2_32 -lnetapi32 -lwinmm -mwindows 
+diff -rbNU 3 vlc-1.0.5/bindings/python/debian/control d10-02-02-tstreamplaylist-p10/bindings/python/debian/control
+--- vlc-1.0.5/bindings/python/debian/control   1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/debian/control       2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,15 @@
++Source: python-vlc
++Section: contrib/libs
++Priority: optional
++Maintainer: Jason Scheunemann <jason.scheunemann@yahoo.com>
++Build-Depends: cdbs, debhelper (>= 7), python-central (>=0.5.6), python-setuptools, python-dev, libvlc-dev (>= 1.0.0)
++XS-Python-Version: 2.5
++Standards-Version: 3.8.0
++Homepage: http://wiki.videolan.org/PythonBinding
++
++Package: python-vlc
++Architecture: any
++XB-Python-Version: ${python:Versions}
++Depends: ${python:Depends}, ${misc:Depends}, vlc (>= 1.0.0)
++Description: VLC bindings for python.
++ VLC bindings for python.
+diff -rbNU 3 vlc-1.0.5/bindings/python/debian/rules d10-02-02-tstreamplaylist-p10/bindings/python/debian/rules
+--- vlc-1.0.5/bindings/python/debian/rules     1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/debian/rules 2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,8 @@
++#!/usr/bin/make -f
++
++DEB_PYTHON_SYSTEM=pycentral
++
++include /usr/share/cdbs/1/rules/debhelper.mk
++include /usr/share/cdbs/1/class/python-distutils.mk
++
++# Add here any variable or target overrides you need.
+diff -rbNU 3 vlc-1.0.5/bindings/python/GIT-VERSION d10-02-02-tstreamplaylist-p10/bindings/python/GIT-VERSION
+--- vlc-1.0.5/bindings/python/GIT-VERSION      1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/GIT-VERSION  2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,3 @@
++commit 2eeddcf4ae267ad5b2bbac5d518a69c51104cf68
++Author: Rémi Duraffort <ivoire@videolan.org>
++Date:   Tue Aug 25 12:28:49 2009 +0200
+diff -rbNU 3 vlc-1.0.5/bindings/python/MANIFEST.in d10-02-02-tstreamplaylist-p10/bindings/python/MANIFEST.in
+--- vlc-1.0.5/bindings/python/MANIFEST.in      1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/MANIFEST.in  2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,7 @@
++include vlcglue.h
++include vlc_mediacontrol.c
++include vlc_position.c
++include vlc_instance.c
++include vlc_mediaplayer.c
++include vlc_media.c
++include vlcwidget.py
+diff -rbNU 3 vlc-1.0.5/bindings/python/python-vlc.spec d10-02-02-tstreamplaylist-p10/bindings/python/python-vlc.spec
+--- vlc-1.0.5/bindings/python/python-vlc.spec  1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/python-vlc.spec      2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,71 @@
++%define name python-vlc
++%define version 1.0.0.90
++%define unmangled_version 1.0.0.90
++%define release 1
++%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
++
++Summary: VLC bindings for python.
++Name: %{name}
++Version: %{version}
++Release: %{release}
++Source0: %{name}-%{unmangled_version}.tar.gz
++License: GPL
++Group: Development/Libraries
++BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
++Prefix: %{_prefix}
++Vendor: Olivier Aubert <olivier.aubert@liris.cnrs.fr>
++Url: http://wiki.videolan.org/PythonBinding
++
++%description
++VLC bindings for python.
++
++This module provides bindings for the native libvlc API of the VLC
++video player. Documentation can be found on the VLC wiki :
++http://wiki.videolan.org/ExternalAPI
++
++This module also provides a MediaControl object, which implements an
++API inspired from the OMG Audio/Video Stream 1.0 specification.
++Documentation can be found on the VLC wiki :
++http://wiki.videolan.org/PythonBinding
++
++Example session (for the MediaControl API):
++
++import vlc
++mc=vlc.MediaControl(['--verbose', '1'])
++mc.playlist_add_item('movie.mpg')
++
++# Start the movie at 2000ms
++p=vlc.Position()
++p.origin=vlc.RelativePosition
++p.key=vlc.MediaTime
++p.value=2000
++mc.start(p)
++# which could be abbreviated as
++# mc.start(2000)
++# for the default conversion from int is to make a RelativePosition in MediaTime
++
++# Display some text during 2000ms
++mc.display_text('Some useless information', 0, 2000)
++
++# Pause the video
++mc.pause(0)
++
++# Get status information
++mc.get_stream_information()
++       
++
++%prep
++%setup -n %{name}-%{unmangled_version}
++
++%build
++env CFLAGS="$RPM_OPT_FLAGS" python setup.py build
++
++%install
++python setup.py install --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES
++
++%clean
++rm -rf $RPM_BUILD_ROOT
++
++%files -f INSTALLED_FILES
++%defattr(-,root,root)
++%{python_sitelib}/vlcwidget.pyo
+diff -rbNU 3 vlc-1.0.5/bindings/python/README d10-02-02-tstreamplaylist-p10/bindings/python/README
+--- vlc-1.0.5/bindings/python/README   1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/README       2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,21 @@
++* Testing
++
++If you try to compile the bindings from a development tree, you will
++have to specify the path for VLC modules, which cannot be guessed by
++the extension module (and are hardcoded for a standard installation,
++i.e. /usr/lib/vlc on *NIX)
++
++For vlc.MediaControl:
++mc=vlc.MediaControl('--plugin-path /path/to/vlc/directory'.split())
++
++For vlc.Instance:
++i=vlc.Instance('--plugin-path /path/to/vlc/directory'.split())
++
++* Skeleton generation (for developers of the module):
++
++** For method bindings:
++
++perl -n -e 'print "static PyObject *\nvlcInput_$2( PyObject *self, PyObject *args )\n{\n    libvlc_exception_t ex;\n    LIBVLC_TRY;\n    $1_$2( self->p_input, &ex);    LIBVLC_EXCEPT;\n    Py_INCREF( Py_None );\n    return Py_None;\n}\n\n" if /(libvlc_input)_(\w+)/ and ($2 ne "t")' ../../include/vlc/libvlc.h 
++
++** For method table:
++perl -n -e 'print "    { \"$2\", $1_$2, METH_VARARGS,\n      \"$2()\" },\n" if /^(vlcInstance)_(\w+)/' vlc_instance.c 
+diff -rbNU 3 vlc-1.0.5/bindings/python/setup.py d10-02-02-tstreamplaylist-p10/bindings/python/setup.py
+--- vlc-1.0.5/bindings/python/setup.py 1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/setup.py     2009-08-26 10:29:01.000000000 +0200
+@@ -0,0 +1,134 @@
++from distutils.core import setup, Extension
++import os
++import commands
++
++# Get build variables (buildir, srcdir)
++top_builddir = os.path.join( '..', '..' )
++os.environ['top_builddir'] = top_builddir
++
++# Determine the extra link args. Normally, vlc-config should take care
++# of this and return the right path values, from a development tree or
++# an installed version.
++libtool=False
++linkargs=[]
++d=os.path.join(top_builddir, 'src', '.libs')
++if os.path.exists(d):
++    # We are in a development tree, which was compiled with libtool
++    libtool=True
++    linkargs=[ '-L' + d ]
++else:
++    d=os.path.join(top_builddir, 'src')
++    # We are in a development tree, which was compiled without libtool
++    if os.path.exists(d):
++        linkargs=[ '-L' + d ]
++
++# For out-of-tree compilations
++srcdir = '.'
++
++def get_vlcconfig():
++    vlcconfig=None
++    for n in ( 'vlc-config',
++               os.path.join( top_builddir, 'vlc-config' )):
++        if os.path.exists(n):
++            vlcconfig=n
++            break
++    status, output = commands.getstatusoutput('pkg-config libvlc --exists')
++    if status == 0:
++        vlcconfig="pkg-config libvlc"
++    if vlcconfig is None:
++        print "*** Warning *** Cannot find vlc-config. Will try sane defaults."
++    elif os.sys.platform == 'win32':
++        # Win32 does not know how to invoke the shell itself.
++        vlcconfig="sh %s" % vlcconfig
++    return vlcconfig
++
++def get_vlc_version():
++    vlcconfig=get_vlcconfig()
++    if vlcconfig is None:
++        return ""
++    else:
++        version=os.popen('%s --modversion' % vlcconfig, 'r').readline().strip()
++        return version
++
++def get_cflags():
++    vlcconfig=get_vlcconfig()
++    if vlcconfig is None:
++        return []
++    else:
++        cflags=os.popen('%s --cflags ' % vlcconfig, 'r').readline().strip()
++        return cflags
++
++def get_ldflags():
++    vlcconfig=get_vlcconfig()
++    if vlcconfig is None:
++        return [ '-lvlc' ]
++    else:
++      ldflags = []
++      if os.sys.platform == 'darwin':
++          ldflags = "-read_only_relocs warning".split()
++        ldflags.extend(os.popen('%s --libs ' % vlcconfig,
++                                'r').readline().rstrip().split())
++      if os.sys.platform == 'darwin':
++          ldflags.append('-lstdc++')
++        return ldflags
++
++#source_files = [ 'vlc_module.c', 'vlc_mediacontrol.c',
++#                 'vlc_position.c', 'vlc_instance.c', 'vlc_input.c' ]
++source_files = [ 'vlc_module.c' ]
++
++# To compile in a local vlc tree
++vlclocal = Extension('vlc',
++                     sources = [ os.path.join( srcdir, f ) for f in source_files ],
++                     include_dirs = [ top_builddir, os.path.join(top_builddir,"include"),
++                                      srcdir ],
++                     extra_objects = [ ],
++                     extra_compile_args = get_cflags(),
++                     extra_link_args = linkargs + get_ldflags(),
++                     )
++
++setup (name = 'python-vlc',
++       version = '1.0.0.90',
++       author='Olivier Aubert',
++       author_email='olivier.aubert@liris.cnrs.fr',
++       url='http://wiki.videolan.org/PythonBinding',
++       py_modules=['vlcwidget'],
++       keywords = [ 'vlc', 'video' ],
++       license = "GPL",
++       description = "VLC bindings for python.",
++       long_description = """VLC bindings for python.
++
++This module provides bindings for the native libvlc API of the VLC
++video player. Documentation can be found on the VLC wiki :
++http://wiki.videolan.org/ExternalAPI
++
++This module also provides a MediaControl object, which implements an
++API inspired from the OMG Audio/Video Stream 1.0 specification.
++Documentation can be found on the VLC wiki :
++http://wiki.videolan.org/PythonBinding
++
++Example session (for the MediaControl API):
++
++import vlc
++mc=vlc.MediaControl(['--verbose', '1'])
++mc.playlist_add_item('movie.mpg')
++
++# Start the movie at 2000ms
++p=vlc.Position()
++p.origin=vlc.RelativePosition
++p.key=vlc.MediaTime
++p.value=2000
++mc.start(p)
++# which could be abbreviated as
++# mc.start(2000)
++# for the default conversion from int is to make a RelativePosition in MediaTime
++
++# Display some text during 2000ms
++mc.display_text('Some useless information', 0, 2000)
++
++# Pause the video
++mc.pause(0)
++
++# Get status information
++mc.get_stream_information()
++       """,
++       ext_modules = [ vlclocal ])
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlcglue.h d10-02-02-tstreamplaylist-p10/bindings/python/vlcglue.h
+--- vlc-1.0.5/bindings/python/vlcglue.h        1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlcglue.h    2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,162 @@
++/*****************************************************************************
++ * vlcglue.h: Main header for the Python binding
++ *****************************************************************************
++ * Copyright (C) 1998-2004 the VideoLAN team
++ * $Id$
++ *
++ * Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
++ *          Clément Stenac <zorglub@videolan.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
++ *****************************************************************************/
++#ifndef _VLCGLUE_H
++#define _VLCGLUE_H 1
++
++#include <Python.h>
++#include "structmember.h"
++
++#include <stdio.h>
++#include <vlc/vlc.h>
++#include <vlc/libvlc.h>
++#include <vlc/mediacontrol_structures.h>
++#include <vlc/mediacontrol.h>
++
++/* Python 2.5 64-bit support compatibility define */
++#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
++typedef int Py_ssize_t;
++#define PY_SSIZE_T_MAX INT_MAX
++#define PY_SSIZE_T_MIN INT_MIN
++#endif
++
++
++/**********************************************************************
++ * Exceptions handling
++ **********************************************************************/
++
++#define MC_TRY exception=mediacontrol_exception_create( )
++
++#define MC_EXCEPT  \
++  if( exception && exception->code ) { \
++    PyObject *py_exc = MediaControl_InternalException; \
++    switch( exception->code ) { \
++    case mediacontrol_InternalException: \
++      py_exc = MediaControl_InternalException; \
++      break; \
++    case mediacontrol_PlaylistException: \
++      py_exc = MediaControl_PlaylistException; \
++      break; \
++    case mediacontrol_InvalidPosition: \
++      py_exc = MediaControl_InvalidPosition; \
++      break; \
++    case mediacontrol_PositionKeyNotSupported: \
++      py_exc = MediaControl_PositionKeyNotSupported; \
++      break; \
++    case mediacontrol_PositionOriginNotSupported: \
++      py_exc = MediaControl_PositionOriginNotSupported; \
++      break; \
++    } \
++    PyErr_SetString( py_exc, exception->message ); \
++    mediacontrol_exception_free( exception ); \
++    return NULL; \
++  } else if( exception ) { mediacontrol_exception_free( exception ); }
++
++PyObject *MediaControl_InternalException;
++PyObject *MediaControl_PositionKeyNotSupported;
++PyObject *MediaControl_PositionOriginNotSupported;
++PyObject *MediaControl_InvalidPosition;
++PyObject *MediaControl_PlaylistException;
++PyObject *vlc_Exception;
++
++/**********************************************************************
++ * vlc.Instance Object
++ **********************************************************************/
++typedef struct
++{
++    PyObject_HEAD
++    libvlc_instance_t* p_instance;
++} vlcInstance;
++
++/**********************************************************************
++ * MediaControl Object
++ **********************************************************************/
++typedef struct
++{
++    PyObject_HEAD
++    mediacontrol_Instance* mc;
++    vlcInstance *vlc_instance;
++} MediaControl;
++
++/**********************************************************************
++ * Position Object
++ **********************************************************************/
++typedef struct
++{
++    PyObject_HEAD
++    int origin;
++    int key;
++    PY_LONG_LONG value;
++} PyPosition;
++
++/**********************************************************************
++ * vlc.MediaPlayer Object
++ **********************************************************************/
++typedef struct
++{
++    PyObject_HEAD
++    libvlc_media_player_t* p_mp;
++} vlcMediaPlayer;
++
++/**********************************************************************
++ * vlc.Media Object
++ **********************************************************************/
++typedef struct
++{
++    PyObject_HEAD
++    libvlc_media_t* p_media;
++} vlcMedia;
++
++/* Forward declarations */
++staticforward PyTypeObject MediaControl_Type;
++staticforward PyTypeObject PyPosition_Type;
++staticforward PyTypeObject vlcInstance_Type;
++staticforward PyTypeObject vlcMediaPlayer_Type;
++staticforward PyTypeObject vlcMedia_Type;
++
++#define LIBVLC_INSTANCE(self) (((vlcInstance*)self)->p_instance)
++#define LIBVLC_MEDIAPLAYER(self) (((vlcMediaPlayer*)self)->p_mp)
++#define LIBVLC_MEDIA(self) (((vlcMedia*)self)->p_media)
++#define LIBVLC_MC(self) (((MediaControl*)self)->mc)
++
++#define LIBVLC_TRY libvlc_exception_init( &ex );
++
++#define LIBVLC_EXCEPT if( libvlc_exception_raised( &ex ) ) { \
++    PyObject *py_exc = vlc_Exception; \
++    PyErr_SetString( py_exc, libvlc_exception_get_message( &ex ) ); \
++    return NULL; \
++  }
++
++mediacontrol_PositionKey positionKey_py_to_c( PyObject * py_key );
++mediacontrol_PositionOrigin positionOrigin_py_to_c( PyObject * py_origin );
++mediacontrol_Position * position_py_to_c( PyObject * py_position );
++PyPosition * position_c_to_py( mediacontrol_Position * position );
++
++/* Long long conversion on Mac os X/ppc */
++#if defined (__ppc__) || defined(__ppc64__)
++#define ntohll(x) ((long long) x >> 64)
++#else
++#define ntohll(x) (x)
++#endif
++
++#endif
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlc_instance.c d10-02-02-tstreamplaylist-p10/bindings/python/vlc_instance.c
+--- vlc-1.0.5/bindings/python/vlc_instance.c   1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlc_instance.c       2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,611 @@
++/*****************************************************************************
++ * vlc_instance.c: vlc.Instance binding
++ *****************************************************************************
++ * Copyright (C) 2006,2007,2008,2009 the VideoLAN team
++ * $Id$
++ *
++ * Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
++ *****************************************************************************/
++#include "vlcglue.h"
++
++/* Helper functions */
++static Py_ssize_t
++pyoptions_to_args(PyObject *py_options, char*** pppsz_args)
++{
++    Py_ssize_t i_size;
++    Py_ssize_t  i_index;
++
++    Py_INCREF( py_options );
++    if( ! PySequence_Check( py_options ) )
++    {
++        PyErr_SetString( PyExc_TypeError, "Parameter must be a sequence." );
++        return -1;
++    }
++    i_size = PySequence_Size( py_options );
++
++    char **ppsz_args = *pppsz_args = malloc( ( i_size + 1 ) * sizeof( char * ) );
++
++    if( ! ppsz_args )
++    {
++        PyErr_SetString( PyExc_MemoryError, "Out of memory" );
++        return -1;
++    }
++
++    for ( i_index = 0; i_index < i_size; i_index++ )
++    {
++        ppsz_args[i_index] =
++            strdup( PyString_AsString( PyObject_Str(
++                                           PySequence_GetItem( py_options,
++                                                               i_index ) ) ) );
++    }
++    ppsz_args[i_size] = NULL;
++    Py_DECREF( py_options );
++    return i_size;
++}
++
++static void
++free_args(int i_size, char** ppsz_args)
++{
++    int i_index;
++
++    for ( i_index = 0; i_index < i_size; i_index++ )
++        free( ppsz_args[i_index] );
++    free( ppsz_args );
++}
++
++/*****************************************************************************
++ * Instance object implementation
++ *****************************************************************************/
++
++static PyObject *
++vlcInstance_new( PyTypeObject *type, PyObject *args, PyObject *kwds )
++{
++    vlcInstance *self;
++    libvlc_exception_t ex;
++    PyObject* py_list = NULL;
++    char** ppsz_args = NULL;
++    int i_size = 0;
++
++    fprintf(stderr, "Instantiating\n");
++    if( PyArg_ParseTuple( args, "|O", &py_list ) )
++    {
++        i_size = pyoptions_to_args( py_list, &ppsz_args );
++        if( i_size < 0 )
++            return NULL;
++    }
++    else
++    {
++        /* No arguments were given. Clear the exception raised
++           by PyArg_ParseTuple. */
++        PyErr_Clear( );
++    }
++
++    self = PyObject_New( vlcInstance, &vlcInstance_Type );
++
++    Py_BEGIN_ALLOW_THREADS
++    LIBVLC_TRY
++    LIBVLC_INSTANCE(self) = libvlc_new( i_size, ppsz_args, &ex );
++    free_args( i_size, ppsz_args );
++    LIBVLC_EXCEPT
++    Py_END_ALLOW_THREADS
++
++    Py_INCREF( self );
++    return ( PyObject * )self;
++}
++
++static void
++vlcInstance_dealloc( PyObject *self )
++{
++    libvlc_release( LIBVLC_INSTANCE(self) );
++    PyObject_DEL( self );
++}
++
++static PyObject *
++vlcInstance_get_vlc_id( PyObject *self, PyObject *args )
++{
++    return Py_BuildValue( "i", libvlc_get_vlc_id( LIBVLC_INSTANCE(self) ) );
++}
++
++static PyObject *
++vlcInstance_new_media_player( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    libvlc_media_player_t *p_mp;
++    vlcMediaPlayer *p_ret;
++
++    LIBVLC_TRY;
++    p_mp = libvlc_media_player_new( LIBVLC_INSTANCE(self), &ex );
++    LIBVLC_EXCEPT;
++
++    p_ret = PyObject_New( vlcMediaPlayer, &vlcMediaPlayer_Type );
++    p_ret->p_mp = p_mp;
++    Py_INCREF( p_ret ); /* Ah bon ? */
++    return ( PyObject * )p_ret;
++}
++
++static PyObject *
++vlcInstance_audio_toggle_mute( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    LIBVLC_TRY;
++    libvlc_audio_toggle_mute( LIBVLC_INSTANCE(self), &ex );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_audio_get_mute( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_audio_get_mute( LIBVLC_INSTANCE(self), &ex );
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcInstance_audio_set_mute( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_mute;
++
++    if( !PyArg_ParseTuple( args, "i", &i_mute ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_audio_set_mute( LIBVLC_INSTANCE(self), i_mute, &ex );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_audio_get_volume( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_audio_get_volume( LIBVLC_INSTANCE(self), &ex );
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcInstance_audio_set_volume( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_volume;
++
++    if( !PyArg_ParseTuple( args, "i", &i_volume ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_audio_set_volume( LIBVLC_INSTANCE(self), i_volume, &ex );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_audio_get_channel( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_audio_get_channel( LIBVLC_INSTANCE(self), &ex );
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcInstance_audio_set_channel( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_channel;
++
++    if( !PyArg_ParseTuple( args, "i", &i_channel ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_audio_set_channel( LIBVLC_INSTANCE(self), i_channel, &ex );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++/* vlm_add_broadcast : name, input MRL, output MRL
++   Keywords: options, enable, loop */
++static PyObject *
++vlcInstance_vlm_add_broadcast( PyObject *self, PyObject *args, PyObject *kwds )
++{
++    libvlc_exception_t ex;
++    static char *kwlist[] = { "name", "input", "output",
++                              "options", "enable", "loop", NULL};
++    char* psz_name = NULL;
++    char* psz_input = NULL;
++    char* psz_output = NULL;
++    PyObject* py_options = NULL;
++    int i_enable = 1;
++    int i_loop = 0;
++    int i_size = 0;
++    char** ppsz_args = NULL;
++
++    if( !PyArg_ParseTupleAndKeywords( args, kwds, "sss|Oii", kwlist,
++                                      &psz_name,
++                      &psz_input, &psz_output,
++                      &py_options, &i_enable, &i_loop ) )
++        return NULL;
++
++    if( py_options )
++    {
++        i_size = pyoptions_to_args( py_options, &ppsz_args );
++    }
++
++    LIBVLC_TRY;
++    libvlc_vlm_add_broadcast( LIBVLC_INSTANCE(self),
++                              psz_name, psz_input, psz_output,
++                              i_size, ppsz_args, i_enable, i_loop, &ex);
++    free_args( i_size, ppsz_args );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_del_media( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_name ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_del_media( LIBVLC_INSTANCE(self), psz_name, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_set_enabled( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++    int i_enabled;
++
++    if( !PyArg_ParseTuple( args, "si", &psz_name, &i_enabled ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_set_enabled( LIBVLC_INSTANCE(self), psz_name, i_enabled, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_set_output( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++    char* psz_output;
++
++    if( !PyArg_ParseTuple( args, "ss", &psz_name, &psz_output ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_set_output( LIBVLC_INSTANCE(self), psz_name, psz_output, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_set_input( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++    char* psz_input;
++
++    if( !PyArg_ParseTuple( args, "ss", &psz_name, &psz_input ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_set_input( LIBVLC_INSTANCE(self), psz_name, psz_input, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_add_input( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++    char* psz_input;
++
++    if( !PyArg_ParseTuple( args, "ss", &psz_name, &psz_input ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_add_input( LIBVLC_INSTANCE(self), psz_name, psz_input, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_set_loop( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++    int i_loop;
++
++    if( !PyArg_ParseTuple( args, "si", &psz_name, &i_loop ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_set_loop( LIBVLC_INSTANCE(self), psz_name, i_loop, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_change_media( PyObject *self, PyObject *args, PyObject *kwds )
++{
++    libvlc_exception_t ex;
++    static char *kwlist[] = { "name", "input", "output",
++                              "options", "enable", "loop", NULL};
++    char* psz_name = NULL;
++    char* psz_input = NULL;
++    char* psz_output = NULL;
++    PyObject* py_options = NULL;
++    int i_enable = 1;
++    int i_loop = 0;
++    int i_size = 0;
++    char** ppsz_args = NULL;
++
++    if( !PyArg_ParseTupleAndKeywords( args, kwds, "sss|Oii", kwlist,
++                                      &psz_name,
++                      &psz_input, &psz_output,
++                      &py_options, &i_enable, &i_loop ) )
++        return NULL;
++
++    if( py_options )
++    {
++        i_size = pyoptions_to_args( py_options, &ppsz_args );
++    }
++
++    LIBVLC_TRY;
++    libvlc_vlm_change_media( LIBVLC_INSTANCE(self),
++                              psz_name, psz_input, psz_output,
++                              i_size, ppsz_args, i_enable, i_loop, &ex);
++    free_args( i_size, ppsz_args );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_play_media( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_name ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_play_media( LIBVLC_INSTANCE(self), psz_name, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_stop_media( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_name ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_stop_media( LIBVLC_INSTANCE(self), psz_name, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_pause_media( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_name ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_pause_media( LIBVLC_INSTANCE(self), psz_name, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_seek_media( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++    float f_percentage;
++
++    if( !PyArg_ParseTuple( args, "sf", &psz_name, &f_percentage ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_vlm_seek_media( LIBVLC_INSTANCE(self), psz_name, f_percentage, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcInstance_vlm_show_media( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_name;
++    char* psz_ret;
++    PyObject* o_ret;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_name ) )
++        return NULL;
++    LIBVLC_TRY;
++    psz_ret = libvlc_vlm_show_media( LIBVLC_INSTANCE(self), psz_name, &ex );
++    LIBVLC_EXCEPT;
++    o_ret = Py_BuildValue( "s", psz_ret );
++    free( psz_ret );
++    return o_ret;
++}
++
++static PyObject *
++vlcInstance_media_new( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    libvlc_media_t *p_media;
++    char* psz_mrl = NULL;
++    vlcMedia *p_ret;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_mrl ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    p_media = libvlc_media_new( LIBVLC_INSTANCE(self), psz_mrl, &ex );
++    LIBVLC_EXCEPT;
++
++    p_ret = PyObject_New( vlcMedia, &vlcMedia_Type );
++    p_ret->p_media = p_media;
++    Py_INCREF( p_ret ); /* Ah bon ? */
++    return ( PyObject * )p_ret;
++}
++
++/* Method table */
++static PyMethodDef vlcInstance_methods[] =
++{
++    { "get_vlc_id", vlcInstance_get_vlc_id, METH_NOARGS,
++      "get_vlc_id( ) -> int        Get the instance id."},
++    { "audio_toggle_mute", vlcInstance_audio_toggle_mute, METH_NOARGS,
++      "audio_toggle_mute()         Toggle the mute state"},
++    { "audio_get_mute", vlcInstance_audio_get_mute, METH_NOARGS,
++      "audio_get_mute() -> int     Get the mute state"},
++    { "audio_set_mute", vlcInstance_audio_set_mute, METH_VARARGS,
++      "audio_set_mute(state=int)         Set the mute state"},
++    { "audio_get_volume", vlcInstance_audio_get_volume, METH_NOARGS,
++      "audio_get_volume() -> int   Get the audio volume"},
++    { "audio_set_volume", vlcInstance_audio_set_volume, METH_VARARGS,
++      "audio_set_volume(volume=int)       Set the audio volume"},
++    { "audio_get_channel", vlcInstance_audio_get_channel, METH_NOARGS,
++      "audio_get_channel() -> int  Get current audio channel" },
++    { "audio_set_channel", vlcInstance_audio_set_channel, METH_VARARGS,
++      "audio_set_channel(int)      Set current audio channel" },
++
++    { "media_new", vlcInstance_media_new, METH_VARARGS,
++      "media_new(str) -> object   Create a media object with the given mrl."},
++
++    { "mediaplayer_new", vlcInstance_new_media_player, METH_NOARGS,
++      "mediaplayer_new() -> object   Create a media player."},
++
++    { "vlm_add_broadcast", vlcInstance_vlm_add_broadcast, METH_VARARGS | METH_KEYWORDS,
++      "vlm_add_broadcast(name=str, input=str, output=str, options=list, enable=int, loop=int)   Add a new broadcast" },
++    { "vlm_del_media", vlcInstance_vlm_del_media, METH_VARARGS,
++      "vlm_del_media(name=str)    Delete a media" },
++    { "vlm_set_enabled", vlcInstance_vlm_set_enabled, METH_VARARGS,
++      "vlm_set_enabled(name=str, enabled=int)    Enable/disable a media" },
++    { "vlm_set_output", vlcInstance_vlm_set_output, METH_VARARGS,
++      "vlm_set_output(name=str, output=str)      Set the output" },
++    { "vlm_set_input", vlcInstance_vlm_set_input, METH_VARARGS,
++      "vlm_set_input(name=str, output=str)       Set the input" },
++    { "vlm_add_input", vlcInstance_vlm_add_input, METH_VARARGS,
++      "vlm_add_input(name=str, output=str)       Add a media's input MRL" },
++    { "vlm_set_loop", vlcInstance_vlm_set_loop, METH_VARARGS,
++      "vlm_set_loop(name=str, loop=int)          Change the looping value" },
++    { "vlm_change_media", vlcInstance_vlm_change_media, METH_VARARGS | METH_KEYWORDS,
++      "vlm_change_media(name=str, input=str, output=str, options=list, enable=int, loop=int)   Change the broadcast parameters" },
++    { "vlm_play_media", vlcInstance_vlm_play_media, METH_VARARGS,
++      "vlm_play_media(name=str)       Plays the named broadcast." },
++    { "vlm_stop_media", vlcInstance_vlm_stop_media, METH_VARARGS,
++      "vlm_stop_media(name=str)       Stops the named broadcast." },
++    { "vlm_pause_media", vlcInstance_vlm_pause_media, METH_VARARGS,
++      "vlm_pause_media(name=str)      Pauses the named broadcast." },
++    { "vlm_seek_media", vlcInstance_vlm_seek_media, METH_VARARGS,
++      "vlm_seek_media(name=str, percentage=float)  Seeks in the named broadcast." },
++    { "vlm_show_media", vlcInstance_vlm_show_media, METH_VARARGS,
++      "vlm_show_media(name=str)       Return information of the named broadcast." },
++
++    { NULL, NULL, 0, NULL },
++};
++
++static PyTypeObject vlcInstance_Type =
++{
++    PyObject_HEAD_INIT( NULL )
++    0,                          /*ob_size*/
++    "vlc.Instance",             /*tp_name*/
++    sizeof( vlcInstance_Type ), /*tp_basicsize*/
++    0,                          /*tp_itemsize*/
++    ( destructor )vlcInstance_dealloc,      /*tp_dealloc*/
++    0,                         /*tp_print*/
++    0,                         /*tp_getattr*/
++    0,                         /*tp_setattr*/
++    0,                         /*tp_compare*/
++    0,                         /*tp_repr*/
++    0,                         /*tp_as_number*/
++    0,                         /*tp_as_sequence*/
++    0,                         /*tp_as_mapping*/
++    0,                         /*tp_hash */
++    0,                         /*tp_call*/
++    0,                         /*tp_str*/
++    0,                         /*tp_getattro*/
++    0,                         /*tp_setattro*/
++    0,                         /*tp_as_buffer*/
++    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
++    "VLC Instance(args)",  /* tp_doc */
++    0,                     /* tp_traverse */
++    0,                     /* tp_clear */
++    0,                     /* tp_richcompare */
++    0,                     /* tp_weaklistoffset */
++    0,                     /* tp_iter */
++    0,                     /* tp_iternext */
++    vlcInstance_methods,             /* tp_methods */
++    0,             /* tp_members */
++    0,                         /* tp_getset */
++    0,                         /* tp_base */
++    0,                         /* tp_dict */
++    0,                         /* tp_descr_get */
++    0,                         /* tp_descr_set */
++    0,                         /* tp_dictoffset */
++    0,                         /* tp_init */
++    0,                         /* tp_alloc */
++    vlcInstance_new,          /* tp_new */
++};
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlc_media.c d10-02-02-tstreamplaylist-p10/bindings/python/vlc_media.c
+--- vlc-1.0.5/bindings/python/vlc_media.c      1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlc_media.c  2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,228 @@
++/*****************************************************************************
++ * vlc_media.c: vlc.Media binding
++ *****************************************************************************
++ * Copyright (C) 2007 the VideoLAN team
++ * $Id$
++ *
++ * Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
++ *****************************************************************************/
++#include "vlcglue.h"
++
++/***********************************************************************
++ * vlc.Media
++ ***********************************************************************/
++
++static PyObject *
++vlcMedia_new( PyTypeObject *type, PyObject *args, PyObject *kwds )
++{
++    fprintf(stderr, "vlcMedia_new called\n");
++    PyErr_SetString( PyExc_TypeError, "vlc.Media can be instantiated by itself. You should use vlc.Instance().media_new(mrl)." );
++    return NULL;
++}
++
++static void
++vlcMedia_dealloc( PyObject *self )
++{
++    libvlc_media_release( LIBVLC_MEDIA(self) );
++    PyObject_DEL( self );
++}
++
++static PyObject *
++vlcMedia_add_option( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_options = NULL;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_options ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_add_option( LIBVLC_MEDIA(self), psz_options, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMedia_get_mrl( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char * psz_mrl;
++    PyObject * o_ret;
++
++    LIBVLC_TRY;
++    psz_mrl = libvlc_media_get_mrl( LIBVLC_MEDIA(self), &ex);
++    LIBVLC_EXCEPT;
++
++    o_ret = Py_BuildValue( "s", psz_mrl );
++    free( psz_mrl );
++    return o_ret;
++}
++
++static PyObject *
++vlcMedia_get_state( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    libvlc_state_t i_state;
++
++    LIBVLC_TRY;
++    i_state = libvlc_media_get_state( LIBVLC_MEDIA(self), &ex);
++    LIBVLC_EXCEPT;
++    /* FIXME: return the defined state constant */
++    return Py_BuildValue( "i", i_state );
++}
++
++static PyObject *
++vlcMedia_get_duration( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    libvlc_time_t i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_get_duration( LIBVLC_MEDIA(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "L", i_ret );
++}
++
++static PyObject *
++vlcMedia_media_player_new( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    libvlc_media_player_t *p_mp;
++    vlcMediaPlayer *p_ret;
++
++    LIBVLC_TRY;
++    p_mp = libvlc_media_player_new_from_media( LIBVLC_MEDIA(self), &ex);
++    LIBVLC_EXCEPT;
++
++    p_ret = PyObject_New( vlcMediaPlayer, &vlcMediaPlayer_Type );
++    p_ret->p_mp = p_mp;
++    Py_INCREF( p_ret ); /* Ah bon ? */
++    return ( PyObject * )p_ret;
++}
++
++static PyObject *
++vlcMedia_is_preparsed( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_is_preparsed( LIBVLC_MEDIA(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret ); /* bugfix: "L" reads a long long from an int vararg (UB) */
++}
++
++static PyObject *
++vlcMedia_get_meta( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char * psz_meta = NULL;
++    char * psz_ret = NULL;
++    PyObject* o_ret;
++    int i_index = -1;
++    int i_loop = 0;
++    static const char * meta_names[] = { "Title", "Artist", "Genre", "Copyright", "Album", "TrackNumber", "Description", "Rating", "Date", "Setting", "URL", "Language", "NowPlaying", "Publisher", "EncodedBy", "ArtworkURL", "TrackID", NULL };
++
++    if( !PyArg_ParseTuple( args, "s", &psz_meta ) )
++        return NULL;
++    while( meta_names[i_loop] )
++    {
++        if( !strncmp(meta_names[i_loop], psz_meta, strlen(meta_names[i_loop])) )
++        {
++            i_index = i_loop;
++            break;
++        }
++        i_loop++;
++    }
++    if( i_index < 0 )
++    {
++        PyObject *py_exc = vlc_Exception;
++        PyErr_SetString( py_exc, "Unknown meta attribute" );
++        return NULL;
++    }
++
++    LIBVLC_TRY;
++    psz_ret = libvlc_media_get_meta( LIBVLC_MEDIA(self), i_index, &ex);
++    LIBVLC_EXCEPT;
++
++    o_ret = Py_BuildValue( "s", psz_ret );
++    free( psz_ret );
++    return o_ret;
++}
++
++static PyMethodDef vlcMedia_methods[] =
++{
++    { "add_option", vlcMedia_add_option, METH_VARARGS,
++      "add_option(str) Add an option to the media." },
++    { "get_mrl", vlcMedia_get_mrl, METH_VARARGS,
++      "get_mrl() -> str" },
++    { "get_state", vlcMedia_get_state, METH_VARARGS,
++      "get_state() -> int" },
++    { "get_duration", vlcMedia_get_duration, METH_VARARGS,
++      "get_duration() -> int" },
++    { "mediaplayer_new", vlcMedia_media_player_new, METH_VARARGS,
++      "mediaplayer_new() -> vlc.MediaPlayer   Create a MediaPlayer object from a Media" },
++    { "is_preparsed", vlcMedia_is_preparsed, METH_VARARGS,
++      "is_preparsed() -> int" },
++    { "get_meta", vlcMedia_get_meta, METH_VARARGS,
++      "get_meta(str) -> str   Read the meta of the media." },
++
++    { NULL }  /* Sentinel */
++};
++
++static PyTypeObject vlcMedia_Type =
++{
++    PyObject_HEAD_INIT( NULL )
++    0,                         /*ob_size*/
++    "vlc.Media",            /*tp_name*/
++    sizeof( vlcMedia ),        /*tp_basicsize: instance struct size, not the type object's*/
++    0,                         /*tp_itemsize*/
++    vlcMedia_dealloc, /*tp_dealloc*/
++    0,                         /*tp_print*/
++    0,                         /*tp_getattr*/
++    0,                         /*tp_setattr*/
++    0,                         /*tp_compare*/
++    0,                         /*tp_repr*/
++    0,                         /*tp_as_number*/
++    0,                         /*tp_as_sequence*/
++    0,                         /*tp_as_mapping*/
++    0,                         /*tp_hash */
++    0,                         /*tp_call*/
++    0,                         /*tp_str*/
++    0,                         /*tp_getattro*/
++    0,                         /*tp_setattro*/
++    0,                         /*tp_as_buffer*/
++    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
++    "vlc.Media object.",  /* tp_doc */
++    0,                        /* tp_traverse */
++    0,                        /* tp_clear */
++    0,                         /* tp_richcompare */
++    0,                         /* tp_weaklistoffset */
++    0,                         /* tp_iter */
++    0,                          /* tp_iternext */
++    vlcMedia_methods,          /* tp_methods */
++    0,                         /* tp_members */
++    0,                         /* tp_getset */
++    0,                         /* tp_base */
++    0,                         /* tp_dict */
++    0,                         /* tp_descr_get */
++    0,                         /* tp_descr_set */
++    0,                         /* tp_dictoffset */
++    0,                         /* tp_init */
++    0,                         /* tp_alloc */
++    vlcMedia_new,              /* tp_new */
++};
++
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlc_mediacontrol.c d10-02-02-tstreamplaylist-p10/bindings/python/vlc_mediacontrol.c
+--- vlc-1.0.5/bindings/python/vlc_mediacontrol.c       1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlc_mediacontrol.c   2009-08-26 10:29:01.000000000 +0200
+@@ -0,0 +1,760 @@
++/*****************************************************************************
++ * vlc_mediacontrol.c: vlc.MediaControl binding
++ *****************************************************************************
++ * Copyright (C) 2006,2007,2008,2009 the VideoLAN team
++ * $Id$
++ *
++ * Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
++ *****************************************************************************/
++#include "vlcglue.h"
++
++/*****************************************************************************
++ * VLC MediaControl object implementation
++ *****************************************************************************/
++
++/* The MediaControl constructor takes either an existing vlc.Instance or a
++   list of strings */
++static PyObject *
++MediaControl_new( PyTypeObject *type, PyObject *args, PyObject *kwds )
++{
++    MediaControl *self;
++    mediacontrol_Exception *exception = NULL;
++    PyObject* py_param = NULL;
++    char** ppsz_args = NULL;
++    libvlc_instance_t* p_instance = NULL;
++    Py_ssize_t i_size = 0;
++
++    self = PyObject_New( MediaControl, &MediaControl_Type );
++
++    fprintf (stderr, "Instantiating mediacontrol\n");
++    if( PyArg_ParseTuple( args, "O", &py_param ) )
++    {
++        if( PyObject_TypeCheck( py_param, &vlcInstance_Type ) == 1 )
++        {
++            p_instance = ((vlcInstance*)py_param)->p_instance;
++        }
++        else
++        {
++            Py_ssize_t i_index;
++
++            Py_INCREF( py_param );
++            if( ! PySequence_Check( py_param ) )
++            {
++                PyErr_SetString( PyExc_TypeError, "Parameter must be a vlc.Instance or a sequence of strings." );
++                Py_DECREF( py_param );
++                return NULL;
++            }
++            i_size = PySequence_Size( py_param );
++            ppsz_args = malloc( ( i_size + 1 ) * sizeof( char * ) );
++            if( ! ppsz_args )
++            {
++                PyErr_SetString( PyExc_MemoryError, "Out of memory" );
++                Py_DECREF( py_param );
++                return NULL;
++            }
++
++            for ( i_index = 0; i_index < i_size; i_index++ )
++            {
++                ppsz_args[i_index] =
++                    strdup( PyString_AsString( PyObject_Str(
++                                                   PySequence_GetItem( py_param,
++                                                                       i_index ) ) ) );
++            }
++            ppsz_args[i_size] = NULL;
++            Py_DECREF( py_param );
++        }
++    }
++    else
++    {
++        /* No arguments were given. Clear the exception raised
++           by PyArg_ParseTuple. */
++        PyErr_Clear( );
++    }
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    if( p_instance )
++    {
++        self->mc = mediacontrol_new_from_instance( p_instance, exception );
++        Py_INCREF( py_param );
++        self->vlc_instance = ( vlcInstance* ) py_param;
++    }
++    else
++    {
++        self->mc = mediacontrol_new( i_size, ppsz_args, exception );
++        self->vlc_instance = PyObject_New( vlcInstance, &vlcInstance_Type );
++        self->vlc_instance->p_instance = mediacontrol_get_libvlc_instance( LIBVLC_MC(self) );
++    }
++    MC_EXCEPT;
++    Py_END_ALLOW_THREADS
++
++    Py_INCREF( self );
++    return ( PyObject * )self;
++}
++
++static void
++MediaControl_dealloc( PyObject *self )
++{
++    fprintf(stderr, "MC dealloc\n");
++    Py_DECREF( ((MediaControl*)self)->vlc_instance );
++    PyObject_DEL( self );
++}
++
++static PyObject *
++MediaControl_get_vlc_instance( PyObject *self, PyObject *args )
++{
++    vlcInstance *p_ret;
++
++    p_ret = ((MediaControl*)self)->vlc_instance;
++    Py_INCREF( p_ret );
++    return ( PyObject * )p_ret;
++}
++
++static PyObject *
++MediaControl_get_mediaplayer( PyObject *self, PyObject *args )
++{
++    vlcMediaPlayer *p_ret;
++
++    p_ret = PyObject_New( vlcMediaPlayer, &vlcMediaPlayer_Type );
++    p_ret->p_mp = mediacontrol_get_media_player( LIBVLC_MC(self) );
++    Py_INCREF( p_ret );
++    return ( PyObject * )p_ret;
++}
++
++/**
++ *  Return the current position in the stream. The returned value can
++   be relative or absolute ( according to PositionOrigin ) and the unit
++   is set by PositionKey
++ */
++static PyObject *
++MediaControl_get_media_position( PyObject *self, PyObject *args )
++{
++    mediacontrol_Position* pos;
++    mediacontrol_Exception* exception = NULL;
++    PyObject *py_origin;
++    PyObject *py_key;
++    PyObject *py_retval;
++    mediacontrol_PositionOrigin origin;
++    mediacontrol_PositionKey key;
++
++    if( !PyArg_ParseTuple( args, "OO", &py_origin, &py_key ) )
++        return NULL;
++
++    origin = positionOrigin_py_to_c( py_origin );
++    key    = positionKey_py_to_c( py_key );
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    pos = mediacontrol_get_media_position( LIBVLC_MC(self), origin, key, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    py_retval = ( PyObject* )position_c_to_py( pos );
++    free( pos );
++    return py_retval;
++}
++
++/** Set the media position */
++static PyObject *
++MediaControl_set_media_position( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    mediacontrol_Position *a_position;
++    PyObject *py_pos;
++
++    if( !PyArg_ParseTuple( args, "O", &py_pos ) )
++        return NULL;
++
++    a_position = position_py_to_c( py_pos );
++    if( !a_position )
++    {
++        PyErr_SetString( PyExc_MemoryError, "Out of memory" );
++        return NULL;
++    }
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_set_media_position( LIBVLC_MC(self), a_position, exception );
++    free( a_position );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++MediaControl_start( PyObject *self, PyObject *args )
++{
++    mediacontrol_Position *a_position;
++    mediacontrol_Exception *exception = NULL;
++    PyObject *py_pos;
++
++    if( !PyArg_ParseTuple( args, "O", &py_pos ) )
++    {
++        /* No argument. Use a default 0 value. */
++        PyErr_Clear( );
++        py_pos = NULL;
++    }
++    a_position = position_py_to_c( py_pos );
++    if( !a_position )
++        return NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_start( LIBVLC_MC(self), a_position, exception );
++    free( a_position );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++MediaControl_pause( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception *exception = NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_pause( LIBVLC_MC(self), exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++  Py_INCREF( Py_None );
++  return Py_None;
++}
++
++static PyObject *
++MediaControl_resume( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception *exception = NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_resume( LIBVLC_MC(self), exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++MediaControl_stop( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception *exception = NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_stop( LIBVLC_MC(self), exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++MediaControl_exit( PyObject *self, PyObject *args )
++{
++    mediacontrol_exit( LIBVLC_MC(self) );
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++MediaControl_set_mrl( PyObject *self, PyObject *args )
++{
++    char *psz_file;
++    mediacontrol_Exception *exception = NULL;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_file ) )
++      return NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_set_mrl( LIBVLC_MC(self), psz_file, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++MediaControl_get_mrl( PyObject *self, PyObject *args )
++{
++    PyObject *py_retval;
++    char* psz_file;
++    mediacontrol_Exception *exception = NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    psz_file = mediacontrol_get_mrl( LIBVLC_MC(self), exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    py_retval = Py_BuildValue( "s", psz_file );
++    free( psz_file );
++    return py_retval;
++}
++
++static PyObject *
++MediaControl_snapshot( PyObject *self, PyObject *args )
++{
++    mediacontrol_RGBPicture *p_retval = NULL;
++    mediacontrol_Exception* exception = NULL;
++    mediacontrol_Position *a_position = NULL;
++    PyObject *py_pos = NULL;
++    PyObject *py_obj = NULL;
++
++    if( !PyArg_ParseTuple( args, "O", &py_pos ) )
++      return NULL;
++
++    a_position = position_py_to_c( py_pos );
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    p_retval = mediacontrol_snapshot( LIBVLC_MC(self), a_position, exception );
++    free( a_position );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    if( !p_retval )
++    {
++        Py_INCREF( Py_None );
++        return Py_None;
++    }
++
++    /* FIXME: create a real RGBPicture object */
++    py_obj = PyDict_New();
++
++    PyDict_SetItemString( py_obj, "width",
++                          Py_BuildValue( "i", p_retval->width ) );
++    PyDict_SetItemString( py_obj, "height",
++                          Py_BuildValue( "i", p_retval->height ) );
++    PyDict_SetItemString( py_obj, "type",
++                          Py_BuildValue( "i", p_retval->type ) );
++    PyDict_SetItemString( py_obj, "data",
++                          Py_BuildValue( "s#", p_retval->data, p_retval->size ) );
++    PyDict_SetItemString( py_obj, "date",
++                          Py_BuildValue( "L", p_retval->date ) );
++
++    mediacontrol_RGBPicture__free( p_retval );
++
++    return py_obj;
++}
++
++static PyObject*
++MediaControl_display_text( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    PyObject *py_begin, *py_end;
++    char* message;
++    mediacontrol_Position * begin;
++    mediacontrol_Position * end;
++
++    if( !PyArg_ParseTuple( args, "sOO", &message, &py_begin, &py_end ) )
++        return NULL;
++
++    begin = position_py_to_c( py_begin );
++    end   = position_py_to_c( py_end );
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_display_text( LIBVLC_MC(self), message, begin, end, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    free( begin );
++    free( end );
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject*
++MediaControl_get_stream_information( PyObject *self, PyObject *args )
++{
++    mediacontrol_StreamInformation *retval  = NULL;
++    mediacontrol_Exception* exception = NULL;
++    PyObject *py_obj;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    retval = mediacontrol_get_stream_information(
++        LIBVLC_MC(self), mediacontrol_MediaTime, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    py_obj = PyDict_New( );
++
++     /* FIXME: create a real StreamInformation object */
++    PyDict_SetItemString( py_obj, "status",
++                  Py_BuildValue( "i", retval->streamstatus ) );
++    PyDict_SetItemString( py_obj, "url",
++                  Py_BuildValue( "s", retval->url ) );
++    PyDict_SetItemString( py_obj, "position",
++                  Py_BuildValue( "L", retval->position ) );
++    PyDict_SetItemString( py_obj, "length",
++                  Py_BuildValue( "L", retval->length ) );
++
++    mediacontrol_StreamInformation__free( retval );
++
++    return py_obj;
++}
++
++static PyObject*
++MediaControl_sound_set_volume( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    unsigned short volume;
++
++    if( !PyArg_ParseTuple( args, "H", &volume ) )
++        return NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_sound_set_volume( LIBVLC_MC(self), volume, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject*
++MediaControl_sound_get_volume( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    PyObject *py_retval;
++    unsigned short volume;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    volume = mediacontrol_sound_get_volume( LIBVLC_MC(self), exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    py_retval = Py_BuildValue( "H", volume );
++    return py_retval;
++}
++
++static PyObject*
++MediaControl_set_rate( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    int rate;
++
++    if( !PyArg_ParseTuple( args, "i", &rate ) )
++        return NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_set_rate( LIBVLC_MC(self), rate, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject*
++MediaControl_get_rate( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    PyObject *py_retval;
++    int rate;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    rate = mediacontrol_get_rate( LIBVLC_MC(self), exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    py_retval = Py_BuildValue( "i", rate );
++    return py_retval;
++}
++
++static PyObject*
++MediaControl_set_fullscreen( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    int fs;
++
++    if( !PyArg_ParseTuple( args, "i", &fs ) )
++        return NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_set_fullscreen( LIBVLC_MC(self), fs, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject*
++MediaControl_get_fullscreen( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    PyObject *py_retval;
++    int fs;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    fs = mediacontrol_get_fullscreen( LIBVLC_MC(self), exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    py_retval = Py_BuildValue( "i", fs );
++    return py_retval;
++}
++
++static PyObject*
++MediaControl_set_visual( PyObject *self, PyObject *args )
++{
++    mediacontrol_Exception* exception = NULL;
++    WINDOWHANDLE visual;
++
++    if( !PyArg_ParseTuple( args, "i", &visual ) )
++       return NULL;
++
++    Py_BEGIN_ALLOW_THREADS
++    MC_TRY;
++    mediacontrol_set_visual( LIBVLC_MC(self), visual, exception );
++    Py_END_ALLOW_THREADS
++    MC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++
++/*
++ *  added by Ivaylo
++ */
++
++/* see
++ http://docs.python.org/api/api.html
++ http://www.python.org/doc/current/ext/callingPython.html
++ http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65210
++ http://www.suttoncourtenay.org.uk/duncan/accu/integratingpython.html */
++
++
++static int read_callback_wrapper(uint8_t *p_data, int i_data, int id)
++{
++    int retvalue = 0;
++      PyObject *py_buffer = NULL;
++      PyObject *result = NULL;
++      PyGILState_STATE gstate;
++
++
++      gstate = PyGILState_Ensure();
++
++      /* Convert the arguments to a writable buffer object */
++      py_buffer = PyBuffer_FromReadWriteMemory(p_data, i_data);
++
++    if (PyErr_Occurred()) {
++              Py_XDECREF(py_buffer);
++              PyGILState_Release(gstate);
++              return 0;
++    }
++
++    /* Build up the argument list...  */
++
++
++      if (!PyCallable_Check(py_read_callback)) {
++              PyGILState_Release(gstate);
++              return 0;
++      }
++
++    /* ...for calling the Python callback read function. */
++    result = PyObject_CallFunction(py_read_callback,"Oii", py_buffer, i_data, id);
++    if (result && PyInt_Check(result)) {
++        retvalue = PyInt_AsLong(result);
++    }
++    Py_XDECREF(result);
++    Py_XDECREF(py_buffer);
++      PyGILState_Release(gstate);
++
++    return retvalue;
++}
++
++static int seek_callback_wrapper(int64_t i_pos, int id)
++{
++    int retvalue = 0;
++    PyObject *arglist;
++    PyObject *result = NULL;
++    PyGILState_STATE gstate;
++
++    gstate = PyGILState_Ensure();
++
++    /* Build up the argument list...  */
++    arglist = Py_BuildValue("(Li)", i_pos, id); // Arno, 2009-08-25: was (li), incorrect, pos is long long int
++    if (!arglist) {
++        /* bugfix: report failure instead of crashing on Py_DECREF(NULL) below */
++        PyGILState_Release(gstate);
++        return 0;
++    }
++    /* ...for calling the Python callback seek function. */
++    result = PyEval_CallObject(py_seek_callback,arglist);
++    if (result && PyInt_Check(result)) {
++        retvalue = PyInt_AsLong(result);
++    }
++    Py_XDECREF(result);
++    Py_DECREF(arglist);
++    PyGILState_Release(gstate);
++    return retvalue;
++}
++
++static PyObject *
++MediaControl_set_raw_callbacks(PyObject *self, PyObject *args)
++{
++    PyObject *py_read_cbk;
++    PyObject *py_seek_cbk;
++      int i_fsize;
++      int id;
++    mediacontrol_Exception *exception = NULL;
++
++    if (!PyArg_ParseTuple(args, "OOii", &py_read_cbk, &py_seek_cbk, &i_fsize, &id))
++        return NULL;
++
++    /* make sure the arguments are functions */
++    if (!PyCallable_Check(py_read_cbk) || !PyCallable_Check(py_seek_cbk))
++        PyErr_SetString(PyExc_TypeError, "Need a callable object!");
++    else {
++              Py_XINCREF(py_read_cbk);
++              Py_XINCREF(py_seek_cbk);
++              Py_XDECREF(py_read_callback);
++              Py_XDECREF(py_seek_callback); /* bugfix: was py_read_callback (double decref + seek cbk leak) */
++              /* set the global callback pointers */
++              py_read_callback = py_read_cbk;
++              py_seek_callback = py_seek_cbk;
++
++              Py_BEGIN_ALLOW_THREADS
++              MC_TRY;
++              /* announce the callbacks to the raw access module */
++              mediacontrol_set_raw_callbacks( LIBVLC_MC(self),
++                      read_callback_wrapper, seek_callback_wrapper, i_fsize, id, exception );
++
++              Py_END_ALLOW_THREADS
++          MC_EXCEPT;
++
++    }
++
++    Py_INCREF(Py_None);
++    return Py_None;
++}
++
++
++
++static PyMethodDef MediaControl_methods[] =
++{
++    { "get_vlc_instance", MediaControl_get_vlc_instance, METH_VARARGS,
++      "get_vlc_instance( ) -> Instance    Get embedded vlc.Instance." },
++    { "get_mediaplayer", MediaControl_get_mediaplayer, METH_VARARGS,
++      "get_mediaplayer( ) -> MediaPlayer    Get embedded vlc.MediaPlayer." },
++    { "get_media_position", MediaControl_get_media_position, METH_VARARGS,
++      "get_media_position( origin, key ) -> Position    Get current media position." },
++    { "set_media_position", MediaControl_set_media_position, METH_VARARGS,
++      "set_media_position( Position )            Set media position" },
++    { "start", MediaControl_start, METH_VARARGS,
++      "start( Position )         Start the player." },
++    { "pause", MediaControl_pause, METH_VARARGS,
++      "pause( Position )         Pause the player." },
++    { "resume", MediaControl_resume, METH_VARARGS,
++      "resume( Position )        Resume the player" },
++    { "stop", MediaControl_stop, METH_VARARGS,
++      "stop( Position )              Stop the player" },
++    { "exit", MediaControl_exit, METH_VARARGS,
++      "exit( )                     Exit the player" },
++    { "set_mrl", MediaControl_set_mrl, METH_VARARGS,
++      "set_mrl( str )               Set the file to be played" },
++    { "get_mrl", MediaControl_get_mrl, METH_VARARGS,
++      "get_mrl( ) -> str       Get the played file" },
++    { "snapshot", MediaControl_snapshot, METH_VARARGS,
++      "snapshot( Position ) -> dict        Take a snapshot" },
++    { "display_text", MediaControl_display_text, METH_VARARGS,
++      "display_text( str, Position, Position )    Display a text on the video" },
++    { "get_stream_information", MediaControl_get_stream_information,
++      METH_VARARGS,
++      "get_stream_information( ) -> dict      Get information about the stream"},
++    { "sound_get_volume", MediaControl_sound_get_volume, METH_VARARGS,
++      "sound_get_volume( ) -> int       Get the volume" },
++    { "sound_set_volume", MediaControl_sound_set_volume, METH_VARARGS,
++      "sound_set_volume( int )           Set the volume" },
++    { "set_visual", MediaControl_set_visual, METH_VARARGS,
++      "set_visual( int )           Set the embedding window visual ID" },
++    { "get_rate", MediaControl_get_rate, METH_VARARGS,
++      "get_rate( ) -> int       Get the rate" },
++    { "set_rate", MediaControl_set_rate, METH_VARARGS,
++      "set_rate( int )              Set the rate" },
++    { "get_fullscreen", MediaControl_get_fullscreen, METH_VARARGS,
++      "get_fullscreen( ) -> int       Get the fullscreen status" },
++    { "set_fullscreen", MediaControl_set_fullscreen, METH_VARARGS,
++      "set_fullscreen( int )              Set the fullscreen status" },
++     { "set_raw_callbacks", MediaControl_set_raw_callbacks, METH_VARARGS,
++        "set_raw_callbacks(read_cbk, seek_cbk, fsize, id)     Set the callbacks for the raw access method" }, /* added by Ivaylo */
++    { NULL, NULL, 0, NULL },
++};
++
++static PyTypeObject MediaControl_Type =
++{
++    PyObject_HEAD_INIT( NULL )
++    0,                         /*ob_size*/
++    "vlc.MediaControl",        /*tp_name*/
++    sizeof( MediaControl ), /*tp_basicsize: instance struct size, not the type object's*/
++    0,                         /*tp_itemsize*/
++    ( destructor )MediaControl_dealloc,      /*tp_dealloc*/
++    0,                         /*tp_print*/
++    0,                         /*tp_getattr*/
++    0,                         /*tp_setattr*/
++    0,                         /*tp_compare*/
++    0,                         /*tp_repr*/
++    0,                         /*tp_as_number*/
++    0,                         /*tp_as_sequence*/
++    0,                         /*tp_as_mapping*/
++    0,                         /*tp_hash */
++    0,                         /*tp_call*/
++    0,                         /*tp_str*/
++    0,                         /*tp_getattro*/
++    0,                         /*tp_setattro*/
++    0,                         /*tp_as_buffer*/
++    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
++    "Control of a VLC instance.\n\nvlc.MediaControl(args): initialisation with a list of VLC parameters.\nvlc.MediaControl(instance): initialisation with an existing vlc.Instance",  /* tp_doc */
++    0,                     /* tp_traverse */
++    0,                     /* tp_clear */
++    0,                     /* tp_richcompare */
++    0,                     /* tp_weaklistoffset */
++    0,                     /* tp_iter */
++    0,                     /* tp_iternext */
++    MediaControl_methods,             /* tp_methods */
++    0,             /* tp_members */
++    0,                         /* tp_getset */
++    0,                         /* tp_base */
++    0,                         /* tp_dict */
++    0,                         /* tp_descr_get */
++    0,                         /* tp_descr_set */
++    0,                         /* tp_dictoffset */
++    0,                         /* tp_init */
++    0,                         /* tp_alloc */
++    MediaControl_new,          /* tp_new */
++};
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlc_mediaplayer.c d10-02-02-tstreamplaylist-p10/bindings/python/vlc_mediaplayer.c
+--- vlc-1.0.5/bindings/python/vlc_mediaplayer.c        1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlc_mediaplayer.c    2009-08-26 10:29:01.000000000 +0200
+@@ -0,0 +1,706 @@
++/*****************************************************************************
++ * vlc_mediaplayer.c: vlc.MediaPlayer binding
++ *****************************************************************************
++ * Copyright (C) 2006,2007,2008,2009 the VideoLAN team
++ * $Id$
++ *
++ * Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
++ *****************************************************************************/
++#include "vlcglue.h"
++
++/***********************************************************************
++ * vlc.Input
++ ***********************************************************************/
++
++static PyObject *
++vlcMediaPlayer_get_length( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int64_t i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_get_length( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "L", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_get_time( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int64_t i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_get_time( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "L", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_time( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int64_t i_time;
++
++    if( !PyArg_ParseTuple( args, "L", &i_time ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_time( LIBVLC_MEDIAPLAYER(self), i_time, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_position( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    float f_ret;
++    LIBVLC_TRY;
++    f_ret = libvlc_media_player_get_position( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "f", f_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_position( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    float f_pos;
++
++    if( !PyArg_ParseTuple( args, "f", &f_pos ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_position( LIBVLC_MEDIAPLAYER(self), f_pos, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_will_play( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_will_play( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_get_rate( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    float f_ret;
++    LIBVLC_TRY;
++    f_ret = libvlc_media_player_get_rate( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "f", f_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_rate( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    float f_rate;
++
++    if( !PyArg_ParseTuple( args, "f", &f_rate ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_rate( LIBVLC_MEDIAPLAYER(self), f_rate, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_state( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_get_state( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_has_vout( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_has_vout( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_get_fps( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    float f_ret;
++    LIBVLC_TRY;
++    f_ret = libvlc_media_player_get_fps( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "f", f_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_audio_get_track( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_audio_get_track( LIBVLC_MEDIAPLAYER(self), &ex );
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_audio_set_track( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_track;
++
++    if( !PyArg_ParseTuple( args, "i", &i_track ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_audio_set_track( LIBVLC_MEDIAPLAYER(self), i_track, &ex );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_chapter( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_get_chapter( LIBVLC_MEDIAPLAYER(self), &ex );
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_get_chapter_count( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_get_chapter_count( LIBVLC_MEDIAPLAYER(self), &ex );
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_chapter( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_chapter;
++
++    if( !PyArg_ParseTuple( args, "i", &i_chapter ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_chapter( LIBVLC_MEDIAPLAYER(self), i_chapter, &ex );
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++
++static PyObject *
++vlcMediaPlayer_toggle_fullscreen( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++
++    LIBVLC_TRY;
++    libvlc_toggle_fullscreen( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_set_fullscreen( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_fullscreen;
++
++    if( !PyArg_ParseTuple( args, "i", &i_fullscreen ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_set_fullscreen( LIBVLC_MEDIAPLAYER(self), i_fullscreen, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_fullscreen( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_get_fullscreen( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_get_height( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_video_get_height( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_get_width( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++
++    LIBVLC_TRY;
++    i_ret = libvlc_video_get_width( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_get_aspect_ratio( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_ret;
++    PyObject* o_ret;
++
++    LIBVLC_TRY;
++    psz_ret = libvlc_video_get_aspect_ratio( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    o_ret=Py_BuildValue( "s", psz_ret );
++    free( psz_ret );
++    return o_ret;
++}
++
++static PyObject *
++vlcMediaPlayer_set_aspect_ratio( PyObject *self, PyObject *args )  /* set_aspect_ratio(str) */
++{
++    libvlc_exception_t ex;
++    char* psz_ratio;  /* borrowed: points into the Python string object */
++
++    if( !PyArg_ParseTuple( args, "s", &psz_ratio ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_video_set_aspect_ratio( LIBVLC_MEDIAPLAYER(self), psz_ratio, &ex);
++    LIBVLC_EXCEPT;
++    /* BUGFIX: do NOT free(psz_ratio) -- PyArg_ParseTuple("s") yields memory owned by Python. */
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_video_take_snapshot( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    char* psz_filename;
++
++    if( !PyArg_ParseTuple( args, "s", &psz_filename ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_video_take_snapshot( LIBVLC_MEDIAPLAYER(self), psz_filename, 0, 0, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_is_seekable( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_is_seekable( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_can_pause( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_media_player_can_pause( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_play( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++
++    LIBVLC_TRY;
++    libvlc_media_player_play( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_pause( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++
++    LIBVLC_TRY;
++    libvlc_media_player_pause( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_stop( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++
++    LIBVLC_TRY;
++    libvlc_media_player_stop( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_set_xwindow( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    uint32_t i_drawable;
++
++    if( !PyArg_ParseTuple( args, "i", &i_drawable ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_xwindow( LIBVLC_MEDIAPLAYER(self), i_drawable, &ex );
++    LIBVLC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_xwindow( PyObject *self, PyObject *args )
++{
++    uint32_t i_ret;
++
++    i_ret = libvlc_media_player_get_xwindow( LIBVLC_MEDIAPLAYER(self));
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_hwnd( PyObject *self, PyObject *args )  /* set_hwnd(int); Win32 window handle */
++{
++    libvlc_exception_t ex;
++    long i_drawable;  /* BUGFIX: "l" format stores a C long; storing into a void* was UB */
++
++    if( !PyArg_ParseTuple( args, "l", &i_drawable ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_hwnd( LIBVLC_MEDIAPLAYER(self), (void*) i_drawable, &ex );
++    LIBVLC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_hwnd( PyObject *self, PyObject *args )
++{
++    void* i_ret;
++
++    i_ret = libvlc_media_player_get_hwnd( LIBVLC_MEDIAPLAYER(self));
++    return Py_BuildValue( "l", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_agl( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    uint32_t i_drawable;
++
++    if( !PyArg_ParseTuple( args, "i", &i_drawable ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_agl( LIBVLC_MEDIAPLAYER(self), i_drawable, &ex );
++    LIBVLC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_agl( PyObject *self, PyObject *args )
++{
++    uint32_t i_ret;
++
++    i_ret = libvlc_media_player_get_agl( LIBVLC_MEDIAPLAYER(self));
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_nsobject( PyObject *self, PyObject *args )  /* set_nsobject(int); Cocoa view handle */
++{
++    libvlc_exception_t ex;
++    long i_drawable;  /* BUGFIX: "l" format stores a C long; storing into a void* was UB */
++
++    if( !PyArg_ParseTuple( args, "l", &i_drawable ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_media_player_set_nsobject( LIBVLC_MEDIAPLAYER(self), (void*) i_drawable, &ex );
++    LIBVLC_EXCEPT;
++
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++static PyObject *
++vlcMediaPlayer_get_nsobject( PyObject *self, PyObject *args )
++{
++    void* i_ret;
++
++    i_ret = libvlc_media_player_get_nsobject( LIBVLC_MEDIAPLAYER(self));
++    return Py_BuildValue( "l", i_ret );
++}
++
++
++static PyObject *
++vlcMediaPlayer_set_media( PyObject *self, PyObject *args )  /* set_media(vlc.Media) */
++{
++    libvlc_exception_t ex;
++    PyObject* py_param = NULL;
++
++    if( !PyArg_ParseTuple( args, "O", &py_param ) )
++        return NULL;
++    if( PyObject_TypeCheck( py_param, &vlcMedia_Type ) == 1 )
++    {
++        LIBVLC_TRY;
++        libvlc_media_player_set_media( LIBVLC_MEDIAPLAYER(self), ((vlcMedia*)py_param)->p_media, &ex );
++        LIBVLC_EXCEPT;
++    }
++    else
++    {
++        PyObject *py_exc = vlc_Exception;
++        PyErr_SetString( py_exc, "vlc.Media parameter needed" );
++        return NULL;
++    }
++    Py_INCREF( Py_None ); return Py_None; /* BUGFIX: success must return a valid object, not NULL (NULL w/o exception => SystemError) */
++}
++
++static PyObject *
++vlcMediaPlayer_get_media( PyObject *self, PyObject *args )  /* get_media() -> vlc.Media | None */
++{
++    libvlc_exception_t ex;
++    libvlc_media_t *p_media;
++    vlcMedia *p_ret;
++
++    LIBVLC_TRY;
++    p_media = libvlc_media_player_get_media( LIBVLC_MEDIAPLAYER(self), &ex );
++    LIBVLC_EXCEPT;
++
++    if( !p_media )
++    {
++        Py_INCREF( Py_None );
++        return Py_None;
++    }
++    else
++    {
++        p_ret = PyObject_New( vlcMedia, &vlcMedia_Type );
++        p_ret->p_media = p_media;
++        /* BUGFIX: PyObject_New already returns a new reference; the extra INCREF here leaked one ref per call. */
++        return ( PyObject * )p_ret;
++    }
++}
++
++static PyObject *
++vlcMediaPlayer_get_spu( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_ret;
++    LIBVLC_TRY;
++    i_ret = libvlc_video_get_spu( LIBVLC_MEDIAPLAYER(self), &ex);
++    LIBVLC_EXCEPT;
++    return Py_BuildValue( "i", i_ret );
++}
++
++static PyObject *
++vlcMediaPlayer_set_spu( PyObject *self, PyObject *args )
++{
++    libvlc_exception_t ex;
++    int i_spu;
++
++    if( !PyArg_ParseTuple( args, "i", &i_spu ) )
++        return NULL;
++
++    LIBVLC_TRY;
++    libvlc_video_set_spu( LIBVLC_MEDIAPLAYER(self), i_spu, &ex);
++    LIBVLC_EXCEPT;
++    Py_INCREF( Py_None );
++    return Py_None;
++}
++
++
++static PyMethodDef vlcMediaPlayer_methods[] =
++{
++    { "get_length", vlcMediaPlayer_get_length, METH_VARARGS,
++      "get_length() -> long    " },
++    { "get_time", vlcMediaPlayer_get_time, METH_VARARGS,
++      "get_time() -> long" },
++    { "set_time", vlcMediaPlayer_set_time, METH_VARARGS,
++      "set_time(long)" },
++    { "get_position", vlcMediaPlayer_get_position, METH_VARARGS,
++      "get_position() -> float" },
++    { "set_position", vlcMediaPlayer_set_position, METH_VARARGS,
++      "set_position(float)" },
++    { "will_play", vlcMediaPlayer_will_play, METH_VARARGS,
++      "will_play() -> int" },
++    { "is_seekable", vlcMediaPlayer_is_seekable, METH_VARARGS,
++      "is_seekable() -> int" },
++    { "can_pause", vlcMediaPlayer_can_pause, METH_VARARGS,
++      "can_pause() -> int" },
++    { "get_rate", vlcMediaPlayer_get_rate, METH_VARARGS,
++      "get_rate() -> float" },
++    { "set_rate", vlcMediaPlayer_set_rate, METH_VARARGS,
++      "set_rate(float)" },
++    { "get_state", vlcMediaPlayer_get_state, METH_VARARGS,
++      "get_state() -> int" },
++    { "has_vout", vlcMediaPlayer_has_vout, METH_VARARGS,
++      "has_vout() -> int" },
++    { "get_fps", vlcMediaPlayer_get_fps, METH_VARARGS,
++      "get_fps() -> float" },
++    { "audio_get_track", vlcMediaPlayer_audio_get_track, METH_VARARGS,
++      "audio_get_track() -> int    Get current audio track" },
++    { "audio_set_track", vlcMediaPlayer_audio_set_track, METH_VARARGS,
++      "audio_set_track(int)        Set current audio track" },
++    { "toggle_fullscreen", vlcMediaPlayer_toggle_fullscreen, METH_VARARGS,
++      "toggle_fullscreen()    Toggle fullscreen status on video output" },
++    { "set_fullscreen", vlcMediaPlayer_set_fullscreen, METH_VARARGS,
++      "set_fullscreen(bool)    Enable or disable fullscreen on a video output" },
++    { "get_fullscreen", vlcMediaPlayer_get_fullscreen, METH_VARARGS,
++      "get_fullscreen() -> bool    Get current fullscreen status" },
++    { "get_height", vlcMediaPlayer_get_height, METH_VARARGS,
++      "get_height() -> int           Get current video height" },
++    { "get_width", vlcMediaPlayer_get_width, METH_VARARGS,
++      "get_width() -> int           Get current video width" },
++    { "get_aspect_ratio", vlcMediaPlayer_get_aspect_ratio, METH_VARARGS,
++      "get_aspect_ratio() -> str    Get current video aspect ratio" },
++    { "set_aspect_ratio", vlcMediaPlayer_set_aspect_ratio, METH_VARARGS,
++      "set_aspect_ratio(str)        Set new video aspect ratio" },
++    { "video_take_snapshot", vlcMediaPlayer_video_take_snapshot, METH_VARARGS,
++      "video_take_snapshot(filename=str)        Take a snapshot of the current video window" },
++
++    { "play", vlcMediaPlayer_play, METH_VARARGS,
++      "play()    Play the media instance" },
++    { "pause", vlcMediaPlayer_pause, METH_VARARGS,
++      "pause()   Pause the media instance" },
++    { "stop", vlcMediaPlayer_stop, METH_VARARGS,
++      "stop()    Stop the media instance" },
++#ifdef OLD
++    { "set_drawable", vlcMediaPlayer_set_drawable, METH_VARARGS,
++      "set_drawable()    Set the drawable id" },
++#endif
++    { "get_chapter", vlcMediaPlayer_get_chapter, METH_VARARGS,
++      "get_chapter() -> int    Get current chapter" },
++    { "set_chapter", vlcMediaPlayer_set_chapter, METH_VARARGS,
++      "set_chapter(int)        Set current chapter" },
++    { "get_chapter_count", vlcMediaPlayer_get_chapter_count, METH_VARARGS,
++      "get_chapter_count() -> int    Get current chapter count" },
++
++    { "set_media", vlcMediaPlayer_set_media, METH_VARARGS,
++      "set_media(vlc.Media)        Set the media that will be used by the media_player" },
++    { "get_media", vlcMediaPlayer_get_media, METH_VARARGS,
++      "get_media() -> vlc.Media    Get the media used by the media_player (if any)." },
++
++    { "get_spu", vlcMediaPlayer_get_spu, METH_VARARGS,
++      "get_spu() -> int   Get current video subtitle" },
++    { "set_spu", vlcMediaPlayer_set_spu, METH_VARARGS,
++      "set_spu(int)      Set new video subtitle" },
++
++    { NULL }  /* Sentinel */
++};
++
++static PyTypeObject vlcMediaPlayer_Type =
++{
++    PyObject_HEAD_INIT( NULL )
++    0,                         /*ob_size*/
++    "vlc.MediaPlayer",            /*tp_name*/
++    sizeof( vlcMediaPlayer_Type ),   /*tp_basicsize*/ /* NOTE(review): should likely be sizeof the INSTANCE struct (vlcMediaPlayer in vlcglue.h), not the type object -- confirm; over-allocation masks the bug but it is wrong */
++    0,                         /*tp_itemsize*/
++    0,                         /*tp_dealloc*/
++    0,                         /*tp_print*/
++    0,                         /*tp_getattr*/
++    0,                         /*tp_setattr*/
++    0,                         /*tp_compare*/
++    0,                         /*tp_repr*/
++    0,                         /*tp_as_number*/
++    0,                         /*tp_as_sequence*/
++    0,                         /*tp_as_mapping*/
++    0,                         /*tp_hash */
++    0,                         /*tp_call*/
++    0,                         /*tp_str*/
++    0,                         /*tp_getattro*/
++    0,                         /*tp_setattro*/
++    0,                         /*tp_as_buffer*/
++    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
++    "vlc.MediaPlayer object\n\nIt cannot be instantiated standalone, it must be obtained from an existing vlc.Instance object",  /* tp_doc */
++    0,                        /* tp_traverse */
++    0,                        /* tp_clear */
++    0,                         /* tp_richcompare */
++    0,                         /* tp_weaklistoffset */
++    0,                         /* tp_iter */
++    0,                          /* tp_iternext */
++    vlcMediaPlayer_methods,          /* tp_methods */
++    0,                         /* tp_members */
++    0,                         /* tp_getset */
++    0,                         /* tp_base */
++    0,                         /* tp_dict */
++    0,                         /* tp_descr_get */
++    0,                         /* tp_descr_set */
++    0,                         /* tp_dictoffset */
++    0,                         /* tp_init */
++    0,                         /* tp_alloc */
++    0,                         /* tp_new */
++};
++
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlc_module.c d10-02-02-tstreamplaylist-p10/bindings/python/vlc_module.c
+--- vlc-1.0.5/bindings/python/vlc_module.c     1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlc_module.c 2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,179 @@
++/*****************************************************************************
++ * vlc_module.c: vlc python binding module
++ *****************************************************************************
++ * Copyright (C) 2006,2007,2008,2009 the VideoLAN team
++ * $Id$
++ *
++ * Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
++ *****************************************************************************/
++
++#include "vlcglue.h"
++
++/**************************************************************************
++ * VLC Module
++ **************************************************************************/
++
++#ifndef vlcMODINIT_FUNC /* declarations for DLL import/export */
++#define vlcMODINIT_FUNC void
++#endif
++
++static PyMethodDef vlc_methods[] = {
++    { NULL }  /* Sentinel */
++};
++
++/* Module globals */
++PyObject* MediaControl_InternalException          = NULL;
++PyObject* MediaControl_PositionKeyNotSupported    = NULL;
++PyObject *MediaControl_PositionOriginNotSupported = NULL;
++PyObject* MediaControl_InvalidPosition            = NULL;
++PyObject *MediaControl_PlaylistException          = NULL;
++
++/* Added by Ivaylo */
++static PyObject* py_read_callback     = NULL;
++static PyObject* py_seek_callback     = NULL;
++/* End added by Ivaylo */
++
++
++vlcMODINIT_FUNC
++initvlc( void )  /* Python 2 module init entry point for "import vlc" */
++{
++    PyObject* p_module;
++
++    /* vlcMediaPlayer_Type.tp_new = PyType_GenericNew; */
++    vlcMediaPlayer_Type.tp_alloc = PyType_GenericAlloc;
++    /* vlcMedia_Type.tp_new = PyType_GenericNew; */
++    vlcMedia_Type.tp_alloc = PyType_GenericAlloc;
++
++    vlcInstance_Type.tp_alloc = PyType_GenericAlloc;
++    MediaControl_Type.tp_alloc = PyType_GenericAlloc;
++
++    p_module = Py_InitModule3( "vlc", vlc_methods,
++                               "VLC media player embedding module." );
++
++    if( !p_module )
++      return;
++
++    if( PyType_Ready( &PyPosition_Type ) < 0 )
++        return;
++    if( PyType_Ready( &MediaControl_Type ) < 0 )
++        return;
++    if( PyType_Ready( &vlcInstance_Type ) < 0 )
++        return;
++    if( PyType_Ready( &vlcMediaPlayer_Type ) < 0 )
++        return;
++    if( PyType_Ready( &vlcMedia_Type ) < 0 )
++        return;
++
++    /* Exceptions */
++    MediaControl_InternalException =
++            PyErr_NewException( "vlc.InternalException", NULL, NULL );
++    Py_INCREF( MediaControl_InternalException );
++    PyModule_AddObject( p_module, "InternalException",
++                        MediaControl_InternalException );
++
++    MediaControl_PositionKeyNotSupported =
++            PyErr_NewException( "vlc.PositionKeyNotSupported", NULL, NULL );
++    Py_INCREF( MediaControl_PositionKeyNotSupported );
++    PyModule_AddObject( p_module, "PositionKeyNotSupported",
++                        MediaControl_PositionKeyNotSupported );
++
++    MediaControl_PositionOriginNotSupported=
++            PyErr_NewException( "vlc.PositionOriginNotSupported", NULL, NULL );  /* BUGFIX: was "vlc.InvalidPosition" (copy-paste) */
++    Py_INCREF( MediaControl_PositionOriginNotSupported );
++    PyModule_AddObject( p_module, "PositionOriginNotSupported",
++                        MediaControl_PositionOriginNotSupported );
++
++    MediaControl_InvalidPosition =
++            PyErr_NewException( "vlc.InvalidPosition", NULL, NULL );
++    Py_INCREF( MediaControl_InvalidPosition );
++    PyModule_AddObject( p_module, "InvalidPosition",
++                        MediaControl_InvalidPosition );
++
++    MediaControl_PlaylistException =
++            PyErr_NewException( "vlc.PlaylistException", NULL, NULL );
++    Py_INCREF( MediaControl_PlaylistException );
++    PyModule_AddObject( p_module, "PlaylistException",
++                        MediaControl_PlaylistException );
++
++    /* Exceptions */
++    vlc_Exception =
++        PyErr_NewException( "vlc.InstanceException", NULL, NULL );
++    Py_INCREF( vlc_Exception );
++    PyModule_AddObject( p_module, "InstanceException",
++                        vlc_Exception );
++
++    /* Types */
++    Py_INCREF( &PyPosition_Type );
++    PyModule_AddObject( p_module, "Position",
++                        ( PyObject * )&PyPosition_Type );
++
++    Py_INCREF( &MediaControl_Type );
++    PyModule_AddObject( p_module, "MediaControl",
++                        ( PyObject * )&MediaControl_Type );
++
++    Py_INCREF( &vlcInstance_Type );
++    PyModule_AddObject( p_module, "Instance",
++                        ( PyObject * )&vlcInstance_Type );
++
++    Py_INCREF( &vlcMediaPlayer_Type );
++    PyModule_AddObject( p_module, "MediaPlayer",
++                        ( PyObject * )&vlcMediaPlayer_Type );
++
++    Py_INCREF( &vlcMedia_Type );
++    PyModule_AddObject( p_module, "Media",
++                        ( PyObject * )&vlcMedia_Type );
++
++    /* Constants */
++    PyModule_AddIntConstant( p_module, "AbsolutePosition",
++                             mediacontrol_AbsolutePosition );
++    PyModule_AddIntConstant( p_module, "RelativePosition",
++                             mediacontrol_RelativePosition );
++    PyModule_AddIntConstant( p_module, "ModuloPosition",
++                             mediacontrol_ModuloPosition );
++
++    PyModule_AddIntConstant( p_module, "ByteCount",
++                             mediacontrol_ByteCount );
++    PyModule_AddIntConstant( p_module, "SampleCount",
++                             mediacontrol_SampleCount );
++    PyModule_AddIntConstant( p_module, "MediaTime",
++                             mediacontrol_MediaTime );
++
++    PyModule_AddIntConstant( p_module, "PlayingStatus",
++                             mediacontrol_PlayingStatus );
++    PyModule_AddIntConstant( p_module, "PauseStatus",
++                             mediacontrol_PauseStatus );
++    PyModule_AddIntConstant( p_module, "InitStatus",
++                             mediacontrol_InitStatus );
++    PyModule_AddIntConstant( p_module, "EndStatus",
++                             mediacontrol_EndStatus );
++    PyModule_AddIntConstant( p_module, "UndefinedStatus",
++                             mediacontrol_UndefinedStatus );
++
++}
++
++/* Horrible hack... Please do not look.  Temporary workaround for the
++   forward declaration mess of python types (cf vlcglue.h). If we do a
++   separate compilation, we have to declare some types as extern. But
++   the recommended way to forward declared types in python is
++   static... I am sorting the mess but in the meantime, this will
++   produce a working python module.
++*/
++#include "vlc_mediacontrol.c"
++#include "vlc_position.c"
++#include "vlc_instance.c"
++#include "vlc_mediaplayer.c"
++#include "vlc_media.c"
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlc_position.c d10-02-02-tstreamplaylist-p10/bindings/python/vlc_position.c
+--- vlc-1.0.5/bindings/python/vlc_position.c   1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlc_position.c       2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,219 @@
++/*****************************************************************************
++ * vlc_position.c: vlc.Position binding
++ *****************************************************************************
++ * Copyright (C) 2006,2007,2008,2009 the VideoLAN team
++ * $Id$
++ *
++ * Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
++ *****************************************************************************/
++#include "vlcglue.h"
++
++/***********************************************************************
++ * Position
++ ***********************************************************************/
++
++static PyObject *
++PyPosition_new( PyTypeObject *type, PyObject *args, PyObject *kwds )  /* Position(value=0, origin=Absolute, key=MediaTime) */
++{
++    PyPosition *self;
++    static char *kwlist[] = { "value", "origin", "key", NULL};
++
++    self = PyObject_New( PyPosition, &PyPosition_Type );
++    if( !self ) return NULL; /* BUGFIX: propagate MemoryError instead of dereferencing NULL */
++    self->value=0;
++    self->origin=mediacontrol_AbsolutePosition;
++    self->key=mediacontrol_MediaTime;
++
++    if(! PyArg_ParseTupleAndKeywords( args, kwds, "|lii", kwlist,
++                                      &(self->value),
++                                      &(self->origin),
++                                      &(self->key) ) )
++    {
++        Py_DECREF( self ); return NULL; /* BUGFIX: release the half-built object */
++    }
++
++    if( self->key != mediacontrol_MediaTime
++    && self->key != mediacontrol_ByteCount
++    && self->key != mediacontrol_SampleCount )
++    {
++        PyErr_SetString ( MediaControl_InternalException, "Invalid key value" );
++        Py_DECREF( self ); return NULL; /* BUGFIX: was leaked */
++    }
++
++    if( self->origin != mediacontrol_AbsolutePosition
++    && self->origin != mediacontrol_RelativePosition
++    && self->origin != mediacontrol_ModuloPosition )
++    {
++        PyErr_SetString ( MediaControl_InternalException, "Invalid origin value" );
++        Py_DECREF( self ); return NULL; /* BUGFIX: was leaked */
++    }
++
++    /* BUGFIX: PyObject_New already returned a new reference; the extra INCREF leaked one ref. */
++    return ( PyObject * )self;
++}
++
++mediacontrol_PositionKey
++positionKey_py_to_c( PyObject * py_key )  /* map a Python int to a PositionKey; defaults to MediaTime on bad input */
++{
++    mediacontrol_PositionKey key_position = mediacontrol_MediaTime;
++    int key;
++
++    if( !PyArg_Parse( py_key, "i", &key ) )
++    {
++        PyErr_SetString ( MediaControl_InternalException, "Invalid key value" );
++        return key_position;
++    }
++
++    switch ( key )
++    {
++    case 0: key_position = mediacontrol_ByteCount;   break; /* BUGFIX: was assigning 'key', so the result was always MediaTime */
++    case 1: key_position = mediacontrol_SampleCount; break;
++    case 2: key_position = mediacontrol_MediaTime;   break;
++    }
++    return key_position;
++}
++
++mediacontrol_PositionOrigin
++positionOrigin_py_to_c( PyObject * py_origin )
++{
++    mediacontrol_PositionOrigin  origin_position = mediacontrol_AbsolutePosition;
++    int origin;
++
++    if( !PyArg_Parse( py_origin,"i", &origin ) )
++    {
++        PyErr_SetString( MediaControl_InternalException,
++                         "Invalid origin value" );
++        return origin_position;
++    }
++
++    switch ( origin )
++    {
++    case 0: origin_position = mediacontrol_AbsolutePosition; break;
++    case 1: origin_position = mediacontrol_RelativePosition; break;
++    case 2: origin_position = mediacontrol_ModuloPosition;   break;
++    }
++
++    return origin_position;
++}
++
++/* Methods for transforming the Position Python object to Position structure*/
++mediacontrol_Position*
++position_py_to_c( PyObject * py_position )
++{
++    mediacontrol_Position * a_position = NULL;
++    PyPosition *pos = ( PyPosition* )py_position;
++
++    a_position = ( mediacontrol_Position* )malloc( sizeof( mediacontrol_Position ) );
++    if( !a_position )
++    {
++        PyErr_SetString( PyExc_MemoryError, "Out of memory" );
++        return NULL;
++    }
++
++    if( !py_position )
++    {
++        /* If we give a NULL value, it will be considered as
++           a 0 relative position in mediatime */
++        a_position->origin = mediacontrol_RelativePosition;
++        a_position->key    = mediacontrol_MediaTime;
++        a_position->value  = 0;
++    }
++    else if( PyObject_IsInstance( py_position, ( PyObject* )&PyPosition_Type ) )
++    {
++        a_position->origin = pos->origin;
++        a_position->key    = pos->key;
++        a_position->value  = ntohll(pos->value);
++    }
++    else
++    {
++        /* Feature: if we give an integer, it will be considered as
++           a relative position in mediatime */
++        a_position->origin = mediacontrol_RelativePosition;
++        a_position->key    = mediacontrol_MediaTime;
++        a_position->value  = PyLong_AsLongLong( py_position );
++    }
++    return a_position;
++}
++
++PyPosition*
++position_c_to_py( mediacontrol_Position *position )
++{
++    PyPosition* py_retval;
++
++    py_retval = PyObject_New( PyPosition, &PyPosition_Type );
++    py_retval->origin = position->origin;
++    py_retval->key    = position->key;
++    py_retval->value  = position->value;
++
++    return py_retval;
++}
++
++static PyMethodDef PyPosition_methods[] =
++{
++    { NULL }  /* Sentinel */
++};
++
++static PyMemberDef PyPosition_members[] =
++{
++    { "origin", T_INT, offsetof( PyPosition, origin ), 0, "Position origin" },
++    { "key",    T_INT, offsetof( PyPosition, key ),    0, "Position key" },
++    { "value",  T_ULONG, offsetof( PyPosition, value ), 0, "Position value" },
++    { NULL }  /* Sentinel */
++};
++
++static PyTypeObject PyPosition_Type =
++{
++    PyObject_HEAD_INIT( NULL )
++    0,                         /*ob_size*/
++    "vlc.Position",            /*tp_name*/
++    sizeof( PyPosition_Type ),   /*tp_basicsize*/
++    0,                         /*tp_itemsize*/
++    0,                         /*tp_dealloc*/
++    0,                         /*tp_print*/
++    0,                         /*tp_getattr*/
++    0,                         /*tp_setattr*/
++    0,                         /*tp_compare*/
++    0,                         /*tp_repr*/
++    0,                         /*tp_as_number*/
++    0,                         /*tp_as_sequence*/
++    0,                         /*tp_as_mapping*/
++    0,                         /*tp_hash */
++    0,                         /*tp_call*/
++    0,                         /*tp_str*/
++    0,                         /*tp_getattro*/
++    0,                         /*tp_setattro*/
++    0,                         /*tp_as_buffer*/
++    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
++    "Represent a Position with value, origin and key",  /* tp_doc */
++    0,                        /* tp_traverse */
++    0,                        /* tp_clear */
++    0,                         /* tp_richcompare */
++    0,                         /* tp_weaklistoffset */
++    0,                         /* tp_iter */
++    0,                          /* tp_iternext */
++    PyPosition_methods,             /* tp_methods */
++    PyPosition_members,             /* tp_members */
++    0,                         /* tp_getset */
++    0,                         /* tp_base */
++    0,                         /* tp_dict */
++    0,                         /* tp_descr_get */
++    0,                         /* tp_descr_set */
++    0,                         /* tp_dictoffset */
++    0,                         /* tp_init */
++    0,                         /* tp_alloc */
++    PyPosition_new,            /* tp_new */
++};
+diff -rbNU 3 vlc-1.0.5/bindings/python/vlcwidget.py d10-02-02-tstreamplaylist-p10/bindings/python/vlcwidget.py
+--- vlc-1.0.5/bindings/python/vlcwidget.py     1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/bindings/python/vlcwidget.py 2009-08-25 14:23:17.000000000 +0200
+@@ -0,0 +1,114 @@
++#! /usr/bin/python
++
++"""VLC Widget classes.
++
++This module provides two helper classes, to ease the embedding of a
++VLC component inside a pygtk application.
++
++VLCWidget is a simple VLC widget.
++
++DecoratedVLCWidget provides simple player controls.
++
++$Id$
++"""
++
++import gtk
++import sys
++import vlc
++
++from gettext import gettext as _
++
++class VLCWidget(gtk.DrawingArea):
++    """Simple VLC widget.
++
++    Its player can be controlled through the 'player' attribute, which
++    is a MediaControl instance.
++    """
++    def __init__(self, *p):
++        gtk.DrawingArea.__init__(self)
++        self.player=vlc.MediaControl(*p)
++        def handle_embed(*p):
++            if sys.platform == 'win32':
++                xidattr='handle'
++            else:
++                xidattr='xid'
++            self.player.set_visual(getattr(self.window, xidattr))
++            return True
++        self.connect("map-event", handle_embed)
++        self.set_size_request(320, 200)
++
++
++class DecoratedVLCWidget(gtk.VBox):
++    """Decorated VLC widget.
++
++    VLC widget decorated with a player control toolbar.
++
++    Its player can be controlled through the 'player' attribute, which
++    is a MediaControl instance.
++    """
++    def __init__(self, *p):
++        gtk.VBox.__init__(self)
++        self._vlc_widget=VLCWidget(*p)
++        self.player=self._vlc_widget.player
++        self.pack_start(self._vlc_widget, expand=True)
++        self._toolbar = self.get_player_control_toolbar()
++        self.pack_start(self._toolbar, expand=False)
++
++    def get_player_control_toolbar(self):
++        """Return a player control toolbar
++        """
++        tb=gtk.Toolbar()
++        tb.set_style(gtk.TOOLBAR_ICONS)
++
++        def on_play(b):
++            self.player.start(0)
++            return True
++
++        def on_stop(b):
++            self.player.stop(0)
++            return True
++
++        def on_pause(b):
++            self.player.pause(0)
++            return True
++
++        tb_list = (
++            (_("Play"), _("Play"), gtk.STOCK_MEDIA_PLAY,
++             on_play),
++            (_("Pause"), _("Pause"), gtk.STOCK_MEDIA_PAUSE,
++             on_pause),
++            (_("Stop"), _("Stop"), gtk.STOCK_MEDIA_STOP,
++             on_stop),
++            )
++
++        for text, tooltip, stock, callback in tb_list:
++            b=gtk.ToolButton(stock)
++            b.connect("clicked", callback)
++            tb.insert(b, -1)
++        tb.show_all()
++        return tb
++
++class VideoPlayer:
++    """Example video player.
++    """
++    def __init__(self):
++        self.vlc = DecoratedVLCWidget()
++
++    def main(self, fname):
++        self.vlc.player.set_mrl(fname)
++        self.popup()
++        gtk.main()
++
++    def popup(self):
++        w=gtk.Window()
++        w.add(self.vlc)
++        w.show_all()
++        w.connect("destroy", gtk.main_quit)
++        return w
++
++if __name__ == '__main__':
++    if not sys.argv[1:]:
++       print "You must provide a movie filename"
++       sys.exit(1)
++    p=VideoPlayer()
++    p.main(sys.argv[1])
+diff -rbNU 3 vlc-1.0.5/configure.ac d10-02-02-tstreamplaylist-p10/configure.ac
+--- vlc-1.0.5/configure.ac     2010-01-24 22:26:15.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/configure.ac 2010-02-12 13:22:37.000000000 +0100
+@@ -5766,6 +5766,17 @@
+ AC_LANG_POP(C++)
+ AM_CONDITIONAL(BUILD_MOZILLA,${mozilla})
++dnl
++dnl  Ivaylo's special access module for raw input from Python
++dnl
++AC_ARG_ENABLE(access-raw,
++  [  --enable-access-raw     Enable raw access module (default disabled)])
++if test "${enable_access_raw}" = "yes"
++then
++  VLC_ADD_PLUGIN([access_raw])
++fi
++
++
+ dnl Tests for Osso and Xsp
+ AC_CHECK_LIB(osso, osso_display_blanking_pause, [
+   PKG_CHECK_MODULES(GLIB2, glib-2.0, [
+diff -rbNU 3 vlc-1.0.5/include/vlc/mediacontrol.h d10-02-02-tstreamplaylist-p10/include/vlc/mediacontrol.h
+--- vlc-1.0.5/include/vlc/mediacontrol.h       2009-12-20 18:43:39.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/include/vlc/mediacontrol.h   2010-02-12 13:22:37.000000000 +0100
+@@ -225,6 +225,30 @@
+ VLC_PUBLIC_API char * mediacontrol_get_mrl( mediacontrol_Instance *self,
+                                             mediacontrol_Exception *exception );
++
++
++/**
++ *  Added by Ivaylo:
++ *
++ * Set the size and ID of the content to be played, and the callback functions
++ * for reading and seeking in the content via the raw C access module.
++ *
++ * \param self the mediacontrol instance
++ * \param read_callback the function for VLC to call when it wants to read
++ * \param seek_callback the function for VLC to call when it wants to seek
++ * \param i_fsize size of the content
++ * \param id unique ID of the content
++ * \param exception an initialized exception pointer
++ */
++VLC_PUBLIC_API void mediacontrol_set_raw_callbacks( mediacontrol_Instance *self,
++                                                            raw_read_fnc_ptr read_callback,
++                                                                raw_seek_fnc_ptr seek_callback,
++                                                                int i_fsize,
++                                                                int id,
++                                  mediacontrol_Exception *exception );
++
++
++
+ /*****************************************************************************
+  * A/V functions
+  *****************************************************************************/
+diff -rbNU 3 vlc-1.0.5/include/vlc/mediacontrol_structures.h d10-02-02-tstreamplaylist-p10/include/vlc/mediacontrol_structures.h
+--- vlc-1.0.5/include/vlc/mediacontrol_structures.h    2009-12-20 18:43:39.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/include/vlc/mediacontrol_structures.h        2009-08-25 14:23:17.000000000 +0200
+@@ -135,6 +135,14 @@
+ } mediacontrol_StreamInformation;
++/**
++ * For Ivaylo's mediacontrol_set_raw_callbacks()
++ */
++
++typedef int (* raw_read_fnc_ptr) ( uint8_t *p_data, int i_data, int id );
++typedef       int (* raw_seek_fnc_ptr) ( int64_t i_pos, int id );
++
++
+ # ifdef __cplusplus
+ }
+ # endif
+diff -rbNU 3 vlc-1.0.5/modules/access/Makefile.am d10-02-02-tstreamplaylist-p10/modules/access/Makefile.am
+--- vlc-1.0.5/modules/access/Makefile.am       2010-01-24 22:28:08.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/modules/access/Makefile.am   2009-08-26 10:29:01.000000000 +0200
+@@ -3,7 +3,7 @@
+ basedir = access
+ dir = access
+-mods = access_alsa access_directory access_dv access_eyetv access_fake access_file access_ftp access_gnomevfs access_http access_jack access_mmap access_mtp access_oss access_smb access_tcp access_udp cdda dc1394 dvdnav dvdread pvr qtcapture v4l v4l2
++mods = access_alsa access_directory access_dv access_eyetv access_fake access_file access_ftp access_gnomevfs access_http access_jack access_mmap access_mtp access_oss access_raw access_smb access_tcp access_udp cdda dc1394 dvdnav dvdread pvr qtcapture v4l v4l2
+ libvlc_LTLIBRARIES =  $(LTLIBaccess_alsa) $(LTLIBaccess_dv) $(LTLIBaccess_eyetv) $(LTLIBaccess_gnomevfs) $(LTLIBaccess_jack) $(LTLIBaccess_mmap) $(LTLIBaccess_mtp) $(LTLIBaccess_oss) $(LTLIBaccess_smb) $(LTLIBcdda) $(LTLIBdc1394) $(LTLIBdvdnav) $(LTLIBdvdread) $(LTLIBpvr) $(LTLIBqtcapture) $(LTLIBv4l) $(LTLIBv4l2)
+ EXTRA_LTLIBRARIES =  libaccess_alsa_plugin.la libaccess_dv_plugin.la libaccess_eyetv_plugin.la libaccess_gnomevfs_plugin.la libaccess_jack_plugin.la libaccess_mmap_plugin.la libaccess_mtp_plugin.la libaccess_oss_plugin.la libaccess_smb_plugin.la libcdda_plugin.la libdc1394_plugin.la libdvdnav_plugin.la libdvdread_plugin.la libpvr_plugin.la libqtcapture_plugin.la libv4l_plugin.la libv4l2_plugin.la
+@@ -152,6 +152,17 @@
+ libaccess_oss_plugin_la_LIBADD = $(AM_LIBADD)
+ libaccess_oss_plugin_la_DEPENDENCIES = $(top_srcdir)/src/libvlccore.sym
++# The access_raw plugin
++libaccess_raw_plugin_la_SOURCES = $(SOURCES_access_raw)
++nodist_libaccess_raw_plugin_la_SOURCES = $(nodist_SOURCES_access_raw)
++# Force per-target objects:
++libaccess_raw_plugin_la_CFLAGS = $(AM_CFLAGS)
++libaccess_raw_plugin_la_CXXFLAGS = $(AM_CXXFLAGS)
++libaccess_raw_plugin_la_OBJCFLAGS = $(AM_OBJCFLAGS)
++# Set LIBADD and DEPENDENCIES manually:
++libaccess_raw_plugin_la_LIBADD = $(AM_LIBADD)
++libaccess_raw_plugin_la_DEPENDENCIES = $(top_srcdir)/src/libvlccore.sym
++
+ # The access_smb plugin
+ libaccess_smb_plugin_la_SOURCES = $(SOURCES_access_smb)
+ nodist_libaccess_smb_plugin_la_SOURCES = $(nodist_SOURCES_access_smb)
+diff -rbNU 3 vlc-1.0.5/modules/access/Modules.am d10-02-02-tstreamplaylist-p10/modules/access/Modules.am
+--- vlc-1.0.5/modules/access/Modules.am        2009-12-20 18:43:39.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/modules/access/Modules.am    2010-02-12 13:22:37.000000000 +0100
+@@ -41,6 +41,7 @@
+ SOURCES_access_alsa = alsa.c
+ SOURCES_access_oss = oss.c
+ SOURCES_access_mtp = mtp.c
++SOURCES_access_raw = raw.c
+ libaccess_rtmp_plugin_la_SOURCES = \
+         rtmp/access.c \
+@@ -60,5 +61,6 @@
+       libaccess_http_plugin.la \
+       libaccess_ftp_plugin.la \
+       libaccess_fake_plugin.la \
++      libaccess_raw_plugin.la \
+       libaccess_rtmp_plugin.la \
+       $(NULL)
+diff -rbNU 3 vlc-1.0.5/modules/access/raw.c d10-02-02-tstreamplaylist-p10/modules/access/raw.c
+--- vlc-1.0.5/modules/access/raw.c     1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/modules/access/raw.c 2009-09-11 10:18:54.000000000 +0200
+@@ -0,0 +1,335 @@
++/*****************************************************************************\r
++ * raw.c: Raw input module for reading input directly from a C program in the same\r
++ * address space, for example, and in particular, Python.\r
++ *\r
++ *****************************************************************************\r
++* Copyright (C) 2008-2009  Delft University of Technology. All rights reserved.\r
++ *\r
++ * Author: Ivaylo Haratcherev <I.J.Haratcherev@tudelft.nl>\r
++ *\r
++ * This program is free software; you can redistribute it and/or modify\r
++ * it under the terms of the GNU General Public License as published by\r
++ * the Free Software Foundation; either version 2 of the License, or\r
++ * (at your option) any later version.\r
++ *\r
++ * This program is distributed in the hope that it will be useful,\r
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
++ * GNU General Public License for more details.\r
++ *\r
++ * You should have received a copy of the GNU General Public License\r
++ * along with this program; if not, write to the Free Software\r
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.\r
++ *****************************************************************************/\r
++\r
++/*****************************************************************************\r
++ * Preamble\r
++ *****************************************************************************/\r
++#ifdef HAVE_CONFIG_H\r
++# include "config.h"\r
++#endif\r
++\r
++#include <vlc_common.h>\r
++#include <vlc_plugin.h>\r
++#include <vlc_access.h>\r
++#include <vlc_demux.h>\r
++#include <vlc_image.h>\r
++\r
++\r
++#include <stdlib.h>\r
++#include <string.h>\r
++#if defined( WIN32 )\r
++    #define strtok_r(s,d,p) strtok(s,d)\r
++#endif\r
++\r
++#include <vlc/mediacontrol_structures.h>\r
++\r
++\r
++/*****************************************************************************\r
++ * Module descriptor\r
++ *****************************************************************************/\r
++static int  Open ( vlc_object_t * );\r
++static void Close( vlc_object_t * );\r
++\r
++\r
++#define CACHING_TEXT N_("Caching value in ms")\r
++#define CACHING_LONGTEXT N_( \\r
++    "Caching value for raw streams. This " \\r
++    "value should be set in milliseconds." )\r
++\r
++vlc_module_begin();\r
++    set_description( _("Raw input") );\r
++    set_shortname( _( "Raw" ) );\r
++    set_category( CAT_INPUT );\r
++    set_subcategory( SUBCAT_INPUT_ACCESS );\r
++    add_integer( "raw-caching", DEFAULT_PTS_DELAY / 1000, NULL, CACHING_TEXT, CACHING_LONGTEXT, true )\r
++    //  Ivaylo: What the heck is the second parameter in set_capability?\r
++    set_capability( "access", 10 );\r
++    add_shortcut( "raw" );\r
++    set_callbacks( Open, Close );\r
++vlc_module_end();\r
++\r
++\r
++/*****************************************************************************\r
++ * Exported prototypes\r
++ *****************************************************************************/\r
++static ssize_t Read( access_t *, uint8_t *, size_t );\r
++static int Seek( access_t *, int64_t );\r
++static int Control( access_t *, int, va_list );\r
++\r
++static int process_options( access_t *p_access );\r
++\r
++\r
++/*****************************************************************************\r
++ * Local prototypes\r
++ *****************************************************************************/\r
++struct access_sys_t\r
++{\r
++    int fd;\r
++\r
++    bool b_seekable;\r
++\r
++/* callback pointers */\r
++    raw_read_fnc_ptr    read_callback;\r
++    raw_seek_fnc_ptr    seek_callback;\r
++    int fsize;\r
++};\r
++\r
++\r
++\r
++\r
++/*****************************************************************************\r
++ * Open:\r
++ *****************************************************************************/\r
++static int Open( vlc_object_t *p_this )\r
++{\r
++    access_t     *p_access = (access_t*)p_this;\r
++    access_sys_t *p_sys;\r
++\r
++    /* Set up p_access */\r
++    p_access->pf_read = Read;\r
++    p_access->pf_block = NULL;\r
++    p_access->pf_control = Control;\r
++    p_access->pf_seek = Seek;\r
++    p_access->info.i_update = 0;\r
++    p_access->info.i_size = 0;\r
++    p_access->info.i_pos = 0;\r
++    p_access->info.b_eof = false;\r
++    p_access->info.i_title = 0;\r
++    p_access->info.i_seekpoint = 0;\r
++    p_access->p_sys = p_sys = malloc( sizeof( access_sys_t ) );\r
++    memset( p_sys, 0, sizeof( access_sys_t ) );\r
++    p_sys->fd = -1;\r
++    p_sys->b_seekable = true;\r
++    p_sys->read_callback = NULL;\r
++    p_sys->seek_callback = NULL;\r
++    p_sys->fsize = 0;\r
++    msg_Dbg( p_access, "Open raw before process options. MRL was: %s", p_access->psz_path );\r
++\r
++    if ( process_options(p_access) != VLC_SUCCESS )\r
++    {\r
++        msg_Err( p_access, "Bad MRL, please check the option line "\r
++                          "(MRL was: %s)",\r
++                          p_access->psz_path );\r
++        free( p_sys );\r
++        p_access->p_sys = NULL;\r
++        return VLC_EGENERIC;\r
++    }\r
++    p_access->info.i_size = p_sys->fsize;\r
++    msg_Dbg( p_access, "   read_callback %p, seek_callback %p, fsize %d",\r
++        p_sys->read_callback, p_sys->seek_callback, p_sys->fsize);\r
++\r
++     /* PTS delay */\r
++    var_Create( p_access, "raw-caching", VLC_VAR_INTEGER |VLC_VAR_DOINHERIT );\r
++\r
++    return VLC_SUCCESS;\r
++}\r
++\r
++/*****************************************************************************\r
++ * Close:\r
++ *****************************************************************************/\r
++static void Close( vlc_object_t *p_this )\r
++{\r
++    access_t     *p_access = (access_t*)p_this;\r
++    access_sys_t *p_sys = p_access->p_sys;\r
++\r
++    msg_Dbg( p_access, "Close raw...");\r
++\r
++    free( p_sys );\r
++}\r
++\r
++/*****************************************************************************\r
++ * Read: Read up to i_len bytes from the raw connection and place in\r
++ * p_buffer. Return the actual number of bytes read\r
++ *****************************************************************************/\r
++static ssize_t Read( access_t *p_access, uint8_t *p_buffer, size_t i_len )\r
++{\r
++    access_sys_t *p_sys = p_access->p_sys;\r
++    int i_read;\r
++\r
++/*    msg_Err( p_access, "Read...");*/\r
++\r
++    if( p_access->info.i_size > 0 &&\r
++        i_len + p_access->info.i_pos > p_access->info.i_size )\r
++    {\r
++        if( ( i_len = p_access->info.i_size - p_access->info.i_pos ) == 0 )\r
++        {\r
++            p_access->info.b_eof = true;\r
++            return 0;\r
++        }\r
++    }\r
++\r
++\r
++    if( p_sys->read_callback )\r
++        i_read = p_sys->read_callback(p_buffer, i_len, p_sys->fd);\r
++    else\r
++        i_read = 0;\r
++\r
++    if( i_read > 0 )\r
++        p_access->info.i_pos += i_read;\r
++\r
++    //msg_Dbg( p_access, "Read: i_read= %d", i_read );\r
++    return i_read;\r
++}\r
++\r
++\r
++\r
++/*****************************************************************************\r
++ * Seek: close and re-open a connection at the right place\r
++ *****************************************************************************/\r
++static int Seek( access_t *p_access, int64_t i_pos )\r
++{\r
++    access_sys_t *p_sys = p_access->p_sys;\r
++    msg_Dbg( p_access, "trying to seek to %lld", i_pos );\r
++\r
++    if( p_sys->seek_callback ) {\r
++        if (!p_sys->seek_callback(i_pos, p_sys->fd)) {\r
++            p_access->info.i_pos = i_pos;\r
++            if( p_access->info.i_size < p_access->info.i_pos )\r
++            {\r
++                msg_Err( p_access, "seeking too far" );\r
++                p_access->info.i_pos = p_access->info.i_size;\r
++            }\r
++            else if( p_access->info.i_pos < 0 )\r
++            {\r
++                msg_Err( p_access, "seeking too early" );\r
++                p_access->info.i_pos = 0;\r
++            }\r
++            /* Reset eof */\r
++            p_access->info.b_eof = false;\r
++            return VLC_SUCCESS;\r
++        }\r
++    }\r
++\r
++    return VLC_EGENERIC;\r
++}\r
++\r
++\r
++/*****************************************************************************\r
++ * Control:\r
++ *****************************************************************************/\r
++static int Control( access_t *p_access, int i_query, va_list args )\r
++{\r
++    access_sys_t *p_sys = p_access->p_sys;\r
++    bool   *pb_bool;\r
++    int64_t      *pi_64;\r
++\r
++    msg_Dbg( p_access, "Control...");\r
++    switch( i_query )\r
++    {\r
++        /* */\r
++        case ACCESS_CAN_SEEK:\r
++            pb_bool = (bool *)va_arg( args, bool* );\r
++            *pb_bool = p_sys->b_seekable;\r
++            break;\r
++        case ACCESS_CAN_FASTSEEK:\r
++            pb_bool = (bool*)va_arg( args, bool* );\r
++            *pb_bool = true; // Arno: Hack to get AVI seeking to work\r
++            break;\r
++        case ACCESS_CAN_PAUSE:\r
++        case ACCESS_CAN_CONTROL_PACE:\r
++            pb_bool = (bool*)va_arg( args, bool* );\r
++            *pb_bool = true;\r
++            break;\r
++\r
++         /* */\r
++        case ACCESS_GET_PTS_DELAY:\r
++            pi_64 = (int64_t*)va_arg( args, int64_t * );\r
++            *pi_64 = (int64_t)var_GetInteger( p_access, "raw-caching" ) * 1000;\r
++            break;\r
++\r
++        case ACCESS_SET_PAUSE_STATE:\r
++            /* Nothing to do */\r
++            break;\r
++\r
++        case ACCESS_GET_TITLE_INFO:\r
++        case ACCESS_SET_TITLE:\r
++        case ACCESS_SET_SEEKPOINT:\r
++        case ACCESS_SET_PRIVATE_ID_STATE:\r
++        case ACCESS_GET_META:\r
++        case ACCESS_GET_PRIVATE_ID_STATE:\r
++        case ACCESS_GET_CONTENT_TYPE:\r
++            return VLC_EGENERIC;\r
++\r
++        default:\r
++            msg_Warn( p_access, "unimplemented query %d in control", i_query );\r
++            return VLC_EGENERIC;\r
++\r
++    }\r
++    return VLC_SUCCESS;\r
++}\r
++\r
++\r
++static int process_options( access_t *p_access )\r
++{\r
++    access_sys_t *p_sys = p_access->p_sys;\r
++\r
++    char *psz_dup;\r
++    char *psz_parser;\r
++    char *token = NULL;\r
++    char *state = NULL;\r
++\r
++    if( strncmp(p_access->psz_path,"raw",3) != 0 )\r
++        return VLC_EGENERIC;\r
++\r
++    psz_dup = strdup( p_access->psz_path );\r
++    psz_parser = psz_dup;\r
++    for( token = strtok_r( psz_parser,":",&state); token;\r
++         token = strtok_r( NULL, ":", &state ) )\r
++    {\r
++        if( strncmp( token, "rptr=", strlen("rptr=") ) == 0)\r
++        {\r
++            token += strlen("rptr=");\r
++            sscanf( token, "0x%p", &p_sys->read_callback );\r
++            if (p_sys->read_callback == NULL)\r
++            {\r
++               // snprintf in src/control/mediacontrol_core.c prints 0x0xADDR\r
++               // on crosscompiled Win32\r
++               sscanf( token, "0x0x%p", &p_sys->read_callback );\r
++            }\r
++        }\r
++        else if( strncmp( token, "sptr=", strlen("sptr=") ) == 0)\r
++        {\r
++            token += strlen("sptr=");\r
++            sscanf( token, "0x%p", &p_sys->seek_callback );\r
++            if (p_sys->seek_callback == NULL)\r
++            {\r
++                // snprintf in src/control/mediacontrol_core.c prints 0x0xADDR\r
++                // on crosscompiled Win32\r
++                sscanf( token, "0x0x%p", &p_sys->seek_callback );\r
++            }\r
++        }\r
++        else if( strncmp( token, "fsize=", strlen("fsize=") ) == 0)\r
++        {\r
++            token += strlen("fsize=");\r
++            sscanf( token, "%d", &p_sys->fsize );  // Arno: TODO: shouldn't this be int64 like seek's offset?\r
++        }\r
++        else if( strncmp( token, "id=", strlen("id=") ) == 0)\r
++        {\r
++            token += strlen("id=");\r
++            sscanf( token, "%d", &p_sys->fd );\r
++        }\r
++    }\r
++    if( psz_dup ) free( psz_dup );\r
++    return VLC_SUCCESS;\r
++}\r
+diff -rbNU 3 vlc-1.0.5/modules/demux/ts.c d10-02-02-tstreamplaylist-p10/modules/demux/ts.c
+--- vlc-1.0.5/modules/demux/ts.c       2009-12-20 18:43:39.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/modules/demux/ts.c   2010-02-12 13:22:37.000000000 +0100
+@@ -485,9 +485,10 @@
+     {
+         i_packet_size = TS_PACKET_SIZE_204;
+     }
+-    else if( p_demux->b_force )
++    else if( 1 || p_demux->b_force ) /* Arno: Patch so VLC will continue to interpret stream as MPEGTS */
+     {
+         i_packet_size = TS_PACKET_SIZE_188;
++        msg_Warn( p_demux, "TS module forced (lost sync)" );
+     }
+     else if( b_topfield )
+     {
+diff -rbNU 3 vlc-1.0.5/p2pnext-changeversion.sh d10-02-02-tstreamplaylist-p10/p2pnext-changeversion.sh
+--- vlc-1.0.5/p2pnext-changeversion.sh 1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/p2pnext-changeversion.sh     2010-08-13 15:43:45.000000000 +0200
+@@ -0,0 +1,10 @@
++#!/bin/sh
++# Rewrite version numbers encoded as "1.0.8" (and comma form "1,0,8") to the new release.
++
++for name in projects/activex/axvlc.dll.manifest projects/activex/axvlc.inf.in projects/activex/axvlc_rc.rc.in projects/mozilla/npvlc.dll.manifest projects/mozilla/npvlc_rc.rc.in ; do
++      sed -e 's/1\.0\.8/1.1.0/g' -e 's/1,0,8/1,1,0/g' $name > /tmp/rename.$$
++      /bin/mv /tmp/rename.$$ $name
++done
++
++echo Also modify version number in *.inf files!
++
+diff -rbNU 3 vlc-1.0.5/p2pnext-win32-setup-from-co.sh d10-02-02-tstreamplaylist-p10/p2pnext-win32-setup-from-co.sh
+--- vlc-1.0.5/p2pnext-win32-setup-from-co.sh   1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/p2pnext-win32-setup-from-co.sh       2009-08-21 15:43:23.000000000 +0200
+@@ -0,0 +1,193 @@
++#!/bin/bash
++
++# Written by Diego Andres Rabaioli, Arno Bakker
++
++# Important:
++# Check that you have installed the following tools:
++# -subversion
++# -mingw32
++# -automake
++# -libtool
++# -gettext
++# -cvs
++# -libgcrypt-dev
++#
++
++CONTRIB_NAME="contribs-0.9.9"
++CONTRIB_EXT=".tar.bz2"
++CONTRIB_URI="http://download.videolan.org/pub/videolan/testing/win32/"
++
++VLC_URI="https://svn.tribler.org/vlc/branches/plugin-1.0"
++VLC_REV="HEAD"  # HEAD = latest, set to specific revision for stability
++VLC_DIR="vlc-plugin-1.0-r$VLC_REV"
++VLC_LNK="_vlc"
++VLC_CONFIG_FLAGS="--host=i586-mingw32msvc --build=i386-linux \
++     --disable-gtk --disable-zvbi \
++     --enable-nls --enable-sdl --with-sdl-config-path=/usr/win32/bin \
++     --enable-faad --enable-flac --enable-theora \
++     --with-wx-config-path=/usr/win32/bin \
++     --with-freetype-config-path=/usr/win32/bin \
++     --with-fribidi-config-path=/usr/win32/bin \
++     --with-libgcrypt-prefix=/usr/win32 \
++     --disable-live555 --disable-caca \
++     --with-xml2-config-path=/usr/win32/bin \
++     --with-dvdnav-config-path=/usr/win32/bin \
++     --disable-cddax --disable-vcdx --enable-goom \
++     --enable-twolame --disable-dvdread \
++     --enable-debug --disable-swscale --disable-mad --disable-a52 --disable-dca \
++     --disable-qt4 --disable-skins2 --disable-atmo --enable-mozilla --with-mozilla-sdk-path=/usr/win32/gecko-sdk"
++
++
++# Build the environment
++build_env ()
++{
++  # VLC Contrib
++  p_head "Checking VLC CONTRIB is installed in /usr/win32"
++  if [ ! -d /usr/win32 ] ;
++  then
++      wget "$CONTRIB_URI$CONTRIB_NAME$CONTRIB_EXT"
++      echo "Please unpack $CONTRIB_NAME$CONTRIB_EXT as root from / to create /usr/win32"
++      exit 0
++  fi
++
++  # VLC
++  p_head "Installing VLC"
++  # Downloading
++  svn checkout $VLC_URI -r $VLC_REV $VLC_DIR
++  ln -s $VLC_DIR $VLC_LNK
++
++  echo
++  echo "The environment is ready. It's possible to compile Vlc now"
++  echo -n "Do you want to compile Vlc now? [y/n] "
++  read choise
++  if [ "$choise" = "y" ]; then
++    build_vlc
++  fi
++}
++
++# Clean the environment
++clean_env ()
++{
++  echo "This operation will delete the following files and directories:"
++  echo
++  echo "$CONTRIB_NAME"
++  echo "$CONTRIB_NAME$CONTRIB_EXT"
++  echo "$VLC_DIR"
++  echo "$VLC_LNK"
++  echo
++
++  echo -n "Are you sure you want to continue? [y/n] "
++  read choise
++  if [ "$choise" = "y" ]; then
++    p_head "Cleaning Environment"
++    rm -rf "$CONTRIB_NAME" "$CONTRIB_NAME$CONTRIB_EXT" "$VLC_DIR" "$VLC_LNK"
++  fi
++}
++
++# Build Vlc
++build_vlc ()
++{
++  # Bootstrap
++  p_head "Bootstrapping Vlc "
++  export PKG_CONFIG_PATH=/usr/win32/lib/pkgconfig
++  export CPPFLAGS="-I/usr/win32/include -I/usr/win32/include/ebml"
++  export LDFLAGS="-L/usr/win32/lib"
++  export CC=i586-mingw32msvc-gcc
++  export CXX=i586-mingw32msvc-g++
++  export CXXCPP=i586-mingw32msvc-cpp
++  cd $VLC_LNK
++  ./bootstrap
++  if [ $? -ne 0 ]; then
++    p_error "Error in bootstrapping Vlc."
++    exit 1
++  fi
++
++  # Configure
++  p_head "Configuring Vlc"
++  ./configure $VLC_CONFIG_FLAGS
++  if [ $? -ne 0 ]; then
++    p_error "Error in configuring Vlc."
++    exit 1
++  fi
++
++  # Make
++  p_head "Making Vlc"
++  make
++  if [ $? -ne 0 ]; then
++    p_error "Error in making Vlc."
++    exit 1
++  fi
++  cd ..
++
++}
++
++# Clean Vlc
++clean_vlc ()
++{
++  echo
++  echo "This operation will clean your Vlc source tree"
++  echo -n "Are you sure you want to continue? [y/n] "
++  read choise
++  if [ "$choise" = "y" ]; then
++    p_head "Cleaning Vlc"
++    cd _vlc
++    make clean
++    make distclean
++    cd ..
++  fi
++
++}
++
++# Print Header Utility
++p_head ()
++{
++  echo
++  echo -en '\E[37;44m' "$1"; tput sgr0
++  echo
++  echo
++}
++
++# Print Error Utility
++p_error ()
++{
++  echo
++  echo -en '\E[37;41m' "$1"; tput sgr0
++  echo
++  echo
++}
++
++
++# MAIN
++echo
++select selection in "build environment" "clean environment" "build vlc" "clean vlc"
++do
++  echo
++  case "$selection" in
++    "build environment" )
++    build_env
++    ;;
++
++    "clean environment" )
++    clean_env
++    ;;
++
++    "build vlc" )
++    build_vlc
++    ;;
++
++    "clean vlc" )
++    clean_vlc
++    ;;
++
++    * )
++      echo "inconsistent selection"
++    ;;
++  esac
++  break
++done
++
++p_head "Done."
++
++exit 0
++
++
+diff -rbNU 3 vlc-1.0.5/projects/activex/axvlc.dll.manifest d10-02-02-tstreamplaylist-p10/projects/activex/axvlc.dll.manifest
+--- vlc-1.0.5/projects/activex/axvlc.dll.manifest      2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/axvlc.dll.manifest  2010-08-13 15:43:48.000000000 +0200
+@@ -1,10 +1,10 @@
+ <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+       <assemblyIdentity
+-              version="1.0.0.0"
++              version="1.1.0.0"
+               processorArchitecture="x86"
+               name="axvlc.dll"
+               type="win32"
+       />
+-      <description>VLC ActiveX plugin</description>
++      <description>SwarmPlugin P2P Multimedia ActiveX plugin</description>
+ </assembly>
+diff -rbNU 3 vlc-1.0.5/projects/activex/axvlc.idl d10-02-02-tstreamplaylist-p10/projects/activex/axvlc.idl
+--- vlc-1.0.5/projects/activex/axvlc.idl       2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/axvlc.idl   2010-02-15 14:05:24.000000000 +0100
+@@ -216,8 +216,8 @@
\r
+     [\r
+       odl,\r
+-      uuid(49E0DBD1-9440-466C-9C97-95C67190C603),\r
+-      helpstring("VLC Input APIs"),\r
++        uuid(8C8EF0C0-1A2E-11DF-A9E1-0002A5D5C51B), // P2P-Next, another new UUID, 2010-02-15 \r
++      helpstring("P2P-Next extended VLC Input APIs"),\r
+       dual,\r
+       oleautomation\r
+     ]\r
+@@ -249,6 +249,14 @@
\r
+         [propget, helpstring("Returns whether current playback displays video.")]\r
+         HRESULT hasVout([out, retval] VARIANT_BOOL* hasVout);\r
++\r
++              // P2P-Next\r
++        [propget, helpstring("Returns last NSSA INFO message")]\r
++        HRESULT p2pstatus([out, retval] BSTR* p2pstatus);\r
++\r
++              // P2P-Next, 2010-02-15\r
++        [helpstring("Add a playlist item.")]\r
++        HRESULT set_p2ptarget([in] BSTR uri, [out, retval] long* itemId);\r
+     };\r
\r
+     [\r
+@@ -530,7 +538,7 @@
+     };\r
\r
+     [\r
+-      uuid(E23FE9C6-778E-49D4-B537-38FCDE4887D8),\r
++      uuid(045E7BEE-6F8F-44cf-AFF1-52710C6209FE),\r
+       helpstring("VLC control (deprecated)"),\r
+       control\r
+     ]\r
+@@ -542,8 +550,9 @@
+     };\r
\r
+     [\r
+-      uuid(9BE31822-FDAD-461B-AD51-BE1D1C159921),\r
+-      helpstring("VLC control"),\r
++      uuid(1800B8AF-4E33-43C0-AFC7-894433C13538),\r
++\r
++      helpstring("SwarmPlugin control"),\r
+       control\r
+     ]\r
+     coclass VLCPlugin2\r
+diff -rbNU 3 vlc-1.0.5/projects/activex/axvlc_idl.c d10-02-02-tstreamplaylist-p10/projects/activex/axvlc_idl.c
+--- vlc-1.0.5/projects/activex/axvlc_idl.c     2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/axvlc_idl.c 2010-02-15 14:05:24.000000000 +0100
+@@ -6,7 +6,7 @@
\r
\r
+  /* File created by MIDL compiler version 7.00.0500 */\r
+-/* at Fri Apr 25 11:37:37 2008\r
++/* at Mon Feb 15 13:55:55 2010\r
+  */\r
+ /* Compiler settings for axvlc.idl:\r
+     Oicf, W1, Zp8, env=Win32 (32b run)\r
+@@ -76,7 +76,7 @@
+ MIDL_DEFINE_GUID(IID, IID_IVLCAudio,0x9E0BD17B,0x2D3C,0x4656,0xB9,0x4D,0x03,0x08,0x4F,0x3F,0xD9,0xD4);\r
\r
\r
+-MIDL_DEFINE_GUID(IID, IID_IVLCInput,0x49E0DBD1,0x9440,0x466C,0x9C,0x97,0x95,0xC6,0x71,0x90,0xC6,0x03);\r
++MIDL_DEFINE_GUID(IID, IID_IVLCInput,0x8C8EF0C0,0x1A2E,0x11DF,0xA9,0xE1,0x00,0x02,0xA5,0xD5,0xC5,0x1B);\r
\r
\r
+ MIDL_DEFINE_GUID(IID, IID_IVLCLog,0x8E3BC3D9,0x62E9,0x48FB,0x8A,0x6D,0x99,0x3F,0x9A,0xBC,0x4A,0x0A);\r
+@@ -106,10 +106,10 @@
+ MIDL_DEFINE_GUID(IID, IID_IVLCPlaylistItems,0xFD37FE32,0x82BC,0x4A25,0xB0,0x56,0x31,0x5F,0x4D,0xBB,0x19,0x4D);\r
\r
\r
+-MIDL_DEFINE_GUID(CLSID, CLSID_VLCPlugin,0xE23FE9C6,0x778E,0x49D4,0xB5,0x37,0x38,0xFC,0xDE,0x48,0x87,0xD8);\r
++MIDL_DEFINE_GUID(CLSID, CLSID_VLCPlugin,0x045E7BEE,0x6F8F,0x44cf,0xAF,0xF1,0x52,0x71,0x0C,0x62,0x09,0xFE);\r
\r
\r
+-MIDL_DEFINE_GUID(CLSID, CLSID_VLCPlugin2,0x9BE31822,0xFDAD,0x461B,0xAD,0x51,0xBE,0x1D,0x1C,0x15,0x99,0x21);\r
++MIDL_DEFINE_GUID(CLSID, CLSID_VLCPlugin2,0x1800B8AF,0x4E33,0x43C0,0xAF,0xC7,0x89,0x44,0x33,0xC1,0x35,0x38);\r
\r
+ #undef MIDL_DEFINE_GUID\r
\r
+diff -rbNU 3 vlc-1.0.5/projects/activex/axvlc_idl.h d10-02-02-tstreamplaylist-p10/projects/activex/axvlc_idl.h
+--- vlc-1.0.5/projects/activex/axvlc_idl.h     2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/axvlc_idl.h 2010-02-15 14:05:24.000000000 +0100
+@@ -4,7 +4,7 @@
\r
\r
+  /* File created by MIDL compiler version 7.00.0500 */\r
+-/* at Fri Apr 25 11:37:37 2008\r
++/* at Mon Feb 15 13:55:55 2010\r
+  */\r
+ /* Compiler settings for axvlc.idl:\r
+     Oicf, W1, Zp8, env=Win32 (32b run)\r
+@@ -852,7 +852,7 @@
\r
+ #if defined(__cplusplus) && !defined(CINTERFACE)\r
+     \r
+-    MIDL_INTERFACE("49E0DBD1-9440-466C-9C97-95C67190C603")\r
++    MIDL_INTERFACE("8C8EF0C0-1A2E-11DF-A9E1-0002A5D5C51B")\r
+     IVLCInput : public IDispatch\r
+     {\r
+     public:\r
+@@ -886,6 +886,13 @@
+         virtual /* [helpstring][propget] */ HRESULT STDMETHODCALLTYPE get_hasVout( \r
+             /* [retval][out] */ VARIANT_BOOL *hasVout) = 0;\r
+         \r
++        virtual /* [helpstring][propget] */ HRESULT STDMETHODCALLTYPE get_p2pstatus( \r
++            /* [retval][out] */ BSTR *p2pstatus) = 0;\r
++        \r
++        virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE set_p2ptarget( \r
++            /* [in] */ BSTR uri,\r
++            /* [retval][out] */ long *itemId) = 0;\r
++        \r
+     };\r
+     \r
+ #else         /* C style interface */\r
+@@ -975,6 +982,15 @@
+             IVLCInput * This,\r
+             /* [retval][out] */ VARIANT_BOOL *hasVout);\r
+         \r
++        /* [helpstring][propget] */ HRESULT ( STDMETHODCALLTYPE *get_p2pstatus )( \r
++            IVLCInput * This,\r
++            /* [retval][out] */ BSTR *p2pstatus);\r
++        \r
++        /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *set_p2ptarget )( \r
++            IVLCInput * This,\r
++            /* [in] */ BSTR uri,\r
++            /* [retval][out] */ long *itemId);\r
++        \r
+         END_INTERFACE\r
+     } IVLCInputVtbl;\r
\r
+@@ -1041,6 +1057,12 @@
+ #define IVLCInput_get_hasVout(This,hasVout)   \\r
+     ( (This)->lpVtbl -> get_hasVout(This,hasVout) ) \r
\r
++#define IVLCInput_get_p2pstatus(This,p2pstatus)       \\r
++    ( (This)->lpVtbl -> get_p2pstatus(This,p2pstatus) ) \r
++\r
++#define IVLCInput_set_p2ptarget(This,uri,itemId)      \\r
++    ( (This)->lpVtbl -> set_p2ptarget(This,uri,itemId) ) \r
++\r
+ #endif /* COBJMACROS */\r
\r
\r
+@@ -2171,12 +2193,6 @@
+         virtual /* [helpstring][propput][id] */ HRESULT STDMETHODCALLTYPE put_MRL( \r
+             /* [in] */ BSTR mrl) = 0;\r
+         \r
+-        virtual /* [helpstring][propget][id] */ HRESULT STDMETHODCALLTYPE get_Toolbar( \r
+-            /* [retval][out] */ VARIANT_BOOL *visible) = 0;\r
+-        \r
+-        virtual /* [helpstring][propput][id] */ HRESULT STDMETHODCALLTYPE put_Toolbar( \r
+-            /* [in] */ VARIANT_BOOL visible) = 0;\r
+-        \r
+         virtual /* [helpstring][propget] */ HRESULT STDMETHODCALLTYPE get_VersionInfo( \r
+             /* [retval][out] */ BSTR *version) = 0;\r
+         \r
+@@ -2198,6 +2214,12 @@
+         virtual /* [helpstring][propput][id] */ HRESULT STDMETHODCALLTYPE put_BackColor( \r
+             /* [in] */ OLE_COLOR backcolor) = 0;\r
+         \r
++        virtual /* [helpstring][propget][id] */ HRESULT STDMETHODCALLTYPE get_Toolbar( \r
++            /* [retval][out] */ VARIANT_BOOL *visible) = 0;\r
++        \r
++        virtual /* [helpstring][propput][id] */ HRESULT STDMETHODCALLTYPE put_Toolbar( \r
++            /* [in] */ VARIANT_BOOL visible) = 0;\r
++        \r
+         virtual /* [helpstring][propget] */ HRESULT STDMETHODCALLTYPE get_audio( \r
+             /* [retval][out] */ IVLCAudio **obj) = 0;\r
+         \r
+@@ -2302,14 +2324,6 @@
+             IVLCControl2 * This,\r
+             /* [in] */ BSTR mrl);\r
+         \r
+-        /* [helpstring][propget][id] */ HRESULT ( STDMETHODCALLTYPE *get_Toolbar )( \r
+-            IVLCControl2 * This,\r
+-            /* [retval][out] */ VARIANT_BOOL *visible);\r
+-        \r
+-        /* [helpstring][propput][id] */ HRESULT ( STDMETHODCALLTYPE *put_Toolbar )( \r
+-            IVLCControl2 * This,\r
+-            /* [in] */ VARIANT_BOOL visible);\r
+-        \r
+         /* [helpstring][propget] */ HRESULT ( STDMETHODCALLTYPE *get_VersionInfo )( \r
+             IVLCControl2 * This,\r
+             /* [retval][out] */ BSTR *version);\r
+@@ -2338,6 +2352,14 @@
+             IVLCControl2 * This,\r
+             /* [in] */ OLE_COLOR backcolor);\r
+         \r
++        /* [helpstring][propget][id] */ HRESULT ( STDMETHODCALLTYPE *get_Toolbar )( \r
++            IVLCControl2 * This,\r
++            /* [retval][out] */ VARIANT_BOOL *visible);\r
++        \r
++        /* [helpstring][propput][id] */ HRESULT ( STDMETHODCALLTYPE *put_Toolbar )( \r
++            IVLCControl2 * This,\r
++            /* [in] */ VARIANT_BOOL visible);\r
++        \r
+         /* [helpstring][propget] */ HRESULT ( STDMETHODCALLTYPE *get_audio )( \r
+             IVLCControl2 * This,\r
+             /* [retval][out] */ IVLCAudio **obj);\r
+@@ -2424,12 +2446,6 @@
+ #define IVLCControl2_put_MRL(This,mrl)        \\r
+     ( (This)->lpVtbl -> put_MRL(This,mrl) ) \r
\r
+-#define IVLCControl2_get_Toolbar(This,visible)        \\r
+-    ( (This)->lpVtbl -> get_Toolbar(This,visible) ) \r
+-\r
+-#define IVLCControl2_put_Toolbar(This,visible)        \\r
+-    ( (This)->lpVtbl -> put_Toolbar(This,visible) ) \r
+-\r
+ #define IVLCControl2_get_VersionInfo(This,version)    \\r
+     ( (This)->lpVtbl -> get_VersionInfo(This,version) ) \r
\r
+@@ -2451,6 +2467,12 @@
+ #define IVLCControl2_put_BackColor(This,backcolor)    \\r
+     ( (This)->lpVtbl -> put_BackColor(This,backcolor) ) \r
\r
++#define IVLCControl2_get_Toolbar(This,visible)        \\r
++    ( (This)->lpVtbl -> get_Toolbar(This,visible) ) \r
++\r
++#define IVLCControl2_put_Toolbar(This,visible)        \\r
++    ( (This)->lpVtbl -> put_Toolbar(This,visible) ) \r
++\r
+ #define IVLCControl2_get_audio(This,obj)      \\r
+     ( (This)->lpVtbl -> get_audio(This,obj) ) \r
\r
+@@ -2727,7 +2749,7 @@
\r
+ #ifdef __cplusplus\r
\r
+-class DECLSPEC_UUID("E23FE9C6-778E-49D4-B537-38FCDE4887D8")\r
++class DECLSPEC_UUID("045E7BEE-6F8F-44cf-AFF1-52710C6209FE")\r
+ VLCPlugin;\r
+ #endif\r
\r
+@@ -2735,7 +2757,7 @@
\r
+ #ifdef __cplusplus\r
\r
+-class DECLSPEC_UUID("9BE31822-FDAD-461B-AD51-BE1D1C159921")\r
++class DECLSPEC_UUID("1800B8AF-4E33-43C0-AFC7-894433C13538")\r
+ VLCPlugin2;\r
+ #endif\r
+ #endif /* __AXVLC_LIBRARY_DEFINED__ */\r
+diff -rbNU 3 vlc-1.0.5/projects/activex/axvlc.inf.in d10-02-02-tstreamplaylist-p10/projects/activex/axvlc.inf.in
+--- vlc-1.0.5/projects/activex/axvlc.inf.in    2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/axvlc.inf.in        2010-08-13 15:43:48.000000000 +0200
+@@ -1,4 +1,6 @@
+ ; Version number and signature of INF file.\r
++; Written by Diego Andres Rabaioli\r
++; see LICENSE.txt for license information\r
+ ;\r
+ [version]\r
+   signature="$CHICAGO$"\r
+@@ -6,18 +8,17 @@
\r
+ [Add.Code]\r
+   axvlc.dll=axvlc.dll\r
+-  vlc-@VERSION@-win32.exe=vlc-@VERSION@-win32.exe\r
++  SwarmPlugin_1.1.0.exe=SwarmPlugin_1.1.0.exe\r
\r
+ [axvlc.dll]\r
+-  FileVersion=@VERSION_MAJOR@,@VERSION_MINOR@,@VERSION_REVISION@,0\r
+-  clsid={9BE31822-FDAD-461B-AD51-BE1D1C159921}\r
++  FileVersion=1,1,0,0\r
++  clsid={1800B8AF-4E33-43C0-AFC7-894433C13538}\r
+   RegisterServer=no\r
+   Hook=runinstaller\r
\r
+-[vlc-@VERSION@-win32.exe]\r
+-  FileVersion=@VERSION_MAJOR@,@VERSION_MINOR@,@VERSION_REVISION@,0\r
+-  file-win32-x86=http://downloads.videolan.org/pub/videolan/vlc/@VERSION@/win32/vlc-@VERSION@-win32.exe\r
++[SwarmPlugin_1.1.0.exe]\r
++  FileVersion=1,1,0,0\r
++  file-win32-x86=http://trial.p2p-next.org/Beta/SwarmPlugin_1.1.0.exe\r
\r
+ [runinstaller]\r
+-  run=%EXTRACT_DIR%\vlc-@VERSION@-win32.exe\r
+-\r
++  run=%EXTRACT_DIR%\SwarmPlugin_1.1.0.exe\r
+diff -rbNU 3 vlc-1.0.5/projects/activex/axvlc_rc.rc.in d10-02-02-tstreamplaylist-p10/projects/activex/axvlc_rc.rc.in
+--- vlc-1.0.5/projects/activex/axvlc_rc.rc.in  2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/axvlc_rc.rc.in      2010-08-13 15:43:48.000000000 +0200
+@@ -1,4 +1,4 @@
+-#define VERSION_NUMBER @VERSION_MAJOR@,@VERSION_MINOR@,@VERSION_REVISION@,@VERSION_EXTRA_RC@
++#define VERSION_NUMBER 1,1,0,0
+ 1 BITMAP "vlc16x16.bmp"
+ 1 VERSIONINFO
+@@ -11,10 +11,10 @@
+   BEGIN
+     BLOCK "040904E4"
+     BEGIN
+-      VALUE "CompanyName", "the VideoLAN Team"
+-      VALUE "FileVersion", "@VERSION@"
+-      VALUE "FileDescription", "VLC media player (Activex Plugin)"
+-      VALUE "LegalCopyright", "(c) @COPYRIGHT_YEARS@ the VideoLAN Team"
++      VALUE "CompanyName", "the VideoLAN Team and P2P-Next project"
++      VALUE "FileVersion", "1.1.0"
++      VALUE "FileDescription", "SwarmPlugin Version 1.1.0, Copyright (c) 2009-2010, the VideoLAN Team and Delft University of Technology<br><a href=""http://www.pds.ewi.tudelft.nl/code.html"">http://www.pds.ewi.tudelft.nl/code.html</a>"
++      VALUE "LegalCopyright", "Copyright \251 2009-2010 the VideoLAN Team and Delft University of Technology"
+       VALUE "OLESelfRegister", "\0"
+     END
+   END
+Binary files vlc-1.0.5/projects/activex/axvlc.tlb and d10-02-02-tstreamplaylist-p10/projects/activex/axvlc.tlb differ
+diff -rbNU 3 vlc-1.0.5/projects/activex/main.cpp d10-02-02-tstreamplaylist-p10/projects/activex/main.cpp
+--- vlc-1.0.5/projects/activex/main.cpp        2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/main.cpp    2009-06-17 13:59:44.000000000 +0200
+@@ -34,9 +34,9 @@
+ using namespace std;
+-#define COMPANY_STR "VideoLAN"
+-#define PROGRAM_STR "VLCPlugin"
+-#define DESCRIPTION "VideoLAN VLC ActiveX Plugin"
++#define COMPANY_STR "P2PNext"
++#define PROGRAM_STR "SwarmPlugin"
++#define DESCRIPTION "SwarmPlayer ActiveX Plugin"
+ #define THREADING_MODEL "Apartment"
+ #define MISC_STATUS     "131473"
+diff -rbNU 3 vlc-1.0.5/projects/activex/Makefile.am d10-02-02-tstreamplaylist-p10/projects/activex/Makefile.am
+--- vlc-1.0.5/projects/activex/Makefile.am     2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/Makefile.am 2009-08-19 13:16:24.000000000 +0200
+@@ -42,6 +42,8 @@
+     vlccontrol2.h \
+     plugin.cpp \
+     plugin.h \
++    p2pcontrol.cpp \
++    p2pcontrol.h \
+     axvlc_idl.c \
+     axvlc_idl.h \
+     guiddef.h \
+@@ -75,7 +77,7 @@
+ axvlc_la_CXXFLAGS = `$(VLC_CONFIG) --cxxflags activex`
+ axvlc_la_DEPENDENCIES = axvlc.def $(DATA_axvlc_rc) $(LIBRARIES_libvlc)
+ axvlc_la_LDFLAGS = -Wl,$(srcdir)/axvlc.def -Wl,$(DATA_axvlc_rc) \
+-    -no-undefined -avoid-version -module \
++    -no-undefined -avoid-version -module -lws2_32 \
+     `$(VLC_CONFIG) --ldflags activex libvlc`
+ axvlc_la_LIBADD = $(LIBRARIES_libvlc) \
+       `$(VLC_CONFIG) -libs activex`
+diff -rbNU 3 vlc-1.0.5/projects/activex/Makefile.in d10-02-02-tstreamplaylist-p10/projects/activex/Makefile.in
+--- vlc-1.0.5/projects/activex/Makefile.in     2010-01-24 22:28:52.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/Makefile.in 2010-02-12 13:22:37.000000000 +0100
+@@ -94,7 +94,7 @@
+       objectsafety.h dataobject.cpp dataobject.h viewobject.cpp \
+       viewobject.h supporterrorinfo.cpp supporterrorinfo.h \
+       vlccontrol.cpp vlccontrol.h vlccontrol2.cpp vlccontrol2.h \
+-      plugin.cpp plugin.h axvlc_idl.c axvlc_idl.h guiddef.h
++      plugin.cpp plugin.h p2pcontrol.cpp p2pcontrol.h axvlc_idl.c axvlc_idl.h guiddef.h
+ am__objects_1 = axvlc_la-main.lo axvlc_la-utils.lo \
+       axvlc_la-olecontrol.lo axvlc_la-oleinplaceactiveobject.lo \
+       axvlc_la-oleinplaceobject.lo axvlc_la-oleobject.lo \
+@@ -103,7 +103,7 @@
+       axvlc_la-connectioncontainer.lo axvlc_la-objectsafety.lo \
+       axvlc_la-dataobject.lo axvlc_la-viewobject.lo \
+       axvlc_la-supporterrorinfo.lo axvlc_la-vlccontrol.lo \
+-      axvlc_la-vlccontrol2.lo axvlc_la-plugin.lo \
++      axvlc_la-vlccontrol2.lo axvlc_la-plugin.lo axvlc_la-p2pcontrol.lo \
+       axvlc_la-axvlc_idl.lo
+ @BUILD_ACTIVEX_TRUE@am_axvlc_la_OBJECTS = $(am__objects_1)
+ axvlc_la_OBJECTS = $(am_axvlc_la_OBJECTS)
+@@ -738,6 +738,8 @@
+     vlccontrol2.h \
+     plugin.cpp \
+     plugin.h \
++    plugin.cpp \
++    plugin.h \
+     axvlc_idl.c \
+     axvlc_idl.h \
+     guiddef.h \
+@@ -872,6 +874,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/axvlc_la-persiststorage.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/axvlc_la-persiststreaminit.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/axvlc_la-plugin.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/axvlc_la-p2pcontrol.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/axvlc_la-provideclassinfo.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/axvlc_la-supporterrorinfo.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/axvlc_la-utils.Plo@am__quote@
+@@ -1054,6 +1057,15 @@
+ @AMDEP_TRUE@@am__fastdepCXX_FALSE@    DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ @am__fastdepCXX_FALSE@        $(LIBTOOL)  --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(axvlc_la_CXXFLAGS) $(CXXFLAGS) -c -o axvlc_la-plugin.lo `test -f 'plugin.cpp' || echo '$(srcdir)/'`plugin.cpp
++
++axvlc_la-p2pcontrol.lo: p2pcontrol.cpp
++@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(axvlc_la_CXXFLAGS) $(CXXFLAGS) -MT axvlc_la-p2pcontrol.lo -MD -MP -MF $(DEPDIR)/axvlc_la-p2pcontrol.Tpo -c -o axvlc_la-p2pcontrol.lo `test -f 'p2pcontrol.cpp' || echo '$(srcdir)/'`p2pcontrol.cpp
++@am__fastdepCXX_TRUE@ mv -f $(DEPDIR)/axvlc_la-p2pcontrol.Tpo $(DEPDIR)/axvlc_la-p2pcontrol.Plo
++@AMDEP_TRUE@@am__fastdepCXX_FALSE@    source='p2pcontrol.cpp' object='axvlc_la-p2pcontrol.lo' libtool=yes @AMDEPBACKSLASH@
++@AMDEP_TRUE@@am__fastdepCXX_FALSE@    DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++@am__fastdepCXX_FALSE@        $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(axvlc_la_CXXFLAGS) $(CXXFLAGS) -c -o axvlc_la-p2pcontrol.lo `test -f 'p2pcontrol.cpp' || echo '$(srcdir)/'`p2pcontrol.cpp
++
++
+ mostlyclean-libtool:
+       -rm -f *.lo
+diff -rbNU 3 vlc-1.0.5/projects/activex/p2pcontrol.cpp d10-02-02-tstreamplaylist-p10/projects/activex/p2pcontrol.cpp
+--- vlc-1.0.5/projects/activex/p2pcontrol.cpp  1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/p2pcontrol.cpp      2010-05-27 15:57:07.000000000 +0200
+@@ -0,0 +1,509 @@
++/* 
++ * Written by Diego Andres Rabaioli
++ * see P2P-Next-LICENSE.txt for license information
++ */
++
++#include "p2pcontrol.h"
++#include <string>
++
++#define P_DEBUG
++
++#ifdef P_DEBUG
++#include <stdio.h>
++#include <stdarg.h>
++#endif
++
++////////
++// TODO : Diego : Just for debug... remove it or move to vlc log system
++////////
++void writeOnLog( const char * msg, ... )
++{
++#ifdef P_DEBUG
++  static std::string logPath = "";
++
++  if( logPath.empty() )
++  {
++    char   tmp[256];
++    LONG   result;
++    HKEY   hKey;
++    WCHAR  Logpath[256]; // TODO : Fix this
++    DWORD  bufLen = sizeof( Logpath );
++
++    // Look in the Windows registry for installation path
++    result = RegOpenKeyEx( HKEY_LOCAL_MACHINE, PLUGIN_REG_KEY, 0, KEY_QUERY_VALUE, &hKey );
++    if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return; }
++    result = RegQueryValueEx( hKey, LOG_PATH_ELEMENT, NULL, NULL, (LPBYTE)Logpath, &bufLen);
++    if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return; }
++    RegCloseKey( hKey );
++    wcstombs( tmp, Logpath, 256 );
++    logPath.assign( tmp );
++    logPath.append( "\\" );
++    logPath.append( LOG_FILE_NAME );
++  }
++
++  if( ! logPath.empty() )
++  {
++    FILE * file;
++    file = fopen( logPath.c_str(), "a" );
++    if( file == NULL ) return;
++
++    va_list args;
++    va_start( args, msg );
++    vfprintf( file, msg, args );
++    va_end( args );
++    fputs( "\r\n", file );
++    fclose( file );
++  }
++#endif
++};
++
++// Thread parameters
++typedef struct {
++  BGPConnection  * connection;
++  event_cb_map_t * eventMap;
++  HANDLE *         syncEvent;
++  HANDLE *         syncMutex;
++} ThreadParams;
++
++
++///////////////////////////
++// BGPConnection
++///////////////////////////
++BGPConnection::BGPConnection( const int port, const std::string bgAddress )
++{
++  mBGAddress   = bgAddress;
++  mPort        = port;
++  mSocketState = S_DOWN;
++}
++
++///////////////////////////
++BGPConnection::~BGPConnection()
++{
++  disconnect();
++}
++
++///////////////////////////
++bool BGPConnection::connect()
++{
++  WSADATA            wsaData;
++  struct sockaddr_in serverAddress;
++  int                iResult;
++
++  // Just to be in a consistent state
++  if( mSocketState == S_UP )
++    return true;
++  else if( mSocketState != S_DOWN )
++    disconnect();
++
++  // Init Winsock
++  iResult = WSAStartup( MAKEWORD( 2, 2 ), &wsaData );
++  if( iResult != 0 )
++    return disconnect();
++
++  mSocketState = S_STARTED;
++
++  // Create socket
++  if( ( mServerSocket = socket( AF_INET, SOCK_STREAM, IPPROTO_TCP ) ) == INVALID_SOCKET )
++    return disconnect();
++
++  mSocketState = S_CREATED;
++
++  memset( &serverAddress, 0, sizeof( serverAddress ) );
++  serverAddress.sin_family      = AF_INET;
++  serverAddress.sin_port        = htons( mPort );
++  serverAddress.sin_addr.s_addr = inet_addr( mBGAddress.c_str() );
++
++  // Connect to the BG process server
++  if( ( ::connect( mServerSocket, (struct sockaddr *)&serverAddress,
++                 sizeof( serverAddress ) ) ) == SOCKET_ERROR )
++  {
++    writeOnLog( "BGPConnection: Could not connect to server" );
++    writeOnLog( "%i Socket Server : %i Server Addres : %i", WSAGetLastError(), mServerSocket, serverAddress.sin_addr.s_addr );
++    return disconnect();
++  }
++
++  writeOnLog( "BGPConnection: CONNECTED" );
++
++  mSocketState = S_UP;
++  return true;
++
++}
++
++///////////////////////////
++bool BGPConnection::disconnect()
++{
++// Shut down the connection depending on the state of the socket
++// It basically returns always false (stupid code optimization reason)
++  #ifdef P_DEBUG_2
++  writeOnLog( "BGPConnection: Shutting down connection from status %d", mSocketState );
++  #endif
++
++  if( mSocketState == S_DOWN )
++    return false;
++
++  if( mSocketState >= S_CREATED )
++  {
++    closesocket( mServerSocket );
++    mServerSocket = INVALID_SOCKET;
++  }
++
++  WSACleanup();
++  mSocketState = S_DOWN;
++  return false;
++}
++
++///////////////////////////
++bool BGPConnection::sendMsg( const std::string & message ) const
++{
++  int res;
++
++  // Check socket status
++  if( mSocketState != S_UP )
++    return false;
++
++  // Complete message
++  std::string msg = message + "\r\n";
++  // Send Request
++  res = send( mServerSocket, msg.c_str(), msg.length(), 0 );
++  writeOnLog( "BGPConnection: Sending: %s", message.c_str() );
++
++  if( res == SOCKET_ERROR )
++  {
++    writeOnLog( "BGPConnection: Error in sending message" );
++    return false;
++  }
++
++  return true;
++}
++
++///////////////////////////
++bool BGPConnection::recvMsg( std::string & message ) const
++{
++  char inBuffer[ IN_BUF_LEN ];
++  int  res;
++
++  if( mSocketState != S_UP )
++    return false;
++
++  message = "";
++  while( true )
++  {
++    res = recv( mServerSocket, inBuffer, IN_BUF_LEN, 0 );
++
++    if( res <= 0 )
++    {
++      writeOnLog( "BGPConnection: Error in receiving" );
++      message = "SHUTDOWN";
++      return false;
++    }
++
++    message.append( inBuffer, res );
++    if( ! message.compare( message.size() - 2, 2, "\r\n" ) )
++      break;
++  }
++  // Trim "\r\n"
++  message.erase( message.size() - 2 );
++
++  writeOnLog( "BGPConnection: RECEIVED : %s", message.c_str() );
++
++  return true;
++}
++
++///////////////////////////
++// P2PControl
++///////////////////////////
++P2PControl::P2PControl( const int port, const std::string bgAddress ) :
++  mEventCBMap(), mProtoState( P_DOWN ) 
++{
++  mConnection  = new BGPConnection( port, bgAddress );
++  mEventThread = NULL;
++}
++
++///////////////////////////
++P2PControl::~P2PControl()
++{
++  shutdown();
++}
++
++///////////////////////////
++bool P2PControl::startup()
++{
++  writeOnLog( "P2PControl::starting up..." );
++
++  // Check if communication protocol is already up.
++  if( mProtoState == P_UP )
++    return true;
++
++  // Try to connect to the BG Process. If it doesn't succeed then
++  // try to start the BG Process and try to connect againg.
++  if( ! mConnection->connect() )
++    if( ! startBGProcess() || ! mConnection->connect() )
++      return mConnection->disconnect();
++
++  // Check if the thread is already running.
++  if( mEventThread != NULL )
++    return true;
++
++  // Init syncronization event
++  mSyncEvent = CreateEvent( NULL, false, false, NULL );
++  if( mSyncEvent == NULL )
++  { writeOnLog( "P2PControl: Error in creating syncronization event" ); return false; }
++
++  // Init syncronization mutex
++  mSyncMutex = CreateMutex( NULL, false, NULL );
++  if( mSyncMutex == NULL )
++  { writeOnLog( "P2PControl: Error in creating syncronization mutex" ); return false; }
++
++  // Init the thread arguments
++  ThreadParams * threadParams = new ThreadParams;
++  threadParams->connection    = mConnection;
++  threadParams->eventMap      = &mEventCBMap;
++  threadParams->syncEvent     = &mSyncEvent;
++  threadParams->syncMutex     = &mSyncMutex;
++
++  // Start the Event Loop
++  mEventThread = CreateThread( NULL,
++                               0,
++                               &eventLoop,
++                               threadParams,
++                               0,
++                               NULL );
++
++  if( mEventThread == NULL )
++  {
++    writeOnLog( "P2PControl: Could not start event loop thread" );
++    delete threadParams;
++    return false;
++  }
++
++  // After the BG Process starts up, a notification event is sent to
++  // the plug-in, it means we are now able to connect to the BG.
++  // Just wait for it.
++  if( WaitForSingleObject( mSyncEvent, INFINITE ) != WAIT_OBJECT_0 )
++    writeOnLog( "Sync Error while Starting up the event thread" );
++
++  delete threadParams;
++
++  mProtoState = P_UP;
++
++  return true;
++}
++
++///////////////////////////
++bool P2PControl::shutdown()
++{
++  // Send a shutdown request to the BG and wait
++  // to receive the syncronization event back.
++  DWORD res;
++  mConnection->sendMsg( "SHUTDOWN" );
++  res = WaitForSingleObject( mSyncEvent, 2000 );
++  if( res == WAIT_TIMEOUT ) writeOnLog( "Sync Error while closing thread" );
++  else writeOnLog( "P2PControl: Thread cleanly exited" );
++
++  // Close all handlers and prepare to exit.
++  CloseHandle( mEventThread );
++  CloseHandle( mSyncEvent );
++  CloseHandle( mSyncMutex );
++  delete mConnection;
++  mProtoState = P_DOWN;
++
++  return true;
++}
++
++///////////////////////////
++bool P2PControl::startBGProcess()
++{
++  LONG         result;
++  HKEY         hKey;
++  WCHAR        BGpath[256]; // TODO : Fix this
++  DWORD        bufLen = sizeof( BGpath );
++
++  // Look in the Windows registry to get the path of the BG
++  result = RegOpenKeyEx( HKEY_LOCAL_MACHINE, PLUGIN_REG_KEY, 0, KEY_QUERY_VALUE, &hKey );
++  if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return false; }
++  result = RegQueryValueEx( hKey, BG_PATH_ELEMENT, NULL, NULL, (LPBYTE)BGpath, &bufLen);
++  if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return false; }
++  RegCloseKey( hKey );
++
++  // Set up variables
++  STARTUPINFOW        startupInfo;
++  PROCESS_INFORMATION processInfo;
++  memset( &startupInfo, 0, sizeof( startupInfo ) );
++  memset( &processInfo, 0, sizeof( processInfo ) );
++  startupInfo.cb = sizeof( startupInfo );
++
++  writeOnLog( "Starting BG Process..." );
++  // Finally start the BG Process
++  bool started = CreateProcess( BGpath,
++                                NULL,
++                                NULL,
++                                NULL,
++                                false,
++                                CREATE_NO_WINDOW,
++                                NULL,
++                                NULL,
++                                &startupInfo,
++                                &processInfo );
++
++  // Wait the process to startup and send a 'startup' event
++  HANDLE startupEvent = CreateEvent( NULL, false, false, L"startupEvent" );
++  if( startupEvent == NULL )
++  {
++    writeOnLog( "P2PControl: Error in creating syncronization event: Could not create BG Process" );
++    started = false;
++  }
++  else
++  {
++    if( started )
++    {
++      writeOnLog( "Waiting Startup event from BG" );
++      ::WaitForSingleObject( startupEvent, INFINITE );
++      writeOnLog( "BGProcess Created" );
++    }
++    else
++    {
++      writeOnLog( "Could not start BG Process" );
++      writeOnLog(  "Last Error code : %i", GetLastError() );
++    }
++
++    CloseHandle( startupEvent );
++  }
++
++  CloseHandle( processInfo.hProcess );
++  CloseHandle( processInfo.hThread );
++
++  return started;
++}
++
++///////////////////////////
++bool P2PControl::sendTorrent( const std::string torrent )
++{
++  #ifdef P_DEBUG_2
++  writeOnLog( "P2PControl: Sending Torrent..." );
++  #endif
++
++  // Send START command with the torrent
++  std::string command = "START " + torrent;
++  return mConnection->sendMsg( command );
++}
++
++///////////////////////////
++DWORD WINAPI P2PControl::eventLoop( LPVOID params )
++{
++  BGPConnection *          connection;
++  event_cb_map_t *         eventMap;
++  std::string              serverCmd, command;
++  bg_event_t               event = EV_NONE;
++  event_cb_map_t::iterator eventIt;
++  HANDLE *                 syncEvent;
++  HANDLE *                 syncMutex;
++  // Just for iterating through the events
++  std::pair< event_cb_map_t::iterator, event_cb_map_t::iterator > equalEventsIt;
++
++  // Parse parameters
++  connection = ( (ThreadParams *)params )->connection;
++  eventMap   = ( (ThreadParams *)params )->eventMap;
++  syncEvent  = ( (ThreadParams *)params )->syncEvent;
++  syncMutex  = ( (ThreadParams *)params )->syncMutex;
++
++  // TODO : Check here
++  SetEvent( *syncEvent ); // Main thread can delete ThreadParams now
++
++  writeOnLog( "P2P Thread: STARTING LOOP" );
++  // Main Thread Loop
++  while( event != EV_CLOSE )
++  {
++    if( ! connection->recvMsg( serverCmd ) )
++    {
++      writeOnLog( "P2P Thread: Unable to receive the command from BG" );
++
++      // TODO : Check here
++      if( serverCmd != "SHUTDOWN" )
++        continue;
++    }
++
++    command = "";
++    event   = EV_NONE;
++    if( ! serverCmd.compare( 0, 4, "PLAY" ) )
++    {
++      command = serverCmd.substr( 5 );
++      event = EV_PLAY;
++    }
++    else if ( ! serverCmd.compare( 0, 5, "PAUSE" ) )
++    {
++      event = EV_PAUSE;
++    }
++    else if ( ! serverCmd.compare( 0, 6, "RESUME" ) )
++    {
++      event = EV_RESUME;
++    }
++    else if ( ! serverCmd.compare( 0, 8, "SHUTDOWN" ) )
++    {
++      writeOnLog( "P2P Thread: Received SHUTDOWN" );
++      event = EV_CLOSE;
++    }
++    else if ( ! serverCmd.compare( 0, 4, "INFO" ) )
++    {
++      command = serverCmd.substr( 5 );
++      event = EV_INFO;
++      writeOnLog( "INFO command received: %s", command.c_str() );
++    }
++    else if( ! serverCmd.compare( 0, 4, "STOP" ) )
++    {
++      event = EV_STOP;
++    }
++    else
++    {
++      writeOnLog( "P2P Thread: Received wrong command: %s", serverCmd.c_str() );
++      continue;
++    }
++
++    writeOnLog( "P2P Thread: Command: %s", command.c_str() );
++
++    if( WaitForSingleObject( *syncMutex, INFINITE ) == WAIT_FAILED )
++    { writeOnLog( "Failed to Lock the Mutex" ); continue; }
++
++    // Call all event handlers related to the event
++    equalEventsIt = eventMap->equal_range( event );
++    for( eventIt = equalEventsIt.first; eventIt != equalEventsIt.second; ++eventIt )
++      ( (*eventIt).second )->process( command.c_str() );
++
++    if( ! ReleaseMutex( *syncMutex ) )
++    { writeOnLog( "Failed to Release the Mutex" ); continue; }
++
++    if( event == EV_CLOSE )
++      break;
++  }
++  writeOnLog( "P2P Thread: EXITING Loop" );
++  SetEvent( *syncEvent ); // Main thread can shut down now
++  return 0;
++}
++
++///////////////////////////
++/*void P2PControl::unregEventCB( bg_event_t event, EventHandlerWrap * eventCallback )
++{
++  std::pair< event_cb_map_t::iterator, event_cb_map_t::iterator > equalEventsIt;
++  // TODO
++  if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++  { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++  equalEventsIt = mEventCBMap->equal_range( event );
++  for( eventIt = equalEventsIt.first; eventIt != equalEventsIt.second; ++eventIt )
++    ( (*eventIt).second )->process( command.c_str() );
++
++  if( ! ReleaseMutex( mSyncMutex ) )
++  { writeOnLog( "Failed to Release the Mutex" ); return; }
++}*/
++
++///////////////////////////
++void P2PControl::unregEventCB( bg_event_t event )
++{
++  if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++  { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++  mEventCBMap.erase( event );
++
++  if( ! ReleaseMutex( mSyncMutex ) )
++  { writeOnLog( "Failed to Release the Mutex" ); return; }
++}
++
+diff -rbNU 3 vlc-1.0.5/projects/activex/p2pcontrol.h d10-02-02-tstreamplaylist-p10/projects/activex/p2pcontrol.h
+--- vlc-1.0.5/projects/activex/p2pcontrol.h    1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/p2pcontrol.h        2009-08-21 14:31:19.000000000 +0200
+@@ -0,0 +1,202 @@
++/* 
++ * Written by Diego Andres Rabaioli
++ * see P2P-Next-LICENSE.txt for license information
++ */
++
++
++#ifndef _P2PCONTROL_H
++#define _P2PCONTROL_H
++
++#include <winsock2.h>
++#include <string>
++#include <map>
++
++
++////////////////////////
++//
++// The P2PControl class handles the communication with the BG Process.
++// Through P2PControl it's possible to send requests to the BG like
++// 'START myvideo.torrent' and accept commands like 'PLAY', 'PAUSE',...
++// It's possible to associate an action to each BG command, registering
++// a callback related to the specific event:
++// regEventCB( EV_PLAY, myCallbackFunction );
++//
++// General use case:
++//
++// P2PControl * p2pControl = new P2PControl();
++// if( ! p2pControl->startup() )
++//   return -1;
++//
++// p2pControl->regEventCB( EV_PLAY,   &myPlayHandler );
++// p2pControl->regEventCB( EV_PAUSE,  &myPauseHandler );
++// p2pControl->regEventCB( EV_RESUME, &myResumeHandler );
++// ...
++//
++// if( ! p2pControl->sendTorrent( 'http://www.foo.com/videocontent_1.torrent' ) )
++//  return -1;
++//
++////////////////////////
++
++void writeOnLog( const char * msg, ... );
++
++///////////////////////////
++// BGPConnection
++///////////////////////////
++
++// States enumerations
++enum sock_state_t { S_DOWN, S_STARTED, S_CREATED, S_UP };
++
++const int         IN_BUF_LEN = 512;
++const int         BG_PORT    = 62062;
++const std::string BG_ADDRESS = "127.0.0.1";
++
++class BGPConnection
++{
++ public:
++  BGPConnection( const int port, const std::string bgAddress );
++  ~BGPConnection();
++
++  bool connect();
++  bool disconnect();
++
++  bool sendMsg( const std::string & ) const;
++  bool recvMsg( std::string & ) const;
++
++  std::string  getAddress() { return mBGAddress; }
++  int          getPort()    { return mPort; }
++
++ private:
++  SOCKET       mServerSocket;
++  std::string  mBGAddress;
++  int          mPort;
++  sock_state_t mSocketState;
++};
++
++///////////////////////////
++// EventHandlerWrap
++///////////////////////////
++class EventHandlerWrap
++{
++ public:
++  virtual void process( const char * ) const = 0;
++  virtual ~EventHandlerWrap() {}
++};
++
++///////////////////////////
++class EventHandlerWrap_Static : public EventHandlerWrap
++{
++ public:
++  EventHandlerWrap_Static( void ( *handler )( const char * ) ):
++    mSHandler( handler ) {}
++
++  virtual ~EventHandlerWrap_Static() {}
++
++  virtual void process( const char * c ) const
++  {
++    mSHandler( c );
++  }
++
++ private:
++  void ( *mSHandler )( const char * );
++
++};
++
++///////////////////////////
++template < class T >
++class EventHandlerWrap_NonStatic : public EventHandlerWrap
++{
++ public:
++  EventHandlerWrap_NonStatic( T * obj, void ( T::* handler )( const char * ) ):
++    mObject( obj ), mNHandler( handler ) {}
++
++  virtual ~EventHandlerWrap_NonStatic() {}
++
++  virtual void process( const char * c ) const
++  {
++    (mObject->*mNHandler)( c );
++  }
++
++ private:
++  T * mObject;
++  void ( T::* mNHandler )( const char * );
++};
++
++///////////////////////////
++// P2PControl
++///////////////////////////
++
++// Constants
++const WCHAR PLUGIN_REG_KEY[]   = L"Software\\SwarmPlugin";
++const WCHAR BG_PATH_ELEMENT[]  = L"BGProcessPath";
++const WCHAR LOG_PATH_ELEMENT[] = L"InstallDir";
++const char  LOG_FILE_NAME[]    = "swarmplugin.log";
++
++// Event Callback types
++enum bg_event_t { EV_NONE, EV_PLAY, EV_PAUSE, EV_RESUME, EV_STOP, EV_INFO, EV_CLOSE };
++typedef std::multimap< bg_event_t, EventHandlerWrap * > event_cb_map_t;
++typedef std::pair< bg_event_t, EventHandlerWrap * >     event_cb_item_t;
++
++// States enumerations
++enum protocol_status_t { P_DOWN, P_UP, P_CLOSING };
++
++class P2PControl
++{
++ public:
++  P2PControl( const int port = BG_PORT, const std::string bgAddress = BG_ADDRESS );
++  ~P2PControl();
++
++  bool  startup();
++  bool  shutdown();
++
++  bool  launchBGProcess( const char * cmd = NULL );
++  bool  sendTorrent( const std::string torrent );
++
++  void  regEventCB( bg_event_t event, void (*callback)( const char * ) )
++  {
++    if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++      { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++    EventHandlerWrap * wrap = new EventHandlerWrap_Static( callback );
++    mEventCBMap.insert( event_cb_item_t( event, wrap ) );
++
++    if( ! ReleaseMutex( mSyncMutex ) )
++      { writeOnLog( "Failed to Release the Mutex" ); return; }
++  }
++
++  template < class T >
++  void  regEventCB( bg_event_t event, T * obj, void ( T::* callback)( const char * ) )
++  {
++    if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++      { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++    EventHandlerWrap * wrap = new EventHandlerWrap_NonStatic< T >( obj, callback );
++    mEventCBMap.insert( event_cb_item_t( event, wrap ) ); 
++
++    if( ! ReleaseMutex( mSyncMutex ) )
++      { writeOnLog( "Failed to Release the Mutex" ); return; }
++  }
++
++  //void  unregEventCB( bg_event_t, EventHandlerWrap * );
++  void  unregEventCB( bg_event_t );
++
++ private:
++  // Thread utilities
++  const BGPConnection *  getConnection() const { return mConnection; }
++  const event_cb_map_t * getEventMap()   const { return &mEventCBMap; }
++  // Event Loop: receives events from BG and call the registered handlers
++  static DWORD WINAPI    eventLoop( LPVOID );
++  // Back Ground Process
++  static bool            startBGProcess();
++
++  // Variable Members
++  BGPConnection *   mConnection;
++  protocol_status_t mProtoState;
++
++  // Thread and Sync
++  event_cb_map_t    mEventCBMap;  // Association event/callback
++  HANDLE            mEventThread; // Thread handler
++  HANDLE            mSyncEvent;   // Synchronize the thread at startup and shutdown time
++  HANDLE            mSyncMutex;   // Synchronize access to the Event Map
++};
++
++#endif  // _P2PCONTROL_H
+diff -rbNU 3 vlc-1.0.5/projects/activex/plugin.cpp d10-02-02-tstreamplaylist-p10/projects/activex/plugin.cpp
+--- vlc-1.0.5/projects/activex/plugin.cpp      2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/plugin.cpp  2010-08-02 12:03:41.000000000 +0200
+@@ -40,6 +40,11 @@
+ #include "utils.h"
++// P2P-Next
++#include "p2pcontrol.h"
++#include <vlc_osd.h>
++
++
+ #include <stdio.h>
+ #include <string.h>
+ #include <winreg.h>
+@@ -230,6 +235,12 @@
+     vlcOleObject = new VLCOleObject(this);
+     vlcSupportErrorInfo = new VLCSupportErrorInfo(this);
++    // P2P-Next
++    p2pControl = new P2PControl();
++    p2pStatus = NULL;
++#if _MEDIACONTROL_
++    mediaControl    = NULL;
++#endif
+     // configure controlling IUnknown interface for implemented interfaces
+     this->pUnkOuter = (NULL != pUnkOuter) ? pUnkOuter : dynamic_cast<LPUNKNOWN>(this);
+@@ -248,6 +259,9 @@
+     */
+     AddRef();
++    // P2P-Next
++    delete p2pControl;
++
+     delete vlcSupportErrorInfo;
+     delete vlcOleObject;
+     delete vlcDataObject;
+@@ -374,6 +388,10 @@
+         HimetricFromDP(hDC, (LPPOINT)&_extent, 1);
+         DeleteDC(hDC);
++        // P2P-Next
++        if( ! p2pControl->startup() ) // TODO : check what happen if !startup
++          return E_FAIL;
++
+         return S_OK;
+     }
+     return CO_E_ALREADYINITIALIZED;
+@@ -449,7 +467,7 @@
+     HKEY h_key;
+     char p_pluginpath[MAX_PATH];
+-    if( RegOpenKeyEx( HKEY_LOCAL_MACHINE, TEXT("Software\\VideoLAN\\VLC"),
++    if( RegOpenKeyEx( HKEY_LOCAL_MACHINE, TEXT("Software\\SwarmPlugin"),
+                       0, KEY_READ, &h_key ) == ERROR_SUCCESS )
+     {
+         DWORD i_type, i_data = MAX_PATH;
+@@ -465,6 +483,7 @@
+                     strcat( p_pluginpath, "\\plugins" );
+                     ppsz_argv[ppsz_argc++] = "--plugin-path";
+                     ppsz_argv[ppsz_argc++] = p_pluginpath;
++                    writeOnLog( "VlcPlugin: Found plugins directory: %s", p_pluginpath );
+                 }
+             }
+         }
+@@ -479,6 +498,11 @@
+     ppsz_argv[ppsz_argc++] = "--no-media-library";
+     ppsz_argv[ppsz_argc++] = "--ignore-config";
+     ppsz_argv[ppsz_argc++] = "--intf=dummy";
++    ppsz_argv[ppsz_argc++] = "--no-osd";
++    ppsz_argv[ppsz_argc++] = "--no-video-title-show";
++    // Arno: to log to file, uncomment extraintf and logfile lines:
++    //ppsz_argv[ppsz_argc++] = "--extraintf=logger";
++    //ppsz_argv[ppsz_argc++] = "--logfile=D:\\VLC-logfile.txt";
+     // loop mode is a configuration option only
+     if( _b_autoloop )
+@@ -505,8 +529,9 @@
+         libvlc_audio_set_mute(_p_libvlc, TRUE, NULL);
+     }
++
+     // initial playlist item
+-    if( SysStringLen(_bstr_mrl) > 0 )
++    if( SysStringLen(_bstr_mrl) >= 0 ) // Arno, 2010-08-01: also allow _bstr_mrl == ""
+     {
+         char *psz_mrl = NULL;
+@@ -535,6 +560,7 @@
+         }
+         if( NULL != psz_mrl )
+         {
++#ifdef ORIG
+             const char *options[1];
+             int i_options = 0;
+@@ -544,8 +570,23 @@
+                 snprintf(timeBuffer, sizeof(timeBuffer), ":start-time=%d", _i_time);
+                 options[i_options++] = timeBuffer;
+             }
++
+             // add default target to playlist
+             playlist_add_extended_untrusted(psz_mrl, i_options, options, NULL);
++#else
++            // P2P-Next
++          // The URL is that of a torrent file that we want to watch, forward to BG process
++            p2pControl->regEventCB< VLCPlugin >( EV_PLAY,   this, &VLCPlugin::onPlay );
++            p2pControl->regEventCB< VLCPlugin >( EV_PAUSE,  this, &VLCPlugin::onPause );
++            p2pControl->regEventCB< VLCPlugin >( EV_RESUME, this, &VLCPlugin::onResume );
++            p2pControl->regEventCB< VLCPlugin >( EV_STOP,   this, &VLCPlugin::onStop );
++            p2pControl->regEventCB< VLCPlugin >( EV_INFO,   this, &VLCPlugin::onInfo );
++
++            if( ! p2pControl->sendTorrent( string( psz_mrl ) ) )
++            {
++                   writeOnLog( "Plugin: initVLC: Could NOT send torrent to BG process" );
++              }
++#endif
+             CoTaskMemFree(psz_mrl);
+         }
+     }
+@@ -554,7 +595,7 @@
+ void VLCPlugin::setErrorInfo(REFIID riid, const char *description)
+ {
+     vlcSupportErrorInfo->setErrorInfo( getClassID() == CLSID_VLCPlugin2 ?
+-        OLESTR("VideoLAN.VLCPlugin.2") : OLESTR("VideoLAN.VLCPlugin.1"),
++        OLESTR("P2PNext.SwarmPlugin.2") : OLESTR("P2PNext.SwarmPlugin.1"),
+         riid, description );
+ };
+@@ -1104,4 +1145,121 @@
+     return item;
+ }
++// P2P-Next
++void VLCPlugin::onPlay( const char * stream )
++{
++    const char *options[1];
++    int i_options = 0;
++
++    char timeBuffer[32];
++    if( _i_time )
++    {
++        snprintf( timeBuffer, sizeof(timeBuffer), ":start-time=%d", _i_time );
++        options[i_options++] = timeBuffer;
++    }
++    // add default target to playlist
++    int item = playlist_add_extended_untrusted(stream, i_options, options, NULL);
++
++    if( _b_autoplay )
++    {
++        libvlc_exception_t ex;
++        libvlc_exception_init(&ex);
++
++        // Arno, 2010-02-11: explicitly select item, otherwise VLC may play old one
++        // for some reason, even if the playlist appears to be empty (JS level)
++        playlist_play_item(item, &ex);
++        if( libvlc_exception_raised(&ex) )
++        {
++              writeOnLog( "VLCPlugin::onPlay: Exception" );
++              writeOnLog( libvlc_exception_get_message(&ex) );
++              libvlc_exception_clear(&ex);
++              return;
++        }
++
++        // Arno, 2009-08-19: Not sure whether we need these still. vlccontrol.cpp uses them,
++        // vlccontrol2.cpp doesn't.
++        fireOnPlayEvent();
++    }
++}
++
++void VLCPlugin::onPause( const char * nothing )
++{
++    libvlc_exception_t ex;
++    libvlc_exception_init(&ex);
++
++    playlist_pause(&ex);
++    if( libvlc_exception_raised(&ex) )
++    {
++        writeOnLog( "VLCPlugin::onPause: Exception" );
++        writeOnLog( libvlc_exception_get_message(&ex) );
++        libvlc_exception_clear(&ex);
++        return;
++    }
++
++    fireOnPauseEvent();
++}
++
++void VLCPlugin::onResume( const char * nothing )
++{
++    libvlc_exception_t ex;
++    libvlc_exception_init(&ex);
++
++    playlist_play(&ex);
++    if( libvlc_exception_raised(&ex) )
++    {
++        writeOnLog( "VLCPlugin::onResume: Exception" );
++        writeOnLog( libvlc_exception_get_message(&ex) );
++        libvlc_exception_clear(&ex);
++        return;
++    }
++
++    fireOnPlayEvent();
++}
++
++void VLCPlugin::onStop( const char * nothing )
++{
++    libvlc_exception_t ex;
++    libvlc_exception_init(&ex);
++
++    playlist_stop(&ex);
++    if( libvlc_exception_raised(&ex) )
++    {
++        writeOnLog( "VLCPlugin::onStop: Exception" );
++        writeOnLog( libvlc_exception_get_message(&ex) );
++        libvlc_exception_clear(&ex);
++        return;
++    }
++
++    fireOnStopEvent();
++}
++
++void VLCPlugin::onInfo( const char * infostr )
++{
++    // Save copy of status message
++      if (this->p2pStatus != NULL)
++      {
++              free(this->p2pStatus);
++      }
++      this->p2pStatus = (char *)malloc((strlen(infostr)+1)*sizeof(char));
++      strcpy(this->p2pStatus,infostr);
++}
++// For JavaScript API
++const char *VLCPlugin::getP2PStatus()
++{
++    if (this->p2pStatus == NULL)
++              return "";
++      else
++              return this->p2pStatus;
++}
++
++int  VLCPlugin::set_p2ptarget(const char *mrl, libvlc_exception_t *ex)
++{
++    int item = -1;
++
++    if( ! p2pControl->sendTorrent( string( mrl ) ) )
++    {
++         writeOnLog( "Plugin: set_p2ptarget: Could NOT send torrent to BG process" );
++    }
++    return item;
++}
+diff -rbNU 3 vlc-1.0.5/projects/activex/plugin.h d10-02-02-tstreamplaylist-p10/projects/activex/plugin.h
+--- vlc-1.0.5/projects/activex/plugin.h        2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/plugin.h    2010-02-15 14:05:24.000000000 +0100
+@@ -28,6 +28,13 @@
+ #include <vlc/vlc.h>
++// P2P-Next
++#define _MEDIACONTROL_ 0
++#if _MEDIACONTROL_
++// TODO : just for test
++#include <vlc/mediacontrol.h>
++#endif
++
+ extern "C" const GUID CLSID_VLCPlugin;
+ extern "C" const GUID CLSID_VLCPlugin2;
+ extern "C" const GUID LIBID_AXVLC;
+@@ -237,6 +244,14 @@
+     void fireOnPauseEvent(void);
+     void fireOnStopEvent(void);
++    // P2P-Next
++    /* P2P Event Handlers */
++    void onPlay  ( const char * stream );
++    void onPause ( const char * nothing );
++    void onResume( const char * nothing );
++    void onStop  ( const char * nothing );
++    void onInfo  ( const char * string );
++
+     // controlling IUnknown interface
+     LPUNKNOWN pUnkOuter;
+@@ -307,6 +322,10 @@
+             libvlc_media_player_play(_p_mplayer,ex);
+     }
++    // P2P-Next: For JavaScript API
++    const char *       getP2PStatus();
++    int                          set_p2ptarget(const char *mrl, libvlc_exception_t *ex);
++
+ protected:
+     virtual ~VLCPlugin();
+@@ -333,6 +352,13 @@
+     class VLCDataObject *vlcDataObject;
+     class VLCSupportErrorInfo *vlcSupportErrorInfo;
++    // P2P-Next
++    class P2PControl  * p2pControl;
++    char * p2pStatus;
++#if _MEDIACONTROL_
++    mediacontrol_Instance * mediaControl;
++#endif
++
+     // in place activated window (Plugin window)
+     HWND _inplacewnd;
+diff -rbNU 3 vlc-1.0.5/projects/activex/README.TXT d10-02-02-tstreamplaylist-p10/projects/activex/README.TXT
+--- vlc-1.0.5/projects/activex/README.TXT      2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/README.TXT  2009-09-10 09:35:27.000000000 +0200
+@@ -20,6 +20,12 @@
+ axvlc_idl.h
+ axvlc.tlb
++Arno: Open Visual Studio 2008 Command Prompt via Start Menu. Then run:
++
++C:> midl /h axvlc_idl.h /iid axvlc_idl.c axvlc.idl
++
++ArnoEnd.
++
+ To use the MIDL compiler on cygwin, you will need to set some environment variables
+ before configuring vlc. If you have a copy of 'Microsoft Visual C++ 6.0' installed,
+ the following settings are correct:
+diff -rbNU 3 vlc-1.0.5/projects/activex/vlccontrol2.cpp d10-02-02-tstreamplaylist-p10/projects/activex/vlccontrol2.cpp
+--- vlc-1.0.5/projects/activex/vlccontrol2.cpp 2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/vlccontrol2.cpp     2010-02-15 14:13:48.000000000 +0100
+@@ -646,6 +646,89 @@
+     return hr;
+ };
++
++// P2P-Next
++STDMETHODIMP VLCInput::get_p2pstatus(BSTR *p2pstatus)
++{
++    if( NULL == p2pstatus )
++        return E_POINTER;
++
++    const char *p2pstatusStr = _p_instance->getP2PStatus();
++    if( NULL != p2pstatusStr )
++    {
++        *p2pstatus = BSTRFromCStr(CP_UTF8, p2pstatusStr);
++
++        return (NULL == *p2pstatus) ? E_OUTOFMEMORY : NOERROR;
++    }
++    *p2pstatus = NULL;
++    return E_FAIL;
++};
++
++// P2P-Next, 2010-02-15
++STDMETHODIMP VLCInput::set_p2ptarget(BSTR uri, long* item)
++{
++    if( NULL == item )
++        return E_POINTER;
++
++    if( 0 == SysStringLen(uri) )
++        return E_INVALIDARG;
++
++    libvlc_instance_t* p_libvlc;
++    HRESULT hr = _p_instance->getVLC(&p_libvlc);
++    if( SUCCEEDED(hr) )
++    {
++        libvlc_exception_t ex;
++        libvlc_exception_init(&ex);
++
++        char *psz_uri = NULL;
++        if( SysStringLen(_p_instance->getBaseURL()) > 0 )
++        {
++            /*
++            ** if the MRL a relative URL, we should end up with an absolute URL
++            */
++            LPWSTR abs_url = CombineURL(_p_instance->getBaseURL(), uri);
++            if( NULL != abs_url )
++            {
++                psz_uri = CStrFromWSTR(CP_UTF8, abs_url, wcslen(abs_url));
++                CoTaskMemFree(abs_url);
++            }
++            else
++            {
++                psz_uri = CStrFromBSTR(CP_UTF8, uri);
++            }
++        }
++        else
++        {
++            /*
++            ** baseURL is empty, assume MRL is absolute
++            */
++            psz_uri = CStrFromBSTR(CP_UTF8, uri);
++        }
++
++        if( NULL == psz_uri )
++        {
++            return E_OUTOFMEMORY;
++        }
++
++        *item = _p_instance->set_p2ptarget(psz_uri, &ex);
++
++        CoTaskMemFree(psz_uri);
++        if( libvlc_exception_raised(&ex) )
++        {
++            _p_instance->setErrorInfo(IID_IVLCInput,
++                libvlc_exception_get_message(&ex));
++            libvlc_exception_clear(&ex);
++            return E_FAIL;
++        }
++        return NOERROR;
++    }
++    return hr;
++};
++
++
++
++
++
+ /*******************************************************************************/
+ VLCLog::~VLCLog()
+diff -rbNU 3 vlc-1.0.5/projects/activex/vlccontrol2.h d10-02-02-tstreamplaylist-p10/projects/activex/vlccontrol2.h
+--- vlc-1.0.5/projects/activex/vlccontrol2.h   2009-12-20 18:43:40.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/activex/vlccontrol2.h       2010-02-15 14:05:24.000000000 +0100
+@@ -126,6 +126,8 @@
+     STDMETHODIMP put_rate(double);
+     STDMETHODIMP get_fps(double*);
+     STDMETHODIMP get_hasVout(VARIANT_BOOL*);
++    STDMETHODIMP get_p2pstatus(BSTR *version); //P2P-Next
++    STDMETHODIMP set_p2ptarget(BSTR, long*); //P2P-Next, 2010-02-15
+ protected:
+     HRESULT loadTypeInfo();
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/control/npolibvlc.cpp d10-02-02-tstreamplaylist-p10/projects/mozilla/control/npolibvlc.cpp
+--- vlc-1.0.5/projects/mozilla/control/npolibvlc.cpp   2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/control/npolibvlc.cpp       2010-02-12 10:51:02.000000000 +0100
+@@ -11,7 +11,7 @@
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+- * This program is distributed in the hope that it will be useful,
++ * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+@@ -369,6 +369,7 @@
+     "rate",
+     "fps",
+     "hasVout",
++    "p2pstatus", //P2P-Next
+ };
+ COUNTNAMES(LibvlcInputNPObject,propertyCount,propertyNames);
+@@ -381,6 +382,7 @@
+     ID_input_rate,
+     ID_input_fps,
+     ID_input_hasvout,
++    ID_input_p2pstatus, //P2P-Next
+ };
+ RuntimeNPObject::InvokeResult
+@@ -396,19 +398,23 @@
+         libvlc_media_player_t *p_md = p_plugin->getMD(&ex);
+         if( libvlc_exception_raised(&ex) )
+         {
+-            if( index != ID_input_state )
++              if( index == ID_input_p2pstatus )
+             {
+-                NPN_SetException(this, libvlc_exception_get_message(&ex));
+-                libvlc_exception_clear(&ex);
+-                return INVOKERESULT_GENERIC_ERROR;
++                return this->getP2PStatus(result);
+             }
+-            else
++            else if( index == ID_input_state )
+             {
+                 /* for input state, return CLOSED rather than an exception */
+                 INT32_TO_NPVARIANT(0, result);
+                 libvlc_exception_clear(&ex);
+                 return INVOKERESULT_NO_ERROR;
+             }
++            else
++            {
++                NPN_SetException(this, libvlc_exception_get_message(&ex));
++                libvlc_exception_clear(&ex);
++                return INVOKERESULT_GENERIC_ERROR;
++            }
+         }
+         switch( index )
+@@ -462,6 +468,11 @@
+                 BOOLEAN_TO_NPVARIANT(val, result);
+                 return INVOKERESULT_NO_ERROR;
+             }
++            case ID_input_p2pstatus: //P2P-Next
++            {
++                return this->getP2PStatus(result);
++            }
++
+             default:
+                 ;
+         }
+@@ -469,6 +480,28 @@
+     return INVOKERESULT_GENERIC_ERROR;
+ }
++//P2P-Next
++RuntimeNPObject::InvokeResult
++LibvlcInputNPObject::getP2PStatus(NPVariant &result)
++{
++    VlcPlugin* p_plugin = getPrivate<VlcPlugin>();
++#ifdef XP_WIN
++    const char *s = p_plugin->getP2PStatus();
++    int len = strlen(s);
++    NPUTF8 *retval =(NPUTF8*)NPN_MemAlloc(len);
++    if( !retval )
++        return INVOKERESULT_OUT_OF_MEMORY;
++
++    memcpy(retval, s, len);
++    STRINGN_TO_NPVARIANT(retval, len, result);
++    return INVOKERESULT_NO_ERROR;
++#else
++    return INVOKERESULT_GENERIC_ERROR;
++#endif
++}
++
++
++
+ RuntimeNPObject::InvokeResult
+ LibvlcInputNPObject::setProperty(int index, const NPVariant &value)
+ {
+@@ -537,14 +570,13 @@
+ const NPUTF8 * const LibvlcInputNPObject::methodNames[] =
+ {
+-    /* no methods */
+-    "none",
++    "set_p2ptarget", //P2P-Next
+ };
+ COUNTNAMES(LibvlcInputNPObject,methodCount,methodNames);
+ enum LibvlcInputNPObjectMethodIds
+ {
+-    ID_none,
++      ID_input_set_p2ptarget,
+ };
+ RuntimeNPObject::InvokeResult
+@@ -554,10 +586,48 @@
+     /* is plugin still running */
+     if( isPluginRunning() )
+     {
++        VlcPlugin* p_plugin = getPrivate<VlcPlugin>();
++        libvlc_exception_t ex;
++        libvlc_exception_init(&ex);
++
+         switch( index )
+         {
+-            case ID_none:
++            case ID_input_set_p2ptarget: //P2P-Next
++            {
++                              if(argCount < 1)
+                 return INVOKERESULT_NO_SUCH_METHOD;
++
++                              char *url = NULL;
++
++                              // grab URL
++                              if( NPVARIANT_IS_NULL(args[0]) )
++                                      return INVOKERESULT_NO_SUCH_METHOD;
++
++                              if( NPVARIANT_IS_STRING(args[0]) )
++                              {
++                                      char *s = stringValue(NPVARIANT_TO_STRING(args[0]));
++                                      if( s )
++                                      {
++                                              url = p_plugin->getAbsoluteURL(s);
++                                              if( url )
++                                                      free(s);
++                                              else
++                                                      // problem with combining url, use argument
++                                                      url = s;
++                                      }
++                                      else
++                                              return INVOKERESULT_OUT_OF_MEMORY;
++                              }
++                              else
++                                      return INVOKERESULT_NO_SUCH_METHOD;
++
++                              int item = p_plugin->input_set_p2ptarget(url,&ex);
++                              free(url);
++
++                              RETURN_ON_EXCEPTION(this,ex);
++                              INT32_TO_NPVARIANT(item, result);
++                              return INVOKERESULT_NO_ERROR;
++            }
+             default:
+                 ;
+         }
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/control/npolibvlc.h d10-02-02-tstreamplaylist-p10/projects/mozilla/control/npolibvlc.h
+--- vlc-1.0.5/projects/mozilla/control/npolibvlc.h     2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/control/npolibvlc.h 2009-09-09 14:38:26.000000000 +0200
+@@ -99,6 +99,9 @@
+     static const NPUTF8 * const methodNames[];
+     InvokeResult invoke(int index, const NPVariant *args, uint32_t argCount, NPVariant &result);
++//P2P-Next
++private:
++    InvokeResult getP2PStatus(NPVariant &result);
+ };
+ class LibvlcPlaylistItemsNPObject: public RuntimeNPObject
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/Makefile.am d10-02-02-tstreamplaylist-p10/projects/mozilla/Makefile.am
+--- vlc-1.0.5/projects/mozilla/Makefile.am     2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/Makefile.am 2009-08-26 14:16:29.000000000 +0200
+@@ -11,6 +11,8 @@
+       vlcshell.h \
+       vlcplugin.cpp \
+       vlcplugin.h \
++      p2pcontrol.cpp \
++      p2pcontrol.h \
+       control/npolibvlc.cpp \
+       control/npolibvlc.h \
+       control/nporuntime.cpp \
+@@ -35,11 +37,11 @@
+ SOURCES_support = support/npwin.cpp
+ CPPFLAGS_mozilla_EXTRA = -DXP_WIN -DXP_WIN32
+-LDFLAGS_mozilla_EXTRA = -no-undefined -Wl,--kill-at -Wl,$(DATA_npvlc_rc)
++LDFLAGS_mozilla_EXTRA = -lws2_32 -no-undefined -Wl,--kill-at -Wl,$(DATA_npvlc_rc)
+ npvlc_la_SOURCES = $(SOURCES_mozilla_common) $(SOURCES_support)
+-npvlc_la_CFLAGS = `$(VLC_CONFIG) --cflags mozilla` $(CPPFLAGS_mozilla_extra)
+-npvlc_la_CXXFLAGS = `$(VLC_CONFIG) --cxxflags mozilla` $(CPPFLAGS_mozilla_extra)
++npvlc_la_CFLAGS = `$(VLC_CONFIG) --cflags mozilla` $(CPPFLAGS_mozilla_EXTRA)
++npvlc_la_CXXFLAGS = `$(VLC_CONFIG) --cxxflags mozilla` $(CPPFLAGS_mozilla_EXTRA)
+ npvlc_la_DEPENDENCIES = $(DATA_npvlc_rc) $(LIBRARIES_libvlc)
+ npvlc_la_LDFLAGS = `$(VLC_CONFIG) --ldflags mozilla` -module -avoid-version \
+                                  $(LDFLAGS_mozilla_EXTRA)
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/Makefile.in d10-02-02-tstreamplaylist-p10/projects/mozilla/Makefile.in
+--- vlc-1.0.5/projects/mozilla/Makefile.in     2010-01-24 22:28:52.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/Makefile.in 2010-02-12 13:22:37.000000000 +0100
+@@ -89,9 +89,10 @@
+       vlcplugin.cpp vlcplugin.h control/npolibvlc.cpp \
+       control/npolibvlc.h control/nporuntime.cpp \
+       control/nporuntime.h support/classinfo.h support/npunix.c \
+-      support/npmac.cpp support/npwin.cpp
++      support/npmac.cpp support/npwin.cpp \
++      p2pcontrol.cpp p2pcontrol.h
+ am__objects_1 = libvlcplugin_la-vlcshell.lo \
+-      libvlcplugin_la-vlcplugin.lo libvlcplugin_la-npolibvlc.lo \
++      libvlcplugin_la-vlcplugin.lo libvlcplugin_la-p2pcontrol.lo libvlcplugin_la-npolibvlc.lo \
+       libvlcplugin_la-nporuntime.lo
+ @BUILD_MOZILLA_TRUE@@HAVE_DARWIN_FALSE@@HAVE_WIN32_FALSE@am__objects_2 = libvlcplugin_la-npunix.lo
+ @BUILD_MOZILLA_TRUE@@HAVE_DARWIN_TRUE@@HAVE_WIN32_FALSE@am__objects_2 = libvlcplugin_la-npmac.lo
+@@ -110,9 +111,10 @@
+       vlcplugin.h control/npolibvlc.cpp control/npolibvlc.h \
+       control/nporuntime.cpp control/nporuntime.h \
+       support/classinfo.h support/npunix.c support/npmac.cpp \
+-      support/npwin.cpp
++      support/npwin.cpp \
++      p2pcontrol.cpp p2pcontrol.h
+ am__objects_3 = npvlc_la-vlcshell.lo npvlc_la-vlcplugin.lo \
+-      npvlc_la-npolibvlc.lo npvlc_la-nporuntime.lo
++      npvlc_la-p2pcontrol.lo npvlc_la-npolibvlc.lo npvlc_la-nporuntime.lo
+ @BUILD_MOZILLA_TRUE@@HAVE_DARWIN_FALSE@@HAVE_WIN32_FALSE@am__objects_4 = npvlc_la-npunix.lo
+ @BUILD_MOZILLA_TRUE@@HAVE_DARWIN_TRUE@@HAVE_WIN32_FALSE@am__objects_4 = npvlc_la-npmac.lo
+ @BUILD_MOZILLA_TRUE@@HAVE_WIN32_TRUE@am__objects_4 =  \
+@@ -929,6 +931,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvlcplugin_la-npwin.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvlcplugin_la-vlcplugin.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvlcplugin_la-vlcshell.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvlcplugin_la-p2pcontrol.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/npvlc_la-npmac.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/npvlc_la-npolibvlc.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/npvlc_la-nporuntime.Plo@am__quote@
+@@ -936,6 +939,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/npvlc_la-npwin.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/npvlc_la-vlcplugin.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/npvlc_la-vlcshell.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/npvlc_la-p2pcontrol.Plo@am__quote@
+ .c.o:
+ @am__fastdepCC_TRUE@  $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@@ -1000,6 +1004,15 @@
+ @AMDEP_TRUE@@am__fastdepCXX_FALSE@    DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ @am__fastdepCXX_FALSE@        $(LIBTOOL)  --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libvlcplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o libvlcplugin_la-vlcshell.lo `test -f 'vlcshell.cpp' || echo '$(srcdir)/'`vlcshell.cpp
++
++libvlcplugin_la-p2pcontrol.lo: p2pcontrol.cpp
++@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libvlcplugin_la_CXXFLAGS) $(CXXFLAGS) -MT libvlcplugin_la-p2pcontrol.lo -MD -MP -MF $(DEPDIR)/libvlcplugin_la-p2pcontrol.Tpo -c -o libvlcplugin_la-p2pcontrol.lo `test -f 'p2pcontrol.cpp' || echo '$(srcdir)/'`p2pcontrol.cpp
++@am__fastdepCXX_TRUE@ mv -f $(DEPDIR)/libvlcplugin_la-p2pcontrol.Tpo $(DEPDIR)/libvlcplugin_la-p2pcontrol.Plo
++@AMDEP_TRUE@@am__fastdepCXX_FALSE@    source='p2pcontrol.cpp' object='libvlcplugin_la-p2pcontrol.lo' libtool=yes @AMDEPBACKSLASH@
++@AMDEP_TRUE@@am__fastdepCXX_FALSE@    DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++@am__fastdepCXX_FALSE@        $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libvlcplugin_la_CXXFLAGS) $(CXXFLAGS) -c -o libvlcplugin_la-p2pcontrol.lo `test -f 'p2pcontrol.cpp' || echo '$(srcdir)/'`p2pcontrol.cpp
++
++
+ libvlcplugin_la-vlcplugin.lo: vlcplugin.cpp
+ @am__fastdepCXX_TRUE@ $(LIBTOOL)  --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libvlcplugin_la_CXXFLAGS) $(CXXFLAGS) -MT libvlcplugin_la-vlcplugin.lo -MD -MP -MF $(DEPDIR)/libvlcplugin_la-vlcplugin.Tpo -c -o libvlcplugin_la-vlcplugin.lo `test -f 'vlcplugin.cpp' || echo '$(srcdir)/'`vlcplugin.cpp
+ @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libvlcplugin_la-vlcplugin.Tpo $(DEPDIR)/libvlcplugin_la-vlcplugin.Plo
+@@ -1042,6 +1055,15 @@
+ @AMDEP_TRUE@@am__fastdepCXX_FALSE@    DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ @am__fastdepCXX_FALSE@        $(LIBTOOL)  --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(npvlc_la_CXXFLAGS) $(CXXFLAGS) -c -o npvlc_la-vlcshell.lo `test -f 'vlcshell.cpp' || echo '$(srcdir)/'`vlcshell.cpp
++
++npvlc_la-p2pcontrol.lo: p2pcontrol.cpp
++@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(npvlc_la_CXXFLAGS) $(CXXFLAGS) -MT npvlc_la-p2pcontrol.lo -MD -MP -MF $(DEPDIR)/npvlc_la-p2pcontrol.Tpo -c -o npvlc_la-p2pcontrol.lo `test -f 'p2pcontrol.cpp' || echo '$(srcdir)/'`p2pcontrol.cpp
++@am__fastdepCXX_TRUE@ mv -f $(DEPDIR)/npvlc_la-p2pcontrol.Tpo $(DEPDIR)/npvlc_la-p2pcontrol.Plo
++@AMDEP_TRUE@@am__fastdepCXX_FALSE@    source='p2pcontrol.cpp' object='npvlc_la-p2pcontrol.lo' libtool=yes @AMDEPBACKSLASH@
++@AMDEP_TRUE@@am__fastdepCXX_FALSE@    DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++@am__fastdepCXX_FALSE@        $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(npvlc_la_CXXFLAGS) $(CXXFLAGS) -c -o npvlc_la-p2pcontrol.lo `test -f 'p2pcontrol.cpp' || echo '$(srcdir)/'`p2pcontrol.cpp
++
++
+ npvlc_la-vlcplugin.lo: vlcplugin.cpp
+ @am__fastdepCXX_TRUE@ $(LIBTOOL)  --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(npvlc_la_CXXFLAGS) $(CXXFLAGS) -MT npvlc_la-vlcplugin.lo -MD -MP -MF $(DEPDIR)/npvlc_la-vlcplugin.Tpo -c -o npvlc_la-vlcplugin.lo `test -f 'vlcplugin.cpp' || echo '$(srcdir)/'`vlcplugin.cpp
+ @am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/npvlc_la-vlcplugin.Tpo $(DEPDIR)/npvlc_la-vlcplugin.Plo
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/npvlc.dll.manifest d10-02-02-tstreamplaylist-p10/projects/mozilla/npvlc.dll.manifest
+--- vlc-1.0.5/projects/mozilla/npvlc.dll.manifest      2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/npvlc.dll.manifest  2010-08-13 15:43:48.000000000 +0200
+@@ -1,10 +1,10 @@
+ <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+       <assemblyIdentity
+-              version="1.0.0.0"
++              version="1.1.0.0"
+               processorArchitecture="x86"
+               name="npvlc.dll"
+               type="win32"
+       />
+-      <description>VLC Mozilla plugin</description>
++      <description>SwarmPlugin P2P Multimedia Mozilla plugin</description>
+ </assembly>
+\ No newline at end of file
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/npvlc_rc.rc.in d10-02-02-tstreamplaylist-p10/projects/mozilla/npvlc_rc.rc.in
+--- vlc-1.0.5/projects/mozilla/npvlc_rc.rc.in  2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/npvlc_rc.rc.in      2010-08-13 15:43:48.000000000 +0200
+@@ -1,8 +1,8 @@
+ /////////////////////////////////////////////////////////////////////////////
+ //
+-//  VLC Plugin description.
++//  SwarmPlugin description.
+ //
+-#define VERSION_NUMBER @VERSION_MAJOR@,@VERSION_MINOR@,@VERSION_REVISION@,@VERSION_EXTRA_RC@ 
++#define VERSION_NUMBER 1,1,0,0
+ //VS_VERSION_INFO VERSIONINFO
+ 1 VERSIONINFO
+@@ -23,17 +23,17 @@
+     BEGIN
+         BLOCK "040904e4"
+         BEGIN
+-            VALUE "ProductName", "VLC Multimedia Plug-in\0"
+-            VALUE "ProductVersion", "@VERSION@"
++            VALUE "ProductName", "SwarmPlugin P2P Multimedia Plug-in\0"
++            VALUE "ProductVersion", "1.1.0"
+             VALUE "OriginalFilename", "npvlc.dll\0"
+-            VALUE "FileVersion", "@VERSION@"
+-            VALUE "FileDescription", "Version @VERSION@, copyright @COPYRIGHT_YEARS@ The VideoLAN Team<br><a href=""http://www.videolan.org/"">http://www.videolan.org/</a>\0"
++            VALUE "FileVersion", "1.1.0"
++            VALUE "FileDescription", "SwarmPlugin Version 1.1.0, Copyright (c) 2009-2010, the VideoLAN Team and Delft University of Technology<br><a href=""http://www.pds.ewi.tudelft.nl/code.html"">http://www.pds.ewi.tudelft.nl/code.html</a>\0"
+             VALUE "InternalName", "npvlc\0"
+-            VALUE "CompanyName", "the VideoLAN Team\0"
+-            VALUE "LegalCopyright", "Copyright \251 @COPYRIGHT_YEARS@ The VideoLAN Team\0"
+-            VALUE "MIMEType", "audio/mpeg|audio/x-mpeg|video/mpeg|video/x-mpeg|video/mpeg-system|video/x-mpeg-system|video/mp4|audio/mp4|application/mpeg4-iod|application/mpeg4-muxcodetable|video/x-msvideo|video/quicktime|application/x-ogg|application/ogg|application/x-vlc-plugin|video/x-ms-asf-plugin|video/x-ms-asf|application/x-mplayer2|video/x-ms-wmv|application/x-google-vlc-plugin|audio/wav|audio/x-wav|audio/3gpp|video/3gpp|audio/3gpp2|video/3gpp2|video/divx|video/flv|video/x-flv|video/x-matroska|audio/x-matroska|application/xspf+xml|audio/x-m4a|audio/x-mpegurl|audio/x-ms-wma\0"
+-            VALUE "FileExtents", "mp2,mp3,mpga,mpega|mp2,mp3,mpga,mpega|mpg,mpeg,mpe|mpg,mpeg,mpe|mpg,mpeg,vob|mpg,mpeg,vob|mp4,mpg4|mp4,mpg4|mp4,mpg4|mp4,mpg4|avi|mov,qt|ogg|ogg|vlc|asf,asx|asf,asx||wmv||wav|wav|3gp,3gpp|3gp,3gpp|3g2,3gpp2|3g2,3gpp2|divx|flv|flv|mkv|mka|xspf|m4a|m3u|wma\0"
+-            VALUE "FileOpenName", "MPEG audio|MPEG audio|MPEG video|MPEG video|MPEG video|MPEG video|MPEG-4 video|MPEG-4 audio|MPEG-4 video|MPEG-4 video|AVI video|QuickTime video|Ogg stream|Ogg stream|VLC plug-in|Windows Media video|Windows Media video|Windows Media plug-in|Windows Media video|Google VLC plug-in|WAV audio|WAV audio|3GPP audio|3GPP video|3GPP2 audio|3GPP2 video|DivX video|FLV video|FLV video|Matroska video|Matroska audio|Playlist xspf|MPEG-4 audio|Playlist M3U|Windows Media Audio\0"
++            VALUE "CompanyName", "the VideoLAN Team and Delft University of Technology\0"
++            VALUE "LegalCopyright", "Copyright \251 2009-2010 the VideoLAN Team and Delft University of Technology\0"
++            VALUE "MIMEType", "application/x-ns-stream\0"
++            VALUE "FileExtents", "torrent\0"
++            VALUE "FileOpenName", "Torrent Video\0"
+         END
+     END
+     BLOCK "VarFileInfo"
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/p2pcontrol.cpp d10-02-02-tstreamplaylist-p10/projects/mozilla/p2pcontrol.cpp
+--- vlc-1.0.5/projects/mozilla/p2pcontrol.cpp  1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/p2pcontrol.cpp      2010-05-27 15:57:58.000000000 +0200
+@@ -0,0 +1,522 @@
++/*
++ * Written by Diego Andres Rabaioli
++ * see P2P-Next-LICENSE.txt for license information
++ */
++#ifdef XP_WIN
++
++#include "p2pcontrol.h"
++#include <string>
++
++#define P_DEBUG
++
++#ifdef P_DEBUG
++#include <stdio.h>
++#include <stdarg.h>
++#endif
++
++void writeOnLog( const char * msg, ... )
++{
++#ifdef P_DEBUG
++  static std::string logPath = "";
++
++  if( logPath.empty() )
++  {
++    LONG   result;
++    HKEY   hKey;
++    CHAR   Logpath[256];
++    DWORD  bufLen = sizeof( Logpath );
++
++    // Look in the Windows registry for installation path
++    result = RegOpenKeyEx( HKEY_LOCAL_MACHINE, PLUGIN_REG_KEY, 0, KEY_QUERY_VALUE, &hKey );
++    if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return; }
++    result = RegQueryValueEx( hKey, LOG_PATH_ELEMENT, NULL, NULL, (LPBYTE)Logpath, &bufLen);
++    if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return; }
++    RegCloseKey( hKey );
++    logPath.assign( Logpath );
++    logPath.append( "\\" );
++    logPath.append( LOG_FILE_NAME );
++  }
++
++  if( ! logPath.empty() )
++  {
++    FILE * file;
++    file = fopen( logPath.c_str(), "a" );
++    if( file == NULL ) return;
++
++    va_list args;
++    va_start( args, msg );
++    vfprintf( file, msg, args );
++    va_end( args );
++    fputs( "\r\n", file );
++    fclose( file );
++  }
++
++/*  FILE * file;
++  file = fopen( "C:\\vlc_plugin_fx.log", "a" );
++  if( file == NULL ) return;
++
++  va_list args;
++  va_start( args, msg );
++  vfprintf( file, msg, args );
++  va_end( args );
++  fputs( "\r\n", file );
++  fclose( file );*/
++#endif
++};
++
++///////////////////////////
++// BGPConnection
++///////////////////////////
++BGPConnection::BGPConnection( const int port, const std::string bgAddress )
++{
++  mBGAddress   = bgAddress;
++  mPort        = port;
++  mSocketState = S_DOWN;
++}
++
++///////////////////////////
++BGPConnection::~BGPConnection()
++{
++  disconnect();
++}
++
++///////////////////////////
++bool BGPConnection::connect()
++{
++  WSADATA            wsaData;
++  struct sockaddr_in serverAddress;
++  int                iResult;
++
++  // Just to be in a consistent state
++  if( mSocketState == S_UP )
++    return true;
++  else if( mSocketState != S_DOWN )
++    disconnect();
++
++  // Init Winsock
++  iResult = WSAStartup( MAKEWORD( 2, 2 ), &wsaData );
++  if( iResult != 0 )
++    return disconnect();
++
++  mSocketState = S_STARTED;
++
++  // Create socket
++  if( ( mServerSocket = socket( AF_INET, SOCK_STREAM, IPPROTO_TCP ) ) == INVALID_SOCKET )
++    return disconnect();
++
++  mSocketState = S_CREATED;
++
++  memset( &serverAddress, 0, sizeof( serverAddress ) );
++  serverAddress.sin_family      = AF_INET;
++  serverAddress.sin_port        = htons( mPort );
++  serverAddress.sin_addr.s_addr = inet_addr( mBGAddress.c_str() );
++
++  // Connect to the BG process server
++  if( ( ::connect( mServerSocket, (struct sockaddr *)&serverAddress,
++                 sizeof( serverAddress ) ) ) == SOCKET_ERROR )
++  {
++    #ifdef P_DEBUG
++    writeOnLog( "BGPConnection: Could not connect to server" );
++    writeOnLog( "%i Socket Server : %i Server Addres : %i", WSAGetLastError(), mServerSocket, serverAddress.sin_addr.s_addr );
++    #endif
++    return disconnect();
++  }
++
++  #ifdef P_DEBUG
++  writeOnLog( "BGPConnection: CONNECTED" );
++  #endif
++
++  mSocketState = S_UP;
++  return true;
++
++}
++
++///////////////////////////
++bool BGPConnection::disconnect()
++{
++// Shut down the connection depending on the state of the socket
++// It basically always returns false (stupid code optimization reasons)
++  #ifdef P_DEBUG_2
++  char * tmp_str = NULL;
++  sprintf( tmp_str, "BGPConnection: Shutting down connection from status %d", mSocketState );
++  writeOnLog( tmp_str );
++  #endif
++
++  if( mSocketState == S_DOWN )
++    return false;
++
++  if( mSocketState >= S_CREATED )
++  {
++    closesocket( mServerSocket );
++    mServerSocket = INVALID_SOCKET;
++  }
++
++  WSACleanup();
++  mSocketState = S_DOWN;
++  return false;
++}
++
++///////////////////////////
++bool BGPConnection::sendMsg( const std::string & event ) const
++{
++  int res;
++
++  // Check socket status
++  if( mSocketState != S_UP )
++    return false;
++
++  // Complete message
++  std::string msg = event + "\r\n";
++  // Send Event
++  res = send( mServerSocket, msg.c_str(), msg.length(), 0 );
++  #ifdef P_DEBUG
++  writeOnLog( "BGPConnection: Sending: %s", event.c_str() );
++  #endif
++  if( res == SOCKET_ERROR )
++  {
++    #ifdef P_DEBUG
++    writeOnLog( "BGPConnection: Error in sending event" );
++    #endif
++    return false;
++  }
++
++  return true;
++}
++
++///////////////////////////
++bool BGPConnection::recvMsg( std::string & msg ) const
++{
++  char inBuffer[ IN_BUF_LEN ];
++  int  res;
++
++  if( mSocketState != S_UP )
++    return false;
++
++  msg = "";
++  while( true )
++  {
++    res = recv( mServerSocket, inBuffer, IN_BUF_LEN, 0 );
++
++    if( res <= 0 )
++    {
++      #ifdef P_DEBUG
++      writeOnLog( "BGPConnection: Error in receiving stream" );
++      #endif
++      msg = "SHUTDOWN";
++      return false;
++    }
++
++    msg.append( inBuffer, res );
++    if( ! msg.compare( msg.size() - 2, 2, "\r\n" ) )
++      break;
++  }
++  // Trim "\r\n"
++  msg.erase( msg.size() - 2 );
++
++  #ifdef P_DEBUG
++  writeOnLog( "BGPConnection: RECEIVED : %s", msg.c_str() );
++  #endif
++
++  return true;
++}
++
++///////////////////////////
++// P2PControl
++///////////////////////////
++P2PControl::P2PControl( const int port, const std::string bgAddress ) :
++  mEventCBMap(), mProtoState( P_DOWN )
++{
++  mConnection  = new BGPConnection( port, bgAddress );
++  mEventThread = NULL;
++}
++
++///////////////////////////
++P2PControl::~P2PControl()
++{
++  shutdown();
++}
++
++///////////////////////////
++bool P2PControl::startup()
++{
++  writeOnLog( "P2PControl::starting up..." );
++
++  // Try to connect to the BG Process.
++  // If it doesn't succeed then try to
++  // start the BG Process and try to
++  // connect again
++  if( mProtoState == P_UP )
++    return true;
++
++  if( ! mConnection->connect() )
++    if( ! startBGProcess() || ! mConnection->connect() )
++      return mConnection->disconnect();
++
++
++  if( mEventThread != NULL ) // TODO : confusing... fix startup procedure
++    return true;
++
++  // Init synchronization event
++  mSyncEvent = CreateEvent( NULL, false, false, NULL );
++  if( mSyncEvent == NULL )
++  {
++    writeOnLog( "P2PControl: Error in creating syncronization event" );
++    return false;
++  }
++
++  // Init synchronization mutex
++  mSyncMutex = CreateMutex( NULL, false, NULL );
++  if( mSyncMutex == NULL )
++  {
++    writeOnLog( "P2PControl: Error in creating syncronization mutex" );
++    return false;
++  }
++
++  // Start the Event Loop
++  ThreadParams * threadParams = new ThreadParams;
++  threadParams->connection    = mConnection;
++  threadParams->eventMap      = &mEventCBMap;
++  threadParams->syncEvent     = &mSyncEvent;
++  threadParams->syncMutex     = &mSyncMutex;
++
++  mEventThread = CreateThread( NULL,
++                               0,
++                               &eventLoop,
++                               threadParams,
++                               0,
++                               NULL );
++
++  if( mEventThread == NULL )
++  {
++    writeOnLog( "P2PControl: Could not start event loop thread" );
++    delete threadParams;
++    return false;
++  }
++
++  if( WaitForSingleObject( mSyncEvent, INFINITE ) != WAIT_OBJECT_0 )
++    writeOnLog( "Sync Error while Starting up the event thread" );
++
++  delete threadParams;
++
++  mProtoState = P_UP;
++
++  return true;
++}
++
++///////////////////////////
++bool P2PControl::shutdown()
++{
++  DWORD res;
++  mConnection->sendMsg( "SHUTDOWN" );
++  res = WaitForSingleObject( mSyncEvent, 2000 );
++  if( res == WAIT_TIMEOUT ) writeOnLog( "Sync Error while closing thread" );
++  else writeOnLog( "P2PControl: Thread cleanly exited" );
++
++  CloseHandle( mEventThread );
++  CloseHandle( mSyncEvent );
++  CloseHandle( mSyncMutex );
++  delete mConnection;
++  mProtoState = P_DOWN;
++
++  return true;
++}
++
++///////////////////////////
++bool P2PControl::startBGProcess()
++{
++  LONG         result;
++  HKEY         hKey;
++  CHAR         BGpath[256]; // TODO : Fix this
++  DWORD        bufLen = sizeof( BGpath );
++
++  // Look in the Windows registry to get the path of the BG
++  result = RegOpenKeyEx( HKEY_LOCAL_MACHINE, PLUGIN_REG_KEY, 0, KEY_QUERY_VALUE, &hKey );
++  if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return false; }
++  result = RegQueryValueEx( hKey, BG_PATH_ELEMENT, NULL, NULL, (LPBYTE)BGpath, &bufLen);
++  if( result != ERROR_SUCCESS ) { RegCloseKey( hKey ); return false; }
++  RegCloseKey( hKey );
++
++  // Set up variables
++  STARTUPINFOA        startupInfo;
++  PROCESS_INFORMATION processInfo;
++  memset( &startupInfo, 0, sizeof( startupInfo ) );
++  memset( &processInfo, 0, sizeof( processInfo ) );
++  startupInfo.cb = sizeof( startupInfo );
++
++  writeOnLog( "Starting BG Process..." );
++  // Finally start the BG Process
++  bool started = CreateProcess( BGpath,
++                                NULL,
++                                NULL,
++                                NULL,
++                                false,
++                                CREATE_NO_WINDOW,
++                                NULL,
++                                NULL,
++                                &startupInfo,
++                                &processInfo );
++
++  // Wait for the process to start up, then expect a 'startup' event
++  HANDLE startupEvent = CreateEvent( NULL, false, false, "startupEvent" );
++  if( startupEvent == NULL )
++  {
++    writeOnLog( "P2PControl: Error in creating syncronization event: Could not create BG Process" );
++    started = false;
++  }
++  else
++  {
++    if( started )
++    {
++      writeOnLog( "Waiting Startup event from BG" );
++      ::WaitForSingleObject( startupEvent, INFINITE );
++      writeOnLog( "BGProcess Created" );
++    }
++    else
++    {
++      writeOnLog( "Could not start BG Process" );
++      writeOnLog(  "Last Error code : %i", GetLastError() );
++    }
++
++    CloseHandle( startupEvent );
++  }
++
++  CloseHandle( processInfo.hProcess );
++  CloseHandle( processInfo.hThread );
++
++  return started;
++}
++
++///////////////////////////
++bool P2PControl::sendTorrent( const std::string torrent )
++{
++  #ifdef P_DEBUG_2
++  writeOnLog( "P2PControl: Sending Torrent..." );
++  #endif
++
++  // Send START command with the torrent
++  std::string command = "START " + torrent;
++  return mConnection->sendMsg( command );
++}
++
++///////////////////////////
++DWORD WINAPI P2PControl::eventLoop( LPVOID params )
++{
++  BGPConnection *          connection;
++  event_cb_map_t *         eventMap;
++  std::string              serverCmd, command;
++  bg_event_t               event = EV_NONE;
++  event_cb_map_t::iterator eventIt;
++  HANDLE *                 syncEvent;
++  HANDLE *                 syncMutex;
++  // Just for iterating through the events
++  std::pair< event_cb_map_t::iterator, event_cb_map_t::iterator > equalEventsIt;
++
++  // Parse parameters
++  connection = ( (ThreadParams *)params )->connection;
++  eventMap   = ( (ThreadParams *)params )->eventMap;
++  syncEvent  = ( (ThreadParams *)params )->syncEvent;
++  syncMutex  = ( (ThreadParams *)params )->syncMutex;
++
++  SetEvent( *syncEvent ); // Main thread can delete ThreadParams now
++
++  writeOnLog( "P2P Thread: STARTING LOOP" );
++  // Main Thread Loop
++  while( event != EV_CLOSE )
++  {
++    if( ! connection->recvMsg( serverCmd ) )
++    {
++      #ifdef P_DEBUG
++      writeOnLog( "P2P Thread: Unable to receive the command from BG" );
++      #endif
++      if( serverCmd != "SHUTDOWN" )
++        continue;
++    }
++
++    command = "";
++    event   = EV_NONE;
++    if( ! serverCmd.compare( 0, 4, "PLAY" ) )
++    {
++      command = serverCmd.substr( 5 );
++      event = EV_PLAY;
++    }
++    else if ( ! serverCmd.compare( 0, 5, "PAUSE" ) )
++    {
++      event = EV_PAUSE;
++    }
++    else if ( ! serverCmd.compare( 0, 6, "RESUME" ) )
++    {
++      event = EV_RESUME;
++    }
++    else if ( ! serverCmd.compare( 0, 8, "SHUTDOWN" ) )
++    {
++      writeOnLog( "P2P Thread: Received SHUTDOWN" );
++      event = EV_CLOSE;
++    }
++    else if ( ! serverCmd.compare( 0, 4, "INFO" ) )
++    {
++      command = serverCmd.substr( 5 );
++      event = EV_INFO;
++      writeOnLog( "INFO command received: %s", command.c_str() );
++    }
++    else if( ! serverCmd.compare( 0, 4, "STOP" ) )
++    {
++      event = EV_STOP;
++    }
++    else
++    {
++      #ifdef P_DEBUG
++      writeOnLog( "P2P Thread: Received wrong command: %s", serverCmd.c_str() );
++      #endif
++      continue;
++    }
++
++    writeOnLog( "P2P Thread: Command: %s", command.c_str() );
++
++    // Call all event handlers
++    if( WaitForSingleObject( *syncMutex, INFINITE ) == WAIT_FAILED )
++    { writeOnLog( "Failed to Lock the Mutex" ); continue; }
++
++    equalEventsIt = eventMap->equal_range( event );
++    for( eventIt = equalEventsIt.first; eventIt != equalEventsIt.second; ++eventIt )
++      ( (*eventIt).second )->process( command.c_str() );
++
++    if( ! ReleaseMutex( *syncMutex ) )
++    { writeOnLog( "Failed to Release the Mutex" ); continue; }
++
++    if( event == EV_CLOSE )
++      break;
++  }
++  writeOnLog( "P2P Thread: EXITING Loop" );
++  SetEvent( *syncEvent ); // Main thread can shut down now
++  return 0;
++}
++
++///////////////////////////
++/*void P2PControl::unregEventCB( bg_event_t event, EventHandlerWrap * eventCallback )
++{
++  std::pair< event_cb_map_t::iterator, event_cb_map_t::iterator > equalEventsIt;
++  // TODO
++  if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++  { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++  equalEventsIt = mEventCBMap->equal_range( event );
++  for( eventIt = equalEventsIt.first; eventIt != equalEventsIt.second; ++eventIt )
++    ( (*eventIt).second )->process( command.c_str() );
++
++  if( ! ReleaseMutex( mSyncMutex ) )
++  { writeOnLog( "Failed to Release the Mutex" ); return; }
++}*/
++
++///////////////////////////
++void P2PControl::unregEventCB( bg_event_t event )
++{
++  if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++  { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++  mEventCBMap.erase( event );
++
++  if( ! ReleaseMutex( mSyncMutex ) )
++  { writeOnLog( "Failed to Release the Mutex" ); return; }
++}
++
++#endif
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/p2pcontrol.h d10-02-02-tstreamplaylist-p10/projects/mozilla/p2pcontrol.h
+--- vlc-1.0.5/projects/mozilla/p2pcontrol.h    1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/p2pcontrol.h        2010-02-15 14:05:24.000000000 +0100
+@@ -0,0 +1,202 @@
++/*
++ * Written by Diego Andres Rabaioli
++ * see P2P-Next-LICENSE.txt for license information
++ */
++
++#ifndef _PEER2PEERCONTROL_H
++#define _PEER2PEERCONTROL_H
++
++#include <winsock2.h>
++#include <string>
++#include <map>
++
++
++////////////////////////
++//
++// This class represents the communication layer between the Plug-in
++// and the BG Process. It sends and receives events through a TCP
++// socket. It receives commands in an asynchronous way. A thread
++// implements the event loop: it's mainly blocked in a recv call,
++// as soon as it receives an event from the BG process, it calls
++// all the handlers registered for that event. The communication
++// protocol is basic: after the connection with the remote
++// process, the P2PControl sends the MRL ( media resource
++// locator) of the torrent to the BG, when the stream is available,
++// a 'PLAY' event is sent back and the handler for that event is
++// called. Since it's intended to work as a library, external code
++// can use the P2P control by registering their handler functions.
++//
++////////////////////////
++
++void writeOnLog( const char * msg, ... );
++
++///////////////////////////
++// BGPConnection
++///////////////////////////
++
++// States enumerations
++enum sock_state_t { S_DOWN, S_STARTED, S_CREATED, S_UP };
++
++const int         IN_BUF_LEN = 512;
++const int         BG_PORT    = 62062;
++const std::string BG_ADDRESS = "127.0.0.1";
++
++class BGPConnection
++{
++ public:
++  BGPConnection( const int port, const std::string bgAddress );
++  ~BGPConnection();
++
++  bool connect();
++  bool disconnect();
++
++  bool sendMsg( const std::string & ) const;
++  bool recvMsg( std::string & ) const;
++
++  std::string  getAddress() { return mBGAddress; }
++  int          getPort()    { return mPort; }
++
++ private:
++  SOCKET       mServerSocket;
++  std::string  mBGAddress;
++  int          mPort;
++  sock_state_t mSocketState;
++};
++
++///////////////////////////
++// EventHandlerWrap
++///////////////////////////
++class EventHandlerWrap
++{
++ public:
++  virtual void process( const char * ) const = 0;
++  virtual ~EventHandlerWrap() {}
++};
++
++///////////////////////////
++class EventHandlerWrap_Static : public EventHandlerWrap
++{
++ public:
++  EventHandlerWrap_Static( void ( *handler )( const char * ) ):
++    mSHandler( handler ) {}
++
++  virtual ~EventHandlerWrap_Static() {}
++
++  virtual void process( const char * c ) const
++  {
++    mSHandler( c );
++  }
++
++ private:
++  void ( *mSHandler )( const char * );
++
++};
++
++///////////////////////////
++template < class T >
++class EventHandlerWrap_NonStatic : public EventHandlerWrap
++{
++ public:
++  EventHandlerWrap_NonStatic( T * obj, void ( T::* handler )( const char * ) ):
++    mObject( obj ), mNHandler( handler ) {}
++
++  virtual ~EventHandlerWrap_NonStatic() {}
++
++  virtual void process( const char * c ) const
++  {
++    (mObject->*mNHandler)( c );
++  }
++
++ private:
++  T * mObject;
++  void ( T::* mNHandler )( const char * );
++};
++
++///////////////////////////
++// P2PControl
++///////////////////////////
++
++// Constants
++const char PLUGIN_REG_KEY[]   = "Software\\SwarmPlugin";
++const char BG_PATH_ELEMENT[]  = "BGProcessPath";
++const char LOG_PATH_ELEMENT[] = "InstallDir";
++const char LOG_FILE_NAME[]    = "swarmplugin.log";
++
++// Event Callback types
++enum bg_event_t { EV_NONE, EV_PLAY, EV_PAUSE, EV_RESUME, EV_STOP, EV_INFO, EV_CLOSE };
++typedef std::multimap< bg_event_t, EventHandlerWrap * > event_cb_map_t;
++typedef std::pair< bg_event_t, EventHandlerWrap * >     event_cb_item_t;
++
++// Thread parameters
++typedef struct {  BGPConnection *  connection;
++                  event_cb_map_t * eventMap;
++                  HANDLE *         syncEvent;
++                  HANDLE *         syncMutex;
++               } ThreadParams;
++
++// States enumerations
++enum protocol_status_t { P_DOWN, P_UP, P_CLOSING };
++
++class P2PControl
++{
++ public:
++  P2PControl( const int port = BG_PORT, const std::string bgAddress = BG_ADDRESS );
++  ~P2PControl();
++
++  bool  startup();
++  bool  shutdown();
++
++  bool  launchBGProcess( const char * cmd = NULL );
++  bool  sendTorrent( const std::string torrent );
++
++  void  regEventCB( bg_event_t event, void (*callback)( const char * ) )
++  {
++    if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++      { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++    EventHandlerWrap * wrap = new EventHandlerWrap_Static( callback );
++    mEventCBMap.insert( event_cb_item_t( event, wrap ) );
++
++    if( ! ReleaseMutex( mSyncMutex ) )
++      { writeOnLog( "Failed to Release the Mutex" ); return; }
++  }
++
++  template < class T >
++  void  regEventCB( bg_event_t event, T * obj, void ( T::* callback)( const char * ) )
++  {
++    if( WaitForSingleObject( mSyncMutex, INFINITE ) == WAIT_FAILED )
++      { writeOnLog( "Failed to Lock the Mutex" ); return; }
++
++    EventHandlerWrap * wrap = new EventHandlerWrap_NonStatic< T >( obj, callback );
++    mEventCBMap.insert( event_cb_item_t( event, wrap ) );
++
++    if( ! ReleaseMutex( mSyncMutex ) )
++      { writeOnLog( "Failed to Release the Mutex" ); return; }
++  }
++
++  //void  unregEventCB( bg_event_t, EventHandlerWrap * );
++  void  unregEventCB( bg_event_t );
++
++  // DEBUG
++  void setProtoState( protocol_status_t newState ) { mProtoState = newState; }
++ private:
++  // Thread utilities
++  const BGPConnection *  getConnection() const { return mConnection; }
++  const event_cb_map_t * getEventMap()   const { return &mEventCBMap; }
++  // Event Loop: receives events from BG and call the registered handlers
++  static DWORD WINAPI    eventLoop( LPVOID );
++  // Back Ground Process
++  static bool            startBGProcess();
++
++  // Variable Members
++  BGPConnection *   mConnection;
++  protocol_status_t mProtoState;
++
++  // Thread and Sync
++  event_cb_map_t    mEventCBMap;  // Association event/callback
++  HANDLE            mEventThread; // Thread handler
++  HANDLE            mSyncEvent;   // Synchronize the thread at startup and shutdown time
++  HANDLE            mSyncMutex;   // Synchronize access to the Event Map
++};
++
++#endif
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/vlcplugin.cpp d10-02-02-tstreamplaylist-p10/projects/mozilla/vlcplugin.cpp
+--- vlc-1.0.5/projects/mozilla/vlcplugin.cpp   2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/vlcplugin.cpp       2010-02-15 14:05:24.000000000 +0100
+@@ -73,10 +73,17 @@
+ #endif
+ {
+     memset(&npwindow, 0, sizeof(NPWindow));
++
+ #if XP_UNIX
+     memset(&npvideo, 0, sizeof(Window));
+     memset(&npcontrol, 0, sizeof(Window));
+ #endif
++
++#ifdef XP_WIN
++    // P2P-Next
++    p2pControl = new P2PControl();
++    p2pStatus = NULL;
++#endif
+ }
+ static bool boolValue(const char *value) {
+@@ -103,7 +110,7 @@
+     HKEY h_key;
+     DWORD i_type, i_data = MAX_PATH + 1;
+     char p_data[MAX_PATH + 1];
+-    if( RegOpenKeyEx( HKEY_LOCAL_MACHINE, "Software\\VideoLAN\\VLC",
++    if( RegOpenKeyEx( HKEY_LOCAL_MACHINE, "Software\\SwarmPlugin",
+                       0, KEY_READ, &h_key ) == ERROR_SUCCESS )
+     {
+          if( RegQueryValueEx( h_key, "InstallDir", 0, &i_type,
+@@ -120,6 +127,11 @@
+     }
+     ppsz_argv[ppsz_argc++] = "--no-one-instance";
++    // P2P
++    if( ! p2pControl->startup() )
++      return NPERR_GENERIC_ERROR;
++
++
+ #endif /* XP_MACOSX */
+     /* common settings */
+@@ -129,6 +141,12 @@
+     ppsz_argv[ppsz_argc++] = "--ignore-config";
+     ppsz_argv[ppsz_argc++] = "--intf=dummy";
+     ppsz_argv[ppsz_argc++] = "--no-video-title-show";
++    ppsz_argv[ppsz_argc++] = "--no-osd";
++    // Arno: uncomment to enable logging
++    //ppsz_argv[ppsz_argc++] = "--extraintf=logger";
++    //ppsz_argv[ppsz_argc++] = "--logfile=C:\\VLC-logfile.txt";
++
++
+     const char *progid = NULL;
+@@ -253,6 +271,22 @@
+         // get absolute URL from src
+         char *psz_absurl = getAbsoluteURL(psz_target);
+         psz_target = psz_absurl ? psz_absurl : strdup(psz_target);
++
++#ifdef XP_WIN
++        writeOnLog( "VlcPlugin::init: target: %s", psz_target );
++
++
++        p2pControl->regEventCB< VlcPlugin >( EV_PLAY,   this, &VlcPlugin::onP2PPlay );
++        p2pControl->regEventCB< VlcPlugin >( EV_PAUSE,  this, &VlcPlugin::onP2PPause );
++        p2pControl->regEventCB< VlcPlugin >( EV_RESUME, this, &VlcPlugin::onP2PResume );
++        p2pControl->regEventCB< VlcPlugin >( EV_STOP,   this, &VlcPlugin::onP2PStop );
++        p2pControl->regEventCB< VlcPlugin >( EV_INFO,   this, &VlcPlugin::onP2PInfo );
++
++        if( ! p2pControl->sendTorrent( std::string( psz_target ) ) )
++        {  writeOnLog( "VlcPlugin::init: Could NOT send torrent" ); }
++        free(psz_target);
++        psz_target = NULL;
++#endif
+     }
+     /* assign plugin script root class */
+@@ -264,6 +298,9 @@
+ VlcPlugin::~VlcPlugin()
+ {
++#if XP_WIN
++    delete p2pControl; // P2P
++#endif
+     free(psz_baseURL);
+     free(psz_target);
+@@ -916,3 +953,124 @@
+ }
+ #undef BTN_SPACE
+ #endif
++#if XP_WIN
++void VlcPlugin::onP2PPlay( const char * stream )
++{
++  writeOnLog( "VlcPlugin::onP2PPlay %s", stream );
++
++  if( libvlc_instance )
++  {
++    libvlc_exception_t ex;
++    libvlc_exception_init( &ex );
++
++    int item = playlist_add(stream, &ex );
++    if( item != -1 )
++    {
++      writeOnLog( "VlcPlugin::onP2PPlay: item %d", item );
++      if( b_autoplay )
++      {
++        libvlc_exception_t ex2;
++        libvlc_exception_init( &ex2 );
++        // Arno, 2010-02-11: explicitly select item, otherwise VLC may play old one
++        // for some reason, even if the playlist appears to be empty (JS level)
++        playlist_play_item(item, &ex2); 
++        if( libvlc_exception_raised(&ex2) )
++        {
++           writeOnLog( libvlc_exception_get_message(&ex2) );
++           libvlc_exception_clear( &ex2 );
++           return;
++        }
++      }
++    }
++    else if( libvlc_exception_raised(&ex) )
++    {
++      writeOnLog( libvlc_exception_get_message(&ex) );
++      libvlc_exception_clear( &ex );
++      return;
++    }
++    b_stream = true;
++  }
++}
++
++void VlcPlugin::onP2PPause( const char * nothing )
++{
++  writeOnLog( "VlcPlugin::onP2PPause" );
++
++  libvlc_exception_t ex;
++  libvlc_exception_init( &ex );
++
++  playlist_pause( &ex );
++  if( libvlc_exception_raised(&ex) )
++  {
++    writeOnLog( libvlc_exception_get_message(&ex) );
++    libvlc_exception_clear( &ex );
++    return;
++  }
++}
++
++void VlcPlugin::onP2PResume( const char * nothing )
++{
++  writeOnLog( "VlcPlugin::onP2PResume" );
++
++  libvlc_exception_t ex;
++  libvlc_exception_init( &ex );
++
++  playlist_play( &ex );
++  if( libvlc_exception_raised(&ex) )
++  {
++    writeOnLog( libvlc_exception_get_message(&ex) );
++    libvlc_exception_clear( &ex );
++    return;
++  }
++}
++
++void VlcPlugin::onP2PStop( const char * nothing )
++{
++  writeOnLog( "VlcPlugin::onP2PStop" );
++
++  libvlc_exception_t ex;
++  libvlc_exception_init( &ex );
++
++  playlist_stop( &ex );
++  if( libvlc_exception_raised(&ex) )
++  {
++    writeOnLog( libvlc_exception_get_message(&ex) );
++    libvlc_exception_clear( &ex );
++    return;
++  }
++}
++
++void VlcPlugin::onP2PInfo( const char * infostr )
++{
++    // Save copy of status message
++      if (this->p2pStatus != NULL)
++      {
++              free(this->p2pStatus);
++      }
++      this->p2pStatus = (char *)malloc((strlen(infostr)+1)*sizeof(char));
++      strcpy(this->p2pStatus,infostr);
++}
++
++// For JavaScript API
++const char *VlcPlugin::getP2PStatus()
++{
++        if (this->p2pStatus == NULL)
++              return "";
++      else
++              return this->p2pStatus;
++}
++
++int VlcPlugin::input_set_p2ptarget( const char *mrl, libvlc_exception_t *ex )
++{
++    int item = -1;
++
++    if( ! p2pControl->sendTorrent( std::string( mrl ) ) )
++    {
++      writeOnLog( "VlcPlugin::input_set_p2ptarget: Could NOT send torrent" );
++    }
++
++    return item;
++}
++
++
++#endif
+diff -rbNU 3 vlc-1.0.5/projects/mozilla/vlcplugin.h d10-02-02-tstreamplaylist-p10/projects/mozilla/vlcplugin.h
+--- vlc-1.0.5/projects/mozilla/vlcplugin.h     2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/mozilla/vlcplugin.h 2010-02-12 08:57:20.000000000 +0100
+@@ -41,6 +41,8 @@
+ #ifdef XP_WIN
+     /* Windows stuff */
++    // P2P
++    #include "p2pcontrol.h"
+ #endif
+ #ifdef XP_MACOSX
+@@ -79,6 +81,7 @@
+     clicked_Unmute
+ } vlc_toolbar_clicked_t;
++
+ class VlcPlugin
+ {
+ public:
+@@ -195,6 +198,21 @@
+     bool  player_has_vout( libvlc_exception_t * );
++#if XP_WIN
++    // P2P-Next
++    P2PControl *   getP2PControl() { return p2pControl; }
++    void               onP2PPlay  ( const char * );
++    void               onP2PPause ( const char * );
++    void               onP2PResume( const char * );
++    void               onP2PStop  ( const char * );
++    void               onP2PInfo  ( const char * );
++
++    // For JavaScript API
++    const char *       getP2PStatus();
++    int input_set_p2ptarget( const char *, libvlc_exception_t * );
++#endif
++
++
+ private:
+     bool playlist_select(int,libvlc_exception_t *);
+     void set_player_window( libvlc_exception_t * );
+@@ -214,7 +232,11 @@
+     NPWindow  npwindow;
+ #if XP_WIN
+     WNDPROC   pf_wndproc;
+-#endif
++    // P2P
++    P2PControl * p2pControl;
++    char *             p2pStatus;
++
++    #endif
+ #if XP_UNIX
+     unsigned int     i_width, i_height;
+     unsigned int     i_tb_width, i_tb_height;
+@@ -236,12 +258,14 @@
+ /*******************************************************************************
+  * Plugin properties.
+  ******************************************************************************/
+-#define PLUGIN_NAME         "VLC Multimedia Plug-in"
++#define PLUGIN_NAME         "SwarmPlugin Multimedia Player"
+ #define PLUGIN_DESCRIPTION \
+     "Version %s, copyright 1996-2007 The VideoLAN Team" \
+     "<br><a href=\"http://www.videolan.org/\">http://www.videolan.org/</a>"
+ #define PLUGIN_MIMETYPES \
++    "application/x-ns-stream;"
++#if 0
+     /* MPEG-1 and MPEG-2 */ \
+     "audio/mpeg:mp2,mp3,mpga,mpega:MPEG audio;" \
+     "audio/x-mpeg:mp2,mp3,mpga,mpega:MPEG audio;" \
+@@ -294,5 +318,6 @@
+     "audio/x-matroska:mka:Matroska audio;" \
+     /* XSPF */ \
+     "application/xspf+xml:xspf:Playlist xspf;"
++#endif
+ #endif
+diff -rbNU 3 vlc-1.0.5/projects/P2P-Next-LICENSE.txt d10-02-02-tstreamplaylist-p10/projects/P2P-Next-LICENSE.txt
+--- vlc-1.0.5/projects/P2P-Next-LICENSE.txt    1970-01-01 01:00:00.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/projects/P2P-Next-LICENSE.txt        2009-06-17 14:38:57.000000000 +0200
+@@ -0,0 +1,180 @@
++-------------------------------------------------------------------------------
++
++    Next-Share content-delivery library.
++
++    The research leading to this library has received funding from the European
++    Community's Seventh Framework Programme in the P2P-Next project under grant
++    agreement no 216217.
++
++    All library modules are free software, unless stated otherwise; you can 
++    redistribute them and/or modify them under the terms of the GNU Lesser 
++    General Public License as published by the Free Software Foundation; in 
++    particular, version 2.1 of the License.
++
++    This library is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++    Lesser General Public License for more details.
++
++    The following library modules are Copyright (c) 2008-2012, VTT Technical Research Centre of Finland; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Norut AS; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, DACC Systems AB; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Lancaster University; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Jožef Stefan Institute; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, First Oversi Ltd.; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, TECHNISCHE UNIVERSITEIT DELFT; All rights reserved:
++      vlc/projects/activex/p2pcontrol.*
++      vlc/projects/mozilla/p2pcontrol.*
++
++    The following library modules are Copyright (c) 2008-2012, STMicroelectronics S.r.l.; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Kungliga Tekniska Högskolan (The Royal Institute of Technology); All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Markenfilm GmbH & Co. KG; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Radiotelevizija Slovenija Javni Zavvod Ljubljana; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Kendra Foundation; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Universitaet Klagenfurt; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, AG Projects; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, The British Broadcasting Corporation; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Pioneer Digital Design Centre Limited; All rights reserved:
++  
++    The following library modules are Copyright (c) 2008-2012, INSTITUT FUER RUNDFUNKTECHNIK GMBH; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Fabchannel BV; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, University Politehnica Bucharest; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, EBU-UER; All rights reserved:
++
++    The following library modules are Copyright (c) 2008-2012, Università di Roma Sapienza; All rights reserved:
++
++
++    You should have received a copy of the GNU Lesser General Public
++    License along with this library; if not, write to the Free Software
++    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++
++    VTT Technical Research Centre of Finland, 
++    Tekniikankatu 1, 
++    FIN-33710 Tampere, 
++    Finland
++
++    Norut AS,
++    Postboks 6434 
++    Forskningsparken, 
++    9294 Tromsø,
++    Norway
++
++    DACC Systems AB
++    Glimmervägen 4, 
++    SE18734, Täby,
++    Sweden
++
++    Lancaster University, 
++    University House, 
++    Bailrigg, Lancaster, LA1 4YW
++    United Kingdom
++
++    Jožef Stefan Institute, 
++    Jamova cesta 39, 
++    1000 Ljubljana, 
++    Slovenia
++
++    First Oversi Ltd.,
++    Rishon Lezion 1,
++    Petah Tikva 49723, 
++    Israel
++
++    TECHNISCHE UNIVERSITEIT DELFT, 
++    Faculty of Electrical Engineering, Mathematics and Computer Science, 
++    Mekelweg 4, 
++    2628 CD Delft, 
++    The Netherlands
++
++    STMicroelectronics S.r.l., 
++    via C.Olivetti 2, 
++    I-20041 Agrate Brianza,
++    Italy
++
++    Kungliga Tekniska Högskolan (The Royal Institute of Technology), 
++    KTH/ICT/ECS/TSLab
++    Electrum 229
++    164 40 Kista
++    Sweden
++
++    Markenfilm GmbH & Co. KG, 
++    Schulauer Moorweg 25, 
++    22880 Wedel, 
++    Germany
++
++    Radiotelevizija Slovenija Javni Zavvod Ljubljana, 
++    Kolodvorska 2, 
++    SI-1000 Ljubljana,
++    Slovenia
++
++
++    Kendra Foundation, 
++    Meadow Barn, Holne, 
++    Newton Abbot, Devon, TQ13 7SP,
++    United Kingdom
++
++
++    Universitaet Klagenfurt, 
++    Universitaetstrasse 65-67, 
++    9020 Klagenfurt, 
++    Austria
++
++    AG Projects, 
++    Dr. Leijdsstraat 92,
++    2021RK Haarlem, 
++    The Netherlands
++
++    The British Broadcasting Corporation,
++    Broadcasting House, Portland Place, 
++    London, W1A 1AA 
++    United Kingdom
++
++    Pioneer Digital Design Centre Limited, 
++    Pioneer House, Hollybush Hill, Stoke Poges, 
++    Slough, SL2 4QP
++    United Kingdom
++
++    INSTITUT FUER RUNDFUNKTECHNIK GMBH
++    Floriansmuehlstrasse 60,
++    80939 München, 
++    Germany
++
++    Fabchannel BV, 
++    Kleine-Gartmanplantsoen 21, 
++    1017 RP Amsterdam, 
++    The Netherlands
++
++    University Politehnica Bucharest, 
++    313 Splaiul Independentei, 
++    District 6, cod 060042, Bucharest,
++    Romania
++
++    EBU-UER, 
++    L'Ancienne Route 17A, 1218
++    Grand Saconnex - Geneva, 
++    Switzerland
++
++    Università di Roma Sapienza
++    Dipartimento di Informatica e Sistemistica (DIS),
++    Via Ariosto 25, 
++    00185 Rome, 
++    Italy
++
++
++------------------------------------------------------------------------------
+diff -rbNU 3 vlc-1.0.5/src/control/mediacontrol_core.c d10-02-02-tstreamplaylist-p10/src/control/mediacontrol_core.c
+--- vlc-1.0.5/src/control/mediacontrol_core.c  2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/src/control/mediacontrol_core.c      2009-08-26 10:29:01.000000000 +0200
+@@ -427,3 +427,42 @@
+     }
+     return retval;
+ }
++
++
++/* added by Ivaylo */
++
++void
++mediacontrol_set_raw_callbacks( mediacontrol_Instance *self,
++                                raw_read_fnc_ptr read_callback,
++                                raw_seek_fnc_ptr seek_callback,
++                                int i_fsize, // Arno: TODO: shouldn't this be int64 like seek's offset?
++                                int id,
++                                mediacontrol_Exception *exception )
++{
++    libvlc_media_t * p_media;
++    libvlc_exception_t ex;
++    char *psz_cmd = NULL;
++
++    mediacontrol_exception_init( exception );
++    libvlc_exception_init( &ex );
++
++
++    /* the cmd is something like
++       raw:rptr=0x12345678:sptr=0x98765432:fsize=314567789:id=4213
++      malloc 4   +5   +19        +5   +19        +6    +12       +3 +12   +1 = 86 bytes.
++     */
++    psz_cmd = ( char * )malloc( 128 * sizeof( char ) );
++    if( psz_cmd )
++    {
++        snprintf( psz_cmd, 128, "raw:rptr=0x%p:sptr=0x%p:fsize=%d:id=%d",
++            read_callback, seek_callback, i_fsize, id);
++
++        p_media = libvlc_media_new( self->p_instance, (const char *)psz_cmd, &ex );
++        HANDLE_LIBVLC_EXCEPTION_VOID( &ex );
++
++        libvlc_media_player_set_media( self->p_media_player, p_media, &ex );
++        HANDLE_LIBVLC_EXCEPTION_VOID( &ex );
++
++        free( psz_cmd ); // libvlc_media_new does strdup of psz_cmd
++    }
++}
+diff -rbNU 3 vlc-1.0.5/src/libvlc.sym d10-02-02-tstreamplaylist-p10/src/libvlc.sym
+--- vlc-1.0.5/src/libvlc.sym   2009-12-20 18:43:41.000000000 +0100
++++ d10-02-02-tstreamplaylist-p10/src/libvlc.sym       2009-08-26 10:29:01.000000000 +0200
+@@ -269,6 +269,7 @@
+ mediacontrol_set_media_position
+ mediacontrol_set_mrl
+ mediacontrol_set_rate
++mediacontrol_set_raw_callbacks
+ mediacontrol_set_visual
+ mediacontrol_snapshot
+ mediacontrol_sound_get_volume
diff --git a/instrumentation/next-share/xie8transmakedist.bat b/instrumentation/next-share/xie8transmakedist.bat
new file mode 100644 (file)
index 0000000..a210390
--- /dev/null
@@ -0,0 +1,97 @@
+REM @echo off\r
+set LIBRARYNAME=Tribler\r
+\r
+set PYTHONHOME=c:\Python265\r
+REM Arno: Add . to find our core (py 2.5)\r
+set PYTHONPATH=.;%PYTHONHOME%\r
+echo PYTHONPATH SET TO %PYTHONPATH%\r
+\r
+set NSIS="\Program Files\NSIS\makensis.exe"\r
+\r
+REM ----- Check for Python and essential site-packages\r
+\r
+IF NOT EXIST %PYTHONHOME%\python.exe (\r
+  echo .\r
+  echo Could not locate Python in %PYTHONHOME%.\r
+  echo Please modify this script or install python [www.python.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\wx-*-unicode (\r
+  echo .\r
+  echo Could not locate wxPython in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.wxpython.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\py2exe (\r
+  echo .\r
+  echo Could not locate py2exe in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install py2exe [www.py2exe.org]\r
+  exit /b\r
+)\r
+\r
+REM ----- Check for NSIS installer\r
+\r
+IF NOT EXIST %NSIS% (\r
+  echo .\r
+  echo Could not locate the NSIS installer at %NSIS%.\r
+  echo Please modify this script or install NSIS [nsis.sf.net]\r
+  exit /b\r
+)\r
+\r
+REM ----- Clean up\r
+\r
+call clean.bat\r
+\r
+REM ----- Build\r
+\r
+REM Arno: When adding files here, make sure tribler.nsi actually\r
+REM packs them in the installer .EXE\r
+\r
+REM Diego: building the deepest dir we get all of them.\r
+mkdir dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+%PYTHONHOME%\python.exe -O %LIBRARYNAME%\Transport\Build\Win32\setupBGexe.py py2exe\r
+\r
+REM Arno: Move py2exe results to installdir\r
+move dist\*.* dist\installdir\bgprocess\r
+copy %LIBRARYNAME%\Images\SwarmPlayerIcon.ico dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+xcopy vlc4ie8player\* dist\installdir /E /I\r
+REM Diego: replace vlc *.txt with P2P-Next License.txt\r
+del dist\installdir\*.txt\r
+type %LIBRARYNAME%\ns-LICENSE.txt %LIBRARYNAME%\binary-LICENSE-postfix.txt > %LIBRARYNAME%\binary-LICENSE.txt\r
+copy %LIBRARYNAME%\binary-LICENSE.txt dist\installdir\r
+\r
+REM Diego: sign axvlc.dll\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlayer for Internet Explorer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "dist\installdir\activex\axvlc.dll"\r
+\r
+copy %LIBRARYNAME%\Transport\Build\Win32\IE8\heading.bmp dist\installdir\r
+REM TODO Diego: manifest?\r
+copy %LIBRARYNAME%\Transport\Build\Win32\IE8\swarmplayer_IE_only.nsi dist\installdir\r
+REM copy %LIBRARYNAME%\Transport\Build\Win32\IE8\swarmplayer.exe.manifest dist\installdir\r
+\r
+copy %PYTHONHOME%\Lib\site-packages\wx-2.8-msw-unicode\wx\msvcp71.dll dist\installdir\bgprocess\r
+\r
+copy reset*.bat dist\installdir\r
+\r
+cd dist\installdir\r
+\r
+REM Arno: Win7 gives popup if SwarmEngine is not signed\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlayer for Internet Explorer and Firefox" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" bgprocess\SwarmEngine.exe\r
+\r
+\r
+:makeinstaller\r
+%NSIS% swarmplayer_IE_only.nsi\r
+\r
+rename SwarmPlayer_*.exe SwarmPlayer_IE_*.exe\r
+move SwarmPlayer_IE*.exe ..\r
+cd ..\r
+REM Diego : sign SwarmPlayer_*.exe\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlayer for Internet Explorer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlayer_IE*.exe"\r
+REM Arno: build .cab file. \r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\CabArc.Exe" -s 6144 n SwarmPlayer_IE.cab ..\%LIBRARYNAME%\Transport\Win32\IE8\SwarmPlayer_IE.inf\r
+REM Arno : sign SwarmPlayer*.cab\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmPlayer for Internet Explorer" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" "SwarmPlayer_IE*.cab"\r
+\r
+cd ..\r
diff --git a/instrumentation/next-share/xpitransmakedeb.sh b/instrumentation/next-share/xpitransmakedeb.sh
new file mode 100644 (file)
index 0000000..bacb6dc
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+rm -rf debian
+cp -r */Transport/Build/Ubuntu debian
+cd debian
+dch -i --check-dirname-level 0
+debuild --check-dirname-level 0
+
diff --git a/instrumentation/next-share/xpitransmakedist.bat b/instrumentation/next-share/xpitransmakedist.bat
new file mode 100644 (file)
index 0000000..df69974
--- /dev/null
@@ -0,0 +1,112 @@
+REM @echo off\r
+set LIBRARYNAME=BaseLib\r
+\r
+set PYTHONHOME=\Python254\r
+REM Arno: Add . to find our core (py 2.5)\r
+set PYTHONPATH=.;%PYTHONHOME%\r
+echo PYTHONPATH SET TO %PYTHONPATH%\r
+\r
+set XULRUNNER=..\xulrunner-sdk\r
+set ZIP7CMD="\Program Files\7-Zip\7z.exe"\r
+\r
+REM ----- Check for Python and essential site-packages\r
+\r
+IF NOT EXIST %PYTHONHOME%\python.exe (\r
+  echo .\r
+  echo Could not locate Python in %PYTHONHOME%.\r
+  echo Please modify this script or install python [www.python.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\wx-*-unicode (\r
+  echo .\r
+  echo Could not locate wxPython in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install wxPython [www.wxpython.org]\r
+  exit /b\r
+)\r
+\r
+IF NOT EXIST %PYTHONHOME%\Lib\site-packages\py2exe (\r
+  echo .\r
+  echo Could not locate py2exe in %PYTHONHOME%\Lib\site-packages.\r
+  echo Please modify this script or install py2exe [www.py2exe.org]\r
+  exit /b\r
+)\r
+\r
+REM ----- Check for XULRUNNER\r
+\r
+IF NOT EXIST %XULRUNNER% (\r
+  echo .\r
+  echo Could not locate the XULRUNNER SDK at %XULRUNNER%.\r
+  echo Please modify this script or install from https://developer.mozilla.org/en/XULRunner\r
+  exit /b\r
+)\r
+\r
+REM ----- Check for ZIP7CMD\r
+\r
+IF NOT EXIST %ZIP7CMD% (\r
+  echo .\r
+  echo Could not locate the 7-Zip at %ZIP7CMD%.\r
+  echo Please modify this script or install from www.7-zip.org\r
+  exit /b\r
+)\r
+\r
+\r
+REM ----- Clean up\r
+\r
+call clean.bat\r
+\r
+REM ----- Build\r
+\r
+REM Arno: When adding files here, make sure tribler.nsi actually\r
+REM packs them in the installer .EXE\r
+\r
+REM Diego: building the deepest dir we get all of them.\r
+mkdir dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+%PYTHONHOME%\python.exe -O %LIBRARYNAME%\Transport\Build\Win32\setupBGexe.py py2exe\r
+\r
+REM Arno: Move py2exe results to installdir\r
+move dist\*.* dist\installdir\bgprocess\r
+copy %LIBRARYNAME%\Images\SwarmPlayerIcon.ico dist\installdir\bgprocess\%LIBRARYNAME%\Images\r
+\r
+REM Riccardo:  move the files needed for the WebUI\r
+xcopy %LIBRARYNAME%\WebUI dist\installdir\bgprocess\%LIBRARYNAME%\WebUI /S /I\r
+del dist\installdir\bgprocess\%LIBRARYNAME%\WebUI\*.py\r
+\r
+REM Diego: replace vlc *.txt with P2P-Next License.txt\r
+del dist\installdir\*.txt\r
+type %LIBRARYNAME%\ns-LICENSE.txt %LIBRARYNAME%\binary-LICENSE-postfix.txt > %LIBRARYNAME%\binary-LICENSE.txt\r
+copy %LIBRARYNAME%\binary-LICENSE.txt dist\installdir\r
+\r
+copy %PYTHONHOME%\Lib\site-packages\wx-2.8-msw-unicode\wx\msvcp71.dll dist\installdir\bgprocess\r
+\r
+copy reset.bat dist\installdir\r
+\r
+REM Arno: Move swift binary to installdir\r
+copy swift.exe dist\installdir\bgprocess\r
+\r
+\r
+REM ----- Build XPI of SwarmTransport\r
+mkdir dist\installdir\components\r
+copy %LIBRARYNAME%\Transport\icon.png dist\installdir\r
+copy %LIBRARYNAME%\Transport\install.rdf dist\installdir\r
+copy %LIBRARYNAME%\Transport\chrome.manifest dist\installdir\r
+xcopy %LIBRARYNAME%\Transport\components dist\installdir\components /S /I\r
+xcopy %LIBRARYNAME%\Transport\chrome dist\installdir\chrome /S /I\r
+xcopy %LIBRARYNAME%\Transport\skin dist\installdir\skin /S /I\r
+\r
+REM ----- Turn .idl into .xpt\r
+%XULRUNNER%\bin\xpidl -m typelib -w -v -I %XULRUNNER%\idl -e dist\installdir\components\tribeIChannel.xpt %LIBRARYNAME%\Transport\tribeIChannel.idl\r
+%XULRUNNER%\bin\xpidl -m typelib -w -v -I %XULRUNNER%\idl -e dist\installdir\components\tribeISwarmTransport.xpt %LIBRARYNAME%\Transport\tribeISwarmTransport.idl\r
+\r
+cd dist\installdir\r
+\r
+REM Arno: Win7 gives popup if SwarmEngine is not signed\r
+"C:\Program Files\Microsoft Platform SDK for Windows Server 2003 R2\Bin\signtool.exe" sign /f c:\build\certs\swarmplayerprivatekey.pfx /p "" /d "SwarmEngine" /du "http://www.pds.ewi.tudelft.nl/code.html" /t "http://timestamp.verisign.com/scripts/timestamp.dll" bgprocess\SwarmEngine.exe\r
+\r
+REM ----- Turn installdir into .xpi\r
+%ZIP7CMD% a -tzip "SwarmPlayer.xpi" * -r -mx=9 \r
+move SwarmPlayer.xpi ..\r
+cd ..\..\r
\r
+\r
diff --git a/instrumentation/next-share/xpitransmakedist.sh b/instrumentation/next-share/xpitransmakedist.sh
new file mode 100644 (file)
index 0000000..e41a00d
--- /dev/null
@@ -0,0 +1,82 @@
+#!/bin/sh -x
+#
+# Script to build SwarmTransport on Ubuntu Linux
+#
+
+export LIBRARYNAME=BaseLib
+export XUL_VERSION=1.9.1.9
+
+xul_dir="/usr/lib/xulrunner"
+xul_dir_version="/usr/lib/xulrunner-$XUL_VERSION"
+
+if [ -f $xul_dir/xpidl ]; then
+    echo "Found XULRUNNER directory $xul_dir"
+    export XULRUNNER_IDL=/usr/share/idl/xulrunner
+    export XULRUNNER_XPIDL=/usr/lib/xulrunner/xpidl
+
+elif [ -f $xul_dir_version/xpidl ]; then
+    echo "Found XULRUNNER directory $xul_dir_version"
+    export XULRUNNER_IDL=/usr/share/idl/xulrunner-$XUL_VERSION/stable
+    export XULRUNNER_XPIDL=/usr/lib/xulrunner-$XUL_VERSION/xpidl
+else
+       echo "|==============================================================================|"
+       echo "| Failed to locate XULRUNNER directory, please modify the XUL_VERSION variable |"
+       echo "|==============================================================================|"
+       exit
+fi 
+
+# ----- Clean up
+
+/bin/rm -rf dist
+
+# ----- Build
+
+# Diego: building the deepest dir we get all of them.
+mkdir -p dist/installdir/bgprocess/$LIBRARYNAME/Images
+
+cp -r $LIBRARYNAME dist/installdir/bgprocess
+
+rm dist/installdir/bgprocess/$LIBRARYNAME/Category/porncat.txt
+rm dist/installdir/bgprocess/$LIBRARYNAME/Category/filter_terms.filter
+rm dist/installdir/bgprocess/$LIBRARYNAME/*.txt
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Main
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Subscriptions
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Test/
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Web2/
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Images/*
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Video/Images
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Tools
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Plugin/*.html
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/*/Build
+rm -rf `find dist/installdir/bgprocess/$LIBRARYNAME -name .svn`
+rm -rf `find dist/installdir/bgprocess/$LIBRARYNAME -name \*.pyc`
+
+cp $LIBRARYNAME/Images/SwarmPlayerIcon.ico dist/installdir/bgprocess/$LIBRARYNAME/Images
+cp $LIBRARYNAME/ns-LICENSE.txt dist/installdir
+cp $LIBRARYNAME/ns-LICENSE.txt dist/installdir/LICENSE.txt
+
+# ----- Build XPI of SwarmTransport
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/icon.png dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/install.rdf dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/chrome.manifest dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/components dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/skin dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/chrome dist/installdir
+mv dist/installdir/bgprocess/$LIBRARYNAME/Transport/bgprocess/* dist/installdir/bgprocess
+rm -rf dist/installdir/bgprocess/$LIBRARYNAME/Transport/bgprocess
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.html
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.tstream
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.sh
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.idl
+rm dist/installdir/bgprocess/$LIBRARYNAME/Transport/*.txt
+
+# ----- Turn .idl into .xpt
+$XULRUNNER_XPIDL -m typelib -w -v -I $XULRUNNER_IDL -e dist/installdir/components/tribeIChannel.xpt $LIBRARYNAME/Transport/tribeIChannel.idl
+$XULRUNNER_XPIDL -m typelib -w -v -I $XULRUNNER_IDL -e dist/installdir/components/tribeISwarmTransport.xpt $LIBRARYNAME/Transport/tribeISwarmTransport.idl
+
+cd dist/installdir
+# ----- Turn installdir into .xpi
+zip -9 -r SwarmPlayer.xpi * 
+mv SwarmPlayer.xpi ..
+cd ../..
diff --git a/instrumentation/next-share/xpitransmakedistmac.sh b/instrumentation/next-share/xpitransmakedistmac.sh
new file mode 100644 (file)
index 0000000..a0edb26
--- /dev/null
@@ -0,0 +1,92 @@
+#!/bin/sh -x
+#
+# Written by Riccardo Petrocco
+# see LICENSE.txt for license information
+#
+# Script to build SwarmTransport on Mac
+#
+# Build notes:
+# - using libraries from macbinaries
+# - simplejson needs to be installed for py2app to work
+# - The latest version of XULrunner, working with the latest
+#   Firefox, has a bug with libidl:
+#   - install libidl with macports: /opt/local/bin/port
+#   - link it: ln -s /opt/local/lib/libintl.8.dylib /opt/local/lib/libintl.3.dylib
+#
+
+APPNAME=SwarmPlayer
+PYTHON_VER=2.5
+PWD=$(pwd)
+ARCH=$(arch)
+
+PYTHON=python${PYTHON_VER}
+
+
+export LIBRARYNAME=BaseLib
+
+xul_dir="../xulrunner-sdk/sdk/bin"
+
+if [ -f $xul_dir/xpidl ]; then
+    echo "Found XULRUNNER directory $xul_dir"
+    export XULRUNNER_IDL=../xulrunner-sdk/idl
+    export XULRUNNER_XPIDL=../xulrunner-sdk/sdk/bin/xpidl
+
+else
+       echo "|==============================================================================|"
+       echo "| Failed to locate XULRUNNER directory, please modify the xul_dir variable |"
+       echo "|==============================================================================|"
+       exit
+fi 
+
+# ----- TODO check if we have the macbinaries
+
+macbinaries=${PWD}/macbinaries
+echo $macbinaries
+
+if [ ! -d $macbinaries ]; then
+    echo "No macbinaries"
+    exit
+
+else
+    echo "Found macbinaries directory $macbinaries" 
+fi
+
+# ----- Set python paths TODO dynamic checkout
+export PYTHONPATH=$macbinaries:${PWD}:$macbinaries/lib/python2.5/site-packages/
+# apparently not needed.. TODO
+export DYLD_LIBRARY_PATH=$macbinaries
+
+
+
+# ----- Clean up
+/bin/rm -rf dist build
+
+# ----- Build
+${PYTHON} -OO - < ${LIBRARYNAME}/Transport/Build/Mac/setupBGapp.py py2app 
+
+mkdir -p dist/installdir/bgprocess
+mv dist/SwarmPlayer.app dist/installdir/bgprocess/.
+chmod 777 dist/installdir/bgprocess/SwarmPlayer.app/Contents/MacOS/SwarmPlayer
+
+# ----- Build XPI of SwarmTransport
+cp $LIBRARYNAME/Transport/icon.png dist/installdir
+cp $LIBRARYNAME/Transport/install.rdf dist/installdir
+cp $LIBRARYNAME/Transport/chrome.manifest dist/installdir
+cp -rf $LIBRARYNAME/Transport/components dist/installdir
+cp -rf $LIBRARYNAME/Transport/skin dist/installdir
+cp -rf $LIBRARYNAME/Transport/chrome dist/installdir
+rm -rf `find dist/installdir -name .svn`
+
+
+# ----- Turn .idl into .xpt
+export DYLD_LIBRARY_PATH=
+
+$XULRUNNER_XPIDL -m typelib -w -v -I $XULRUNNER_IDL -e dist/installdir/components/tribeIChannel.xpt $LIBRARYNAME/Transport/tribeIChannel.idl
+$XULRUNNER_XPIDL -m typelib -w -v -I $XULRUNNER_IDL -e dist/installdir/components/tribeISwarmTransport.xpt $LIBRARYNAME/Transport/tribeISwarmTransport.idl
+
+cd dist/installdir
+# ----- Turn installdir into .xpi
+zip -9 -r SwarmPlayer.xpi * 
+mv SwarmPlayer.xpi ..
+cd ../..
+